seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
16723311170 | import json
import threading
import cv2
import PySimpleGUI as sg
import trt_pose.coco
import trt_pose.models
from flask import Flask
from flask_restful import Api, Resource
from trt_pose.parse_objects import ParseObjects
from camera import Camera
from exercise import LeftBicepCurl, RightBicepCurl, ShoulderPress, Squat
from helper import HEIGHT, WIDTH, preprocess
from model import Model
executing = False # global flag for session start
exercise = None # global exercise object required for model inference and drawing
stopExercise = False # global flag for stopping exercise after loop ends
drawn = None # global for our image
class LeftCurlAPI(Resource):
    # REST endpoint: select the left-bicep-curl exercise and start a session.
    def get(self):
        """Activate a LeftBicepCurl session via the module-level flags."""
        global exercise, executing
        exercise = LeftBicepCurl()
        executing = True
        # NOTE(review): f"{id}" formats Python's *builtin* id function, so the
        # payload is "<built-in function id>" -- confirm the intended value.
        return {"leftCurl": f"{id}"}
class RightCurlAPI(Resource):
    # REST endpoint: select the right-bicep-curl exercise and start a session.
    def get(self):
        """Activate a RightBicepCurl session via the module-level flags."""
        global exercise, executing
        exercise = RightBicepCurl()
        executing = True
        # NOTE(review): f"{id}" formats the builtin id function -- confirm intent.
        return {"rightCurl": f"{id}"}
class ShoulderPressAPI(Resource):
    # REST endpoint: select the shoulder-press exercise and start a session.
    def get(self):
        """Activate a ShoulderPress session via the module-level flags."""
        global exercise, executing
        exercise = ShoulderPress()
        executing = True
        # NOTE(review): f"{id}" formats the builtin id function -- confirm intent.
        return {"shoulderPress": f"{id}"}
class SquatAPI(Resource):
    # REST endpoint: select the squat exercise and start a session.
    def get(self):
        """Activate a Squat session via the module-level flags."""
        global exercise, executing
        exercise = Squat()
        executing = True
        # NOTE(review): f"{id}" formats the builtin id function -- confirm intent.
        return {"squat": f"{id}"}
class RepCountAPI(Resource):
    # REST endpoint: report the current exercise's repetition count.
    def get(self):
        """Return the live rep count (as a string), or "0" if no exercise is active."""
        global exercise
        reps = exercise.rep_count if exercise else 0
        return {"repCount": f"{reps}"}
class EndExerciseAPI(Resource):
    # REST endpoint: ask the main loop to stop the current exercise after the
    # current frame-processing pass finishes.
    def get(self):
        global stopExercise
        stopExercise = True
        # NOTE(review): f"{id}" formats the builtin id function -- confirm intent.
        return {"endExercise": f"{id}"}
class StartSessionAPI(Resource):
    # REST endpoint: no-op acknowledgement; does not change any state.
    def get(self):
        # NOTE(review): f"{id}" formats the builtin id function -- confirm intent.
        return {"startSession": f"{id}"}
class DebugAPI(Resource):
    # REST endpoint: liveness/debug probe; does not change any state.
    def get(self):
        # NOTE(review): f"{id}" formats the builtin id function -- confirm intent.
        return {"debug": f"{id}"}
# ------ Begin GUI layout ------
# Left column of the GUI: the live annotated video frame.
video_viewer_column = [
    # image will be flab2ab image
    [sg.Image(filename="", key="image")],
]
# Right column of the GUI: current rep count plus an exercise-history list.
repcount_list_column = [
    [
        # current rep count
        sg.Text("Rep Count"),
        # change folder to pull actual rep count
        sg.In(size=(25, 1), enable_events=True, key="repCount"),
    ],
    [
        # previous exercise list
        sg.Listbox(values=[], enable_events=True, size=(40, 20), key="exerciseList")
    ],
]
# finally builds layout of gui (two columns side by side)
layout = [
    [
        sg.Column(video_viewer_column),
        sg.VSeperator(),
        sg.Column(repcount_list_column),
    ]
]
# ------ End GUI layout ------
def main():
    """Run the pose-estimation workout tracker.

    Sets up the trt_pose model, the camera, a Flask REST API (on a daemon-less
    background thread), and a PySimpleGUI window, then loops over camera
    frames: runs inference, lets the active exercise annotate the frame,
    updates the GUI image, and writes the annotated frame to the video file.
    """
    global exercise, stopExercise, drawn
    print("Beginning script")
    # Load the annotation file and create a topology tensor
    with open("human_pose.json", "r") as f:
        human_pose = json.load(f)
    # Create a topology tensor (intermediate DS that describes part linkages)
    topology = trt_pose.coco.coco_category_to_topology(human_pose)
    # Construct and load the model
    model = Model(pose_annotations=human_pose)
    model.load_model("resnet")
    model.get_optimized()
    model.log_fps()
    print("Set up model")
    # Set up the camera and start recording to /tmp/output.mp4
    camera = Camera(width=640, height=480)
    camera.capture_video("mp4v", "/tmp/output.mp4")
    assert camera.cap is not None, "Camera Open Error"
    print("Set up camera")
    # Set up callable class used to parse the objects from the neural network
    parse_objects = ParseObjects(topology)
    app = Flask(__name__)
    api = Api(app)
    # add endpoints (each endpoint mutates the module-level flags above)
    api.add_resource(LeftCurlAPI, "/leftCurl")
    api.add_resource(RightCurlAPI, "/rightCurl")
    api.add_resource(ShoulderPressAPI, "/shoulderPress")
    api.add_resource(SquatAPI, "/squat")
    api.add_resource(RepCountAPI, "/repCount")
    api.add_resource(EndExerciseAPI, "/endExercise")
    api.add_resource(StartSessionAPI, "/startSession")
    api.add_resource(DebugAPI, "/debug")
    # Serve the API from a background thread so the frame loop below can run.
    t = threading.Thread(target=app.run, kwargs={"host": "0.0.0.0"})
    t.start()
    print("After networking")
    # NOTE(review): busy-wait spin until an exercise endpoint sets `executing`;
    # burns a full CPU core -- consider a threading.Event instead.
    while not executing:
        pass
    window = sg.Window("Flab2Ab", location=(800, 400))
    window.Layout(layout).Finalize()
    print("Executing...")
    while True:
        # Inner loop runs while a session is active (exercise is not None).
        while camera.cap.isOpened() and exercise:
            succeeded, image = camera.cap.read()
            if not succeeded:
                print("Camera read Error")
                break
            # Model expects a WIDTHxHEIGHT input; preprocess() normalizes it.
            resized_img = cv2.resize(
                image, dsize=(WIDTH, HEIGHT), interpolation=cv2.INTER_AREA
            )
            preprocessed = preprocess(resized_img)
            counts, objects, peaks = model.execute_neural_net(
                data=preprocessed, parser=parse_objects
            )
            # NOTE(review): the thread object `t` is passed to draw() -- confirm
            # this is intentional and not a leftover positional argument.
            drawn = exercise.draw(image, counts, objects, peaks, t)
            # NOTE(review): the GUI is fed the raw `image`, while the video file
            # gets the annotated `drawn` frame -- confirm which is intended.
            encoded_img = cv2.imencode(".png", image)[1].tobytes()
            window.FindElement("image").update(data=encoded_img)
            if camera.out:
                camera.out.write(drawn)
            cv2.waitKey(1)
        if stopExercise:
            exercise = None
            stopExercise = False
            print("exercise ended successfully")
    # Clean up resources
    # NOTE(review): unreachable -- the `while True` above never breaks.
    print("Cleaning up")
    cv2.destroyAllWindows()
    camera.out.release()
    camera.cap.release()
if __name__ == "__main__":
main()
| CashMemory/SeniorProject | tasks/human_pose/get_video.py | get_video.py | py | 5,493 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "flask_restful.Resource",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "exercise.LeftBicepCurl",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flask_restful.Resource",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "... |
41202660931 | import enum
import logging
import os
import subprocess
import sys
import tempfile
from pathlib import Path
from typing import Iterable, List, Tuple, Union
from pprint import pprint
import commentjson
import config
import resume.sections as sections
def create_resume_(data: dict, output_filename: str):
    """Render the parsed resume `data` to LaTeX and hand it to the compiler.

    Builds the meta/profile preamble and each content section in the order
    requested by data["meta"]["order"] (falling back to a default order),
    then calls compile_tex_file() to produce the PDF.
    """
    # Internal enum of renderable section kinds; `none` marks unknown names.
    class SECTIONS(enum.Enum):
        none = enum.auto()
        achv = enum.auto()
        skills = enum.auto()
        experience = enum.auto()
        education = enum.auto()
        project = enum.auto()
    # Maps the JSON section names to enum members.
    section_mapping = {
        "experience": SECTIONS.experience,
        "education": SECTIONS.education,
        "technical_skill": SECTIONS.skills,
        "project": SECTIONS.project,
        "achievement": SECTIONS.achv,
    }
    def get_order(data: dict):
        # Resolve the section order: user-specified meta.order wins, else default.
        default_order = ["experience", "education", "technical_skill", "project", "achievement"]
        if data.get("meta"):
            if data["meta"].get("order"):
                order = data["meta"].get("order")
                # Unknown names map to SECTIONS.none.
                return [section_mapping.get(item, SECTIONS.none) for item in order]
        return [section_mapping.get(item, SECTIONS.none) for item in default_order]
    def create_metadata() -> str:
        # Build the document preamble: basics metadata + profile links.
        nonlocal data
        meta_text = ""
        metadata = sections.MetaData(data["basics"])
        metadata.set_colors(data.get("meta"))
        meta_text += metadata.to_latex()
        profile_text = "\n"
        profiles = sections.ProfileLinks(data["basics"]["profiles"])
        profile_text += profiles.to_latex()
        return meta_text + profile_text
    def get_section_text(section_type: SECTIONS) -> str:
        """get text for all sections except meta and profile"""
        def get_section_name():
            # Human-readable LaTeX \section{} title for the section kind.
            # NOTE(review): SECTIONS.none is not in this mapping, so an unknown
            # name in meta.order raises KeyError here -- confirm this is desired.
            nonlocal section_type
            mapping = {
                SECTIONS.achv: "Achievements",
                SECTIONS.skills: "Technical Skills",
                SECTIONS.experience: "Experience",
                SECTIONS.education: "Education",
                SECTIONS.project: "Projects",
            }
            return mapping[section_type]
        nonlocal data
        section_begin = "\\section{" + get_section_name() + "}\n"
        section_text = ""
        if section_type is SECTIONS.achv:
            section_text += sections.Achievements(data["awards"]).to_latex()
        if section_type is SECTIONS.skills:
            section_text += sections.TechnicalSkills(data["skills"]).to_latex()
        if section_type is SECTIONS.experience:
            section_text += sections.Experience(data["work"]).to_latex()
        if section_type is SECTIONS.education:
            section_text += sections.Education(data["education"]).to_latex()
        if section_type is SECTIONS.project:
            section_text += sections.Projects(data["projects"]).to_latex()
        return section_begin + section_text + "\n"
    order = get_order(data)
    meta_text = create_metadata()
    content_text = ""
    for section_type in order:
        content_text += get_section_text(section_type)
    logging.info(f"generated text, moving files to compilation")
    compile_tex_file(content_text, meta_text, output_filename)
def compile_tex_file(content_text: str, meta_text: str, output_filename: str) -> None:
    """compile tex file with main.tex string passed into input with temporary directory

    Writes content.tex/meta.tex into a temp dir, copies the template files in,
    runs latexmk (xelatex), and copies the resulting PDF (and optionally the
    generated .tex and .log files) into ./out/.
    """
    template_dir = config.TEMPLATE_DIR
    logging.info(f"using template {template_dir.name}")
    with tempfile.TemporaryDirectory() as td:
        temp_path = Path(td)
        main_cwd = Path(os.getcwd())
        # NOTE(review): outdir_nm is never used -- dead local.
        outdir_nm = output_filename
        with open(temp_path.joinpath("content.tex"), "w") as content_file:
            content_file.write(content_text)
        with open(temp_path.joinpath("meta.tex"), "w") as meta_file:
            meta_file.write(meta_text)
        def run_process(cmd: str, timeout=config.TIMEOUT):
            # Run a (multi-line) bash snippet; raises CalledProcessError on a
            # non-zero exit and TimeoutExpired on timeout.
            process = subprocess.run(
                cmd,
                shell=True,
                executable="/bin/bash",
                capture_output=True,
                text=True,
                check=True,
                timeout=timeout,
            )
            return process
        # NOTE(review): "latemk" is a typo for "latexmk" (kept -- local name only).
        latemk_stdout = None
        error_raised = False
        try:
            # Stage the template + assets into the temp build directory.
            move_process = run_process(
                f"""
                cp "{template_dir}/macros.tex" "{temp_path}/macros.tex"
                cp "{template_dir}/resume.tex" "{temp_path}/resume.tex"
                cp -R "./assets" "{temp_path}"
                mkdir -p out
                """
            )
            logging.info("moved files into temp directory")
            if config.KEEP_GENERATED_TEX:
                # Also keep a copy of the generated .tex sources under out/resume.
                out_resume_path = f"{main_cwd}/out/resume"
                move_created_tex_files = run_process(
                    f"""
                    mkdir -p out/resume
                    cp "{template_dir}/macros.tex" "{out_resume_path}/macros.tex"
                    cp "{template_dir}/resume.tex" "{out_resume_path}/resume.tex"
                    cp "{temp_path}/content.tex" "{out_resume_path}/content.tex"
                    cp "{temp_path}/meta.tex" "{out_resume_path}/meta.tex"
                    """
                )
        except subprocess.TimeoutExpired as e:
            logging.error(f"Timeout during initial move\n" + str(e))
        except subprocess.CalledProcessError as e:
            logging.error(f"ProcessError for initial move:\n" + str(e))
        else:
            # no exception generated in move block, can move to compilation phase
            try:
                latexmk_process = run_process(
                    f"""
                    cd "{temp_path}"
                    latexmk -xelatex resume.tex
                    """,
                    timeout=config.LATEXMK_TIMEOUT,
                )
            except subprocess.TimeoutExpired as e:
                logging.error("Timeout during latexmk run:\n" + str(e))
                error_raised = True
                # NOTE(review): run_process uses text=True, so e.output is str;
                # .decode("utf-8") will raise AttributeError here -- confirm.
                latemk_stdout = str(e.output.decode("utf-8"))
            except subprocess.CalledProcessError as e:
                logging.error("ProcessError for latexmk:\n" + str(e))
                error_raised = True
            else:  # get pdf file, as no exceptions raised
                get_pdf_file_proc = run_process(
                    f"""
                    cd "{temp_path}"
                    cp -R "resume.pdf" "{main_cwd}/out/{output_filename}.pdf"
                    """
                )
                logging.info(f"build and saved {output_filename}.pdf")
            finally:  # get latexmk log, in any case, evenif exceptions raised or not
                if config.KEEP_LOG_FILES:
                    try:
                        get_latex_log_process = run_process(
                            f"""
                            cd "{temp_path}"
                            cp -R "resume.log" "{main_cwd}/out/{output_filename}.log"
                            """
                        )
                        # NOTE(review): file handle opened without close/with.
                        log_text = open(f"{main_cwd}/out/{output_filename}.log", "r").read()
                        if error_raised:
                            pprint("LaTeX Log\n" + log_text)
                    except subprocess.CalledProcessError as e:
                        logging.error(f"error during log_extraction process")
                    if latemk_stdout:
                        with open(f"{main_cwd}/out/latex_stdout.txt", "w") as stdout_file:
                            stdout_file.write(latemk_stdout)
def main():
    """CLI entry point: build a resume PDF from a JSONC data file.

    Usage:
        create.py <resume.jsonc> [output_name]

    With one argument the output name is derived from the input file's stem;
    with two, the second argument is used verbatim. Any other argument count
    exits with a usage message.
    """
    logging.basicConfig(
        level=config.LOG_LEVEL,
        format="%(levelname)s - %(asctime)s - %(message)s",
        datefmt="%d-%b-%y %H:%M:%S",
    )

    def parse_json(path: Path = "./resume.jsonc") -> dict:
        # commentjson tolerates // and /* */ comments in the resume file.
        with open(path, "r") as f:
            data = commentjson.load(f)
        return data

    args = sys.argv
    n_args = len(args) - 1
    if n_args == 2:
        data = parse_json(Path(args[1]))
        output_filename = args[2]
    elif n_args == 1:
        data = parse_json(Path(args[1]))
        # BUGFIX: was args[1].split("/")[1].split(".")[0], which raised
        # IndexError for a bare filename ("resume.jsonc") and picked the wrong
        # component for nested paths ("a/b/c.jsonc" -> "b"). Path.stem is the
        # filename without directory or final extension.
        output_filename = Path(args[1]).stem
    else:
        # BUGFIX: previously fell through with `data`/`output_filename` unbound
        # and crashed with NameError; fail fast with a usage message instead.
        print("usage: create.py <resume.jsonc> [output_name]", file=sys.stderr)
        sys.exit(1)
    create_resume_(data, output_filename)
if __name__ == "__main__":
main()
| ankan-ekansh/JSON-Resume-LaTeX | script/create.py | create.py | py | 8,191 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "enum.auto",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "enum.auto",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "enum.auto",
"line_number": 20,... |
30293317516 | from gc import callbacks
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
import scipy
import keras
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import BatchNormalization
from keras.layers import Dropout
from keras.callbacks import TensorBoard
import datetime
from gc import callbacks
#NAME = "Teeth-Cavity-Detection-{}".format(int(time.time()))
#tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))
# Sequential CNN for binary classification (cavity vs. healthy) on
# 256x256 RGB images; four conv blocks, each halving spatial resolution.
classifier = Sequential()
#CNN_Layer1
classifier.add(Convolution2D(32,(3,3), strides = 1 , padding = 'same', input_shape= (256,256,3), activation ='relu'))
classifier.add(BatchNormalization())
classifier.add(MaxPooling2D((2,2) , strides = 2 , padding = 'same'))
#CNN_Layer2
classifier.add(Convolution2D(64,(3,3), strides = 1 , padding = 'same', activation ='relu'))
classifier.add(Dropout(0.1))
classifier.add(BatchNormalization())
classifier.add(MaxPooling2D((2,2) , strides = 2 , padding = 'same'))
#CNN_layer3
classifier.add(Convolution2D(64 , (3,3) , strides = 1 , padding = 'same' , activation = 'relu'))
classifier.add(BatchNormalization())
classifier.add(MaxPooling2D((2,2) , strides = 2 , padding = 'same'))
#CNN_Layer4
classifier.add(Convolution2D(128 , (3,3) , strides = 1 , padding = 'same' , activation = 'relu'))
classifier.add(Dropout(0.1))
classifier.add(BatchNormalization())
classifier.add(MaxPooling2D((2,2) , strides = 2 , padding = 'same'))
#CNN_Layer5 (disabled)
#classifier.add(Convolution2D(256 , (3,3) , strides = 1 , padding = 'same' , activation = 'relu'))
#classifier.add(Dropout(0.1))
#classifier.add(BatchNormalization())
#classifier.add(MaxPooling2D((2,2) , strides = 2 , padding = 'same'))
#Flattening
classifier.add(Flatten())
#NN_Layer1
classifier.add(Dense(activation = 'relu',units = 128))
classifier.add(Dropout(0.1))
#NN_Layer2: single sigmoid unit -> probability for the positive class
classifier.add(Dense(activation = 'sigmoid',units = 1))
#Compile with binary cross-entropy, matching the sigmoid output
classifier.compile(optimizer='adam',loss= 'binary_crossentropy', metrics = ['accuracy'])
classifier.summary()
from keras.preprocessing.image import ImageDataGenerator
# Training generator: rescale to [0,1] plus geometric augmentation.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    featurewise_center=False,samplewise_center=False, featurewise_std_normalization=False,
    samplewise_std_normalization=False, zca_whitening=False, rotation_range = 30,
    zoom_range = 0.2,width_shift_range=0.1, height_shift_range=0.1,
    horizontal_flip = True,vertical_flip=False)
# NOTE(review): the test generator also applies random augmentation, which is
# unusual for evaluation -- typically only rescale=1./255 is used here.
test_datagen = ImageDataGenerator(rescale=1./255, rotation_range = 30,
    zoom_range = 0.2,width_shift_range=0.1, height_shift_range=0.1,
    horizontal_flip = True,vertical_flip=False)
# Directory names are taken as-is from disk ("Trianing" spelling included).
training_set = train_datagen.flow_from_directory(
    '../environment/teeth_dataset/Trianing',
    target_size=(256,256),
    batch_size=16,
    class_mode='binary')
test_set = test_datagen.flow_from_directory(
    '../environment/teeth_dataset/Test',
    target_size=(256,256),
    batch_size=16,
    class_mode='binary')
# Train for 25 epochs, validating on the (augmented) test set each epoch.
history = classifier.fit(x = training_set, validation_data = test_set, epochs = 25)
# Visualize the 25-epoch training history: accuracy (left) and loss (right).
epochs = list(range(25))
fig, (acc_ax, loss_ax) = plt.subplots(1, 2)
fig.set_size_inches(20, 10)
hist = history.history
train_acc = hist['accuracy']
train_loss = hist['loss']
val_acc = hist['val_accuracy']
val_loss = hist['val_loss']
# Left panel: accuracy curves.
acc_ax.plot(epochs, train_acc, 'go-', label='Training Accuracy')
acc_ax.plot(epochs, val_acc, 'ro-', label='Validation Accuracy')
acc_ax.set_title('Training & Validation Accuracy')
acc_ax.legend()
acc_ax.set_xlabel("Epochs")
acc_ax.set_ylabel("Accuracy")
# Right panel: loss curves.
loss_ax.plot(epochs, train_loss, 'g-o', label='Training Loss')
loss_ax.plot(epochs, val_loss, 'r-o', label='Validation Loss')
loss_ax.set_title('Testing Accuracy & Loss')
loss_ax.legend()
loss_ax.set_xlabel("Epochs")
loss_ax.set_ylabel("Training & Validation Loss")
plt.show()
# Run the trained classifier on a single image and show it.
import numpy as np
from keras.preprocessing import image
test_image = image.load_img(r'C:\Users\HAFEEZ KHAN\Desktop\Cavity Dataset\cavity\10.jpg', target_size = (256, 256))
test_image = image.img_to_array(test_image)
# Model expects a batch axis: (1, 256, 256, 3).
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
print(training_set.class_indices)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img=mpimg.imread(r'C:\Users\HAFEEZ KHAN\Desktop\Cavity Dataset\cavity\10.jpg')
imgplot = plt.imshow(img)
# BUGFIX: was `plt=plt.title('Cavity Detected')`, which rebound the name `plt`
# from the pyplot module to a matplotlib Text object.
# NOTE(review): the title is a fixed string regardless of the prediction --
# confirm whether it should reflect `prediction` instead.
plt.title('Cavity Detected')
if result[0][0] == 1:
    prediction = 'healthy teeth'
else:
    prediction = 'cavity'
print("AI's prediction is: "+ prediction)
import matplotlib.pyplot as plt
def plot_acc_loss(results, epochs):
    """Plot training/validation accuracy and loss curves side by side.

    The first epoch is skipped (index [1:]) so the initial transient does not
    distort the axes; `results` is a Keras History object.
    """
    history = results.history
    train_acc = history['accuracy']
    train_loss = history['loss']
    val_acc = history['val_accuracy']
    val_loss = history['val_loss']
    x = range(1, epochs)
    plt.figure(figsize=(15, 5))
    # Left panel: accuracy.
    plt.subplot(121)
    plt.plot(x, train_acc[1:], label='Train_acc')
    plt.plot(x, val_acc[1:], label='Test_acc')
    plt.title('Accuracy over' + str(epochs) + 'Epochs', size=15)
    plt.legend()
    plt.grid(True)
    # Right panel: loss.
    plt.subplot(122)
    plt.plot(x, train_loss[1:], label='Train_loss')
    plt.plot(x, val_loss[1:], label='Test_loss')
    plt.title('Loss over' + str(epochs) + 'Epochs', size=15)
    plt.legend()
    plt.grid(True)
    plt.show()
plot_acc_loss(history, 25)
| hafeezkhan909/Detection-of-Cavities-from-Oral-Images-using-Convolutional-Neural-Networks | main.py | main.py | py | 5,653 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "keras.models.Sequential",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "keras.layers.Convolution2D",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "keras.layers.BatchNormalization",
"line_number": 29,
"usage_type": "call"
},
{
... |
30579767477 | import os
import numpy as np
import torch
import transformers
import torch.nn as nn
from transformers import AutoModel, BertTokenizer
import nltk
nltk.download("stopwords")
from nltk.corpus import stopwords
from string import punctuation
russian_stopwords = stopwords.words("russian")
'''import keras
import numpy as np
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Embedding, LSTM
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import tokenizer_from_json
import json
class ClassifierLSTM:
def __init__(self, weights = 'lstm'):
self.model = keras.models.load_model('lstm')
with open('lstm_tokenizer.json') as f:
data = json.load(f)
self.tokenizer = tokenizer_from_json(data)
self.encoder = LabelEncoder()
self.encoder.classes_ = np.load('lstm_encoder_classes.npy')
def preprocess(self, line):
support_chars = {33: ' ', 34: ' ', 35: ' ', 36: ' ', 37: ' ', 38: ' ', 39: ' ', 40: ' ', 41: ' ', 42: ' ', 43: ' ', 44: ' ', 45: ' ', 46: ' ', 47: ' ', 58: ' ', 59: ' ', 60: ' ', 61: ' ', 62: ' ', 63: ' ', 64: ' ', 91: ' ', 92: ' ', 93: ' ', 94: ' ', 95: ' ', 96: ' ', 123: ' ', 124: ' ', 125: ' ', 126: ' '}
line = line.translate(support_chars).lower().split(' ')
t = [token for token in line if token not in russian_stopwords and token != " " and token.strip() not in punctuation]
return ' '.join(t)
def predict(self, line):
line = self.preprocess(line)
text_sec = self.tokenizer.texts_to_sequences([input_line])
text_sec = pad_sequences(text_sec, maxlen=69)
pred = self.model.predict(text_sec, batch_size=1, verbose=1)
pred = np.argmax(pred,axis=1)
pred = self.encoder.inverse_transform(pred) - 1
if pred < 0:
pred = 0
return pred
input_line = 'изделия прочие пластмасс изделия прочих материалов товарных позиций 3901 3914 прочие прочие прочие прочие'
myLSTMmodel = ClassifierLSTM()
def LSTM(input_line):
global myLSTMmodel
res = myLSTMmodel.predict(input_line)
return res[0]
'''
class BERT_Arch(nn.Module):
def __init__(self, bert, num_classes = 96):
super(BERT_Arch, self).__init__()
self.bert = bert
self.dropout = nn.Dropout(0.3)
self.relu = nn.ReLU()
self.fc1 = nn.Linear(768,512)
self.fc2 = nn.Linear(512,num_classes)
self.softmax = nn.LogSoftmax(dim = 1)
def forward(self, sent_id, mask):
_, cls_hs = self.bert(sent_id, attention_mask = mask, return_dict = False)
x = self.fc1(cls_hs)
x = self.relu(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.softmax(x)
return x
class TansformerRuBERT:
    """Russian-text classifier: DeepPavlov RuBERT encoder + trained BERT_Arch head.

    Downloads the pretrained encoder/tokenizer on construction and loads the
    fine-tuned head weights from `weights`.
    """
    def __init__(self, weights = 'models/bert.pt', dev='cpu'):
        # dev='cuda'
        self.device = torch.device(dev)
        self.bert = AutoModel.from_pretrained("DeepPavlov/rubert-base-cased-sentence")
        self.tokenizer = BertTokenizer.from_pretrained("DeepPavlov/rubert-base-cased-sentence")
        # Freeze the encoder: only the classification head was trained.
        for param in self.bert.parameters():
            param.requires_grad = False
        self.model = BERT_Arch(self.bert)
        self.model = self.model.to(self.device)
        # NOTE(review): torch.load unpickles the file -- only safe for trusted weights.
        self.model.load_state_dict(torch.load(weights, map_location=torch.device(self.device)))
        self.model.eval()
    def preprocess(self, line):
        """Lowercase, strip ASCII punctuation/symbols and Russian stopwords."""
        # translate() table: ASCII punctuation codepoints -> space.
        support_chars = {33: ' ', 34: ' ', 35: ' ', 36: ' ', 37: ' ', 38: ' ', 39: ' ', 40: ' ', 41: ' ', 42: ' ', 43: ' ', 44: ' ', 45: ' ', 46: ' ', 47: ' ', 58: ' ', 59: ' ', 60: ' ', 61: ' ', 62: ' ', 63: ' ', 64: ' ', 91: ' ', 92: ' ', 93: ' ', 94: ' ', 95: ' ', 96: ' ', 123: ' ', 124: ' ', 125: ' ', 126: ' '}
        line = line.translate(support_chars).lower().split(' ')
        t = [token for token in line if token not in russian_stopwords and token != " " and token.strip() not in punctuation]
        return ' '.join(t)
    def predict(self, line):
        """Return the predicted class id for `line` (see offset note below)."""
        line = self.preprocess(line)
        # Fixed-length (15 token) padded/truncated encoding.
        sequence = self.tokenizer.encode(line,
                                    max_length = 15,
                                    padding = 'max_length',
                                    truncation = True)
        mask = torch.tensor([1]*len(sequence)).to(self.device)
        sequence = torch.tensor(sequence).to(self.device)
        # Add the batch dimension expected by the model.
        mask = torch.unsqueeze(mask, 0)
        sequence = torch.unsqueeze(sequence, 0)
        res = self.model(sequence, mask)
        res = int(res.argmax(dim=1).cpu().numpy())
        # NOTE(review): maps the raw argmax index to an external label id with a
        # +1/+2 offset around index 77 -- presumably a gap in the label space;
        # confirm against the training label encoding.
        if res> 77:
            return res+2
        else:
            return res+1
bertModel = TansformerRuBERT()
def bert(input_line):
    """Classify `input_line` with the module-level RuBERT model.

    Returns the predicted class id, or 0 if inference fails for any reason
    (model unavailable, tokenization error, ...).
    """
    global bertModel
    try:
        return bertModel.predict(input_line)
    except Exception:
        # BUGFIX: was a bare `except:` returning the literal 00 (== 0); catch
        # Exception so SystemExit/KeyboardInterrupt still propagate.
        return 0
def preprocess(text):
    """Normalize `text` using the module-level RuBERT wrapper's cleaner."""
    # Reading a module global needs no `global` declaration.
    return bertModel.preprocess(text)
from sklearn.pipeline import Pipeline
# pipeline позволяет объединить в один блок трансформер и модель, что упрощает написание кода и улучшает его читаемость
from sklearn.feature_extraction.text import TfidfVectorizer
# TfidfVectorizer преобразует тексты в числовые вектора, отражающие важность использования каждого слова из некоторого набора слов (количество слов набора определяет размерность вектора) в каждом тексте
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
# линейный классификатор и классификатор методом ближайших соседей
from sklearn import metrics
# набор метрик для оценки качества модели
from sklearn.model_selection import GridSearchCV
import sklearn
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.neighbors import BallTree
from sklearn.base import BaseEstimator
import pickle
modelSGD2 = pickle.load(open('models/sgd_ppl_clf_big.pkl', 'rb'))
def SGD2(input_line):
    """Predict the 2-digit code for `input_line` with the pickled SGD model.

    Returns 0 if the model is unavailable or prediction fails.
    """
    global modelSGD2
    try:
        return modelSGD2.predict([input_line])[0]
    except Exception:
        # BUGFIX: was a bare `except:` returning the literal 00 (== 0).
        return 0
modelSGD4 = pickle.load(open('models/sgd_ppl_clf_4.pkl', 'rb'))
def SGD4(input_line):
    """Predict the 4-digit code for `input_line` with the pickled SGD model.

    Returns 0 if the model is unavailable or prediction fails.
    """
    global modelSGD4
    try:
        return modelSGD4.predict([input_line])[0]
    except Exception:
        # BUGFIX: was a bare `except:` returning the literal 0000 (== 0).
        return 0
modelSGD2PipLines = pickle.load(open('models/sgd_ppl_clf_pipline_big.pkl','rb'))
def SGD2Piplines(input_line):
    """Predict the 2-digit code via the pickled sklearn pipeline model.

    Returns 0 if the pipeline is unavailable or prediction fails.
    """
    global modelSGD2PipLines
    try:
        return modelSGD2PipLines.predict([input_line])[0]
    except Exception:
        # BUGFIX: was a bare `except:` returning the literal 00 (== 0).
        return 0
def softmax(x):
    """Build a probability distribution over `x` that favors SMALL values.

    Weights are exp(-x), normalized to sum to 1 (a "softmin" over distances).
    """
    weights = np.exp(-x)
    total = sum(weights)
    return weights / total
class NeighborSampler(BaseEstimator):
    """k-NN sampler: predicts by randomly sampling one of the k nearest
    neighbors' labels, weighted by softmax(-distance * temperature), so closer
    neighbors are more likely to be chosen.
    """

    def __init__(self, k=5, temperature=10.0):
        self.k = k
        self.temperature = temperature

    def fit(self, X, y):
        """Index the training points and remember their labels."""
        self.tree_ = BallTree(X)
        self.y_ = np.array(y)
        return self  # sklearn convention: fit returns the estimator

    def predict(self, X, random_state=None):
        """Sample a label (and a distance) for each row of X.

        Returns (labels, distances) where distances are sampled with the same
        weighting as the labels (note: sampled independently, so a returned
        distance does not necessarily belong to the returned label).
        """
        # BUGFIX: `random_state` was previously accepted but ignored; seed a
        # dedicated RNG so results are reproducible when it is given.
        rng = np.random.RandomState(random_state)
        distances, indices = self.tree_.query(X, return_distance=True, k=self.k)
        result = []
        resultDist = []
        for distance, index in zip(distances, indices):
            weights = softmax(distance * self.temperature)
            result.append(rng.choice(index, p=weights))
            resultDist.append(rng.choice(distance, p=weights))
        return self.y_[result], resultDist
def SGD2_2(class_name, input_line):
    """Predict with the per-class model 'models/TNVED<class_name>.pkl'.

    Loads the pickle on every call and returns the first element of the first
    prediction. NOTE(review): pickle.load is only safe on trusted model files,
    and reloading per call is slow -- consider caching if this is hot.
    """
    # BUGFIX: use a context manager so the file handle is closed
    # deterministically (the original open() was never closed).
    with open('models/TNVED' + str(class_name) + '.pkl', 'rb') as f:
        model = pickle.load(f)
    res = model.predict([input_line])
    return res[0][0]
| FenixFly/Neimark-hack-FSC | backend/MLmodels.py | MLmodels.py | py | 8,042 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.download",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.... |
26966511059 | import pandas as pd
import numpy as np
import re
from itertools import chain
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
import pickle
# Deserialize artifacts produced at training time. From their usage below:
# only_viruses / only_clear are membership containers of library names used as
# lookup features, and model is the fitted classifier.
# NOTE(review): pickle.load must only be used on trusted files.
with open('only_viruses.pkl','rb') as f:
    only_viruses = pickle.load(f)
with open('only_clear.pkl','rb') as f:
    only_clear = pickle.load(f)
with open('model.pkl','rb') as f:
    model = pickle.load(f)
class ExtensionCountTransformer():
    """Stateless transformer counting '.exe' and '.drv' extensions per row.

    Expects a pandas Series of comma-separated file lists; emits a DataFrame
    with integer columns 'count_of_exe' and 'count_of_drv'.
    """

    def __init__(self):
        pass

    def fit(self, X, y=None):
        # Nothing to learn; present only for sklearn pipeline compatibility.
        return self

    def transform(self, X, y=None):
        n_rows = X.shape[0]
        exe_counts = [0] * n_rows
        drv_counts = [0] * n_rows
        for pos in range(n_rows):
            # A dot followed by three lowercase letters; commas cannot be part
            # of a match, so scanning the whole row is equivalent to scanning
            # each comma-separated piece.
            for ext in re.findall(r'\.[a-z]{3}', X.iloc[pos]):
                if ext == '.exe':
                    exe_counts[pos] += 1
                elif ext == '.drv':
                    drv_counts[pos] += 1
        return pd.DataFrame({'count_of_exe': exe_counts, 'count_of_drv': drv_counts})
class FeatureExtractionTransformer():
    """Derive numeric features from a comma-separated library-name list.

    For each row it strips extensions and whitespace from every name, then
    emits: how many names appear in the known-virus set, how many in the
    known-clean set, the total name count, and the mean name length.
    """

    def __init__(self, viruses, clear):
        self.only_viruses = viruses
        self.only_clear = clear

    def fit(self, X, y=None):
        # Reference sets are supplied at construction; nothing to fit.
        return self

    def transform(self, X, y=None):
        virus_counts = []
        clear_counts = []
        lib_counts = []
        mean_name_lens = []
        for pos in range(X.shape[0]):
            # Drop everything from the first '.' onward, then trim whitespace.
            names = [re.sub(r'\.(.+)$', r'', raw).strip()
                     for raw in X.iloc[pos].split(',')]
            virus_counts.append(sum(1 for name in names if name in self.only_viruses))
            clear_counts.append(sum(1 for name in names if name in self.only_clear))
            lib_counts.append(len(names))
            mean_name_lens.append(np.mean([len(name) for name in names]))
        out = pd.DataFrame(data=virus_counts, columns=['count_viruses'])
        out['count_clear'] = clear_counts
        out['count_libs'] = lib_counts
        out['average_len_libs'] = mean_name_lens
        return out
# Load the raw test set (tab-separated; must contain a 'libs' column).
test = pd.read_csv('test.tsv', sep='\t')
# Pipeline 1: hand-crafted library-name features -> standardization.
feature_pipe = Pipeline(
    steps=[
        ('Feature_extraction', FeatureExtractionTransformer(viruses=only_viruses, clear=only_clear)),
        ('scaler', StandardScaler())
    ]
)
# Pipeline 2: '.exe'/'.drv' extension counts -> standardization.
ext_pipe = Pipeline(
    steps=[
        ('Count_extensions', ExtensionCountTransformer()),
        ('scaler', StandardScaler())
    ]
)
# Both pipelines consume the same 'libs' text column; outputs are concatenated.
preprocessor = ColumnTransformer(
    transformers=[
        ('feature_ext', feature_pipe, 'libs'),
        ('extensions', ext_pipe, 'libs')
    ]
)
prep = preprocessor
# NOTE(review): the scalers are fit on the *test* data here; for faithful
# inference the preprocessor should be fit on training data and persisted
# alongside model.pkl -- confirm against the training script.
X_trans = prep.fit_transform(test)
y_pred = model.predict(X_trans)
# Persist one prediction per input row.
pd.Series(y_pred, name='prediction').to_csv('prediction.txt', index=False)
{
"api_name": "pickle.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_numb... |
3026797044 | # -*- coding: utf-8 -*-
from Basic_Tools import *
import arcpy,math
import pandas as pd
import numpy as np
import uuid,json,datetime,sys,csv,os
from scipy.spatial import distance_matrix
arcpy.env.overwriteOutPut = True
class Layer_Engine():
    """Wrapper around an arcpy feature layer for QA-style checks.

    Reads all rows (plus geometry area/WKT) into a pandas DataFrame on
    construction, and exposes checks such as unclosed shapes, near-duplicate
    vertices, zero-area polygons, geometry curves, blocks at (0,0), and
    field-name validation.
    """
    def __init__(self,layer,columns = 'all'):
        # Default column set: every field on the layer plus geometry tokens.
        if columns == 'all':
            columns = [str(f.name.encode('UTF-8')) for f in arcpy.ListFields(layer)]
            columns.extend(['SHAPE@AREA'])
            columns.extend(['SHAPE@WKT'])
        self.layer = layer
        self.gdb = os.path.dirname (layer)
        self.name = os.path.basename (layer)
        self.desc = arcpy.Describe(layer)
        self.shapetype = ShapeType(self.desc)
        self.oid = str(self.desc.OIDFieldName)
        self.len_columns = len(columns)
        # Materialize the whole layer into memory (one list per row).
        self.data = [row[:] for row in arcpy.da.SearchCursor (self.layer,columns)]
        self.df = pd.DataFrame(data = self.data, columns = columns)
        self.df["geom_type"] = self.shapetype
        self.len_rows = self.df.shape[0]
        self.columns = columns
        # Lazy/derived state, populated by Extract_shape() and the checks below.
        self.data_shape, self.df_shape , self.Not_closed = None, None, None
        self.exists_curves, self.bad_area = None, None
    def Len_rows(self):
        pass
    def Count_field(self,field):
        # Adds a 'count' column: number of rows sharing this row's `field` value.
        self.df['count'] = self.df.groupby(field)[field].transform('count')
    def Extract_shape(self):
        """Explode geometries into per-vertex (or per-point) rows in df_shape."""
        if self.shapetype != 'POINT':
            # One row per vertex: OID, X, Y, Layer, Area, and the geometry object.
            columns_shape = [self.oid,'X','Y','Layer','Area','SHAPE']
            self.data_shape = [[i[1],j.X,j.Y,i[2],i[3],i[0]] for i in arcpy.da.SearchCursor (self.layer,["SHAPE@",self.oid,'Layer','SHAPE@AREA']) for n in i[0] for j in n if j]
            self.df_shape = pd.DataFrame(data = self.data_shape, columns = columns_shape)
            self.df_shape['index1'] = self.df_shape.index
            # 'X_Y' is a string key combining both coordinates (see Connect_rows).
            self.df_shape['X_Y'] = self.df_shape.apply(lambda row: Connect_rows(row['X'] , row['Y']),axis = 1)
        else:
            # Points (CAD inserts): one row per feature with its label point.
            columns_shape = [self.oid,'Layer','Entity','LyrHandle','X','Y']
            self.data_shape = [[i[1],i[2],i[3],i[4],i[0].labelPoint.X,i[0].labelPoint.Y] for i in arcpy.da.SearchCursor (self.layer,["SHAPE@",self.oid,"Layer","Entity","LyrHandle"]) if i[0]]
            self.df_shape = pd.DataFrame(data = self.data_shape, columns = columns_shape)
            self.df_shape['X_Y'] = self.df_shape.apply(lambda row: Connect_rows(row['X'] , row['Y']),axis = 1)
    def Filter_point_by_max_distance(self,X_Y,distance):
        """Return point rows within `distance` of the given X_Y coordinates.

        Requires Extract_shape() to have been called first.
        """
        if self.shapetype == 'POINT':
            if self.data_shape:
                # Columns 4,5 of data_shape are X,Y for point layers.
                point_data = [[item[4],item[5]] for item in self.data_shape]
                result = Dis_2arrays_max(point_data,X_Y,distance,15)
                result = [i[0] for i in result]
                df2 = self.df_shape.copy()
                df2 = df2[df2['X_Y'].isin(result)]
                return df2
            else:
                print ("Func Extract_shape wasnt activated")
        else:
            print ("Feature isn't POINT")
    def Len_field(self,field,as_int = False):
        # as_int=True: return the string length of the field's (first) value;
        # otherwise add a '<field>_len' column with per-row lengths.
        if as_int:
            len_field = self.df[field].apply(str).apply(len).astype(int)
            if len_field.shape[0] > 1:
                len_field = len_field[0]
            return int(len_field)
        else:
            self.df[field + '_len'] = self.df[field].apply(len)
    def Filter_df(self,field,Value,Update_df = False):
        # Filter rows where field == Value; either in place or returned.
        if Update_df:
            self.df = self.df[self.df[field] == Value]
        else:
            df_filter = self.df[self.df[field] == Value]
            return df_filter
    def Shape_closed(self):
        """Return first/last vertices that do not coincide (unclosed shapes)."""
        if not isinstance(self.df_shape, type(None)):
            gb_obj = self.df_shape.groupby (by = self.oid)
            # First and last vertex row of every feature.
            df_min = gb_obj.agg ({'index1' : np.min})
            df_max = gb_obj.agg ({'index1' : np.max})
            df_edge = pd.concat ([df_min,df_max])
            df2 = pd.merge (self.df_shape,df_edge, how='inner', on='index1')
            # A closed ring's first and last vertex share the same X_Y key, so
            # its count is 2; a count < 2 means the endpoints differ.
            df2['Count_X_Y'] = df2.groupby ('X_Y')['X_Y'].transform('count')
            self.Not_closed = df2[df2['Count_X_Y'] < 2].values.tolist()
            return self.Not_closed
    def Close_vertxs(self,layer_name,Min_num):
        '''
        [INFO] - return close vrtxs but only if bigger then 0
        '''
        # NOTE(review): vertxs is built from self.data_shape *before* the None
        # check below, so calling this without Extract_shape() raises TypeError.
        vertxs = [[i[1],i[2]] for i in self.data_shape if i[3] == layer_name]
        # Guard: pairwise distance matrix is O(n^2); capped at 2000 vertices.
        if self.shapetype != 'POINT' and self.data_shape != None and len(vertxs) < 2000:
            dis_array = distance_matrix(vertxs,vertxs)
            # Mask self-distances so argmin finds the nearest *other* vertex.
            dis_array = np.where(dis_array==0, 99999, dis_array)
            closest_points = dis_array.argmin(axis = 0)
            close_pnt = [[str(round(vertxs[i][0],2)) + '-' + str(round(vertxs[i][1],2))\
                          ,vertxs[i],vertxs[closest_points[i]],round(dis_array[i,closest_points[i]],4)]\
                          for i in range(len(vertxs)) if dis_array[i,closest_points[i]] < Min_num and dis_array[i,closest_points[i]] > 0]
            return close_pnt
    def Zero_Area(self):
        # Collect [area, layer] pairs for polygons with area <= 0; stores False
        # when none were found.
        if self.shapetype == 'POLYGON':
            self.bad_area = [[i[4],i[3]] for i in self.data_shape if i[4] <= 0]
            if self.bad_area:
                return self.bad_area
            else:
                self.bad_area = False
    def Curves(self,Out_put):
        """Copy geometries containing true curves to Out_put; return their count."""
        if self.shapetype in ['POLYGON','POLYLINE']:
            # Geometry objects stringify with 'describe geometry object'; their
            # JSON representation contains 'curve' entries for arc segments.
            curves_list = [n for i in self.data for n in i if 'describe geometry object' in str(n) if 'curve' in str(json.loads(i[-1].JSON))]
            if curves_list:
                arcpy.CopyFeatures_management(curves_list,Out_put)
                self.exists_curves = True
                return len(curves_list)
            else:
                self.exists_curves = False
                return self.exists_curves
    def Check_Block_0_0(self):
        # Find CAD "Insert" entities sitting exactly at the origin (0,0) --
        # a common symptom of misplaced blocks.
        if self.shapetype == 'POINT':
            df2 = self.df_shape.copy()
            df2.where ((df2["X"]==0) & (df2["Y"]==0) & (df2['Entity'] == "Insert"), inplace = True)
            df2 = df2.dropna (axis=0, how='all')
            Block_at_0_0 = df2.values.tolist()
            return Block_at_0_0
    def Check_Columns_letters(self,bad_charc = ['-','"','.']):
        # df.columns[df.columns.str.contains('-|"|.')] # why does the dot always appear as if it exists
        # Returns [field, offending_char] pairs; empty list means fields are OK.
        cach_fields = False
        try:
            cach_fields = [[field,letter] for field in self.columns for letter in field if letter in bad_charc]  # if empty, the fields are fine
        except:
            # NOTE(review): bare except; on failure a literal (unformatted)
            # message string is returned instead of a list -- callers must
            # handle both shapes.
            cach_fields = 'problem with: {} in field, Cant Get the field'  # problem with the fields, no way to know why
        return cach_fields
    def Check_Columns_names(self,fields = "SURVEY_YYYY|SURVEY_MM|SURVEY_DD|FINISH_YYYY|FINISH_MM|FINISH_DD"):
        # Return required field names (pipe-separated in `fields`) missing from df.
        exists_columns = set(self.df.columns[self.df.columns.str.contains(fields)])
        fields_in = set(fields.split('|'))
        not_exists = list(fields_in-exists_columns)
        return not_exists
    def Get_Field_Count_to_df(self,field,name_field_count = ''):
        # Return a copy of df with an added per-value occurrence count column
        # (named '<field>_num' unless overridden). Does not mutate self.df.
        if name_field_count == '':
            name_field_count = str(field) + "_num"
        count_data = self.df.groupby(field).size()
        count_data = count_data.to_frame().reset_index()
        count_data = self.df.merge(count_data, on=field).reset_index()
        count_data = count_data.rename(columns={0: name_field_count})
        return count_data
    def Dict(self,index_key):
        # Rows as a dict keyed by `index_key`: {key: {column: value, ...}}.
        dict_ = self.df.set_index(index_key)
        dict_2 = dict_.T.to_dict()
        return dict_2
    def create_csv(self,out_put):
        # Dump df to '<out_put>\<shapetype>.csv' in UTF-8.
        out_put = out_put + '\\' + self.shapetype + '.csv'
        self.df.to_csv(out_put,encoding ='utf-8')
    def Groupby_and_count(self,field,name_field_count = ''):
        # Replace self.df with value counts of `field`.
        # NOTE(review): name_field_count is computed but never used here --
        # the count column keeps pandas' default name 0; confirm intent.
        if name_field_count == '':
            name_field_count = str(field) + "_num"
        count_data = self.df.groupby(field).size()
        count_data = count_data.to_frame().reset_index()
        self.df = count_data
class Layer_Management():
def __init__(self,Layer):
if arcpy.Exists(Layer):
self.gdb = os.path.dirname (Layer)
self.name = os.path.basename (Layer)
self.layer = Layer
self.desc = arcpy.Describe(layer)
self.oid = str(self.desc.OIDFieldName)
self.sr = self.desc.spatialReference
self.Geom_type = ShapeType(self.desc)
else:
print_arcpy_message ("Layer is not exist")
pass
def fields(self):
return [str(f.name) for f in arcpy.ListFields(self.layer)]
def Get_Label_Point_As_Point(self,out_put):
arcpy.CopyFeatures_management([arcpy.PointGeometry(i.shape.labelPoint) for i in arcpy.SearchCursor (self.layer) if i.shape],out_put)
return out_put
def Multi_to_single(self):
multi = False
len_before = int(str(arcpy.GetCount_management(self.layer)))
temp_lyer = self.layer + 'Temp'
save_name = self.layer
arcpy.MultipartToSinglepart_management (self.layer,temp_lyer)
arcpy.Delete_management (self.layer)
arcpy.Rename_management (temp_lyer,save_name)
len_after = int(str(arcpy.GetCount_management(self.layer)))
if len_after > len_before:
multi = True
return multi
| medad-hoze/EM_3 | Old/Engine_class.py | Engine_class.py | py | 9,913 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "arcpy.env",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "arcpy.ListFields",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_n... |
29394478322 | # -*- coding: utf-8 -*-
"""Bottle web-server."""
from bottle import Bottle
from bottle import template, static_file
from os.path import dirname, abspath
from datetime import date, timedelta
app = Bottle()
BASE_DIR = dirname(abspath(__file__))
@app.route('/static/<filename:path>')
def server_static(filename):
"""Serve all static files which used in HTML."""
return static_file(filename, root=f'{BASE_DIR}/static')
@app.route("/")
def main():
"""Load main page."""
age = (date.today() - date(1999, 11, 11)) // timedelta(days=365.2425)
return template(f'{BASE_DIR}/views/main.html', age=age)
if __name__ == "__main__":
app.run(host='localhost', port=8080, debug=True)
| AnotherProksY/MyPage | src/mypage.py | mypage.py | py | 701 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bottle.Bottle",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bottle.static_file",
... |
3511505451 | from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class Orderpage:
def __init__(self, driver): # constructor method
self.driver = driver
self.wait = WebDriverWait(self.driver, 10)
def Buy(self):
try:
Buy = WebDriverWait(self.driver, 10).until(
EC.presence_of_element_located((By.XPATH, "(//input[contains(@type,'submit')])[2]"))
)
assert Buy.is_displayed(), "Buy button is not displayed on the page."
Buy.click()
except Exception as e:
print(f"Assertion failed: {e}") | tadios19/QA_19_POM1 | Src/Pages/OrderPage.py | OrderPage.py | py | 705 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of... |
37753193171 | import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient
from loguru import logger
from tqdm import tqdm
from .dates import DATES
client = MongoClient()
db = client["echo_of_moscow"]
collection = db["error_log_links"]
def links() -> list:
links = []
logger.info("Starting generating")
for month in DATES:
for day in tqdm(DATES[month]):
url = f"https://echo.msk.ru/news/2020/{month}/{day}/"
try:
resp = requests.get(url)
soup = BeautifulSoup(resp.text, "html5lib")
all_links = soup.find("div", {"class", "column"})
page_link = all_links.find_all("h3")
for link in page_link:
url = link.find("a").get("href")
links.append(f"https://echo.msk.ru{url}")
except Exception as ex:
collection.insert_one({"url": url})
logger.error(ex)
continue
break
return links
| smapl/mos_parse | src/mos_parse/parse_links.py | parse_links.py | py | 1,022 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "loguru.logger.info",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "dates.DATES",
... |
74120520424 | import pygame,sys
from pygame.locals import *
from GUI_formulario_prueba import FormPrincipal
from constantes import *
pygame.init()
pygame.display.set_caption("Robot Blaster Adventure")
RELOJ = pygame.time.Clock()
PANTALLA = pygame.display.set_mode((ANCHO_PANTALLA,ALTO_PANTALLA))
imagen_fondo = pygame.image.load(r"images/locations/FONDO.png")
imagen_fondo = pygame.transform.scale(imagen_fondo, (ANCHO_PANTALLA, ALTO_PANTALLA))
form_principal = FormPrincipal(PANTALLA, 0, 0, ANCHO_PANTALLA, ALTO_PANTALLA, imagen_fondo, (171, 1, 1))
pausa = pygame.image.load(r"images\menu\pause.png")
pausa = pygame.transform.scale(pausa,(ANCHO_PANTALLA,ALTO_PANTALLA))
esta_en_pausa = False
musica_en_pausa = False
flag = True
while flag:
RELOJ.tick(FPS)
lista_eventos = pygame.event.get()
for evento in lista_eventos:
if evento.type == pygame.QUIT:
pygame.quit()
sys.exit(0)
elif evento.type == pygame.KEYDOWN:
if evento.key == pygame.K_ESCAPE:
esta_en_pausa = not esta_en_pausa
if esta_en_pausa:
pygame.mixer.music.pause()
musica_en_pausa = True
else:
musica_en_pausa = False
pygame.mixer.music.unpause()
elif evento.type == pygame.KEYDOWN:
if evento.key == pygame.K_TAB:
cambiar_modo()
PANTALLA.fill(ROJO)
if not esta_en_pausa:
form_principal.update(lista_eventos)
else:
PANTALLA.blit(pausa, (0,0))
pygame.display.update() | valverdecristian/cristian_valverde_tp_pygame | main_principal.py | main_principal.py | py | 1,647 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.C... |
966762603 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from textwrap import dedent
import argparse
import logging
import numpy as np
np.set_printoptions(precision=1)
from scipy.sparse import load_npz
from surface_stiffness.matrix import (
fourier_transform_symmetric_square_block_matrix,
OrderedVectorToRectangularGrid
)
__author__ = "Wolfram Georg Nöhring"
__copyright__ = "Copyright 2020, Uni Freiburg"
__license__ = "GNU General Public License"
__email__ = "wolfram.noehring@imtek.uni-freiburg.de"
logger = logging.getLogger(
"surface_stiffness.scripts.fourier_transform_greens_functions"
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"greens_functions",
help="Numpy array containing the Green's functions. Alternatively: scipy sparse bsr matrix",
)
parser.add_argument(
"-f",
"--input_format",
choices=("numpy", "sparse"),
default="numpy",
help="Input format: 'numpy' if the file should be loaded with numpy.load, 'sparse' if it should be loaded with scipy.sparse.load_npz",
)
parser.add_argument(
"output",
default="fourier_transformed_greens_functions.npy",
help="Output array containig the Fourier transforms",
)
parser.add_argument(
"-b",
"--block_size",
type=int,
default=3,
help="Size of blocks in the Green's function matrix",
)
parser.add_argument(
"-g",
"--grid_shape",
type=int,
nargs=2,
help=dedent(
"""\
Number of sites along the x- and y-directions.
If this option is not set, it will be assumed
that the grid is square, and the dimensions
will be inferred from the shape of the greens
functions array.
"""
),
)
args = parser.parse_args()
if args.input_format == "numpy":
greens_functions = np.load(args.greens_functions)
elif args.input_format == "sparse":
sparse = load_npz(args.greens_functions)
greens_functions = sparse.todense()
else:
raise ValueError
# Green's functions calculated with PetSC inversion
# may be padded with zeros along the first dimension
num_cols = greens_functions.shape[1]
greens_functions = greens_functions[:num_cols, :]
if not args.grid_shape:
num_atoms_edge = int(np.sqrt(greens_functions.shape[0] // args.block_size))
logger.info(f"Setting up reshape for grid size ({num_atoms_edge}, {num_atoms_edge})")
reshape = OrderedVectorToRectangularGrid(num_atoms_edge, num_atoms_edge)
else:
logger.info(f"Setting up reshape for grid size {args.grid_shape}")
reshape = OrderedVectorToRectangularGrid(*args.grid_shape)
ft_greens_functions = fourier_transform_symmetric_square_block_matrix(
greens_functions, reshape
)
np.save(args.output, ft_greens_functions)
if __name__ == "__main__":
main()
| wgnoehring/surface_stiffness | scripts/fourier_transform_greens_functions.py | fourier_transform_greens_functions.py | py | 3,013 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.set_printoptions",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "textw... |
70584661544 | # create venv
# python3 -m venv env
import markdown
from datetime import datetime
md = markdown.Markdown(extensions=['markdown.extensions.fenced_code'])
file_data = None
with open('raw.md') as f:
file_data = f.read()
#print(file_data)
data = md.convert(file_data)
header_list = []
# link
i = len(data)
target = '<a '
while i != -1:
i = data.rfind(target, 0, i)
if i == -1:
break
#print(i)
j = i + len(target)
data = data[:j] + 'target="_blank" ' + data[j:]
data = '{}target="_blank" {}'.format(data[:j], data[j:])
# header
i = len(data)
target = '<h2>'
while i != -1:
i = data.rfind(target, 0, i)
if i == -1:
break
j = i + len(target)
k = data[i:].find('</h2>')
#print(j, i+k)
head_text = data[j:i+k]
header_id = head_text.lower().replace(' ', '-')
header_list.insert(0, (head_text, header_id))
data = '{} id="{}"{}'.format(data[:j-1], header_id, data[j-1:])
# code block
i = len(data)
target = 'class="code-block"'
prefix = '><code '
while i != -1:
i = data.rfind(target, 0, i)
if i == -1:
break
#print(i)
j = i + len(target)
data = '{} {}{}{}'.format(data[:i-len(prefix)], target, prefix[:-1], data[j:])
# runtime embedded box
i = len(data)
target = 'runtime-embedded-box'
prefix = '<pre><code class="'
postfix = '</code></pre>'
box_extra_class = {
0: '',
1: 'runtime-show-canvas',
2: 'runtime-hide-console',
3: 'runtime-show-canvas runtime-hide-console',
}
while i != -1:
i = data.rfind(target, 0, i) # class start
if i == -1:
break
#print(i)
j = i + len(target) # class params start
k = j + data[j:].find('"') # class end
l = k + data[k:].find(postfix) # code block end
box_style = data[j+1:k].split('-') # [type, height]
data = '{}<div class="runtime-embedded-box {}" style="width: 100%; height: {}px;">{}</div>{}'.format(
data[:i-len(prefix)],
box_extra_class[int(box_style[0])],
box_style[1],
data[k+2:l],
data[l+len(postfix):]
)
template = ''
with open('template.html') as f:
template = f.read()
toc = '\n'.join(['<li><a href="#{}">{}</a></li>'.format(header[1], header[0]) for header in header_list])
out_data = template.replace('{{toc}}', toc)
now = datetime.now()
out_data = out_data.replace('{{date}}', now.strftime("%d %b, %Y"))
out_data = out_data.replace('{{year}}', now.strftime("%Y"))
out_data = out_data.replace('{{contents}}', data)
with open('index.html', 'w') as f:
f.write(out_data)
| yjlo123/runtime-tutorial | gen.py | gen.py | py | 2,378 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "markdown.Markdown",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 93,
"usage_type": "name"
}
] |
34112296996 | #!/usr/bin/env python3
import time
import sys
import psycopg2
import configparser
from json import dumps
from json import loads
import stomp
###############################################################################
# Globals
###############################################################################
config = configparser.ConfigParser()
config.read('settings.ini')
# Our database info which we read from the ini file
conn = psycopg2.connect(
host=config['Database']['host'],
database=config['Database']['database'],
user=config['Database']['user'],
password=config['Database']['password'])
class DHQListener(stomp.ConnectionListener):
def on_error(self, headers, message):
print('received an error "%s"' % message)
def on_message(self, message):
print('received a message "%s"' % message.body)
# Now let's get the data we need for these orders
orderData = getDataForOrders(message.body)
# And hand it off for them to be processed...
processOrders(orderData)
# Now let's connect to the Apache MQ server
hosts = [(config['QueueServer']['host'], config['QueueServer']['port'])]
qconn = stomp.Connection(host_and_ports=hosts)
qconn.set_listener('', DHQListener())
qconn.connect(config['QueueServer']['user'], config['QueueServer']['password'], wait=True)
# And now register the consumer
qconn.subscribe(destination=config['QueueServer']['queue'], id=config['QueueServer']['qid'], ack='auto')
###############################################################################
# Functions that actually do stuff
###############################################################################
# This function makes a call to the plpgsql function "get_data_for_orders" which
# does a lot of the heavy lifting of getting the data associated with the
# individual orders. It returns all the data related to the orders for the specific
# member, where it gets cut up and sent to the appropriate topics for the
# downstream programs to handle it
def getDataForOrders(orders):
dataCursor = conn.cursor()
dataCursor.execute(f"select get_data_for_orders('{orders}')")
rawData = ''
for data in dataCursor:
rawData = data[0]
dataCursor.close()
return rawData
# Here we have received the data associated with the orders, so we are going
# to go through the array and send the data to the appropriate topics, which
# we'll get from the data itself
def processOrders(orderData):
def sendToTopic(topicName, dataToSend):
qconn.send(body=dumps(dataToSend), destination=topicName)
#print(f"----> {orderData}")
if len(orderData) == 0:
print("Hmm, no data?")
return
for o in orderData['orders']:
sendToTopic(o['topic'], o['send'])
###############################################################################
# Start of program
###############################################################################
print("*** DH Dispatcher Starting ***")
while True:
time.sleep(2)
print("Waiting for orders...")
qconn.disconnect()
# Should never get here
print("*** DH Dispatcher Exited ***") | pumpingstationone/DeepHarborCRM | DHDispatcher/dispatcher.py | dispatcher.py | py | 3,088 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "psycopg2.connect",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "stomp.ConnectionListener",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name... |
5221914888 | import matplotlib.pyplot as plt
from constants.spark import Session
from etl import parquet
def createDistributionGraph():
"""
Distribution analysis function
Calculates the distribution of cyclists over the different measurement points
Shows the results in a bar plot
:return: None
"""
with Session() as spark:
flow = parquet.readResults(spark, "flow")
flow.registerTempTable("flow")
locatie = parquet.readLocatie(spark)
locatie.registerTempTable("locatie")
meetpuntcodes = [str(i.MeetpuntCode) for i in
spark.sql("SELECT MeetpuntCode FROM locatie GROUP BY MeetpuntCode").collect()]
meetpuntcolumns = {}
map(lambda code: meetpuntcolumns.update({code: "flow_%s" % code}), meetpuntcodes)
avgflow = spark.sql("SELECT Tijd, MeetpuntCode, avg(Flow) Flow "
"FROM flow GROUP BY Tijd, MeetpuntCode "
"ORDER BY max(Timestamp)").toPandas()
groups = avgflow.groupby("MeetpuntCode")
grouplist = {}
map(lambda code: grouplist.update(
{code: groups.get_group(code).rename(index=str, columns={"Flow": meetpuntcolumns[code]})}), meetpuntcodes)
tijden = spark.sql("SELECT Tijd FROM flow GROUP BY Tijd").toPandas()
for code in meetpuntcodes:
tijden = tijden.join(grouplist[code], on="Tijd", rsuffix="_%s" % code)
tijden.plot(x="Tijd", y=meetpuntcolumns.values(), kind="bar", stacked=False)
plt.show()
if __name__ == '__main__':
createDistributionGraph()
| BigUtrecht/BigUtrecht | analysis/distribution.py | distribution.py | py | 1,598 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "constants.spark.Session",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "etl.parquet.readResults",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "etl.parquet",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "etl.parqu... |
16667023084 | import torch, torch.nn, torch.nn.functional as F
import pytorch_lightning as pl
from tqdm import tqdm
from pathlib import Path
from argparse import ArgumentParser, Namespace
from itertools import count
import time, os
from train import Unet3D, QureDataset
def predict(args):
checkpoint_path = Path(args.checkpoint)
ckpt = torch.load(args.checkpoint)
if args.out is None: out_path = checkpoint_path.parent/checkpoint_path.stem
else: out_path = Path(args.out)
if not out_path.exists(): out_path.mkdir()
dev = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
# model = Unet3D.load_from_checkpoint(checkpoint_path=args.checkpoint)
model = Unet3D(Namespace(**ckpt['hparams']))
model.load_state_dict(ckpt['state_dict'])
model.to(dev)
model.eval()
model.freeze()
item_path = Path(args.item_path)
if args.valid:
items = model.items[model.split_idx:]
ds_path = args.ds_path
elif item_path.is_dir():
items = [n for n in os.listdir(item_path)]
ds_path = item_path
else:
items = [item_path]
ds_path = item_path.parent
ds = QureDataset(ds_path, items=items, output_meta=True,
tf_as_pts=model.tf_as_pts, vol_sz=model.hparams.vol_sz,
device=torch.device('cpu') if args.tfm_cpu else dev)
for i, batch in tqdm(enumerate(ds)):
vol, ao, tf, meta = batch
if not model.tf_as_pts: tf = tf[None]
elif model.tf_as_pts: tf = [tf.to(dev)]
if i > args.only and args.only > 0: break
tf_name = meta['ao_uuid']
out_name = f"Prediction_{meta['name']}_{tf_name}.pt"
vol_in = vol.squeeze()[None, None].to(dev).float()
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record() # Log Time
pred = model.forward(vol_in, tf) # Model.forward()
end.record()
torch.cuda.synchronize()
dur = start.elapsed_time(end)
torch.save({
'pred': pred.to(torch.float16).cpu(),
'vol': vol.to(torch.float16).cpu(),
'gt': ao.to(torch.float16).cpu(),
'tf': tf[0].cpu() if isinstance(tf, list) else tf.cpu(),
**meta
}, out_path/out_name)
tqdm.write(f"Saved prediction for {meta['name']} (TF={tf_name}) as {out_name}. Inferred in {dur}ms.")
if __name__ == '__main__':
parser = ArgumentParser('Infer DVAO')
parser.add_argument('checkpoint', type=str, help='Path to model checkpoint')
parser.add_argument('item_path', type=str, help='Path to Input Item')
parser.add_argument('--out', type=str, default=None, help='Path where the output predictions are saved to')
parser.add_argument('--only', type=int, default=0, help='Number of volumes to predict from the ds')
parser.add_argument('--valid', action='store_true', help='Whether to use the validation items according to the training runs split')
parser.add_argument('--tfm_cpu', action='store_true', help='Whether the data preprocessing (cropping, resizing, ..) is done on CPU (to save GPU memory)')
args = parser.parse_args()
predict(args)
| xeTaiz/dvao | infer.py | infer.py | py | 3,255 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"l... |
32707616661 | import pygame
from pygame.locals import *
class abstractCell:
def __init__ (self):
"cell basic class"
self.northWallBroken = False
self.eastWallBroken = False
self.westWallBroken = False
self.southWallBroken = False
"if visited"
self.visited = False
self.neighbours = []
"coords"
self.x = 0
self.y = 0
self.height = 0
self.width = 0
def render(self, surface, linecolour):
if not self.northWallBroken:
pygame.draw.line(surface, linecolour, (self.x, self.y), (self.x+self.width, self.y), 1)
if not self.eastWallBroken:
pygame.draw.line(surface,linecolour, (self.x+self.width, self.y), (self.x+self.width, self.y+self.height), 1)
if not self.westWallBroken:
pygame.draw.line(surface, linecolour, (self.x, self.y), (self.x, self.y+self.height), 1)
if not self.southWallBroken:
pygame.draw.line(surface, linecolour, (self.x, self.y+self.height), (self.x+self.width, self.y+self.height),1)
def findvalidNeighbours(self, maze):
try :
self.neighbours = []
positionX = int(self.x/maze.cellWidth)
positionY = int(self.y/maze.cellHeight)
#north neighbour
if positionY > 0 and maze.cells[positionY-1][positionX].visited == False :
self.neighbours.append( maze.cells[positionY-1][positionX] )
#east neighbour
if positionX*maze.cellWidth < maze.oWidth-maze.cellWidth and maze.cells[positionY][positionX+1].visited == False:
self.neighbours.append( maze.cells[positionY][positionX+1] )
#south neighbour
if positionY*maze.cellHeight < maze.oHeight-maze.cellHeight and maze.cells[positionY+1][positionX].visited == False :
self.neighbours.append( maze.cells[positionY+1][positionX] )
#west neighbour
if positionX > 0 and maze.cells[positionY][positionX-1].visited == False :
self.neighbours.append( maze.cells[positionY][positionX-1] )
except :
print ( self, "@", self.x, ", ", self.y, ", failed to find neighbour" )
print ("")
def findWall (self, cell):
if self.x == cell.x and self.y - cell.height == cell.y:
return 0 #N
if self.y == cell.y and self.x + cell.width == cell.x:
return 1 #E
if self.x == cell.x and self.y + cell.height == cell.y:
return 2 #S
if self.y == cell.y and self.x - cell.width == cell.x:
return 3 #W
| Sheepaay/Maze-Generation | cell.py | cell.py | py | 2,254 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.draw.line",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.line",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
... |
39497609909 | """
Module for Various functions to simplify and standardize dumping objects to json.
NOTE: this is taken from python-common in nomad-lab-base.
It is copied here to remove the dependency from nomad-lab-base.
For more info on python-common visit:
https://gitlab.mpcdf.mpg.de/nomad-lab/python-common
The author of this code is: Dr. Fawzi Roberto Mohamed
E-mail: mohamed@fhi-berlin.mpg.de
"""
from builtins import object
import json
from ai4materials.external.compact_sha import sha512
import numpy
def numpyEncoder(o):
"""new default function for json class so that numpy arrays and sets can be encoded"""
# check if object is a numpy array
if isinstance(o, numpy.ndarray):
# ensure that we have an array with row-major memory order (C like)
if not o.flags['C_CONTIGUOUS']:
o = numpy.ascontiguousarray(o)
return o.tolist()
# see default method in python/json/encoder.py
elif isinstance(o, set):
return list(sorted(o))
else:
raise TypeError(repr(o) + " is not JSON serializable")
class ExtraIndenter(object):
"""Helper class to add extra indent at the beginning of every line"""
def __init__(self, fStream, extraIndent):
self.fStream = fStream
self.indent = " " * extraIndent if extraIndent else ""
def write(self, val):
i = 0
while True:
j = val.find("\n", i)
if j == -1:
self.fStream.write(val[i:])
return
j += 1
self.fStream.write(val[i:j])
self.fStream.write(self.indent)
i = j
def jsonCompactF(obj, fOut, check_circular = False):
"""Dumps the object obj with a compact json representation using the utf_8 encoding
to the file stream fOut"""
json.dump(obj, fOut, sort_keys = True, indent = None, separators = (',', ':'),
ensure_ascii = False, check_circular = check_circular, default = numpyEncoder)
def jsonIndentF(obj, fOut, check_circular = False, extraIndent = None):
"""Dumps the object obj with an indented json representation using the utf_8 encoding
to the file stream fOut"""
fStream = fOut
if extraIndent:
fStream = ExtraIndenter(fOut, extraIndent = extraIndent)
json.dump(obj, fStream, sort_keys = True, indent = 2, separators = (',', ': '),
ensure_ascii = False, check_circular = check_circular, default = numpyEncoder)
class DumpToStream(object):
"""transform a dump function in a stream"""
def __init__(self, dumpF, extraIndent = None):
self.baseDumpF = dumpF
self.extraIndent = extraIndent
self.indent = " " * extraIndent if extraIndent else ""
self.dumpF = self.dumpIndented if extraIndent else dumpF
def dumpIndented(self, val):
if type(val) == type(u""):
val = val.encode("utf_8")
i = 0
while True:
j = val.find("\n", i)
if j == -1:
self.baseDumpF(val[i:])
return
j += 1
self.baseDumpF(val[i:j])
self.baseDumpF(self.indent)
i = j
def write(self, val):
self.dumpF(val)
def jsonCompactD(obj, dumpF, check_circular = False):
"""Dumps the object obj with a compact json representation using the utf_8 encoding
to the file stream fOut"""
json.dump(obj, DumpToStream(dumpF), sort_keys = True, indent = None, separators = (', ', ': '),
ensure_ascii = False, check_circular = check_circular, default = numpyEncoder)
def jsonIndentD(obj, dumpF, check_circular = False, extraIndent = None):
"""Dumps the object obj with an indented json representation using the utf_8 encoding
to the function dumpF"""
json.dump(obj, DumpToStream(dumpF, extraIndent = extraIndent), sort_keys = True, indent = 2, separators = (',', ': '),
ensure_ascii = False, check_circular = check_circular, encoding="utf_8", default = numpyEncoder)
def jsonCompactS(obj, check_circular = False):
"""returns a compact json representation of the object obj as a string"""
return json.dumps(obj, sort_keys = True, indent = None, separators = (', ', ': '),
ensure_ascii = False, check_circular = check_circular, encoding="utf_8", default = numpyEncoder)
def jsonIndentS(obj, check_circular = False, extraIndent = None):
"""retuns an indented json representation if the object obj as a string"""
res = json.dumps(obj, sort_keys = True, indent = 2, separators = (',', ': '),
ensure_ascii = False, check_circular = check_circular, encoding="utf_8", default = numpyEncoder)
if extraIndent:
indent = " " * extraIndent
res = res.replace("\n", "\n" + indent)
return res
def jsonDump(obj, path):
"""Dumps the object obj to an newly created utf_8 file at path"""
kwds = dict()
if sys.version_info.major > 2:
kwds["encoding"] = "utf_8"
with open(path, "w", **kwds) as f:
jsonIndentF(obj, f)
class ShaStreamer(object):
"""a file like object that calculates one or more shas"""
def __init__(self, shas = None):
self.shas = shas
if shas is None:
self.shas = (sha512(),)
def write(self, val):
for sha in self.shas:
sha.update(val)
def b64digests(self):
return [sha.b64digest() for sha in self.shas]
def addShasOfJson(obj, shas = None):
"""adds the jsonDump of obj to the shas"""
streamer = ShaStreamer(shas)
jsonCompactF(obj, streamer)
return streamer
def normalizedJsonGid(obj, shas = None):
"""returns the gid of the standard formatted jsonDump of obj"""
return ['j' + x for x in addShasOfJson(shas).b64digests()]
| angeloziletti/ai4materials | ai4materials/external/json_support.py | json_support.py | py | 5,727 | python | en | code | 36 | github-code | 36 | [
{
"api_name": "numpy.ndarray",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "builtins.object",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "json.dump"... |
14208634774 | import trimesh
import subprocess
import time
def reduceVertex(mesh, vertices = 1024):
mesh.export("temp.obj")
subprocess.run(["Manifold/build/manifold", "temp.obj", "temp.obj", "1500"])
mesh = trimesh.load("temp.obj")
n_tries = 0
while(mesh.vertices.shape[0] != vertices):
n_vertices = mesh.vertices.shape[0]
n_faces = mesh.faces.shape[0]
out_faces = n_faces - ((n_vertices - vertices) * 2)
subprocess.run(["Manifold/build/simplify", "-i", "temp.obj", "-o","temp.obj","-m", "-f", str(out_faces)] )
mesh = trimesh.load("temp.obj")
n_tries += 1
if n_tries >= 3:
return None
subprocess.run(["rm","temp.obj"])
return mesh
# mesh = trimesh.load('Dataset/ModelNet10/bathtub/bathtub_0005.obj')
# mesh = reduceVertex(mesh)
# mesh.show()
# mesh = trimesh.load("/home/texs/hdd/Datasets/ModelNet10/ModelNet10/chair/train/chair_0772.off")
# mesh = reduceVertex(mesh)
# if mesh != None:
# mesh.show() | texsmv/PCLslimTreeRec | Code/utils.py | utils.py | py | 996 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "subprocess.run",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "trimesh.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "trimesh.load",
"line_num... |
8692281198 | from aiogram.dispatcher import FSMContext
from aiogram.utils.markdown import bold
from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton, ParseMode
import keyboards
import db
from StateMachine import NewStateMachine
from aiogram.dispatcher.filters.state import State, StatesGroup
from aiogram import Dispatcher, types
class DeleteDishStateMachine(StatesGroup):
admin_delete_dish = State()
async def category_delete_dish_callback(callback_query: types.CallbackQuery, state: FSMContext):
separated_data = callback_query.data.split(";")
food = db.get_food_by_category(separated_data[1])
kb = keyboards.beautiful_change_of_food(0, len(food), separated_data[1], food[0]['name'], 'delete')
try:
await callback_query.message.answer_photo(photo=food[0]['photo_id'],
caption=bold(f"{food[0]['name']}\n\n") +
f"{food[0]['description']}\n\n" +
bold(f"{food[0]['price']} BYN\n"),
parse_mode=ParseMode.MARKDOWN,
reply_markup=kb)
await callback_query.answer()
except IndexError:
await callback_query.message.answer(text="В этой категории нет еды")
async def change_delete_food_by_callback(callback_query: types.CallbackQuery):
    """Page through a category's dishes in delete mode.

    Callback data format: "food;<category>;<index>". The message media is
    edited in place; out-of-range indices just acknowledge the callback.
    """
    categories = callback_query.data.split(';')
    food = db.get_food_by_category(categories[1])
    current_food = int(categories[2])
    if len(food) > current_food >= 0:
        try:
            # NOTE(review): the `str='photo'` keyword looks suspicious for
            # InputMediaPhoto (its media-type field is `type`) — verify
            # against the aiogram version this project pins.
            next_photo = types.input_media.InputMediaPhoto(str='photo', media=food[current_food]['photo_id'],
                                                           caption=bold(f"{food[current_food]['name']}\n\n") +
                                                                   f"{food[current_food]['description']}\n\n" +
                                                                   bold(f"{food[current_food]['price']} BYN\n"),
                                                           parse_mode=ParseMode.MARKDOWN)
            await callback_query.message.edit_media(media=next_photo,
                                                    reply_markup=keyboards
                                                    .beautiful_change_of_food(current_food,
                                                                              len(food),
                                                                              categories[1],
                                                                              food[current_food]['name'], 'delete'))
            await callback_query.answer()
        except Exception:
            # edit_media raises e.g. "message is not modified"; the original
            # bare `except:` also swallowed SystemExit/KeyboardInterrupt —
            # narrowed to Exception.
            await callback_query.answer()
    else:
        await callback_query.answer()
async def delete_dish(callback_query: types.CallbackQuery):
    """Remove the dish named in the callback data and offer an exit button."""
    name = callback_query.data.split(';')[1]
    db.delete_food_by_name(name)
    exit_btn = InlineKeyboardButton("Выйти из режима удаления", callback_data=f"exitDeletion")
    exit_deletion_markup = InlineKeyboardMarkup(row_width=1)
    exit_deletion_markup.add(exit_btn)
    await callback_query.message.answer(f"Блюдо {name} удалено", reply_markup=exit_deletion_markup)
# @dp.callback_query_handler(lambda c: c.data.startswith('exitDeletion'), state=NewStateMachine.ADMIN_DELETE_DISH)
async def exit_delete_dish(message: types.Message, state: FSMContext):
    """Leave dish-deletion mode: restore the ADMIN state and keyboard."""
    # NOTE(review): aiogram's State.state is normally a property, not a
    # method — confirm that NewStateMachine.ADMIN.state() is callable in
    # the aiogram version this project pins.
    await state.set_state(NewStateMachine.ADMIN.state())  # set admin state
    admin_kb = keyboards.admin_keyboard()
    await message.answer("Вы вышли из режима удаления", reply_markup=admin_kb)
def register_delete_dish_admin(dp: Dispatcher):
    """Attach all delete-mode handlers, scoped to the delete-dish state."""
    delete_state = DeleteDishStateMachine.admin_delete_dish
    # Callback prefixes do not overlap, so registration order among these
    # three is irrelevant.
    prefixed_handlers = (
        (category_delete_dish_callback, 'category'),
        (delete_dish, 'delete'),
        (change_delete_food_by_callback, 'food'),
    )
    for handler, prefix in prefixed_handlers:
        # bind prefix as a default arg to avoid the late-binding closure trap
        dp.register_callback_query_handler(
            handler, lambda c, p=prefix: c.data.startswith(p), state=delete_state)
    dp.register_message_handler(
        exit_delete_dish,
        lambda m: m.text.startswith('❌Выйти из режима удаления❌'),
        state=delete_state)
| malumbaaa/jerrybot | handlers/admin/delete_dish_handler.py | delete_dish_handler.py | py | 4,611 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aiogram.dispatcher.filters.state.StatesGroup",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "aiogram.dispatcher.filters.state.State",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "aiogram.types.CallbackQuery",
"line_number": 17,
"usa... |
17090138466 | from flask import Blueprint, render_template, request
# Blueprint for the authentication routes; templates/ and static/ are
# resolved relative to this blueprint's package.
auth = Blueprint(
    'auth',
    __name__,
    template_folder='templates',
    static_folder='static'
)
@auth.route('/login', methods=['POST', 'GET'])
def login():
    """Serve the login form; on POST, echo the submitted fields (debug stub)."""
    is_post = request.method == 'POST'
    if is_post:
        print(request.form)
    return render_template('login.html')
| fortisauris/PyDevJunior2 | FL05_BLUEPRINTS/blueprints/auth/auth.py | auth.py | py | 324 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "flask.request.... |
32824752707 | import cv2
import os
def register():
    """Capture up to 300 grayscale face crops from the webcam into ./dataset.

    Each detected face is written as "user.<ID>.<n>.jpg". Capture stops when
    'q' is pressed or 300 samples have been saved.
    """
    cam=cv2.VideoCapture(0)  # default webcam
    detector=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    num=input('enter our ID : ')  # subject ID embedded in the file names
    sampleNum=0
    while True:
        pwd=os.getcwd()
        ret,im=cam.read()
        if not ret:
            # camera read failed; abort the capture loop
            print(ret)
            break
        imgray=cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
        cv2.imshow('FACE',im)
        faces=detector.detectMultiScale(imgray,1.3,5)
        for (x,y,w,h) in faces:
            print(str(sampleNum) + ' / 300')
            sampleNum+=1
            cv2.rectangle(im,(x,y),(x+w,y+h),(255,0,0),2)
            patch=imgray[y:y+h,x:x+w]  # grayscale face crop
            # save into ./dataset, then restore the working directory
            os.chdir(pwd+'/dataset')
            cv2.imwrite('user.' +num+'.' +str(sampleNum)+ '.jpg',patch)
            os.chdir(pwd)
            cv2.imshow('FACES',im)
        if cv2.waitKey(100)&0xFF==ord('q') or sampleNum>=300:
            break
    cam.release()
    cv2.destroyAllWindows() | apoorvamilly/imirror | dataset.py | dataset.py | py | 975 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"li... |
12644189947 | import os
import music21 as m21
import numpy as np
from tensorflow import keras
import tensorflow as tf
import json
# globals
# ex : "dataset/deutschl/test"
DATASET_DIR = "dataset/deutschl/erk"
# ex : "preprocessed/encode/deutschl/test"
ENCODED_SAVE_DIR = "preprocessed/encode/deutschl/erk"
# ex : "preprocessed/single_string/deutschl/test"
SINGLE_STRING_DIR = "preprocessed/single_string/deutschl/erk"
# ex : "preprocessed/mapping/deutschl/test"
MAPPER_DIR = "preprocessed/mapping/deutschl/erk"
# Only songs whose every note/rest duration (in quarter lengths) appears in
# this whitelist are kept by the preprocessing filter.
ACCEPTABLE_DURATIONS = [
    0.25,  # sixteenth note
    0.5,  # eighth note
    0.75,
    1.0,  # quarter note
    1.5,
    2.0,  # half note
    3,
    4.0,  # whole note
]
# Length (in time steps) of each training window; also the number of "/"
# tokens used as the between-song delimiter.
SEQUENCE_LENGTH = 64
def load_songs(path):
    """Parse every Kern (.krn) file found in *path* into a music21 stream."""
    return [
        m21.converter.parse(path + "/" + file_name)
        for file_name in os.listdir(path)
        if file_name.endswith(".krn")
    ]
def is_acceptable_duration(song, durations):
    """Return True iff every note/rest duration in *song* is in *durations*."""
    return all(
        event.duration.quarterLength in durations
        for event in song.flat.notesAndRests
    )
def transpose_song(song):
    """Transpose *song* to C major (if major) or A minor (if minor).

    Normalizing every song to one key pair shrinks the pitch vocabulary the
    model has to learn.

    Raises:
        ValueError: if the detected key mode is neither major nor minor.
    """
    # Read the key signature from the first measure; fall back to music21's
    # key analysis when the score carries no explicit key object.
    parts = song.getElementsByClass(m21.stream.Part)
    measures = parts[0].getElementsByClass(m21.stream.Measure)
    key_signature = measures[0][4]
    if not isinstance(key_signature, m21.key.Key):
        key_signature = song.analyze('key')
    # Interval from the song's tonic to the target tonic (C or A).
    if key_signature.mode == "major":
        interval = m21.interval.Interval(
            key_signature.tonic, m21.pitch.Pitch("C"))
    elif key_signature.mode == "minor":
        interval = m21.interval.Interval(
            key_signature.tonic, m21.pitch.Pitch("A"))
    else:
        # The original fell through with `interval` unbound and crashed
        # with UnboundLocalError; fail loudly with a clear message instead.
        raise ValueError(f"unsupported key mode: {key_signature.mode}")
    # transpose by interval
    return song.transpose(interval)
def encode_song(song, time_step=0.25):
    """Encode a music21 score as a time-series string.

    Each time step (of *time_step* quarter lengths) becomes one token:
        r        : a rest starts
        <int>    : a note starts (MIDI pitch number)
        _        : the previous note/rest is held

    Returns the space-separated token string.
    """
    tokens = []
    for event in song.flat.notesAndRests:
        if isinstance(event, m21.note.Note):
            symbol = event.pitch.midi  # MIDI pitch number
        elif isinstance(event, m21.note.Rest):
            symbol = "r"
        # emit the symbol once, then "_" for every remaining time step
        n_steps = int(event.duration.quarterLength / time_step)
        for step in range(n_steps):
            tokens.append(symbol if step == 0 else "_")
    return " ".join(map(str, tokens))
def save_song(song, path):
    """Write the encoded song string to *path*, overwriting any existing file."""
    with open(path, "w") as out_file:
        out_file.write(song)
def dataset_preprocessing(dataset_path, save_path):
    """Filter, transpose and encode every song, writing one .txt per song."""
    songs = load_songs(dataset_path)
    for index, song in enumerate(songs):
        # drop songs containing durations outside the accepted whitelist
        if not is_acceptable_duration(song, ACCEPTABLE_DURATIONS):
            continue
        encoded = encode_song(transpose_song(song))
        # create the output directory lazily
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        save_song(encoded, save_path + "/" + str(index) + ".txt")
def create_single_string(dataset_path, save_path, sequence_length):
    """Join all encoded songs into one string separated by "/" delimiters.

    The delimiter is *sequence_length* "/" tokens so a training window can
    never span two songs. The result is written to
    save_path/single-string.txt and also returned.
    """
    delimiter = "/ " * sequence_length
    pieces = []
    for file_name in os.listdir(dataset_path):
        with open(dataset_path + "/" + file_name, "r") as f:
            pieces.append(f.read())
    combined = "".join(piece + " " + delimiter for piece in pieces)
    combined = combined[:-1]  # strip the trailing space of the last delimiter
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    with open(save_path + '/single-string.txt', "w") as f:
        f.write(combined)
    return combined
def mapping(songs, save_path):
    """Build a symbol -> integer vocabulary from *songs* and save it as JSON.

    The file is written to save_path/mapping.json; nothing is returned.
    """
    vocabulary = list(set(songs.split()))
    # local dict renamed so it does not shadow this function's own name
    symbol_to_int = {symbol: index for index, symbol in enumerate(vocabulary)}
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    with open(save_path + '/mapping.json', "w") as fp:
        json.dump(symbol_to_int, fp, indent=4)
def get_integer_song(songs, mapping_path):
    """Map each symbol of the songs string to its integer id via mapping.json."""
    with open(mapping_path + '/mapping.json', "r") as f:
        symbol_to_int = json.load(f)
    return [symbol_to_int[symbol] for symbol in songs.split()]
def generate_training_data(dataset_path, mapping_path, sequence_length):
    """Build (inputs, targets) pairs for next-symbol prediction.

    inputs:  one-hot encoded sliding windows of length *sequence_length*,
             shape (num_windows, sequence_length, vocabulary_size).
    targets: integer id of the symbol following each window, shape
             (num_windows,).
    """
    with open(dataset_path + '/single-string.txt', "r") as f:
        song_string = f.read()
    int_song = get_integer_song(song_string, mapping_path)

    # slide a fixed-length window over the integer sequence
    windows = []        # e.g. [[1,2,3],[2,3,4]]
    next_symbols = []   # e.g. [4,5] — the symbol after each window
    num_windows = len(int_song) - sequence_length
    for start in range(num_windows):
        windows.append(int_song[start:start + sequence_length])
        next_symbols.append(int_song[start + sequence_length])

    vocabulary_size = len(set(int_song))
    inputs = keras.utils.to_categorical(
        windows, num_classes=vocabulary_size)
    targets = np.array(next_symbols)
    return inputs, targets
def main():
    """Run the full preprocessing pipeline end to end and report progress."""
    dataset_preprocessing(DATASET_DIR, ENCODED_SAVE_DIR)
    print("Preprocessing done.")

    single_string = create_single_string(
        ENCODED_SAVE_DIR, SINGLE_STRING_DIR, SEQUENCE_LENGTH)
    print("Single string created.")

    mapping(single_string, MAPPER_DIR)
    print("Mapping created.")

    inputs, targets = generate_training_data(
        SINGLE_STRING_DIR, MAPPER_DIR, SEQUENCE_LENGTH)
    print("Training data generated.")

    # np.save(TRAINING_DATA_DIR + '/inputs.npy', inputs)
    print("target rank: {} , target shape: {}".format(
        tf.rank(targets), tf.shape(targets)))
    print("input rank: {} , input shape: {}".format(
        tf.rank(inputs), tf.shape(inputs)))
if __name__ == "__main__":
    main()
    print("Done.")
    print("Please check the files:")
    print(f"ENCODED_SAVE_DIR: {ENCODED_SAVE_DIR}")
    print(f"SINGLE_STRING_DIR: {SINGLE_STRING_DIR}/single-string.txt")
    print(f"MAPPER_DIR: {MAPPER_DIR}/mapping.json")
    print("Thank you.Bye.")
    exit()
| Audirea/music-generator | preprocessing.py | preprocessing.py | py | 7,365 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "music21.converter.parse",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "music21.converter",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "music21.str... |
15343540294 | import os
import re
from collections import defaultdict
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
import chess
from chess.pgn import read_game
import numpy as np
def extract_moves(game):
    """List (FEN-before-move, UCI-move) pairs for every move of *game*.

    The board state before each move is the network input and the move
    itself is the prediction target.
    """
    board = chess.Board()
    positions = []
    for move in game.main_line():
        positions.append([board.fen(), move.uci()])
        board.push(move)
    return positions
def replace_nums(line):
    """Expand FEN digit run-lengths into spaces (and '/' into newlines).

    A digit d in a FEN board row means d consecutive empty squares; every
    other character is kept as-is.
    """
    expanded = []
    for ch in line:
        if ch.isdigit():
            expanded.append(' ' * int(ch))
        elif ch == '/':
            expanded.append('\n')
        else:
            expanded.append(ch)
    return ''.join(expanded)
def split_fen(fen):
    """Split a FEN string into 8 expanded board rows and the status suffix.

    Returns (rows, status) where rows have digit run-lengths expanded to
    spaces and status carries side-to-move, castling, etc.
    """
    board_part, status = fen.split(' ', maxsplit=1)
    rows = [replace_nums(row) for row in board_part.split('/')]
    return rows, status
def list_to_matrix(board_list):
    """Convert a list of equal-length strings into a 2-D numpy char array."""
    return np.array([list(row) for row in board_list])
def channelize(mat):
    """Split a character board into 6 piece-type channels.

    Returns a (6, 8, 8) int array — channel order pawn, rook, knight,
    bishop, queen, king — with +1 on white-piece squares and -1 on
    black-piece squares. (The original docstring claimed 8x8x6 and
    allocated an unused np.empty buffer; np.stack puts channels first.)
    """
    white_pieces = ['P', 'R', 'N', 'B', 'Q', 'K']
    black_pieces = ['p', 'r', 'n', 'b', 'q', 'k']
    channels = [
        np.isin(mat, white).astype('int') - np.isin(mat, black).astype('int')
        for white, black in zip(white_pieces, black_pieces)
    ]
    return np.stack(channels)
def uci_to_coords(uci):
    """Convert a 4-char UCI move into ([from_row, from_col], [to_row, to_col]).

    Rows count from the top of the board (rank 8 -> row 0); file 'a' maps
    to column 0.
    """
    def file_to_col(file_letter):
        return ord(file_letter) - ord('a')

    from_file, from_rank = uci[0], uci[1]
    to_file, to_rank = uci[2], uci[3]
    origin = [8 - int(from_rank), file_to_col(from_file)]
    destination = [8 - int(to_rank), file_to_col(to_file)]
    return origin, destination
def process_status(status):
    """Return the side to move ('w' or 'b') from a FEN status suffix.

    The suffix also carries castling rights, en-passant square and the two
    move clocks, but only the side to move is consumed downstream.
    """
    return status.split(" ")[0]
def process_game(positions):
    """Turn a game's (fen, uci) pairs into board tensors and move targets.

    Returns:
        boards: (n_moves, 6, 8, 8) stacked piece-channel boards.
        ntm: per-move rows of [side_to_move, piece_channel,
             from_row, from_col, to_row, to_col].
        ([], []) when stacking fails (e.g. a game with no positions).
    """
    boards = []
    next_to_move = []
    for position in positions:
        board, status = split_fen(position[0])
        orig, dest = uci_to_coords(position[1])
        arrays = channelize(list_to_matrix(board))
        boards.append(arrays)
        # channel index of the piece sitting on the origin square (-1 when
        # no channel shows a piece there, e.g. on malformed input)
        piece_moved = [i for (i, mat) in enumerate(arrays)
                       if (mat[int(orig[0]), int(orig[1])] == 1)
                       | (mat[int(orig[0]), int(orig[1])] == -1)]
        piece_moved = piece_moved[0] if piece_moved else -1
        next_to_move.append([process_status(status), piece_moved,
                             orig[0], orig[1], dest[0], dest[1]])
    try:
        boards, ntm = np.stack(boards), np.stack(next_to_move)
    except ValueError:
        # np.stack raises ValueError on an empty or ragged list; the
        # original used a bare `except:`, which hid unrelated failures.
        return [], []
    return boards, ntm
def read_and_process(iteration):
    """Read the next game from the module-level ``pgn`` handle and process it.

    NOTE(review): depends on the global file handle ``pgn`` opened in the
    __main__ block; each call consumes one game from that shared stream.
    The ``iteration`` argument is only used for (commented-out) progress
    logging.
    """
    gm = read_game(pgn)
    positions = extract_moves(gm)
    boards, next_to_move = process_game(positions)
    #print("".join(["Completed: ", str(iteration),]))
    return boards, next_to_move
def wrangle_data_ip(num_games=10000, save_file=False):
    """Process *num_games* games concurrently on a 12-thread pool.

    All workers read from the shared global ``pgn`` handle, which corrupts
    a small fraction of games; those come back as ([], []) and are filtered
    out later by ip_results_to_np. The ``save_file`` flag is accepted for
    signature parity but unused here.
    """
    pool = ThreadPool(12)
    results = pool.map(read_and_process, range(num_games))
    pool.close()
    pool.join()
    return results
def wrangle_data(num_games=10000, save_file=False):
    """Serially process *num_games* games into board/target arrays.

    When ``save_file`` is True the arrays are written to a compressed .npz.
    (The original passed the undefined name ``results`` to
    np.savez_compressed, raising NameError whenever save_file was True.)
    """
    boards, next_to_move = read_and_process(0)
    for i in range(1, num_games):
        new_boards, new_next_to_move = read_and_process(i)
        boards = np.concatenate((boards, new_boards), axis=0)
        next_to_move = np.concatenate((next_to_move, new_next_to_move), axis=0)
    if save_file:
        np.savez_compressed('first_{}_games'.format(num_games),
                            boards, next_to_move)
    return boards, next_to_move
def ip_results_to_np(results):
    """Concatenate per-game (boards, targets) tuples into two big arrays.

    Games that failed to process wrote plain lists ([] placeholders); only
    numpy arrays are kept.
    """
    board_arrays = []
    target_arrays = []
    for boards, targets in results:
        if isinstance(boards, np.ndarray):
            board_arrays.append(boards)
        if isinstance(targets, np.ndarray):
            target_arrays.append(targets)
    return np.concatenate(board_arrays, axis=0), np.concatenate(target_arrays, axis=0)
if __name__ == "__main__":
    # NOTE(review): ``pgn`` is deliberately left as a module-level global —
    # read_and_process and the thread-pool workers all consume games from
    # this single shared handle.
    with open('../data/KingBase2017-A00-A39.pgn', encoding='latin1') as pgn:
        num_games=50000
        print("Recording the first {} games as matrices...".format(num_games))
        results = wrangle_data_ip(num_games=num_games, save_file=True)
        boards, targets = ip_results_to_np(results)
        print("Writing {} positions to file".format(boards.shape[0]))
        np.savez_compressed('../data/A00-139_first_{}'.format(num_games), boards, targets) | mrklees/deepconv-chess | py/data_generator.py | data_generator.py | py | 5,960 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "chess.Board",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 5... |
11469122465 | import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import time
import utils
def get_pNK_test_obs(
        ls, sigmas, sigma0s,
        nhyp,
        X,  # (nobs, xdim)
        test_xs,  # (ntest, xdim)
        dtype=tf.float32
):
    """
    Covariance of (test_xs, X) vs (test_xs, X) for each of the nhyp
    hyperparameter sets, with observation noise added only on the X
    (observed) block, plus its inverse.

    Returns
        pNKs    (nhyp, ntest+nobs, ntest+nobs)
        invpNKs (nhyp, ntest+nobs, ntest+nobs)
    """
    nobs = tf.shape(X)[0]
    ntest = tf.shape(test_xs)[0]

    # Hoisted out of the loop: the stacked inputs do not depend on the
    # hyperparameter index (the original rebuilt this every iteration).
    Xtest_obs = tf.concat([test_xs, X], axis=0)

    pNKs = [None] * nhyp
    invpNKs = [None] * nhyp
    for i in range(nhyp):
        noiselessK = utils.computeKmm(Xtest_obs, ls[i, ...], sigmas[i, ...], dtype=dtype)
        # noise only on the observed block: pad the (nobs, nobs) noise
        # matrix with ntest leading zero rows/columns
        noisemat = tf.eye(nobs, dtype=dtype) * sigma0s[i, ...]
        noisemat = tf.pad(noisemat, [[ntest, 0], [ntest, 0]], "CONSTANT")
        pNK = noiselessK + noisemat
        pNKs[i] = pNK
        invpNKs[i] = tf.linalg.inv(pNK)

    # (nhyp, ntest + nobs, ntest + nobs)
    return tf.stack(pNKs), tf.stack(invpNKs)
def get_queried_f_stat_given_test_samples(
        x,
        l, sigma, sigma0,
        ntest, nobs,
        X, # (nobs, xdim)
        Y, # (nobs,1)
        test_xs, # (ntest, xdim)
        # invpNK, # (ntest + nobs, ntest + nobs)
        invpNK_test, # (ntest + nobs, ntest)
        invpNK_obs, # (ntest+nobs, nobs)
        # samples of f-value given different maximum candidates
        post_test_samples, # nmax, ntest, nsample
        dtype=tf.float32):
    """Posterior mean/variance of f at the query points x, conditioned on
    both the observations (X, Y) and sampled f-values at the test inputs.

    Returns:
        query_mean: (nmax, nx, nsample) — one mean per maximum candidate
            and per posterior test sample.
        query_var:  (nx,) — posterior variances (diagonal only; identical
            across candidates, so computed once).
    """
    # NOTE(review): the ntest/nobs arguments are immediately recomputed
    # from tensor shapes below; the parameters appear to be kept only for
    # signature compatibility with the caller.
    nx = tf.shape(x)[0]
    nmax = tf.shape(post_test_samples)[0]
    ntest = tf.shape(post_test_samples)[1]
    nobs = tf.shape(invpNK_obs)[1]
    nsample = tf.shape(post_test_samples)[2]
    Xtest_obs = tf.concat([test_xs, X], axis=0)
    # (ntest+nobs,xdim)
    k_x_xto = utils.computeKnm(x, Xtest_obs, l, sigma, dtype=dtype) # K_{x, Xtest_obs}
    # (nx, ntest + nobs)
    # NOTE: only compute diagonal elements!!
    k_x = sigma * tf.ones(shape=(nx,), dtype=dtype)
    # (nx,)
    """
    (nx,ntest+nobs) x (ntest+nobs, ntest+nobs) x (ntest+nobs,nsample)
    = (nx,nsample)
    A + B
    A
    (nx,ntest+nobs) x (ntest+nobs, :ntest) x (:ntest,nsample)
    = (nx,nsample)
    B
    (nx,ntest+nobs) x (ntest+nobs, ntest:) x (ntest:,nsample)
    = (nx,nsample)
    ~ (nx,ntest+nobs) x (ntest+nobs, ntest:) x (nobs,1)
    = (nx,1)
    """
    # Test-sample contribution to the posterior mean.
    post_test_samples = tf.expand_dims( tf.transpose(post_test_samples, perm=[2,0,1]), axis=-1 )
    # nsample, nmax, ntest, 1
    tmp_test = k_x_xto @ invpNK_test
    # nx, ntest
    tmp_test = tf.reshape(tmp_test, shape=(1,1,nx,ntest))
    tmp_test = tf.tile(tmp_test, multiples=(1, nmax,1,1))
    tmp_test = tf.tile(tmp_test, multiples=(nsample,1,1,1))
    # nsample,nmax,nx,ntest
    query_mean_test = tmp_test @ post_test_samples
    # nsample, nmax, nx, 1
    query_mean_test = tf.reshape(query_mean_test, shape=(nsample,nmax,nx))
    query_mean_test = tf.transpose(query_mean_test, perm=[1,2,0])
    # nmax, nx, nsample
    # Observation contribution (same for every candidate/sample).
    query_mean_obs = k_x_xto @ (invpNK_obs @ Y)
    # nx,1
    query_mean_obs = tf.expand_dims( query_mean_obs, axis=0 )
    # 1,nx,1
    query_mean = query_mean_test + query_mean_obs
    # nmax, nx, nsample
    # Diagonal posterior variance: k(x,x) - k_x_xto invpNK k_x_xto^T.
    tmp = (k_x_xto @ tf.concat([invpNK_test, invpNK_obs], axis=1) )
    query_var = k_x - tf.reduce_sum( tmp * k_x_xto, axis=1 )
    # (nx,)
    return query_mean, query_var
    # (nmax, nx, nsample)
    # (nx,)
def mp(x, # nx, xdim
       ls, sigmas, sigma0s,
       X, Y, # (nobs,xdim), (nobs,1)
       xdim, nx, nobs, nhyp,
       nysample,
       test_xs, # ntest, xdim (same for all hyp)
       max_probs_all, # nhyp, nmax
       # samples of f-values
       # given different maximum candidates
       post_test_samples_all, # nhyp, nmax, ntest, nsample
       post_test_mask_all, # nhyp, nmax, nsample, dtype: tf.bool
       # as the numbers of samples for different nmax are different
       # mask is to indicate which samples are used
       # K_test_max needs to be precomputed
       # and its inverse
       # need naming convension for noisy
       # vs. noiseless K
       # and partial noisy-noiseless?
       invpNK_all, # nhyp, ntest+nobs, ntest+nobs
       dtype=tf.float32,
       niteration=10,
       use_loop=True,
       parallel_iterations=1):
    """
    MP acquisition criterion averaged over nhyp hyperparameter samples.

    For each hyperparameter set, the inner tf.while_loop estimates the
    criterion niteration times with fresh y-samples and averages the
    result; the per-hyperparameter values are then averaged uniformly.

    ntest: # of test inputs
    nmax: # of maximum candidate in test_xs

    Returns: (nx,) criterion value per query point in x.

    NOTE(review): the ``use_loop`` parameter is never read in this body —
    presumably a leftover switch; confirm before removing.
    """
    ntest = tf.shape(post_test_samples_all)[2]
    avg_mp = tf.zeros(shape=(nx,), dtype=dtype)
    for i in range(nhyp):
        l = tf.reshape(ls[i,:], shape=(1,xdim))
        sigma = sigmas[i]
        sigma0 = sigma0s[i]
        # sigma, sigma0: scalar
        # l: (1,xdim)
        max_probs = max_probs_all[i,...] # nmax,
        # samples of f-value given different maximum candidates
        post_test_samples = post_test_samples_all[i,...] # nmax, ntest, nsample
        post_test_masks = post_test_mask_all[i,...] # nmax, nsample
        # Drop maximum candidates with zero probability.
        non_zero_prob_idxs = tf.squeeze(tf.where(max_probs > 0.))
        nmax = tf.shape(non_zero_prob_idxs)[0]
        post_test_samples = tf.gather(post_test_samples, non_zero_prob_idxs, axis=0)
        # post_test_masks == 0.0 if the sample is invalid,
        # 1.0 if the sample if valid
        post_test_masks = tf.gather(post_test_masks, non_zero_prob_idxs, axis=0)
        # (nmax,nsample)
        max_probs = tf.gather(max_probs, non_zero_prob_idxs, axis=0)
        # Split the precomputed inverse kernel into its test and obs columns.
        invpNK = invpNK_all[i,...] # ntest+nobs,ntest+nobs
        invpNK_test = tf.gather(invpNK, indices=tf.range(ntest, dtype=tf.int32), axis=1)
        invpNK_obs = tf.gather(invpNK, indices=tf.range(ntest, ntest + nobs, dtype=tf.int32), axis=1)
        query_meanf_given_test_samples, query_varf_given_test_samples = \
            get_queried_f_stat_given_test_samples(
                x,
                l, sigma, sigma0,
                ntest, nobs,
                X, # (nobs, xdim)
                Y, # (nobs,1)
                test_xs, # (ntest, xdim)
                invpNK_test, # (ntest + nobs, ntest)
                invpNK_obs, # (ntest+nobs, nobs)
                # samples of f-value given different maximum candidates
                post_test_samples, # nmax, ntest, nsample
                dtype=dtype)
        # (nmax, nx, nsample)
        # (nx,)
        # Predictive y std-dev: add observation noise to the f variance.
        query_stdy_given_test_samples = tf.sqrt(query_varf_given_test_samples + sigma0)
        # (nx,)
        query_stdy_given_test_samples = tf.reshape(query_stdy_given_test_samples, shape=(1,nx,1))
        # (1,nx,1)
        # NOTE(review): the helper's duplicated name below matches its
        # (typo'd) definition elsewhere in this module.
        body = lambda j, sum_mp: [j+1, \
            sum_mp + mp_each_batch_y_samplemp_each_batch_y_sample(
                x,
                nx, nmax, nysample,
                max_probs,
                query_meanf_given_test_samples, # (nmax, nx, nsample)
                query_stdy_given_test_samples, # (1,nx,1)
                post_test_masks, # (nmax, nsample)
                dtype=dtype
            )]
        _, sum_mp = tf.while_loop(
            lambda j, sum_mp: j < niteration,
            body,
            (tf.constant(0), tf.zeros(shape=(nx,), dtype=dtype)),
            parallel_iterations=parallel_iterations
        )
        mp_val = sum_mp / tf.constant(niteration, dtype=dtype)
        avg_mp = avg_mp + mp_val / tf.constant(nhyp, dtype=dtype)
    return avg_mp
def mp_each_batch_y_samplemp_each_batch_y_sample(
        x,
        nx, nmax, nysample,
        max_probs, # (nmax)
        query_meanf_given_test_samples, # (nmax, nx, nsample)
        query_stdy_given_test_samples, # (1,nx,1)
        post_test_masks, # (nmax, nsample)
        dtype=tf.float32
):
    """One Monte-Carlo batch of the MP criterion: H[y] - H[y | max_idx].

    Draws nysample y-samples from the Gaussian-mixture predictive at each
    query point and estimates the entropy difference. Returns (nx,).

    NOTE(review): the duplicated function name looks like a paste typo,
    but the caller in mp() uses this exact name, so it must not be renamed
    in isolation.
    """
    nsample = tf.shape(query_meanf_given_test_samples)[-1]
    normal_dists = tfp.distributions.Normal(loc=query_meanf_given_test_samples,
                                            scale=query_stdy_given_test_samples)
    # (nmax, nx, nsample)
    # sampling y given posterior | max_idx, data
    # shape (nysample, nmax, nx)
    ysample = normal_dists.sample(nysample)
    # (nysample, nmax, nx, nsample)
    # (1) H[y|max_idx]
    log_prob = normal_dists.log_prob(ysample)
    # (nysample, nmax, nx, nsample)
    # Broadcast the validity mask to the log-prob shape.
    ext_post_test_masks = tf.reshape(post_test_masks, shape=(1,nmax,1,nsample))
    ext_post_test_masks = tf.tile(ext_post_test_masks, multiples=(nysample,1,1,1))
    ext_post_test_masks = tf.tile(ext_post_test_masks, multiples=(1,1,nx,1))
    # (nysample, nmax, nx, nsample)
    # Invalid samples get -inf log-prob so logsumexp ignores them.
    log_prob = tf.where(ext_post_test_masks,
                        log_prob,
                        tf.ones_like(log_prob, dtype=dtype)
                        * tf.constant(-np.infty, dtype=dtype))
    log_mixture_prob = tf.reduce_logsumexp(log_prob, axis=3)
    # (nysample, nmax, nx)
    weighted_log_mixure_prob = log_mixture_prob * tf.reshape(max_probs, shape=(1,nmax,1))
    # (nysample, nmax, nx)
    # print("evaluate_mp: the line below is incorrectly implemented for other stochastic criteria!")
    cond_ent_y = -tf.reduce_mean( tf.reduce_sum(weighted_log_mixure_prob, axis=1), axis=0 )
    # (nx,)
    # (2) H[y]
    print("sample from different max_idx should have different weight!, \
        this is incorrectly implemented for evaluate_mp.py, \
        this could be incorrect for evaluate_emes.py too! CHECK")
    # ysample.shape = (nysample, nmax, nx, nsample)
    marginal_ysample = tf.tile(
        tf.expand_dims(ysample, axis=2),
        multiples=(1,1,nmax,1,1))
    # (nysample, nmax, nmax, nx, nsample)
    # ____marginal___
    # Marginalizing over nsample
    log_prob = normal_dists.log_prob(marginal_ysample)
    # (nysample, nmax, nmax, nx, nsample)
    ext_post_test_masks = tf.expand_dims(ext_post_test_masks, axis=2)
    ext_post_test_masks = tf.tile(ext_post_test_masks, multiples=(1,1,nmax,1,1))
    # (nysample, nmax, nmax, nx, nsample)
    log_prob = tf.where(ext_post_test_masks,
                        log_prob,
                        tf.ones_like(log_prob, dtype=dtype)
                        * tf.constant(-np.infty, dtype=dtype))
    log_marginal_mixture_prob = tf.reduce_logsumexp(log_prob, axis=4)
    # (nysample, nmax, nmax, nx)
    # Marginalizing over nmax as p(y) mixture of nmax Gaussians
    weighted_log_marginal_mixture_prob = log_marginal_mixture_prob + tf.log( tf.reshape(max_probs, shape=(1, 1, nmax, 1)) )
    # (nysample, nmax, nmax, nx)
    log_marginal_prob = tf.reduce_logsumexp(weighted_log_marginal_mixture_prob, axis=2)
    # (nysample, nmax, nx)
    # Weighted average
    weighted_log_marginal_prob = log_marginal_prob * tf.reshape(max_probs, shape=(1,nmax,1))
    # (nysample, nmax, nx)
    ent_y = - tf.reduce_mean( tf.reduce_sum(weighted_log_marginal_prob, axis=1), axis=0)
    # (nx,)
    mp_val = tf.reshape(ent_y - cond_ent_y, shape=(nx,))
    return mp_val
| ZhaoxuanWu/Trusted-Maximizers-Entropy-Search-BO | criteria/evaluate_sample_mp.py | evaluate_sample_mp.py | py | 11,291 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "tensorflow.float32",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.shape",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tensorflow.shape",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tensorflow.... |
44310759659 | import pygame, colors, random, time, runclass, math, draw
from random import randint
def info(screen, WIDTH, HEIGHT):
    """Show the game's objective centered on a black screen for 1.5 seconds."""
    message = 'Reach the other side'
    info_font = pygame.font.SysFont('Comic Sans MS', 100)
    surface = info_font.render(message, False, colors.white)
    msg_w, msg_h = info_font.size(message)
    screen.fill(colors.black)
    screen.blit(surface, ((WIDTH - msg_w) / 2, (HEIGHT - msg_h) / 2))
    pygame.display.flip()
    time.sleep(1.5)
def drawlose(screen, WIDTH, HEIGHT):
    """Show the collision message centered on a red screen for 1.5 seconds."""
    message = 'OOPS! Try Again!'
    lose_font = pygame.font.SysFont('Comic Sans MS', 100)
    surface = lose_font.render(message, False, colors.white)
    msg_w, msg_h = lose_font.size(message)
    screen.fill(colors.red)
    screen.blit(surface, ((WIDTH - msg_w) / 2, (HEIGHT - msg_h) / 2))
    pygame.display.flip()
    time.sleep(1.5)
def run(screen, WIDTH, HEIGHT, clock):
    """Main game loop: dodge moving obstacles and reach the top edge.

    The player moves with WASD; four obstacles sweep horizontally at random
    speeds. Touching an obstacle resets the player to the start; reaching
    y <= 0 ends the loop (win).
    """
    FPS = 60

    # Player sprite and its draw group.
    player1 = runclass.player(WIDTH, HEIGHT)
    all_players = pygame.sprite.Group()
    all_players.add(player1)

    # Four obstacles stacked 150 px apart from the bottom, each with a
    # random horizontal speed. (The original allocated a 6-slot list —
    # shadowing the builtin `object` — but only ever used four entries.)
    obstacles = []
    objectgroup = pygame.sprite.Group()
    for i in range(4):
        obstacle = runclass.object(WIDTH, HEIGHT)
        obstacle.rect.x = WIDTH / 2
        obstacle.rect.y = HEIGHT - 120 if i == 0 else obstacles[i - 1].rect.y - 150
        obstacle.speed = randint(20, 30)
        objectgroup.add(obstacle)
        obstacles.append(obstacle)

    info(screen, WIDTH, HEIGHT)

    running = True
    while running:
        clock.tick(FPS)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                # Return immediately: the original kept executing the rest
                # of this iteration after pygame.quit(), which crashes on
                # the next pygame call when the window is closed.
                return

        # WASD movement, 8 px per frame.
        keystate = pygame.key.get_pressed()
        if keystate[pygame.K_a]:
            player1.rect.x -= 8
        if keystate[pygame.K_w]:
            player1.rect.y -= 8
        if keystate[pygame.K_d]:
            player1.rect.x += 8
        if keystate[pygame.K_s]:
            player1.rect.y += 8

        # Move obstacles, wrapping at the right edge.
        for obstacle in obstacles:
            if obstacle.rect.x >= WIDTH:
                obstacle.rect.x = 0
            obstacle.rect.x += obstacle.speed

        # Reaching the top edge wins and ends the loop.
        if player1.rect.y <= 0:
            running = False

        # Collision with any obstacle resets the player to the start.
        for obstacle in obstacles:
            if abs(obstacle.rect.x - player1.rect.x) <= 40 and abs(obstacle.rect.y - player1.rect.y) <= 40:
                drawlose(screen, WIDTH, HEIGHT)
                player1.rect.x = WIDTH / 2
                player1.rect.y = HEIGHT - 40

        # Horizontal wrap-around and floor clamp for the player.
        if player1.rect.x > WIDTH:
            player1.rect.x = 0
        if player1.rect.x < 0:
            player1.rect.x = WIDTH
        if player1.rect.y >= HEIGHT - 40:
            player1.rect.y = HEIGHT - 40

        screen.fill(colors.black)
        objectgroup.draw(screen)
        all_players.draw(screen)
        pygame.display.flip()
if __name__ == "__main__":
    # sets the resolution of the game window and runs the game
    WIDTH = 800
    HEIGHT = 600
    pygame.init()
    screen = pygame.display.set_mode((WIDTH,HEIGHT))
    pygame.display.set_caption("My Game")
    clock = pygame.time.Clock()  # caps the frame rate inside run()
    run(screen,WIDTH, HEIGHT, clock)
| RamboTheGreat/Minigame-Race | run.py | run.py | py | 3,594 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.font.SysFont",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "colors.white",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "colors.black",
... |
8863752563 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['font.family'] = 'cmu serif'
# use latex for font rendering
matplotlib.rcParams['text.usetex'] = True
# Font-size presets applied to every element of the generated figures.
SMALL_SIZE = 12
MEDIUM_SIZE = 14
BIGGER_SIZE = 16
plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
from pymoo.algorithms.soo.nonconvex.ga import GA
from pymoo.core.problem import Problem
from pymoo.operators.crossover.sbx import SBX
from pymoo.operators.mutation.pm import PM
from pymoo.operators.repair.rounding import RoundingRepair
from pymoo.operators.sampling.rnd import IntegerRandomSampling
from pymoo.optimize import minimize
import os
import sys
import comtypes.client
import time
from threading import Thread
import subprocess
def split(a, n):
    """Yield *n* contiguous chunks of *a* whose sizes differ by at most one.

    The first len(a) % n chunks receive one extra element.
    """
    base, extra = divmod(len(a), n)
    for i in range(n):
        start = i * base + min(i, extra)
        stop = (i + 1) * base + min(i + 1, extra)
        yield a[start:stop]
class MyProblem(Problem):
    def __init__(self,SapModel, SapObject, LIST_HE, LIST_IPE, ModelPath , Main_File_name , File_ext):
        """Integer frame-sizing problem evaluated through a SAP2000 model.

        Design vector: 6 integers — indices into LIST_HE for the three
        column groups and into LIST_IPE for the three beam groups. One
        objective and four inequality constraints (see _evaluate).
        """
        self.SapModel = SapModel              # SAP2000 model COM handle
        self.SapObject = SapObject            # SAP2000 application COM handle
        self.LIST_HE = LIST_HE                # candidate HE column section names
        self.LIST_IPE = LIST_IPE              # candidate IPE beam section names
        self.ModelPath = ModelPath            # directory holding the model files
        self.Main_File_name = Main_File_name  # base model file name
        self.File_ext = File_ext              # model file extension
        self.gen = 0                          # generation counter
        n_var = 6
        # lower bound 0, upper bound = last valid index of each section list
        xl = np.zeros((n_var,))
        xu = np.array([len(LIST_HE)-1,len(LIST_HE)-1,len(LIST_HE)-1,
                       len(LIST_IPE)-1,len(LIST_IPE)-1,len(LIST_IPE)-1])
        super().__init__(n_var=n_var, n_obj=1, n_ieq_constr=4, xl=xl, xu=xu, vtype=int)
def _evaluate(self, x, out, *args, **kwargs):
List_file_names =[]
for ii in range(x.shape[0]):
List_file_names.append(f'{self.ModelPath}{self.Main_File_name}-{ii}{self.File_ext}')
List_file_names = list(split(List_file_names, 3))
X = np.array_split(x, 3)
global outF1
global outG1
global outF2
global outG2
global outF3
global outG3
outF1 = np.zeros(len(List_file_names[0]))
outG1 = np.zeros((len(List_file_names[0]),4))
outF2 = np.zeros(len(List_file_names[1]))
outG2 = np.zeros((len(List_file_names[1]),4))
outF3 = np.zeros(len(List_file_names[2]))
outG3 = np.zeros((len(List_file_names[2]),4))
def run1(SapModel, Files, X, OBJ):
global outF1
global outG1
for jj,file_name in enumerate(Files):
SapModel.File.OpenFile(OBJ.ModelPath + OBJ.Main_File_name + OBJ.File_ext)
ret = SapModel.SetModelIsLocked(False)
for kk in range(1,16):
if kk in [1,4,7]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_HE[X[jj,0]])
# print(ret, self.LIST_HE[x[ii,0]])
elif kk in [2,5,8]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_HE[X[jj,1]])
elif kk in [3,6,9]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_HE[X[jj,2]])
elif kk in [10,11]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_IPE[X[jj,3]])
elif kk in [12,13]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_IPE[X[jj,4]])
elif kk in [14,15]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_IPE[X[jj,5]])
# print(ret, self.LIST_IPE[x[ii,0]])
ret = SapModel.File.Save(file_name)
ret = SapModel.File.OpenFile(file_name)
ret = SapModel.Analyze.RunAnalysis()
ret = SapModel.DesignSteel.StartDesign()
ret = SapModel.Results.Setup.SetCaseSelectedForOutput("DEAD")
[NumberResults, LoadCase, StepType, StepNum, Fx, Fy, Fz, Mx, My, Mz, gx, gy, gz, ret] = SapModel.Results.BaseReact()
outF1[jj] = Fz[0]
g_summary = np.zeros(16)
g_NMM = np.zeros(16)
# g_V = np.zeros(16)
for kk in range(1,16):
[NumberItems, FrameName, Ratio, RatioType, Location, ComboName, ErrorSummary, WarningSummary, ret] = SapModel.DesignSteel.GetSummaryResults(f'{kk}')
g_summary[kk-1] = Ratio[0]
[NumberItems, FrameName, Value, ret] = SapModel.DesignSteel.GetDetailResultsValue(f"{kk}", 0, 2, "TotalRatio")
g_NMM[kk-1] = Value[0]
# [NumberItems, FrameName, Value, ret] = self.SapModel.DesignSteel.GetDetailResultsValue(f"{jj}", 0, 3, "V2Ratio")
# g_V[jj-1] = Value[0]
outG1[jj,0] = sum(np.max((g_NMM - 1, np.zeros(16)), axis=0))
outG1[jj,1] = sum(np.max((g_summary - 1, np.zeros(16)), axis=0))
# Vincoli geometrici
outG1[jj,2] = max( X[jj,1] - X[jj,0] , 0)
outG1[jj,3] = max( X[jj,2] - X[jj,1] , 0)
def run2(SapModel, Files, X, OBJ):
global outF2
global outG2
for jj,file_name in enumerate(Files):
SapModel.File.OpenFile(OBJ.ModelPath + OBJ.Main_File_name + OBJ.File_ext)
ret = SapModel.SetModelIsLocked(False)
for kk in range(1,16):
if kk in [1,4,7]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_HE[X[jj,0]])
# print(ret, self.LIST_HE[x[ii,0]])
elif kk in [2,5,8]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_HE[X[jj,1]])
elif kk in [3,6,9]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_HE[X[jj,2]])
elif kk in [10,11]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_IPE[X[jj,3]])
elif kk in [12,13]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_IPE[X[jj,4]])
elif kk in [14,15]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_IPE[X[jj,5]])
# print(ret, self.LIST_IPE[x[ii,0]])
ret = SapModel.File.Save(file_name)
ret = SapModel.File.OpenFile(file_name)
ret = SapModel.Analyze.RunAnalysis()
ret = SapModel.DesignSteel.StartDesign()
ret = SapModel.Results.Setup.SetCaseSelectedForOutput("DEAD")
[NumberResults, LoadCase, StepType, StepNum, Fx, Fy, Fz, Mx, My, Mz, gx, gy, gz, ret] = SapModel.Results.BaseReact()
outF2[jj] = Fz[0]
g_summary = np.zeros(16)
g_NMM = np.zeros(16)
# g_V = np.zeros(16)
for kk in range(1,16):
[NumberItems, FrameName, Ratio, RatioType, Location, ComboName, ErrorSummary, WarningSummary, ret] = SapModel.DesignSteel.GetSummaryResults(f'{kk}')
g_summary[kk-1] = Ratio[0]
[NumberItems, FrameName, Value, ret] = SapModel.DesignSteel.GetDetailResultsValue(f"{kk}", 0, 2, "TotalRatio")
g_NMM[kk-1] = Value[0]
# [NumberItems, FrameName, Value, ret] = self.SapModel.DesignSteel.GetDetailResultsValue(f"{jj}", 0, 3, "V2Ratio")
# g_V[jj-1] = Value[0]
outG2[jj,0] = sum(np.max((g_NMM - 1, np.zeros(16)), axis=0))
outG2[jj,1] = sum(np.max((g_summary - 1, np.zeros(16)), axis=0))
# Vincoli geometrici
outG2[jj,2] = max( X[jj,1] - X[jj,0] , 0)
outG2[jj,3] = max( X[jj,2] - X[jj,1] , 0)
def run3(SapModel, Files, X, OBJ):
global outF3
global outG3
for jj,file_name in enumerate(Files):
SapModel.File.OpenFile(OBJ.ModelPath + OBJ.Main_File_name + OBJ.File_ext)
ret = SapModel.SetModelIsLocked(False)
for kk in range(1,16):
if kk in [1,4,7]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_HE[X[jj,0]])
# print(ret, self.LIST_HE[x[ii,0]])
elif kk in [2,5,8]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_HE[X[jj,1]])
elif kk in [3,6,9]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_HE[X[jj,2]])
elif kk in [10,11]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_IPE[X[jj,3]])
elif kk in [12,13]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_IPE[X[jj,4]])
elif kk in [14,15]:
ret = SapModel.FrameObj.SetSection(f"{kk}", OBJ.LIST_IPE[X[jj,5]])
# print(ret, self.LIST_IPE[x[ii,0]])
ret = SapModel.File.Save(file_name)
ret = SapModel.File.OpenFile(file_name)
ret = SapModel.Analyze.RunAnalysis()
ret = SapModel.DesignSteel.StartDesign()
ret = SapModel.Results.Setup.SetCaseSelectedForOutput("DEAD")
[NumberResults, LoadCase, StepType, StepNum, Fx, Fy, Fz, Mx, My, Mz, gx, gy, gz, ret] = SapModel.Results.BaseReact()
outF3[jj] = Fz[0]
g_summary = np.zeros(16)
g_NMM = np.zeros(16)
# g_V = np.zeros(16)
for kk in range(1,16):
[NumberItems, FrameName, Ratio, RatioType, Location, ComboName, ErrorSummary, WarningSummary, ret] = SapModel.DesignSteel.GetSummaryResults(f'{kk}')
g_summary[kk-1] = Ratio[0]
[NumberItems, FrameName, Value, ret] = SapModel.DesignSteel.GetDetailResultsValue(f"{kk}", 0, 2, "TotalRatio")
g_NMM[kk-1] = Value[0]
# [NumberItems, FrameName, Value, ret] = self.SapModel.DesignSteel.GetDetailResultsValue(f"{jj}", 0, 3, "V2Ratio")
# g_V[jj-1] = Value[0]
outG3[jj,0] = sum(np.max((g_NMM - 1, np.zeros(16)), axis=0))
outG3[jj,1] = sum(np.max((g_summary - 1, np.zeros(16)), axis=0))
# Vincoli geometrici
outG3[jj,2] = max( X[jj,1] - X[jj,0] , 0)
outG3[jj,3] = max( X[jj,2] - X[jj,1] , 0)
threads = []
t = Thread(target=run1, args=(SapModel[0], List_file_names[0], X[0], self))
threads.append(t)
t = Thread(target=run2, args=(SapModel[1], List_file_names[1], X[1], self))
threads.append(t)
t = Thread(target=run3, args=(SapModel[2], List_file_names[2], X[2], self))
threads.append(t)
# Start to evaluate time of analysis in parallel
start = time.time()
# Start all threads
for tt in threads:
tt.start()
# Wait for all of them to finish
for tt in threads:
tt.join()
# Evaluate time of analysis in parallel
end = time.time()
print('Elapsed time :',end - start,'s')
# some code after execution of all bat files
out["F"] = np.hstack( (outF1, outF2, outF3) )
out["G"] = np.vstack( (outG1, outG2, outG3) )
self.gen += 1
print(f'gen {self.gen}\n','F=', out["F"], '\n', 'G=', out["G"], '\n\n')
# Launch the pool of SAP2000 instances used for parallel evaluation.
Num_SAP2000_istances = 3
SapObject = []  # one application COM handle per instance
SapModel = []   # one model COM handle per instance
for jj in range(Num_SAP2000_istances):
    AttachToInstance = False  # False: always spawn a fresh SAP2000 process
    SpecifyPath = False       # False: use the latest installed SAP2000
    APIPath = 'C:\CSiAPIexample'
    Main_File_name = 'modelFixed'
    File_ext = '.sdb'
    ModelPath = APIPath + os.sep + 'IWSS23_TEST_parallel' + os.sep  # folder holding the template .sdb model
    # COM bootstrap for the SAP2000 v1 API.
    helper = comtypes.client.CreateObject('SAP2000v1.Helper')
    helper = helper.QueryInterface(comtypes.gen.SAP2000v1.cHelper)
    if AttachToInstance:
        # Attach to a running instance of SAP2000.
        try:
            # Get the active SapObject.
            mySapObject = helper.GetObject("CSI.SAP2000.API.SapObject")
        except (OSError, comtypes.COMError):
            print("No running instance of the program found or failed to attach.")
            sys.exit(-1)
    else:
        if SpecifyPath:
            try:
                # Create an instance of the SapObject from the specified path.
                # NOTE(review): ProgramPath is never defined in this script — this
                # branch would raise NameError if SpecifyPath were ever set True.
                mySapObject = helper.CreateObject(ProgramPath)
            except (OSError, comtypes.COMError):
                print("Cannot start a new instance of the program from " + ProgramPath)
                sys.exit(-1)
        else:
            try:
                # Create an instance of the SapObject from the latest installed SAP2000.
                mySapObject = helper.CreateObjectProgID("CSI.SAP2000.API.SapObject")
            except (OSError, comtypes.COMError):
                print("Cannot start a new instance of the program.")
                sys.exit(-1)
    # Start the SAP2000 application (Units=6; presumably kN-m-C — confirm against the API docs).
    mySapObject.ApplicationStart(Units=6)
    SapObject.append(mySapObject)
    SapModel.append(mySapObject.SapModel)
    ret = mySapObject.Hide()  # run headless: hide the GUI window
    # ret = mySapObject.Hide()
    # ret = mySapObject.ApplicationStart(Units=6, FileName=ModelPath + Main_File_name + File_ext) #model.sdb
    # SapModel = mySapObject.SapModel
# Candidate section catalogues (arrays of section names) prepared offline.
LIST_HE = np.load('LIST_HE.npy')
LIST_IPE = np.load('LIST_IPE.npy')
problem = MyProblem(SapModel, SapObject, LIST_HE, LIST_IPE, ModelPath , Main_File_name , File_ext)
# Integer-coded GA: SBX crossover and polynomial mutation with rounding repair
# keep the (float-typed) operators on the integer lattice.
method = GA(pop_size=48,
            sampling=IntegerRandomSampling(),
            crossover=SBX(prob=1.0, eta=3.0, vtype=float, repair=RoundingRepair()),
            mutation=PM(prob=1.0, eta=3.0, vtype=float, repair=RoundingRepair()),
            eliminate_duplicates=True,
            )
# Run the optimization for a fixed budget of 50 generations.
res = minimize(problem,
               method,
               termination=('n_gen', 50),
               # seed=1,
               save_history=True
               )
print("Best solution found: %s" % res.X)
print("Function value: %s" % res.F)
print("Constraint violation: %s" % res.CV)
# Convergence curve: best objective value versus cumulative evaluations.
n_evals = np.array([e.evaluator.n_eval for e in res.history])
opt = np.array([e.opt[0].F for e in res.history])
# plt.title("Convergence")
plt.plot(n_evals, opt, "--", color='C1')
plt.yscale("log")
plt.xlabel('OF evaluations [-]')
plt.ylabel('OF: Dead Load [kN]')
plt.tight_layout()
# plt.show()
plt.savefig('convergence.pdf')
# plt.close()
# Persist results for post-processing.
with open('n_evals.npy', 'wb') as f:
    np.save(f, n_evals)
with open('opt.npy', 'wb') as f:
    np.save(f, opt)
with open('resX.npy', 'wb') as f:
    np.save(f, res.X)
with open('resF.npy', 'wb') as f:
    np.save(f, res.F)
# Full population history and per-individual feasibility flags.
_X = np.row_stack([a.pop.get("X") for a in res.history])
feasible = np.row_stack([a.pop.get("feasible") for a in res.history])[:, 0]
with open('_X.npy', 'wb') as f:
    np.save(f, _X)
with open('feasible.npy', 'wb') as f:
    np.save(f, feasible)
print('finito')
# Shut down the SAP2000 instances and drop the COM references.
for ii in range(Num_SAP2000_istances):
    ret = SapObject[ii].ApplicationExit(False)
    SapModel[ii] = None
    SapObject[ii] = None
| marco-rosso-m/SAP2000-python-for-structural-optimization | Parallel_processing_optimization/Optimization_Parallel_sez_diverse_fixed.py | Optimization_Parallel_sez_diverse_fixed.py | py | 15,571 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.rcParams",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "... |
23176774568 | from .models import Heroes, Properties
from django.forms import ModelForm, Form, TextInput, Textarea, Select, CheckboxSelectMultiple, CharField,\
MultipleChoiceField, PasswordInput
from django.contrib.auth.models import User
class HeroesForm(ModelForm):
    """ModelForm for creating and editing Heroes records.

    All text/select widgets carry the Bootstrap ``form-control`` class; the
    many-to-many ``properties`` field is rendered as a checkbox list.
    Placeholder strings are user-facing (Russian UI) and are part of the
    rendered output.
    """

    class Meta:
        model = Heroes
        # Editable model fields, in display order.
        fields = [
            'name',
            'icon',
            'rank',
            'color',
            'race',
            'uniqueness',
            'properties',
        ]
        widgets = {
            'name': TextInput(attrs={
                'class': 'form-control',
                'placeholder': 'Введите имя',
                'autocomplete': 'off',
            }),
            # Icon is stored as a URL entered by the user.
            'icon': TextInput(attrs={
                'class': 'form-control',
                'placeholder': 'Добавьте адрес иконки',
                'autocomplete': 'off',
            }),
            'rank': Select(attrs={
                'class': 'form-control',
            }),
            'color': Select(attrs={
                'class': 'form-control',
            }),
            'race': Select(attrs={
                'class': 'form-control',
            }),
            'uniqueness': Textarea(attrs={
                'class': 'form-control',
                'placeholder': 'Введите Уникальность',
            }),
            'properties': CheckboxSelectMultiple()
        }
class PropertiesForm(ModelForm):
    """ModelForm for adding a new Properties entry (single name field)."""

    class Meta:
        model = Properties
        fields = ['name']
        widgets = {
            'name': TextInput(attrs={
                'class': 'form-control',
                'placeholder': 'Введите новое свойство',
                'autocomplete': 'off',
            })
        }
class FilterForm(Form, ModelForm):
    """Filter form for the Heroes list: color, race, uniqueness text and properties.

    NOTE(review): inherits from both Form and ModelForm; ModelForm alone would
    normally suffice — confirm the double base is intentional.
    """

    # Choice values must match the strings stored on Heroes.color.
    color = MultipleChoiceField(widget=CheckboxSelectMultiple, choices=[
        ('Зеленый', 'Зеленый'),
        ('Красный', 'Красный'),
        ('Синий', 'Синий'),
    ])
    # Choice values must match the strings stored on Heroes.race.
    race = MultipleChoiceField(widget=CheckboxSelectMultiple, choices=[
        ('Богиня', 'Богиня'),
        ('Великан', 'Великан'),
        ('Демон', 'Демон'),
        ('Неизвестно', 'Неизвестно'),
        ('Фея', 'Фея'),
        ('Человек', 'Человек'),
    ])
    # Optional free-text filter on the uniqueness field.
    uniqueness = CharField(required=False, widget=TextInput(attrs={
        'class': 'form-control',
        'placeholder': 'Уникальность',
        'autocomplete': 'off',
    }))

    class Meta:
        model = Heroes
        fields = ['properties']
        widgets = {
            'properties': CheckboxSelectMultiple()
        }
class UserForm(Form, ModelForm):
    """Login/registration form: username from the User model plus a password input.

    NOTE(review): inherits from both Form and ModelForm; ModelForm alone would
    normally suffice — confirm the double base is intentional.
    """

    # Rendered as a masked password field; not a model field on User here.
    password = CharField(widget=PasswordInput(attrs={
        'class': 'form-control',
        'placeholder': 'Введите пароль',
        'autocomplete': 'off',
    }))

    class Meta:
        model = User
        fields = ['username']
        widgets = {
            'username': TextInput(attrs={
                'class': 'form-control',
                'placeholder': 'Введите логин',
                'autocomplete': 'off',
            })
        }
| Manakhov/7dsgc-wiki | sdsgc_wiki/main/forms.py | forms.py | py | 3,196 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "models.Heroes",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.form... |
71578936743 | #!/usr/bin/env python
"""
"""
import vtk
def main():
    """Read a CT head volume, extract the skin isosurface and render it interactively."""
    colors = vtk.vtkNamedColors()
    file_name = get_program_parameters()

    colors.SetColor("SkinColor", [255, 125, 64, 255])
    colors.SetColor("BkgColor", [51, 77, 102, 255])

    # Rendering infrastructure: the renderer draws into the window and the
    # interactor adds mouse/keyboard control over the scene.
    renderer = vtk.vtkRenderer()
    render_window = vtk.vtkRenderWindow()
    render_window.AddRenderer(renderer)
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(render_window)

    reader = vtk.vtkMetaImageReader()
    reader.SetFileName(file_name)

    # Contour value 500 is known to correspond to the patient's skin.
    skin_extractor = vtk.vtkMarchingCubes()
    skin_extractor.SetInputConnection(reader.GetOutputPort())
    skin_extractor.SetValue(0, 500)

    skin_mapper = vtk.vtkPolyDataMapper()
    skin_mapper.SetInputConnection(skin_extractor.GetOutputPort())
    skin_mapper.ScalarVisibilityOff()

    skin_actor = vtk.vtkActor()
    skin_actor.SetMapper(skin_mapper)
    skin_actor.GetProperty().SetDiffuseColor(colors.GetColor3d("SkinColor"))

    # An outline box gives spatial context around the volume.
    outline_filter = vtk.vtkOutlineFilter()
    outline_filter.SetInputConnection(reader.GetOutputPort())
    outline_mapper = vtk.vtkPolyDataMapper()
    outline_mapper.SetInputConnection(outline_filter.GetOutputPort())
    outline_actor = vtk.vtkActor()
    outline_actor.SetMapper(outline_mapper)
    outline_actor.GetProperty().SetColor(colors.GetColor3d("Black"))

    # Initial camera: the position/focal-point pair defines the view direction
    # that ResetCamera() uses below to frame the data.
    camera = vtk.vtkCamera()
    camera.SetViewUp(0, 0, -1)
    camera.SetPosition(0, -1, 0)
    camera.SetFocalPoint(0, 0, 0)
    camera.ComputeViewPlaneNormal()
    camera.Azimuth(30.0)
    camera.Elevation(30.0)

    renderer.AddActor(outline_actor)
    renderer.AddActor(skin_actor)
    renderer.SetActiveCamera(camera)
    renderer.ResetCamera()
    camera.Dolly(1.5)  # move towards the focal point, enlarging the image

    renderer.SetBackground(colors.GetColor3d("BkgColor"))
    render_window.SetSize(640, 480)

    # Dollying the camera usually requires re-fitting the near/far clipping
    # planes so nothing between them gets clipped away.
    renderer.ResetCameraClippingRange()

    # Enter the interactive event loop.
    interactor.Initialize()
    interactor.Start()
def get_program_parameters():
    """Parse the command line and return the path to the FullHead.mhd input volume."""
    import argparse
    parser = argparse.ArgumentParser(
        description='The skin extracted from a CT dataset of the head.',
        epilog='''
Derived from VTK/Examples/Cxx/Medical1.cxx
This example reads a volume dataset, extracts an isosurface that
represents the skin and displays it.
''',
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('filename', help='FullHead.mhd.')
    return parser.parse_args().filename
# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| lorensen/VTKExamples | src/Python/Medical/MedicalDemo1.py | MedicalDemo1.py | py | 3,793 | python | en | code | 319 | github-code | 36 | [
{
"api_name": "vtk.vtkNamedColors",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "vtk.vtkRenderer",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "vtk.vtkRenderWindow",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "vtk.vtkRenderW... |
72275954665 | from socket import *
from threading import Thread
import os,stat
import time, random
import statistics
def r2(n, ip, port):
    """Send *n* UDP probes to (ip, port) and print the mean round-trip time.

    Port 20002 is the link towards node "s" and 20022 towards node "d"
    (labels used only for output); any other port is labelled by its number.
    Each probe blocks until the peer echoes an acknowledgement.
    """
    if port == 20002:
        node = "s"
    elif port == 20022:
        node = "d"
    else:
        node = str(port)  # fall back to the port number instead of raising NameError
    # Original format string had two placeholders for three arguments and
    # silently dropped the port; include it explicitly.
    print("R2 WILL SEND MESSAGE TO: {} OVER {}:{}".format(node, ip, port))
    elapsed = []  # one round-trip time per probe, in seconds
    # Context manager guarantees the UDP socket is closed even if recvfrom fails
    # (the original leaked the socket).
    with socket(AF_INET, SOCK_DGRAM) as c:
        for i in range(n):
            start = time.time()  # timestamp before sending the packet
            c.sendto(b'message from r2', (ip, port))
            result, peer = c.recvfrom(1000)  # acknowledgement expected from the server side
            elapsed.append(time.time() - start)  # RTT for this probe
    if not elapsed:
        # n <= 0: nothing to average (statistics.mean([]) would raise).
        return
    elapsedMean = statistics.mean(elapsed) * 1000  # seconds -> milliseconds
    print("Average delay between r2 and {} is : {} ms".format(node, elapsedMean))
def r2_server(n, port):
    """Listen on *port* for *n* UDP packets, echoing each one back to its sender.

    In the deployment, port 20005 serves node "r1" and port 20006 serves "r3".
    The echoed payload doubles as the acknowledgement the clients time against.
    """
    try:
        server = socket(AF_INET, SOCK_DGRAM)
        server.bind(('', port))  # listen on all interfaces
        print("R2_SERVER IS LISTENING THE PORT: {}".format(port))
    except OSError:
        # The original fell through to the receive loop even when bind failed
        # (its try/except/finally always executed the loop), and its error
        # message was missing the {} placeholder for the port.
        print("R2_SERVER CAN NOT BIND THE PORT: {}".format(port))
        return
    try:
        for i in range(n):
            req, peer = server.recvfrom(1000)  # blocking receive: payload + sender address
            server.sendto(req, peer)  # echo the same payload back as the acknowledgement
    finally:
        server.close()
# Client threads probe node s (via 10.10.2.2:20002) and node d (via 10.10.5.2:20022);
# server threads echo probes arriving from r1 (port 20005) and r3 (port 20006).
r2Clients = []
r2Clients.append( Thread(target = r2, args=(1000,'10.10.2.2',20002)) )
r2Clients.append( Thread(target = r2, args=(1000,'10.10.5.2',20022)) )
r2Servers = []
r2Servers.append(Thread(target = r2_server, args=(1000,20005)))
r2Servers.append(Thread(target = r2_server, args=(1000,20006)))
# Start all clients and servers so measurements run concurrently.
for r2Client in r2Clients: r2Client.start()
for r2Server in r2Servers: r2Server.start()
# Block until every thread has finished its 1000-packet exchange.
for r2Client in r2Clients: r2Client.join()
for r2Server in r2Servers: r2Server.join()
| ilkersigirci/METU-CENG-Assignments | Ceng435-Data_Communications_and_Networking/Term-Project-Part1/discoveryScripts/r2.py | r2.py | py | 2,452 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "statistics.mean",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_numb... |
17797629984 | from __future__ import absolute_import, division, print_function, unicode_literals
import os
from builtins import str
from contextlib import contextmanager
from unittest import skipIf
from pants.java.distribution.distribution import Distribution, DistributionLocator
from pants.util.osutil import OS_ALIASES, get_os_name
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
from pants_test.subsystem.subsystem_util import global_subsystem_instance
@contextmanager
def _distribution_locator(distribution_locator_options=None):
    """Yield a DistributionLocator subsystem instance configured with the given options."""
    subsystem_options = {
        DistributionLocator.options_scope: distribution_locator_options or {},
    }
    yield global_subsystem_instance(DistributionLocator, options=subsystem_options)
def _get_two_distributions():
    """Return a (java7, java8) Distribution pair, or None if either JVM is unavailable."""
    with _distribution_locator() as locator:
        try:
            return (
                locator.cached(minimum_version='1.7', maximum_version='1.7.9999'),
                locator.cached(minimum_version='1.8', maximum_version='1.8.9999'),
            )
        except DistributionLocator.Error:
            return None
class DistributionIntegrationTest(PantsRunIntegrationTest):
    """End-to-end tests for JVM Distribution selection via pants options."""

    def _test_two_distributions(self, os_name=None):
        """Run PrintVersion with each JVM pinned via jvm-distributions paths while
        JDK_HOME points at the other JVM; the pinned one must win."""
        java7, java8 = _get_two_distributions()
        os_name = os_name or get_os_name()
        self.assertNotEqual(java7.home, java8.home)
        for (one, two) in ((java7, java8), (java8, java7)):
            target_spec = 'testprojects/src/java/org/pantsbuild/testproject/printversion'
            run = self.run_pants(['run', target_spec],
                                 config={
                                     'jvm-distributions': {
                                         'paths': {
                                             os_name: [one.home],
                                         }
                                     },
                                     'jvm-platform': {
                                         'default_platform': 'java{}'.format(one.version.components[1]),
                                         'compiler': 'javac',
                                     }
                                 },
                                 extra_env={
                                     'JDK_HOME': two.home,
                                 })
            self.assert_success(run)
            # PrintVersion reports the java.home it actually ran under.
            self.assertIn('java.home:{}'.format(os.path.realpath(one.home)), run.stdout_data)

    @skipIf(_get_two_distributions() is None, 'Could not find java 7 and java 8 jvms to test with.')
    def test_jvm_jdk_paths_supercedes_environment_variables(self):
        """Configured jvm-distributions paths beat the JDK_HOME environment variable."""
        self._test_two_distributions()

    @skipIf(_get_two_distributions() is None, 'Could not find java 7 and java 8 jvms to test with.')
    def test_jdk_paths_with_aliased_os_names(self):
        """Paths keyed under any alias of the current OS name are honored."""
        # NB(gmalmquist): This test will silently no-op and do nothing if the testing machine is running
        # an esoteric os (eg, windows).
        os_name = get_os_name()
        if os_name in OS_ALIASES:
            for other in OS_ALIASES[os_name]:
                if other != os_name:
                    self._test_two_distributions(other)

    def test_no_jvm_restriction(self):
        """Without version constraints, the locator's default JVM is used."""
        with _distribution_locator() as locator:
            distribution = locator.cached()
            target_spec = 'testprojects/src/java/org/pantsbuild/testproject/printversion'
            run = self.run_pants(['run', target_spec])
            self.assert_success(run)
            self.assertIn('java.home:{}'.format(os.path.realpath(distribution.home)), run.stdout_data)

    def test_jvm_meets_min_and_max_distribution(self):
        """Pinning min == max == the current JVM version still selects that JVM."""
        with _distribution_locator() as locator:
            distribution = locator.cached()
            target_spec = 'testprojects/src/java/org/pantsbuild/testproject/printversion'
            run = self.run_pants(['run', target_spec],
                                 config={
                                     'jvm-distributions': {
                                         'minimum_version': str(distribution.version),
                                         'maximum_version': str(distribution.version)
                                     }
                                 })
            self.assert_success(run)
            self.assertIn('java.home:{}'.format(os.path.realpath(distribution.home)), run.stdout_data)

    def test_impossible_distribution_requirements(self):
        """min > max must fail fast with an 'impossible constraints' error."""
        with _distribution_locator() as locator:
            with self.assertRaisesRegexp(Distribution.Error, "impossible constraints"):
                locator.cached('2', '1', jdk=False)

    def _test_jvm_does_not_meet_distribution_requirements(self,
                                                          min_version_arg=None,
                                                          max_version_arg=None,
                                                          min_version_option=None,
                                                          max_version_option=None):
        """Assert locator.cached() fails under the given option/argument version bounds."""
        distribution_locator_options = {
            'minimum_version': min_version_option,
            'maximum_version': max_version_option,
        }
        with _distribution_locator(distribution_locator_options) as locator:
            with self.assertRaises(Distribution.Error):
                locator.cached(minimum_version=min_version_arg, maximum_version=max_version_arg, jdk=False)

    # a version less than all other versions
    BOTTOM = '0.00001'
    # a version greater than all other versions
    TOP = '999999'

    def test_does_not_meet_min_version_option(self):
        self._test_jvm_does_not_meet_distribution_requirements(min_version_option=self.TOP)

    def test_does_not_meet_min_version_arg(self):
        self._test_jvm_does_not_meet_distribution_requirements(min_version_arg=self.TOP)

    def test_does_not_meet_max_option(self):
        self._test_jvm_does_not_meet_distribution_requirements(max_version_option=self.BOTTOM)

    def test_does_not_meet_max_arg(self):
        self._test_jvm_does_not_meet_distribution_requirements(max_version_arg=self.BOTTOM)

    def test_min_option_trumps_min_arg(self):
        self._test_jvm_does_not_meet_distribution_requirements(min_version_arg=self.BOTTOM,
                                                               min_version_option=self.TOP)

    def test_min_arg_trumps_min_option(self):
        self._test_jvm_does_not_meet_distribution_requirements(min_version_arg=self.TOP,
                                                               min_version_option=self.BOTTOM)

    def test_max_option_trumps_max_arg(self):
        self._test_jvm_does_not_meet_distribution_requirements(max_version_arg=self.TOP,
                                                               max_version_option=self.BOTTOM)

    def test_max_arg_trumps_max_option(self):
        self._test_jvm_does_not_meet_distribution_requirements(max_version_arg=self.BOTTOM,
                                                               max_version_option=self.TOP)
| fakeNetflix/twitter-repo-pants | tests/python/pants_test/java/distribution/test_distribution_integration.py | test_distribution_integration.py | py | 6,476 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pants.java.distribution.distribution.DistributionLocator.options_scope",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pants.java.distribution.distribution.DistributionLocator",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pants_test.s... |
16636445685 | from enum import Enum
from math import hypot
from typing import Optional, List, Tuple, Union, Type
import torch
from torch import nn, Tensor
__all__ = [
"bilinear_upsample_initializer",
"icnr_init",
"AbstractResizeLayer",
"PixelShuffle",
"PixelShuffleWithLinear",
"BilinearAdditiveUpsample2d",
"DeconvolutionUpsample2d",
"ResidualDeconvolutionUpsample2d",
"instantiate_upsample_block",
"UpsampleLayerType",
]
class UpsampleLayerType(Enum):
    """String identifiers accepted by instantiate_upsample_block()."""

    NEAREST = "nearest"
    BILINEAR = "bilinear"
    PIXEL_SHUFFLE = "pixel_shuffle"
    PIXEL_SHUFFLE_LINEAR = "pixel_shuffle_linear"
    DECONVOLUTION = "deconv"
    RESIDUAL_DECONV = "residual_deconv"
class AbstractResizeLayer(nn.Module):
    """
    Basic class for all upsampling blocks. It forces the upsample block to have a specific
    signature of forward method.
    """

    def forward(self, x: Tensor, output_size: Union[Tuple[int, int], torch.Size]) -> Tensor:
        """
        :param x: Input feature map to resize
        :param output_size: Target output size. This serves as a hint for the upsample block.
        :return: Resized feature map; subclasses must override this method.
        """
        raise NotImplementedError
def bilinear_upsample_initializer(x):
    """Return bilinear-style weights for a conv kernel tensor.

    Fills *x* in place with each position's Euclidean distance from the kernel
    center, then returns weights that decrease with distance and sum to 1 over
    each (H, W) kernel plane.
    """
    center_h = x.size(2) // 2
    center_w = x.size(3) // 2
    for row in range(x.size(2)):
        for col in range(x.size(3)):
            x[..., row, col] = hypot(center_h - row, center_w - col)
    weights = 1 - x / x.sum(dim=(2, 3), keepdim=True)
    return weights / weights.sum(dim=(2, 3), keepdim=True)
def icnr_init(tensor: torch.Tensor, upscale_factor=2, initializer=nn.init.kaiming_normal_):
    """Fill the input Tensor with values according to the method
    described in "Checkerboard artifact free sub-pixel convolution"
    - Andrew Aitken et al. (2017). This initialization should be used in the
    last convolutional layer before a PixelShuffle operation: every group of
    ``upscale_factor**2`` output channels shares one sub-kernel, which removes
    checkerboard artifacts.

    Args:
        tensor: an n-dimensional torch.Tensor (out_channels must be divisible
            by ``upscale_factor**2``)
        upscale_factor: factor to increase spatial resolution by
        initializer: initializer used for the sub-kernel (default switched from
            the deprecated ``nn.init.kaiming_normal`` to ``kaiming_normal_``;
            same distribution, no deprecation warning)

    Example:
        >>> conv_shuffle = nn.Conv2d(64, 10 * (8 ** 2), 3, padding=1, bias=0)
        >>> conv_shuffle.weight.data.copy_(icnr_init(conv_shuffle.weight, 8))

    .. _Checkerboard artifact free sub-pixel convolution:
        https://arxiv.org/abs/1707.02937
    """
    # One sub-kernel per group of upscale_factor**2 output channels.
    new_shape = [tensor.shape[0] // upscale_factor**2] + list(tensor.shape[1:])
    subkernel = initializer(torch.zeros(new_shape))
    subkernel = subkernel.transpose(0, 1)
    subkernel = subkernel.contiguous().view(subkernel.shape[0], subkernel.shape[1], -1)
    # Replicate each sub-kernel upscale_factor**2 times along the flattened axis.
    kernel = subkernel.repeat(1, 1, upscale_factor**2)
    transposed_shape = [tensor.shape[1]] + [tensor.shape[0]] + list(tensor.shape[2:])
    kernel = kernel.contiguous().view(transposed_shape)
    kernel = kernel.transpose(0, 1)
    return kernel
class NearestNeighborResizeLayer(AbstractResizeLayer):
    """Nearest-neighbour resize to the requested output size; channels unchanged."""

    def __init__(self, in_channels: int, scale_factor: int):
        super().__init__()
        # Nearest-neighbour interpolation never mixes channels, so in == out.
        self.in_channels = self.out_channels = in_channels
        self.scale_factor = scale_factor

    def forward(self, x: Tensor, output_size: Union[Tuple[int, int], torch.Size]) -> Tensor:
        resize = nn.functional.interpolate
        return resize(x, size=output_size, mode="nearest")
class BilinearInterpolationLayer(AbstractResizeLayer):
    """Bilinear resize to the requested output size; channels unchanged."""

    def __init__(self, in_channels: int, scale_factor: int, align_corners=True):
        super().__init__()
        # Interpolation never mixes channels, so in == out.
        self.in_channels = self.out_channels = in_channels
        self.scale_factor = scale_factor
        self.align_corners = align_corners

    def forward(self, x: Tensor, output_size: Union[Tuple[int, int], torch.Size]) -> Tensor:
        return nn.functional.interpolate(
            x, size=output_size, mode="bilinear", align_corners=self.align_corners
        )
class PixelShuffle(AbstractResizeLayer):
    """
    Depth-to-space feature map upsampling that produces a spatially larger feature map
    with a smaller number of channels. If the input channel count is not divisible by
    ``scale_factor**2``, an additional 1x1 convolution projects it to the nearest
    divisible count first.

    Fixes over the previous version:
      * the channel reduction factor was ``2 ** scale_factor`` instead of
        ``scale_factor ** 2`` (wrong for any scale other than 2);
      * the fallback 1x1 conv declared ``rounded_channels`` as its *input*
        channels (the actual input has ``in_channels``, so it crashed) and
        used ``padding=1``, which grew the spatial size by 2.

    https://github.com/pytorch/pytorch/pull/5429
    https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf
    """

    def __init__(self, in_channels: int, scale_factor: int):
        super().__init__()
        # Pixel shuffle consumes scale_factor**2 input channels per output channel.
        n = scale_factor**2
        self.in_channels = in_channels
        self.out_channels = in_channels // n
        rounded_channels = self.out_channels * n
        # Project to the nearest divisible channel count when necessary;
        # kernel_size=1 with no padding keeps the spatial size unchanged.
        self.conv = (
            nn.Conv2d(in_channels, rounded_channels, kernel_size=1, bias=False)
            if in_channels != rounded_channels
            else nn.Identity()
        )
        self.shuffle = nn.PixelShuffle(upscale_factor=scale_factor)

    def forward(self, x: Tensor, output_size: Union[Tuple[int, int], torch.Size] = None) -> Tensor:
        return self.shuffle(self.conv(x))
class PixelShuffleWithLinear(AbstractResizeLayer):
    """
    Depth-to-space upsampling that preserves the input channel count: a 3x3
    convolution first expands channels by ``scale_factor**2``, then pixel
    shuffle trades them for spatial resolution.

    https://github.com/pytorch/pytorch/pull/5429
    https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf
    """

    def __init__(self, in_channels: int, scale_factor: int):
        super().__init__()
        expansion = scale_factor * scale_factor
        self.conv = nn.Conv2d(in_channels, in_channels * expansion, kernel_size=3, padding=1, bias=False)
        self.out_channels = in_channels
        self.shuffle = nn.PixelShuffle(upscale_factor=scale_factor)

    def forward(self, x: Tensor, output_size: Union[Tuple[int, int], torch.Size] = None) -> Tensor:
        expanded = self.conv(x)
        return self.shuffle(expanded)
class BilinearAdditiveUpsample2d(AbstractResizeLayer):
    """
    Bilinear upsampling followed by additive reduction of channel groups.
    https://arxiv.org/abs/1707.05847
    """

    def __init__(self, in_channels: int, scale_factor: int = 2):
        super().__init__()
        # NOTE(review): the group size is 2 ** scale_factor, which equals the
        # conventional scale_factor ** 2 only for scale factors 2 and 4 —
        # confirm the intent for other scales.
        self.n = 2**scale_factor
        self.in_channels = in_channels
        self.out_channels = in_channels // self.n
        if in_channels % self.n != 0:
            raise ValueError(f"Number of input channels ({in_channels}) must be divisable by n ({self.n})")
        self.upsample = nn.UpsamplingBilinear2d(scale_factor=scale_factor)

    def forward(self, x: Tensor, output_size: Optional[List[int]] = None) -> Tensor:  # skipcq: PYL-W0221
        upsampled = self.upsample(x)
        batch, _, height, width = upsampled.size()
        # Average each consecutive group of self.n channels into one output channel.
        grouped = upsampled.reshape(batch, self.out_channels, self.n, height, width)
        return grouped.mean(2)
class DeconvolutionUpsample2d(AbstractResizeLayer):
    """Learned 2x upsampling with a stride-2 transposed convolution; channels unchanged."""

    def __init__(self, in_channels: int, scale_factor: int = 2):
        if scale_factor != 2:
            raise NotImplementedError("Scale factor other than 2 is not implemented")
        super().__init__()
        self.in_channels = self.out_channels = in_channels
        self.conv = nn.ConvTranspose2d(in_channels, in_channels, kernel_size=3, padding=1, stride=2)

    def forward(self, x: Tensor, output_size: Optional[List[int]] = None) -> Tensor:  # skipcq: PYL-W0221
        # output_size disambiguates the transposed convolution's output shape.
        return self.conv(x, output_size=output_size)
class ResidualDeconvolutionUpsample2d(AbstractResizeLayer):
    """Transposed-conv 2x upsample plus a bilinear-additive shortcut branch."""

    def __init__(self, in_channels: int, scale_factor=2):
        if scale_factor != 2:
            raise NotImplementedError(
                f"Scale factor other than 2 is not implemented. Got scale factor of {scale_factor}"
            )
        super().__init__()
        groups = scale_factor * scale_factor
        self.in_channels = in_channels
        self.out_channels = in_channels // groups
        self.conv = nn.ConvTranspose2d(
            in_channels, in_channels // groups, kernel_size=3, padding=1, stride=scale_factor, output_padding=1
        )
        self.residual = BilinearAdditiveUpsample2d(in_channels, scale_factor=scale_factor)

    def forward(self, x: Tensor, output_size: Optional[List[int]]) -> Tensor:  # skipcq: PYL-W0221
        shortcut = self.residual(x)
        # Pin the deconvolution's output shape to the shortcut branch, then add.
        return self.conv(x, output_size=shortcut.size()) + shortcut
def instantiate_upsample_block(
    block: Union[str, Type[AbstractResizeLayer]], in_channels, scale_factor: int
) -> AbstractResizeLayer:
    """Build a resize layer from a string name, an UpsampleLayerType or a class.

    :raises ValueError: If a string does not name a known UpsampleLayerType.
    :raises KeyError: If an UpsampleLayerType value has no registered class.
    """
    layer_cls = block
    # Plain strings are first normalized to the enum.
    if isinstance(layer_cls, str):
        layer_cls = UpsampleLayerType(layer_cls)
    # Enum values are then resolved to their implementing class.
    if isinstance(layer_cls, UpsampleLayerType):
        registry = {
            UpsampleLayerType.NEAREST: NearestNeighborResizeLayer,
            UpsampleLayerType.BILINEAR: BilinearInterpolationLayer,
            UpsampleLayerType.PIXEL_SHUFFLE: PixelShuffle,
            UpsampleLayerType.PIXEL_SHUFFLE_LINEAR: PixelShuffleWithLinear,
            UpsampleLayerType.DECONVOLUTION: DeconvolutionUpsample2d,
            UpsampleLayerType.RESIDUAL_DECONV: ResidualDeconvolutionUpsample2d,
        }
        layer_cls = registry[layer_cls]
    return layer_cls(in_channels, scale_factor=scale_factor)
| BloodAxe/pytorch-toolbelt | pytorch_toolbelt/modules/upsample.py | upsample.py | py | 9,298 | python | en | code | 1,447 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_numb... |
40798536786 | from keras.datasets import mnist
from keras.utils import np_utils
import numpy as np
import sys
import tensorflow as tf

# Fix random seeds for reproducible runs.
# NOTE(review): tf.set_random_seed is a TensorFlow 1.x API; TF2 renamed it to
# tf.random.set_seed — confirm which TF version this targets.
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)

# Load the MNIST digit images (28x28 grayscale) and their class labels.
(X_train, y_class_train), (X_test, y_class_test) = mnist.load_data()
print("Num of images in Train set: %d" % (X_train.shape[0]))
print("Num of images in Test set: %d" % (X_test.shape[0]))

# Show the first training image and dump its raw pixel values as a grid.
import matplotlib.pyplot as plt
plt.imshow(X_train[0], cmap='Greys')
plt.show()
for x in X_train[0]:
    for i in x:
        sys.stdout.write('%d\t' % i)
    sys.stdout.write('\n')

# Flatten 28x28 images to 784-element vectors and scale pixels to [0, 1].
X_train = X_train.reshape(X_train.shape[0], 784)
X_train = X_train.astype('float64')
X_train = X_train / 255
X_test = X_test.reshape(X_test.shape[0], 784).astype('float64') / 255

# One-hot encode the 10 digit classes.
print('class: %d ' % (y_class_train[0]))
y_train = np_utils.to_categorical(y_class_train, 10)
y_test = np_utils.to_categorical(y_class_test, 10)
print(y_train[0])
{
"api_name": "numpy.random.seed",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.set_random_seed",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "keras.da... |
15194589620 | import json
with open ( "../6.NLP/sarcasm.json" , 'r' ) as f : datastore = json.load ( f )

# Split headlines and their sarcasm labels out of the JSON records.
sentences = []
labels = []
for item in datastore :
    sentences.append ( item [ 'headline' ] )
    labels.append ( item [ 'is_sarcastic' ] )

# First 20k samples train; the remainder is held out for validation.
training_size = 20000
training_sentences = sentences [ 0 : training_size ]
testing_sentences = sentences [ training_size : ]
training_labels = labels [ 0 : training_size ]
testing_labels = labels [ training_size : ]
# ----------------------------------------------------------------
## Data preprocessing
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Tokenization settings: 10k-word vocabulary, out-of-vocabulary token,
# sequences padded/truncated at the end to a fixed length of 120.
vocab_size = 10000
max_length = 120
trunc_type = 'post'
padding_type = 'post'
oov_tok = "<OOV>"
tokenizer = Tokenizer ( num_words = vocab_size , oov_token = oov_tok )
# Fit the vocabulary on the training split only to avoid leakage.
tokenizer.fit_on_texts ( training_sentences )
training_sequences = tokenizer.texts_to_sequences ( training_sentences )
training_padded = pad_sequences ( training_sequences , maxlen = max_length , padding = padding_type, truncating = trunc_type )
testing_sequences = tokenizer.texts_to_sequences ( testing_sentences )
testing_padded = pad_sequences ( testing_sequences , maxlen = max_length , padding = padding_type , truncating = trunc_type )
training_labels = np.array ( training_labels )
testing_labels = np.array ( testing_labels )
# ----------------------------------------------------------------
# ## Build and Compile the Model
import tensorflow as tf

# Parameters
embedding_dim = 16
filters = 128 # units
kernel_size = 5 # kernel
dense_dim = 6 # units 2

# Embedding -> 1D convolution -> global max pooling -> small dense head
# with a sigmoid output for binary (sarcastic / not) classification.
model_conv = tf.keras.Sequential ( [
    tf.keras.layers.Embedding ( vocab_size , embedding_dim , input_length = max_length ) ,
    tf.keras.layers.Conv1D ( filters , kernel_size , activation = 'relu' ) ,
    tf.keras.layers.GlobalMaxPooling1D () ,
    tf.keras.layers.Dense ( dense_dim , activation = 'relu' ) ,
    tf.keras.layers.Dense ( 1 , activation = 'sigmoid' )
])
model_conv.compile ( loss = 'binary_crossentropy' , optimizer = 'adam' , metrics = [ 'accuracy' ] )
model_conv.summary ()

NUM_EPOCHS = 10
history_conv = model_conv.fit (
    training_padded , training_labels , epochs = NUM_EPOCHS ,
    validation_data = ( testing_padded , testing_labels ) )
# Epoch 10/10
# 625/625 [==============================] - 3s 5ms/step - loss: 0.0012 - accuracy: 0.9997 - val_loss: 1.0603 - val_accuracy: 0.8269
{
"api_name": "json.load",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.preprocessing.text.Tokenizer",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.preprocessing.sequence.pad_sequences",
"line_number": 38,
"usage_... |
12338780708 | ################011011100110010101101111####
### neo Command Line #######################
############################################
def getcmdlist():
    """Return the mapping of selection command codes to their help text."""
    return {
        "sav": "Select all in Active View, Model or Annotation.",
        "samv": "Select all in Active View, non Annotation.",
        "ssv": "Select similar in Active View of same kind of selection (multiple types allowed).",
        "sap": "Select all in Project, Model or Annotation.",
        "samp": "Select all in Project, non Annotation.",
        "ssp": "Select similar in Project of same kind of selection (multiple types allowed)."
    }
def runcmd(cmd, msg, recallCL=False):
    """Execute a selection command by its short code.

    :param cmd: Command code from getcmdlist() (e.g. 'sav', 'ssp').
    :param msg: Unused; kept for interface compatibility with the caller.
    :param recallCL: Whether the command line should be re-shown after an
        unknown command is reported.
    """
    # Map each code to (function name on the selection module, its argument);
    # replaces six nearly identical import/branch pairs.
    dispatch = {
        'sav': ('SelectAllInView', ['Model', 'Annotation']),
        'samv': ('SelectAllInView', ['Model']),
        'ssv': ('SelectSimilar', 'ActiveView'),
        'sap': ('SelectAllInProject', ['Model', 'Annotation']),
        'samp': ('SelectAllInProject', ['Model']),
        'ssp': ('SelectSimilar', 'Project'),
    }
    if cmd in dispatch:
        # Import lazily, as the original did, so the selection module is only
        # loaded when a selection command actually runs.
        from lib.select import neo_selection_funcs as sel
        func_name, arg = dispatch[cmd]
        getattr(sel, func_name)(arg)
    else:
        from neocl import unknowncmd
        unknowncmd(cmd, recallCL, getcmdlist())
{
"api_name": "lib.select.neo_selection_funcs.SelectAllInView",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "lib.select.neo_selection_funcs",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "lib.select.neo_selection_funcs.SelectAllInView",
"line_number":... |
318564129 | import logging
import shutil
from os.path import isdir
from urllib.request import urlopen
from zipfile import ZipFile
from charms.nginx_ingress_integrator.v0.ingress import IngressRequires
from ops.charm import CharmBase
from ops.main import main
from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus
logger = logging.getLogger(__name__)
STORAGE_PATH = "/var/lib/juju/storage/webroot/0"
SITE_SRC = "https://github.com/jnsgruk/test-site/archive/refs/heads/master.zip"
class HelloKubeconCharm(CharmBase):
    """Charm the service."""

    def __init__(self, *args):
        super().__init__(*args)
        # React to lifecycle/config events and the custom pull-site action.
        self.framework.observe(self.on.install, self._on_install)
        self.framework.observe(self.on.config_changed, self._on_config_changed)
        self.framework.observe(self.on.pull_site_action, self._pull_site_action)
        # Expose the workload on port 8080 through the nginx ingress integrator.
        self.ingress = IngressRequires(self, {
            "service-hostname": "hellokubecon.juju",
            "service-name": self.app.name,
            "service-port": 8080
        })

    def _on_install(self, _):
        """On install, fetch the initial copy of the site into the webroot."""
        # Download the site
        self._fetch_site()

    def _on_config_changed(self, _):
        """Handle the config changed event."""
        # Get the gosherve container so we can configure/manipulate it
        container = self.unit.get_container("gosherve")
        # Do not continue if the configuration is incomplete
        if not self._check_config():
            return
        # Create a new config layer
        layer = self._gosherve_layer()
        # Get the current config
        plan = container.get_plan()
        # Check if there are any changes to services
        if plan.services != layer["services"]:
            # Changes were made, add the new layer
            container.add_layer("gosherve", layer, combine=True)
            logging.info("Added updated layer 'gosherve' to Pebble plan")
            # Stop the service if it is already running
            if container.get_service("gosherve").is_running():
                container.stop("gosherve")
            # Restart it and report a new status to Juju
            container.start("gosherve")
            logging.info("Restarted gosherve service")
        # All is well, set an ActiveStatus
        self.unit.status = ActiveStatus()

    def _gosherve_layer(self) -> dict:
        """Returns a Pebble configuration layer for Gosherve"""
        return {
            "summary": "gosherve layer",
            "description": "pebble config layer for gosherve",
            "services": {
                "gosherve": {
                    "override": "replace",
                    "summary": "gosherve service",
                    "command": "/gosherve",
                    "startup": "enabled",
                    "environment": {
                        # Redirect map URL comes from the charm config.
                        "REDIRECT_MAP_URL": self.model.config["redirect-map"],
                        "WEBROOT": "/srv/hello-kubecon",
                    },
                }
            },
        }

    def _pull_site_action(self, event):
        """Action handler that pulls the latest site archive and unpacks it"""
        self._fetch_site()
        event.set_results({"result": "site pulled"})

    def _fetch_site(self):
        """Fetch latest copy of website from Github and move into webroot"""
        # Set some status and do some logging
        self.unit.status = MaintenanceStatus("Fetching web site")
        logger.info("Downloading site archive from %s", SITE_SRC)
        # Download the zip
        resp = urlopen(SITE_SRC)
        with open("/tmp/site.zip", "wb") as tmp:
            tmp.write(resp.read())
        # Extract the zip
        with ZipFile("/tmp/site.zip") as zf:
            zf.extractall(path="/tmp/site")
        # Remove existing version if it exists
        if isdir(f"{STORAGE_PATH}/hello-kubecon"):
            shutil.rmtree(f"{STORAGE_PATH}/hello-kubecon")
        # Move the downloaded web files into place
        shutil.move(src="/tmp/site/test-site-master", dst=f"{STORAGE_PATH}/hello-kubecon")
        self.unit.status = ActiveStatus()

    def _check_config(self):
        """Check that everything is in place to start Gosherve"""
        # The redirect map is mandatory; block the unit until it is provided.
        if self.model.config["redirect-map"] == "":
            self.unit.status = BlockedStatus("No 'redirect-map' config specified")
            return False
        return True
if __name__ == "__main__":
    # Entry point: hand the charm class to the Operator framework's event loop.
    main(HelloKubeconCharm)
| mthaddon/hello-kubecon-k8s | src/charm.py | charm.py | py | 4,399 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ops.charm.CharmBase",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "charms.nginx_ingress_integrator.v0.ingress.IngressRequires",
"line_number": 27,
"usage_type": "call... |
20077176339 | from django.urls import resolve
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase, APIRequestFactory
from cars.models import Car, Manufacturer, Rate
from cars.serializers import PopularSerializer
from cars.views import PopularListView
factory = APIRequestFactory()
class PopularListViewTest(APITestCase):
    """Tests for the popular-cars endpoint (cars ranked by number of rates)."""

    def setUp(self) -> None:
        # Build the view both as a callable (to dispatch requests through) and
        # as a bare instance (to call get_queryset() directly).
        self.view = PopularListView.as_view()
        self.request = factory.get(reverse("cars:list_popular"))
        self.view_object = PopularListView()

    def test_url_revers(self):
        # The named URL must resolve to this view and live at /popular/.
        url = reverse("cars:list_popular")
        found = resolve(url)
        self.assertEqual(found.func.__name__, self.view.__name__)
        self.assertEqual(url, "/popular/")

    def test_empty_popular_list(self):
        # With no cars in the database the endpoint returns an empty list
        # that matches the serialized (empty) queryset.
        cars = self.view_object.get_queryset()
        serializer = PopularSerializer(cars, many=True)
        response = self.view(self.request)
        response.render()
        self.assertCountEqual(response.data, [])
        self.assertEqual(response.data, serializer.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_not_empty_popular_list(self):
        # Two cars: Mustang gets 2 rates, F-150 gets 1 — response should be
        # ordered by rate count and expose the expected fields.
        manufacturer = Manufacturer.objects.create(make="Ford")
        car_one = Car.objects.create(manufacturer=manufacturer, model="Mustang")
        car_two = Car.objects.create(manufacturer=manufacturer, model="F-150")
        Rate.objects.create(car=car_one, rating=1)
        Rate.objects.create(car=car_one, rating=5)
        Rate.objects.create(car=car_two, rating=4)
        cars = self.view_object.get_queryset()
        serializer = PopularSerializer(cars, many=True)
        response = self.view(self.request)
        response.render()
        self.assertCountEqual(response.data[0].keys(), ["id", "make", "model", "rates_number"])
        self.assertEqual(response.data, serializer.data)
        self.assertEqual(response.data[0]["rates_number"], 2)
        self.assertEqual(response.data[1]["rates_number"], 1)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
| tomasz-rzesikowski/cars_API | cars/tests/tests_views/tests_popular_list_view.py | tests_popular_list_view.py | py | 2,109 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.test.APIRequestFactory",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "rest_framework.test.APITestCase",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "cars.views.PopularListView.as_view",
"line_number": 15,
"usage_type... |
32817446826 | """Add beach_id column and BeachForecastListHistory table
Revision ID: eae746ee3547
Revises: cc49b6b03c5a
Create Date: 2021-11-06 03:37:19.196002
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'eae746ee3547'
down_revision = 'cc49b6b03c5a'
branch_labels = None
depends_on = None
def upgrade():
    """Create the beachforecastlisthistory table (with indexes) and add an
    indexed beach_id column to beachforecastlist."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('beachforecastlisthistory',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('create_dt', postgresql.TIMESTAMP(timezone=True), server_default=sa.text('now()'), nullable=True),
    sa.Column('update_dt', postgresql.TIMESTAMP(timezone=True), nullable=True),
    sa.Column('beach', sa.String(), nullable=True),
    sa.Column('region', sa.String(), nullable=True),
    sa.Column('ocean', sa.String(), nullable=True),
    sa.Column('beach_id', sa.Integer(), nullable=True),
    sa.Column('live_info', sa.JSON(), nullable=True),
    sa.Column('forecast_info', sa.JSON(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Lookup indexes for the history table's common query columns.
    op.create_index(op.f('ix_beachforecastlisthistory_beach'), 'beachforecastlisthistory', ['beach'], unique=False)
    op.create_index(op.f('ix_beachforecastlisthistory_beach_id'), 'beachforecastlisthistory', ['beach_id'], unique=False)
    op.create_index(op.f('ix_beachforecastlisthistory_id'), 'beachforecastlisthistory', ['id'], unique=False)
    op.create_index(op.f('ix_beachforecastlisthistory_ocean'), 'beachforecastlisthistory', ['ocean'], unique=False)
    op.create_index(op.f('ix_beachforecastlisthistory_region'), 'beachforecastlisthistory', ['region'], unique=False)
    # New nullable beach_id column on the live table, plus its index.
    op.add_column('beachforecastlist', sa.Column('beach_id', sa.Integer(), nullable=True))
    op.create_index(op.f('ix_beachforecastlist_beach_id'), 'beachforecastlist', ['beach_id'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the beach_id column/index and the whole
    beachforecastlisthistory table with its indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_beachforecastlist_beach_id'), table_name='beachforecastlist')
    op.drop_column('beachforecastlist', 'beach_id')
    # Indexes must be dropped before the table that owns them.
    op.drop_index(op.f('ix_beachforecastlisthistory_region'), table_name='beachforecastlisthistory')
    op.drop_index(op.f('ix_beachforecastlisthistory_ocean'), table_name='beachforecastlisthistory')
    op.drop_index(op.f('ix_beachforecastlisthistory_id'), table_name='beachforecastlisthistory')
    op.drop_index(op.f('ix_beachforecastlisthistory_beach_id'), table_name='beachforecastlisthistory')
    op.drop_index(op.f('ix_beachforecastlisthistory_beach'), table_name='beachforecastlisthistory')
    op.drop_table('beachforecastlisthistory')
    # ### end Alembic commands ###
| veluminous/sea_forecast_api | alembic/versions/eae746ee3547_add_beach_id_column_and_.py | eae746ee3547_add_beach_id_column_and_.py | py | 2,744 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
39723147483 | from fastapi import Response, HTTPException
from odmantic import Field
from pydantic import BaseModel
from pydantic.main import ModelMetaclass
from starlette.background import BackgroundTask
import typing
from dojo.shared.supported_services import SupportedServices
if typing.TYPE_CHECKING:
from dojo.shared.error_messages import ErrorMessage
class ModelResponse(Response):
    """Directly return a Pydantic model as a JSON Response."""

    media_type = "application/json"

    def __init__(
        self,
        content: typing.Any,
        status_code: int = 200,
        headers: typing.Optional[dict] = None,
        media_type: typing.Optional[str] = None,
        background: typing.Optional[BackgroundTask] = None,
    ) -> None:
        super().__init__(content, status_code, headers, media_type, background)

    def render(self, content: typing.Any) -> bytes:
        # Refuse anything that is not an already-instantiated pydantic model.
        if not isinstance(content, BaseModel):
            raise Exception("Content must be a pydantic model!")
        # Compact separators and raw (non-ASCII-escaped) UTF-8 keep the payload small.
        payload = content.json(
            encoder={
                "ensure_ascii": False,
                "allow_nan": False,
                "indent": None,
                "separators": (",", ":"),
            }
        )
        return payload.encode("utf-8")
class PredefinedModelResponse(Response):
    """Directly intiaize a pydantic model with defaults and return as a JSON Response."""

    media_type = "application/json"

    def __init__(
        self,
        content: typing.Any,
        status_code: int = 200,
        headers: typing.Optional[dict] = None,
        media_type: typing.Optional[str] = None,
        background: typing.Optional[BackgroundTask] = None,
    ) -> None:
        super().__init__(content, status_code, headers, media_type, background)

    def render(self, content: typing.Any) -> bytes:
        # Here the content is a model *class*, not an instance.
        if not isinstance(content, ModelMetaclass):
            raise Exception("Content must be a ModelMetaclass!")
        # Instantiate with default field values only, then serialize compactly.
        instance = content()
        payload = instance.json(
            encoder={
                "ensure_ascii": False,
                "allow_nan": False,
                "indent": None,
                "separators": (",", ":"),
            }
        )
        return payload.encode("utf-8")
class ActionCompleted(BaseModel):
    """Response body confirming that an action finished for a given service."""

    # Which backing service the action ran against.
    service: SupportedServices
    # Name of the action that completed.
    action: str
class BackgroundProcessResponse(BaseModel):
    """Response body describing a background process that was started."""

    # Unique identifier for tracking the background process.
    uid: str
    # Human-readable name of the process.
    process_name: str
class APIErrorRaw(Exception):
    """Exception carrying a machine-readable code, a message and an HTTP status."""

    def __init__(self, error_code: str, error_message: str, status_code: int = 400):
        # Store the pieces the API layer needs to build an error response.
        self.status_code = status_code
        self.error_message = error_message
        self.error_code = error_code
class PreinitErrorMessage(Exception):
    """Use this error for models that need to be initialized before they can be used."""

    def __init__(self, model: typing.Any, status_code: int = 400):
        # Instantiate the model class with its defaults, then serialize it
        # compactly as UTF-8 bytes ready for a response body.
        instance = model()
        payload = instance.json(
            encoder={
                "ensure_ascii": False,
                "allow_nan": False,
                "indent": None,
                "separators": (",", ":"),
            }
        )
        self.model = payload.encode("utf-8")
        self.status_code = status_code
class ModelErrorMessage(Exception):
    """Use this error for models that have already been initalized """

    def __init__(self, model: typing.Any, status_code: int = 400):
        # The model is already an instance here — serialize it directly.
        payload = model.json(
            encoder={
                "ensure_ascii": False,
                "allow_nan": False,
                "indent": None,
                "separators": (",", ":"),
            }
        )
        self.model = payload.encode("utf-8")
        self.status_code = status_code
| ms7m/dojo | dojo/shared/response.py | response.py | py | 3,595 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "fastapi.Response",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "typing.Opti... |
17659950405 | # -*- coding:utf-8 -*-
import pandas as pd
import requests
import time
import json
import os
def get_month_data(month):
    """Scrape bilibili hot-rank video info for one month of 2018 into a CSV.

    Pages through the category-search API for the given month, collects title,
    link, date, play count, danmu count, favorites and author per video, and
    writes the result to csv_files/month_<month>.csv.

    :param month: Two-digit month string, '01' .. '12'.
    """
    import calendar

    page = 1
    num = 1
    video_list = []
    # Last day of the month, replacing three hard-coded URL branches
    # (2018 is not a leap year, so February correctly yields 28).
    last_day = calendar.monthrange(2018, int(month))[1]
    while True:
        print('*****' * 10, f'page[{page}]', '*****' * 10)
        # NOTE: the query parameter is 'order=click&copy_right=-1'; in the
        # original source the '&copy' had been HTML-entity-mangled into '©'.
        url = (
            'https://s.search.bilibili.com/cate/search?main_ver=v3&search_type=video'
            '&view_type=hot_rank&pic_size=160x100&order=click&copy_right=-1&cate_id=22'
            f'&page={page}&pagesize=20&time_from=2018{month}01&time_to=2018{month}{last_day}'
        )
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063'}
        html_code = requests.get(url, headers=headers).text
        # Parse the JSON once (the original parsed the same payload twice).
        data = json.loads(html_code)
        max_page = data['numPages']
        results = data['result']
        for result in results:
            print(result['title'], result['arcurl'], result['pubdate'].split(' ')[0], result['play'], result['video_review'], result['favorites'], result['author'])
            video_list.append([result['title'],
                               result['arcurl'],
                               result['pubdate'].split(' ')[0],
                               result['play'],
                               result['video_review'],
                               result['favorites'],
                               result['author']])
            num += 1
        page += 1
        time.sleep(0.5)  # be polite to the API between page requests
        if page > max_page:
            if not os.path.isdir('csv_files'):
                os.mkdir('csv_files')
            result_df = pd.DataFrame(video_list, columns=['title', 'link', 'date', 'play_count', 'danmu_num', 'favorites', 'author'])
            result_df.to_csv(f'csv_files/month_{month}.csv', sep=',', na_rep='NA')
            break
# Crawl every month of 2018, zero-padding the month number to two digits.
for month in range(1, 13):
    get_month_data(f"{month:02d}")
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 45... |
7768381873 | #!/usr/bin/env python3
# AWS Lambda function for creating an AMI image from a given instance.
# By Michael Ludvig - https://aws.nz
# Trigger this function from CloudWatch Scheduler (cron-like)
# Pass the Instance ID in 'instance_id' environment variable.
import os
import boto3
from datetime import datetime, timedelta
import time
ec2 = boto3.client('ec2')
def create_image(instance_id, reboot):
    """Create an AMI from the given instance and tag it with snapshot metadata.

    The image description is derived from the instance's Name tag, its
    CloudFormation stack name, or the instance id — in that order.

    :param instance_id: EC2 instance to snapshot.
    :param reboot: If True the instance is rebooted for a consistent image.
    :return: Tuple of (image_id, snapshot_timestamp).
    """
    def _print_log(message):
        # Prefix every log line with the instance id and snapshot timestamp.
        print('%s @ %s: %s' % (instance_id, snapshot_timestamp, message))

    snapshot_timestamp = datetime.strftime(datetime.now(), '%s')
    _print_log('Snapshotting instance')
    instance = ec2.describe_instances(InstanceIds=[instance_id])
    description = ''
    tags = {}
    try:
        tags = {item['Key']: item['Value'] for item in instance['Reservations'][0]['Instances'][0]['Tags']}
    except (KeyError, IndexError):
        # The instance may carry no tags at all; fall back to defaults below.
        # (Narrowed from a bare `except:` that silently hid every error.)
        pass
    if 'Name' in tags:
        description = tags['Name']
    elif 'aws:cloudformation:stack-name' in tags:
        description = tags['aws:cloudformation:stack-name']
    else:
        description = instance_id
    name = instance_id + '_' + snapshot_timestamp
    description = description + ' ' + datetime.strftime(datetime.now(), '%Y-%m-%d %H-%M-%S')
    r = ec2.create_image(InstanceId=instance_id, Name=name, Description=description, NoReboot=not reboot)
    image_id = r['ImageId']
    _print_log('Created image: id=%s name=%s' % (image_id, name))
    # Tag the new image so snapshot-pruning jobs can find and age it.
    image_tags = [
        {'Key': 'SnapshotTimestamp', 'Value': snapshot_timestamp},
        {'Key': 'InstanceId', 'Value': instance_id},
    ]
    if 'Name' in tags:
        image_tags.append({'Key': 'Name', 'Value': tags['Name']})
    if 'aws:cloudformation:stack-name' in tags:
        image_tags.append({'Key': 'StackName', 'Value': tags['aws:cloudformation:stack-name']})
    ec2.create_tags(Resources=[image_id], Tags=image_tags)
    image_tags_string = ' '.join(map(lambda x: '%(Key)s=%(Value)s' % x, image_tags))
    _print_log('Created tags: %s' % (image_tags_string))
    return (image_id, snapshot_timestamp)
def lambda_handler(event, context):
    """Lambda entry point: image the instance named by the $instance_id env var.

    The event JSON may contain {"reboot": true/false}; it defaults to True.

    :return: The id of the created AMI.
    """
    try:
        instance_id = os.environ['instance_id']
    except KeyError:
        print('ERROR: Environment variables must be set: instance_id')
        raise
    try:
        reboot = event.get('reboot', True)
        # BUG FIX: validate the resolved value, not event['reboot'] — the
        # original raised KeyError when the key was omitted, defeating the
        # .get() default of True.
        assert type(reboot) in [bool, int]
    except AssertionError:
        print('ERROR: Event JSON expected: { "reboot": true / false }')
        raise
    image_id, snapshot_timestamp = create_image(instance_id, reboot)
    return image_id
| mludvig/aws-standard-templates | src/lambda-snapshot-instance.py | lambda-snapshot-instance.py | py | 2,534 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "boto3.client",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strftime",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "datetime.da... |
19781798290 | from os import popen
from pathlib import Path
import threading
from tkinter import Tk, Canvas, Entry, Text, Button, PhotoImage
from tkinter import *
from tkinter import ttk
from tkinter.messagebox import OK
import pyautogui
import mouse
import cv2
import pytesseract
from PIL import Image
import threading
import numpy as np
PosXMax,PosYMax,PosXMin,PosYMin=0,0,0,0
CurrSessionData={}
OneAtATime=True
Stop=True
Counter=0
statu="Not Working"
window = Tk()
window.wm_iconbitmap("pokeball.ico")
window.title("PRO Hunt Tracker")
stat=ttk.Treeview(window)
stat['columns']=("Pokemon","Count","Percentage")
stat.column("#0",width=0,minwidth=0)
stat.column("Pokemon",width=150,anchor=W)
stat.column("Count",width=150,anchor=CENTER)
stat.column("Percentage",width=150,anchor=W)
stat.heading("Pokemon",text="Pokemon",anchor=W)
stat.heading("Count",text="Count",anchor=CENTER)
stat.heading("Percentage",text="Percentage",anchor=W)
def endit():
    """Stop the counting loop and show 'Not Working' in red on the GUI."""
    global Stop, statu
    statu = "Not Working"
    Stop = True
    Status.configure(text=statu, fg="Red")
    print("STOP")
def resetit():
    """Stop counting and wipe all per-session encounter data from the table."""
    global Stop, CurrSessionData, statu
    statu = "Not Working"
    Status.configure(text=statu, fg="Red")
    Stop = True
    CurrSessionData.clear()
    # Remove every row from the stats Treeview.
    for row in stat.get_children():
        stat.delete(row)
def destroy():
    # Close the instructions pop-up window.
    pop.destroy()
def Inst():
    """Open the Instructions pop-up window and populate its help text."""
    global pop
    pop=Toplevel(window)
    pop.title("Instructions")
    pop.wm_iconbitmap("pokeball.ico")
    pop.geometry("720x480")
    # OK button closes the pop-up via destroy().
    Ok=Button(pop,text="OK!",command=destroy)
    Line1=Label(pop,text="This tool will help you to stay on track with your hunting current hunting session.",font=("Poppins Regural", 18 * -1))
    How=LabelFrame(pop,text="How to use",font=("Poppins Bold", 22 * -1))
    Line2=Label(pop,text="1. Keep your client's resolution windowed on 1280 x 720.",font=("Poppins Regural", 18 * -1))
    Line3=Label(pop,text="2. Now you will need to set up your coordinates, we will be targeting",font=("Poppins Regural", 18 * -1))
    Line4=Label(pop,text="'Wild X' in your battle window,to do that we will use 2 buttons",font=("Poppins Regural", 18 * -1))
    Line5=Label(pop,text="[Top Left Coord] [Bottom Right Coord] ",font=("Poppins Regural", 18 * -1))
    Line6=Label(pop,text="[Top Left Coord] : Click it and then click on the top area before the",font=("Poppins Regural", 18 * -1))
    Line7=Label(pop,text="'W' in 'Wild',[Bottom Right Coord] same as before but for bottom are after ",font=("Poppins Regural", 18 * -1))
    Line8=Label(pop,text="Pokemon name, after that just click Start!",font=("Poppins Regural", 18 * -1))
    Line9=Label(pop,text="Stop will stop the program and won't count,",font=("Poppins Regural", 18 * -1))
    Line10=Label(pop,text="Reset will remove all enteries and will stop the program.",font=("Poppins Regural", 18 * -1))
    Line11=Label(pop,text="Enjoy hunting :D",font=("Poppins Regural", 22 * -1))
    # Pack the help lines top-to-bottom in order.
    Line1.pack()
    How.pack()
    Line2.pack()
    Line3.pack()
    Line4.pack()
    Line5.pack()
    Line6.pack()
    Line7.pack()
    Line8.pack()
    Line9.pack()
    Line10.pack()
    Line11.pack()
    Ok.pack()
def GetTopLeft():
    """Busy-wait for a left click and record the cursor position as the
    top-left corner of the OCR capture region (globals PosXMax/PosYMax)."""
    global PosXMax, PosYMax
    while True:
        x, y = pyautogui.position()
        if mouse.is_pressed("left"):
            PosXMax, PosYMax = x, y
            print(PosXMax, " ", PosYMax)
            break
def GetBotRight():
    """Busy-wait for a left click and record the cursor position as the
    bottom-right corner of the OCR capture region (globals PosXMin/PosYMin)."""
    global PosXMin, PosYMin
    while True:
        x, y = pyautogui.position()
        if mouse.is_pressed("left"):
            PosXMin, PosYMin = x, y
            break
def OCR(PosXMax,PosYMax,PosXMin,PosYMin):
    """Screenshot the configured region, binarize it and OCR the Pokemon name.

    The region is thresholded on the red channel (bright pixels -> black text
    on a near-white background) before being passed to Tesseract.
    Returns the second whitespace-separated token of the OCR output
    (expected to be the name after 'Wild'), or '' if nothing was read.
    """
    t=""
    sc = pyautogui.screenshot().crop((PosXMax,PosYMax,PosXMin,PosYMin))
    #sc.save("test.png")
    sc = sc.convert('RGBA')
    data = np.array(sc)
    r, g, b, t = data.T
    # Split pixels by red-channel brightness: >200 is treated as text.
    out_areas = r <= 200
    text_areas = r > 200
    # Paint background near-white and text black for cleaner OCR.
    data[..., :-1][out_areas.T] = (252, 252, 252)
    data[..., :-1][text_areas.T] = (0, 0, 0)
    sc = Image.fromarray(data)
    #sc.save("After.png")
    # Hard-coded Windows Tesseract install path.
    pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
    t = pytesseract.image_to_string(sc)
    if t!="":
        # NOTE(review): assumes OCR yields at least two tokens ('Wild <Name>');
        # a single-token result would raise IndexError here — confirm.
        X=t.split()
        t=X[1]
    return t
def UpdateCount():
    """Rebuild the stats Treeview from CurrSessionData with percentages."""
    # Clear all existing rows first.
    for row in stat.get_children():
        stat.delete(row)
    # Re-insert one row per Pokemon: name, count, share of all encounters.
    for index, name in enumerate(CurrSessionData):
        share = CurrSessionData[name] / Counter * 100
        stat.insert(parent='', index='end', iid=index, values=(name, CurrSessionData[name], share))
def StartCount():
    """Background loop: OCR the battle window and tally encounters.

    Runs forever; idles while the global Stop flag is truthy. Each pass reads
    the Pokemon name from the configured screen region and increments its
    per-session counter at most once per battle, guarded by OneAtATime and
    template-matching against the 'VS' battle marker and the minimap.
    """
    while True:
        if not Stop:
            global Counter
            global OneAtATime
            Encounter=OCR(PosXMax,PosYMax,PosXMin,PosYMin) #Reading the Name of the Pokemon
            Hay=np.array(pyautogui.screenshot()) #Full Screen Screenshot converted to np so CV2 can use it
            Hay = Hay[:, :, ::-1].copy() #Fixing Colours
            Needle=cv2.imread("Needle.png") #Our needle will be the VS in battle window, to avoid multiple encounters
            Needle2=cv2.imread("Map.png")#Checks for the map so it break the script when you minimize
            result=cv2.matchTemplate(Hay,Needle,cv2.TM_CCOEFF_NORMED) #Finding VS
            bug=cv2.matchTemplate(Hay,Needle2,cv2.TM_CCOEFF_NORMED)
            bugtest=cv2.matchTemplate(Hay,Needle2,cv2.TM_CCOEFF_NORMED)
            min_val2, max_val2, min_loc2, max_loc2 = cv2.minMaxLoc(bugtest)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
            # print(max_val) #If found will be 99%
            if(max_val2<0.80):
                # Minimap template not visible (client minimized/hidden) -> stop counting.
                endit()
                continue
            if(Encounter !=""):
                if Encounter in CurrSessionData and OneAtATime==True: #Adding Data to Dic if Not first encounter
                    CurrSessionData[Encounter]+=1
                    Counter+=1
                elif Encounter not in CurrSessionData and OneAtATime==True: #if first add its data
                    CurrSessionData.update({Encounter: 1})
                    Counter+=1
            if(max_val < 0.90 and Encounter=="") : #Checking the VS if its not there the bool will be true, and will freeze the counter above
                OneAtATime=True
            elif OneAtATime==True and Encounter !="": #There is a new encounter print it
                print(Encounter.strip(),"Has been seen: ",CurrSessionData[Encounter],"Times",sep=" ")
                print(Counter)
                counter.configure(text=Counter)
                UpdateCount()
                #print("Has been seen: ")
                #print(CurrSessionData[Encounter])
                #print("Times")
                OneAtATime=False
def StartIt():
    """Mark the session as running and launch the counting loop thread."""
    global Stop
    global statu
    Stop = False
    print("Start")
    statu="Working"
    Status.configure(text=statu,fg="Green")
    # Run the OCR/counting loop off the Tk main thread so the GUI stays responsive.
    threading.Thread(target=StartCount).start()
# ------ Build the main window widgets ------
Instruction=Button(window,text="Instructions",width=18,command=Inst)
TopL=Button(window,text="Top Left Coord",command=GetTopLeft)
BotR=Button(window,text="Bottom Right Coord",command=GetBotRight)
Total=Label(window,text="Total Encounters",font=("Poppins Regular", 22 * -1))
counter=Label(window,text=str(Counter),font=("Poppins Regular", 20 * -1))
Status=Label(window,text=statu,font=("Poppins Bold", 26 * -1),fg="Red")
counter.grid(row=4,column=0,pady=2,sticky="NSEW")
Start=Button(window,text="Start",width=18,command=StartIt)
# NOTE(review): this rebinds the module-level boolean flag 'Stop' to a Button
# widget; endit()/StartIt() later overwrite the name with a bool again, which
# happens to work but loses the Button reference. Consider a distinct name.
Stop=Button(window,text="Stop",width=18,command=endit)
Reset=Button(window,text="Reset",width=18,command=resetit)
rowcnt=0
colcnt=0
Buttons=[Instruction,TopL,BotR,Total,counter,Start,Stop,Reset,stat,Status]
# Give each grid row weight so widgets stretch when the window is resized.
for button in Buttons:
    Grid.rowconfigure(window,rowcnt,weight=1)
    #Grid.columnconfigure(window,colcnt,weight=1)
    rowcnt+=1
    #colcnt+=1
# Place the widgets on the two-column grid (stats table occupies column 1).
Instruction.grid(row=0,column=0,pady=2,sticky="NSEW")
TopL.grid(row=1,column=0,pady=2,sticky="NSEW")
BotR.grid(row=2,column=0,pady=2,sticky="NSEW")
Total.grid(row=3,column=0,pady=2,sticky="NSEW")
Status.grid(row=3,column=1,sticky="NSEW")
Start.grid(row=5,column=0,pady=2,sticky="NSEW")
Stop.grid(row=6,column=0,pady=2,sticky="NSEW")
Reset.grid(row=7,column=0,pady=2,sticky="NSEW")
stat.grid(row=0,column=1,pady=2,sticky="NSEW")
window.mainloop()
| MohamedWael3011/PROFarmTracker | gui.py | gui.py | py | 8,410 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tkinter.Tk",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Treeview",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "tkinter.Button",
"lin... |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 16 12:27:12 2022

@author: Delaeyram
"""
import streamlit as st
import cv2

st.markdown("Built by Eyram Dela")
run = st.checkbox("Run")
FRAME_WINDOW = st.image([])
video = cv2.VideoCapture(0)

# Load the YOLOv4-tiny detector (weights + network config).
net = cv2.dnn.readNet("dnn_model/yolov4-tiny.weights", "dnn_model/yolov4-tiny.cfg")
model = cv2.dnn_DetectionModel(net)
model.setInputParams(size=(320, 320), scale=1 / 255)

# Class id -> human-readable label, one label per line in the file.
classes = []
with open("dnn_model/classes.txt", "r") as file_object:
    for class_name in file_object.readlines():
        class_name = class_name.strip()
        classes.append(class_name)
print(classes)

while run:
    success, frame = video.read()
    # BUG FIX: the original never checked the read() flag; a failed grab
    # returns frame=None and crashed inside cvtColor.
    if not success:
        continue
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    (class_ids, scores, bboxes) = model.detect(frame)
    for class_id, score, bbox in zip(class_ids, scores, bboxes):
        (x, y, w, h) = bbox
        class_name = classes[class_id]
        cv2.putText(frame, class_name, (x, y - 10), cv2.FONT_HERSHEY_PLAIN, 2, (100, 0, 50), 2)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (100, 0, 50), 2)
    FRAME_WINDOW.image(frame)
| eyradel/hybrid | hybrid.py | hybrid.py | py | 1,119 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.markdown",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "streamlit.checkbox",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "streamlit.image",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCaptur... |
70663470184 | import pandas as pd
import sqlite3
def load_data(messages_filepath, categories_filepath):
    '''
    Read the messages and categories csv files into DataFrames.

    Args:
        messages_filepath: str. Path to the messages csv.
        categories_filepath: str. Path to the categories csv.

    Returns:
        Tuple of (messages DataFrame, categories DataFrame).
    '''
    return pd.read_csv(messages_filepath), pd.read_csv(categories_filepath)
def clean_data(messages_df, categories_df):
    '''
    Create a clean, combined dataframe of messages and category dummy variables.

    Args:
        messages_df: DataFrame. Contains 'id' column for joining.
        categories_df: DataFrame. Contains 'id' column for joining and
            'categories' column with strings of categories separated by ;.

    Returns:
        DataFrame with one 0/1 column per category and duplicates removed.
    '''
    # Merge datasets
    df = pd.merge(messages_df, categories_df, on='id')
    # Drop duplicates
    df.drop_duplicates(inplace=True)
    # Split the single 'categories' string into one column per category.
    new_cat_df = df.categories.str.split(';', expand=True)
    # Column names come from the category cells themselves, e.g.
    # 'related-1' -> 'related'. BUG FIX: derive them from new_cat_df (the
    # split frame); the original used df.iloc[0], whose first row holds the
    # unrelated message columns and has the wrong length.
    col_names = new_cat_df.iloc[0].str[:-2]
    new_cat_df.columns = col_names
    for col in col_names:
        # Keep only the trailing 0/1 flag of each 'name-<flag>' cell.
        new_cat_df[col] = new_cat_df[col].str[-1].astype(int)
    df = pd.concat([df.drop('categories', axis=1), new_cat_df], axis=1)
    # Collapse the stray 'related' value of 2 into 1 (binary target).
    df.loc[(df.related == 2), 'related'] = 1
    return df
def save_data(df, database_filepath):
    '''
    Write *df* to the 'messages' table of the sqlite database at
    *database_filepath*, replacing any existing table.
    '''
    # The connection context manager commits on success; close afterwards.
    with sqlite3.connect(database_filepath) as conn:
        df.to_sql('messages', con=conn, if_exists='replace', index=False)
    conn.close()
def main(messages_filepath, categories_filepath, database_filepath):
    '''
    Extract messages and categories data from csv files, clean the data and
    save merged data into database. Skips cleaning if the database already
    holds one row per unique categories record.
    '''
    print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
          .format(messages_filepath, categories_filepath))
    messages_df, categories_df = load_data(messages_filepath,
                                           categories_filepath)
    conn = sqlite3.connect(database_filepath)
    # Determine whether the db already holds the cleaned data; a missing
    # 'messages' table raises OperationalError and counts as out of date.
    # (The original duplicated the clean/save logic in the else and except
    # branches; folded into a single flag.)
    try:
        row_count = pd.read_sql('SELECT COUNT(*) FROM messages', conn).iloc[0][0]
        up_to_date = categories_df.drop_duplicates().shape[0] == row_count
    except sqlite3.OperationalError:
        up_to_date = False
    if up_to_date:
        print('Database is up to date')
    else:
        print('Cleaning data...')
        df = clean_data(messages_df, categories_df)
        print('Saving data...')
        save_data(df, database_filepath)
    conn.close()
if __name__ == '__main__':
    # Command-line entry point: collect the three file paths and run the
    # ETL pipeline.
    import argparse
    arg_parser = argparse.ArgumentParser(description='Categorize disaster messages')
    for arg_name, arg_help in (
            ("messages_filepath", "File path for messages csv"),
            ("categories_filepath", "File path for categories csv"),
            ("database_filepath", "File path for database")):
        arg_parser.add_argument(arg_name, help=arg_help)
    cli_args = arg_parser.parse_args()
    main(messages_filepath=cli_args.messages_filepath,
         categories_filepath=cli_args.categories_filepath,
         database_filepath=cli_args.database_filepath)
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_... |
43750959743 | # coding: utf-8
from __future__ import unicode_literals
import datetime
import unittest
from uwsgi_log_plugin import import_from_uwsgi_log
class UwsgiLogPluginTestCase(unittest.TestCase):
    """Checks that import_from_uwsgi_log parses the sample log correctly."""

    def test_import_from_uwsgi_log(self):
        table = import_from_uwsgi_log("uwsgi.log", "utf-8")
        self.assertEqual(len(table), 2)
        expected_rows = [
            table.Row(
                pid=879,
                ip="127.0.0.1",
                datetime=datetime.datetime(2015, 6, 1, 11, 23, 16),
                generation_time=0.17378,
                http_path="/something",
                http_verb="GET",
                http_version=1.1,
                http_status=404,
            ),
            table.Row(
                pid=31460,
                ip="127.0.1.1",
                datetime=datetime.datetime(2015, 7, 15, 23, 49, 20),
                generation_time=0.000466,
                http_path="/about",
                http_verb="OPTIONS",
                http_version=1.1,
                http_status=200,
            ),
        ]
        for position, expected in enumerate(expected_rows):
            self.assertEqual(table[position], expected)
| turicas/rows | examples/library/tests_uwsgi_log.py | tests_uwsgi_log.py | py | 1,076 | python | en | code | 851 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "uwsgi_log_plugin.import_from_uwsgi_log",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "call"
},
{
"a... |
22279183529 | import time
from wflow_sdk.core.dataset import WFlowDataset
from torch.utils.data import DataLoader
from torchvision import transforms
from torch.nn.utils.rnn import pad_sequence
import random
import json
import torch
import sys
import librosa
sys.path.append('../')
from dataset.tokenization import BertTokenizer
from utils import _build_vid_pos_ids
# Model / dataset hyper-parameters, wrapped for attribute access.
config = json.load(open('./conf/uvat.json','r'))
import utils
config = utils.Config(config)
import numpy as np
# nid -> query mapping; the loop overwrites query_nid per line, so only the
# last line is kept — presumably the file holds a single json dict (verify).
with open('./data/quert_data_train.json')as ff:
    for ll in ff:
        query_nid = json.loads(ll.strip())
num=0
# ImageNet-style frame preprocessing (rebound later with an extra
# ToPILImage step before get_mask_video is defined).
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
def pad_tensors_img(tensors, lens=None, pad=0):
    """Stack a batch of frame tensors, zero-padding along the time axis.

    Args:
        tensors: list of tensors, each shaped (T_i, 3, H, W) — assumes all
            share the same H/W (verify against caller).
        lens: optional list of valid lengths; defaults to each tensor's T_i.
        pad: fill value for the padded region.

    Returns:
        Tensor of shape (B, max_T, 3, H, W), or an empty tensor when the
        first entry is empty.
    """
    if len(tensors[0]) == 0:
        return torch.zeros(0, dtype=tensors[0].dtype)
    if lens is None:
        lens = [t.shape[0] for t in tensors]
    max_len = max(lens)
    bs = len(tensors)
    h, w = tensors[0].shape[2:]
    dtype = tensors[0].dtype
    output = torch.zeros(bs, max_len, 3, h, w, dtype=dtype)
    if pad:
        output.fill_(pad)
    for i, (t, l) in enumerate(zip(tensors, lens)):
        # BUG FIX: the original indexed with several ellipses
        # (output.data[i, :l, ..., ..., ...]); torch/numpy indexing allows
        # at most one ellipsis per index expression.
        output[i, :l] = t
    return output
def pad_tensors_audio(tensors, lens=None, pad=0):
    """Stack a batch of (C, T_i) audio-feature tensors, padding time to max T.

    Args:
        tensors: list of tensors shaped (C, T_i) with a common channel count.
        lens: valid length per tensor. NOTE(review): the default uses
            shape[0], which for (C, T) input is the channel count, not the
            time length — callers always pass explicit lens; confirm intent.
        pad: fill value for padded positions.

    Returns:
        Tensor of shape (B, C, max_T), or an empty tensor when the first
        entry is empty.
    """
    if lens is None:
        lens = [item.shape[0] for item in tensors]
    if len(tensors[0]) == 0:
        return torch.zeros(0, dtype=tensors[0].dtype)
    max_len = max(lens)
    channels, _ = tensors[0].shape
    batch = len(tensors)
    out = torch.zeros(batch, channels, max_len, dtype=tensors[0].dtype)
    if pad:
        out.data.fill_(pad)
    for idx, (item, length) in enumerate(zip(tensors, lens)):
        out.data[idx, :, :length] = item.data
    return out
# Patch-size / clip-length settings pulled from the loaded config; these
# drive the attention-mask shapes computed in the collate functions below.
spatial_patch_size = config.spatial_patch_size
temporal_patch_size = config.temporal_patch_size
audio_temporal_patch_size = config.audio_temporal_patch_size
max_num_image = config.max_image
def fn(d):
    """Debug collate helper: print the keys of the first sample and return
    the batch unchanged."""
    print(d[0].keys())
    return d
# Re-read the query file (rebinds the query_nid loaded above; only the last
# line is kept — presumably the file holds a single json dict; verify).
with open('./data/quert_data_train.json')as ff:
    for ll in ff:
        query_nid = json.loads(ll.strip())
# WordPiece tokenizer used by get_mask_title / get_query_label_mask.
tokener = BertTokenizer.from_pretrained('./dataset')
#dict_keys(['DATA_NID', 'DATA_AUDIO', 'DATA_TAG', 'DATA_CATEGORY', 'DATA_SUB_CATEGORY', 'DATA_VIDEO_FRAMES'])
def get_query_label_mask(nid):
    """Sample a (query, label) pair for video id *nid*.

    With probability ~0.52 one of the video's own queries is drawn
    (label 1); otherwise a query from a random other video is drawn
    (label 0). Unknown nids always yield a random negative query.

    Returns:
        (token-id tensor, 0/1 label tensor, attention-mask tensor of ones)
    """
    if nid in query_nid:
        if isinstance(query_nid[nid], str):
            # NOTE(review): eval on file contents — consider ast.literal_eval.
            query = eval(query_nid[nid])
        else:
            # BUG FIX: was 'quert = ...', leaving 'query' undefined on this
            # branch and crashing in random.sample below.
            query = query_nid[nid]
        if random.random() > 0.48:
            # Positive sample: one of the video's own queries.
            query = random.sample(query, 1)[0]
            query_data_label = 1
        else:
            # Negative sample: a query from a randomly chosen other video.
            # list(...) because random.sample requires a sequence, not a
            # dict keys view (TypeError on modern Python).
            random_key = random.sample(list(query_nid), 1)[0]
            query = random.sample(eval(query_nid[random_key]), 1)[0]
            query_data_label = 0
    else:
        random_key = random.sample(list(query_nid), 1)[0]
        query = random.sample(eval(query_nid[random_key]), 1)[0]
        query_data_label = 0
    query = tokener.tokenize(query)
    query = query + ['[SEP]']
    query = tokener.convert_tokens_to_ids(query)
    query_len = min(config.max_text_len, len(query))
    return torch.tensor(query[:query_len]), torch.tensor(query_data_label), torch.ones(query_len, dtype=torch.long)
def get_mask_title(title):
    """Tokenize a video title, truncated to config.max_text_len tokens.

    Empty titles are replaced with a fixed placeholder string.

    Returns:
        (token-id tensor, attention-mask tensor of ones)
    """
    if len(title) < 1:
        title = "视频无标题"
    text = tokener.tokenize(title)
    text_id = tokener.convert_tokens_to_ids(text)
    len_title = min(config.max_text_len, len(text_id))
    return torch.tensor(text_id[:len_title]), torch.ones(len_title, dtype=torch.long)
    # NOTE(review): the original contained an unreachable random-[MASK]ing
    # pass after the return above (masking effectively disabled); removed
    # as dead code.
def get_mask_audio(wav):
    """Compute 13-dim MFCC features for a raw waveform.

    The waveform is truncated to config.max_audio samples and peak-
    normalized before librosa extracts MFCCs at a fixed 16 kHz rate.

    Returns:
        (float32 tensor of shape (13, T), T)
    """
    try:
        wav = wav[: config.max_audio]
        # Peak-normalize; epsilon guards against an all-zero clip.
        wav = wav / (max(wav) + 1e-7)
        audio_mfcc = librosa.feature.mfcc(wav, sr=16000, n_mfcc=13)
        audio_mfcc = torch.tensor(audio_mfcc).to(torch.float32)
        return audio_mfcc, audio_mfcc.shape[-1]
    except Exception as e:
        # BUG FIX: the original printed the error and fell through,
        # implicitly returning None; the collate then crashed while
        # unpacking. Log and re-raise so the real failure surfaces here.
        print(e)
        raise
# NOTE: rebinds the module-level `transform` defined above, adding a
# ToPILImage step so get_mask_video can accept raw arrays/tensors.
transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
def get_mask_video(video):
    """Preprocess a list of frames into a (num, 3, 224, 224) tensor whose
    length is a multiple of config.temporal_patch_size.

    Long clips are truncated to the largest multiple of the patch size;
    short clips are zero-padded up to a single patch.

    Returns:
        (stacked frame tensor, number of frames kept)
    """
    patch = config.temporal_patch_size
    frames = [transform(frame) for frame in video]
    if len(frames) >= patch:
        num = int(len(frames) // patch * patch)
        frames = frames[:num]
    else:
        frames = frames + [torch.zeros([3, 224, 224])
                           for _ in range(patch - len(frames))]
        num = patch
    return torch.stack(frames), num
import time
from concurrent.futures import ThreadPoolExecutor
# Thread pools used by collate0 to parallelize per-sample preprocessing.
# NOTE(review): collate0 submits all four maps to `executor`; executor2-4
# appear unused — confirm before removing.
executor = ThreadPoolExecutor(max_workers=8)
executor2 = ThreadPoolExecutor(max_workers=8)
executor3 = ThreadPoolExecutor(max_workers=8)
executor4 = ThreadPoolExecutor(max_workers=8)
import gc
def vqa_collate_no(inputs):
    """No-op collate used for benchmarking: discard the batch, run a GC
    pass and return a dummy tensor."""
    gc.collect()
    return torch.ones((10, 10))
def collate0(inputs):
    """First-stage collate: run per-sample preprocessing of every modality
    on the module-level thread pool and collect the results.

    Args:
        inputs: list of sample dicts holding DATA_NID, DATA_AUDIO,
            DATA_TAG, DATA_CATEGORY, DATA_SUB_CATEGORY, DATA_VIDEO_FRAMES.

    Returns:
        dict of per-sample feature lists, later batched by vqa_collate.
    """
    s = time.time()
    data_audios = []
    data_tags = []
    data_nids = []
    data_video_frames = []
    input_len = len(inputs)
    audio_mfccs = []
    len_audios = []
    title_ids = []
    title_masks = []
    query_ids = []
    query_data_labels = []
    query_masks = []
    tag1s = []
    tag2s = []
    videos = []
    len_videos = []
    # First pass: pull the raw fields out of every sample dict.
    for input in inputs:
        ss = time.time()
        nid = input.get('DATA_NID')
        data_audio = input.get('DATA_AUDIO')
        data_tag = input.get('DATA_TAG')
        ss2 = time.time()
        ss3 = time.time()
        tag1 = input.get('DATA_CATEGORY')
        tag2 = input.get('DATA_SUB_CATEGORY')
        # cap the number of frames kept per video
        data_video_frame = input.get('DATA_VIDEO_FRAMES', [])[:max_num_image]
        ee = time.time()
        data_nids.append(nid)
        data_audios.append(data_audio)
        data_video_frames.append(data_video_frame)
        data_tags.append(data_tag)
        ee = time.time()
    s2 = time.time()
    s22 = time.time()
    # Fan the heavy preprocessing out to the thread pool. Note all four
    # maps go to the same pool ('executor') although executor2-4 exist.
    results = executor.map(get_mask_audio, data_audios)
    results2 = executor.map(get_mask_video, data_video_frames)
    results3 = executor.map(get_mask_title, data_tags)
    results4 = executor.map(get_query_label_mask, data_nids)
    s3 = time.time()
    # Gather audio MFCCs and their valid lengths.
    for result in results:
        audio_mfcc, len_audio = result
        audio_mfccs.append(audio_mfcc)
        len_audios.append(len_audio)
    s4 = time.time()
    # Gather preprocessed video clips.
    for result in results2:
        video, len_video = result
        videos.append(video)
        len_videos.append(len_video)
    s5 = time.time()
    # Gather title token ids and masks.
    for result in results3:
        title_id, title_mask = result
        title_ids.append(title_id)
        title_masks.append(title_mask)
    s6 = time.time()
    # Gather sampled (query, label) pairs.
    for result in results4:
        query_id, query_data_label, query_mask = result
        query_ids.append(query_id)
        query_data_labels.append(query_data_label)
        query_masks.append(query_mask)
    # Drop the raw samples before returning to cap memory usage.
    del inputs
    gc.collect()
    outputs = {
        "audio_mfccs": audio_mfccs,
        "len_audios": len_audios,
        "videos": videos,
        "len_videos": len_videos,
        "title_ids": title_ids,
        "title_masks": title_masks,
        "query_ids": query_ids,
        "query_data_labels": query_data_labels,
        "query_masks": query_masks,
    }
    return outputs
def vqa_collate(feats):
    """Batch the per-sample feature lists produced by collate0 into padded
    tensors plus matching position ids and attention masks.

    Args:
        feats: list of dicts as returned by collate0.

    Returns:
        dict with title/query token batches, video and audio feature
        batches, their position ids / attention masks, and the 0/1 target.
    """
    audio_mfccs = []
    len_audios = []
    videos = []
    len_videos = []
    title_ids = []
    title_masks = []
    query_ids = []
    query_data_labels = []
    query_masks = []
    for feat in feats:
        audio_mfccs.extend(feat.get("audio_mfccs"))
        len_audios.extend(feat.get("len_audios"))
        videos.extend(feat.get("videos"))
        len_videos.extend(feat.get("len_videos"))
        title_ids.extend(feat.get("title_ids"))
        title_masks.extend(feat.get("title_masks"))
        query_ids.extend(feat.get("query_ids"))
        query_data_labels.extend(feat.get("query_data_labels"))
        query_masks.extend(feat.get("query_masks"))
    input_len = len(audio_mfccs)
    # --- title tokens: pad to the longest title in the batch ---
    if len(title_ids[0]) != 0:
        title_ids = pad_sequence(title_ids, batch_first=True, padding_value=0)
        title_position_ids = torch.arange(0, title_ids.size(1), dtype=torch.long
                                          ).unsqueeze(0).repeat(input_len, 1)
        title_attn_masks = pad_sequence(title_masks, batch_first=True, padding_value=0)
    else:
        title_ids = torch.zeros(0, dtype=torch.long)
        title_position_ids = torch.zeros(0, dtype=torch.long)
        title_attn_masks = torch.zeros(0, dtype=torch.long)
    # --- query tokens ---
    if len(query_ids[0]) != 0:
        query_ids = pad_sequence(query_ids, batch_first=True, padding_value=0)
        query_position_ids = torch.arange(0, query_ids.size(1), dtype=torch.long
                                          ).unsqueeze(0).repeat(input_len, 1)
        query_attn_masks = pad_sequence(query_masks, batch_first=True, padding_value=0)
    else:
        query_ids = torch.zeros(0, dtype=torch.long)
        query_position_ids = torch.zeros(0, dtype=torch.long)
        query_attn_masks = torch.zeros(0, dtype=torch.long)
    query_data_labels = torch.stack(query_data_labels, dim=0)
    # --- video frames: pad along time, then build patch-level masks ---
    img_feat = pad_tensors_img(videos, len_videos)
    if len(img_feat) != 0:
        bs, tmp_image_mask_shape, c, h, w = img_feat.shape
        # (B, T, C, H, W) -> (B, C, T, H, W) for the video backbone.
        img_feat = img_feat.permute(0, 2, 1, 3, 4).contiguous()
        image_patch_number = (tmp_image_mask_shape // temporal_patch_size) * ((w // spatial_patch_size) ** 2)
        video_attn_masks = torch.zeros([bs, image_patch_number], dtype=torch.long)
        for i, t in enumerate(len_videos):
            tmp_patch_num = (t // temporal_patch_size) * ((w // spatial_patch_size) ** 2)
            video_attn_masks.data[i, :tmp_patch_num] = 1
        video_position_ids = _build_vid_pos_ids(tmp_image_mask_shape // temporal_patch_size, w // spatial_patch_size, w // spatial_patch_size).repeat(input_len, 1, 1)
    else:
        video_position_ids = torch.zeros(0, dtype=torch.long)
        video_attn_masks = torch.ones(0, dtype=torch.long)
    # --- audio features ---
    audio_feat = pad_tensors_audio(audio_mfccs, len_audios)
    if len(audio_feat) != 0:
        bs, _, tmp_audio_mask_shape = audio_feat.shape
        audio_patch_number = tmp_audio_mask_shape // audio_temporal_patch_size
        audio_attn_masks_ = torch.zeros([bs, audio_patch_number], dtype=torch.long)
        for i, t in enumerate(len_audios):
            tmp_patch_num = t // audio_temporal_patch_size
            # BUG FIX: the original sliced ':audio_temporal_patch_size'
            # here, ignoring the per-sample tmp_patch_num it had just
            # computed, so every mask enabled a fixed number of patches.
            audio_attn_masks_.data[i, :tmp_patch_num] = 1
        audio_position_ids = torch.arange(0, audio_patch_number, dtype=torch.long).unsqueeze(0).repeat(input_len, 1)
    else:
        audio_attn_masks_ = torch.zeros(0, dtype=torch.long)
        audio_position_ids = torch.ones(0, dtype=torch.long)
    gc.collect()
    batch = {'title_id': title_ids,
             'title_position_id': title_position_ids,
             'title_attn_mask': title_attn_masks,
             'query_id': query_ids,
             'query_position_id': query_position_ids,
             'query_attn_mask': query_attn_masks,
             'img_feat': img_feat,
             'video_position_id': video_position_ids,
             'video_attn_mask': video_attn_masks,
             'audio_feat': audio_feat,
             'audio_position_id': audio_position_ids,
             'audio_attn_mask': audio_attn_masks_,
             'target': query_data_labels}
    return batch
if __name__ == "__main__":
train_dataset = WFlowDataset(name="duanxiao_video_train", version=1)
train_dataloader = DataLoader(train_dataset, batch_size=5,collate_fn=vqa_collate, pin_memory=True)
#print("nids | video_frames_num | video_frames[0].shape | label")
for ret in train_dataloader:
print(ret.keys())
print("title_id:",ret['title_id'])
print("title_position_id:",ret['title_position_id'])
print("title_attn_mask:",ret['title_attn_mask'])
print("query_id:",ret['query_id'])
print("query_position_id:",ret['query_position_id'])
print('query_attn_mask:',ret['query_attn_mask'])
print("img_feat shape:",ret['img_feat'].shape)
print("video_position_id shape:",ret['video_position_id'].shape)
print("video_attn_mask shape:",ret['video_attn_mask'].shape)
print("audio_feat shape:",ret['audio_feat'].shape)
print("audio_position_id shape:",ret['audio_position_id'].shape)
print("audio_attn_mask shape:",ret['audio_attn_mask'].shape)
print("target:",ret['target'])
break
# for nid in ret["DATA_NID"]:
# print(nid)
# if nid not in query_nid:
# print(nid)
# if num%10000==0:
# print(num)
#video_frames_num = ret["DATA_VIDEO_FRAMES_NUM"]
#video_frames = ret["DATA_VIDEO_FRAMES"]
#label = ret["DATA_LABEL"]
#print(nids, "|", video_frames_num, "|", video_frames[0].shape, "|", label)
| MLgdg/Video-Clip | video_class/dataset/wflow_dataset.py | wflow_dataset.py | py | 16,264 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "utils.Config",
"line_numb... |
18253181418 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from .KITConfig import KITConfig
from .kitdata import KITData
from .kitlodger import KITLodger
from collections import OrderedDict
from .Utils import kitutils
import itertools
import logging
class KITMatplotlib(object):
def __init__(self, cfg=None, is_cfg_new=None):
    """Initialize empty graph/lodger containers, set up logging and load
    all style parameters from *cfg*.

    Args:
        cfg: KITConfig-like mapping with the plot's style sections.
        is_cfg_new: flag forwarded by the caller; stored untouched.
    """
    self.__graphs = []    # [x, y(, dx, dy)] lists ready for plotting
    self.__lodgers = []   # KITLodger objects collected via addGraph
    self.logger = logging.getLogger(__name__)
    self.logger.setLevel(logging.DEBUG)
    # load style parameters from cfg file
    self.__initStyle(cfg)
    self.__is_cfg_new = is_cfg_new
def __initStyle(self, cfg):
    """ Loads and sets various parameters from cfg file which are then used
    to create the desired plot.
    """
    self.cfg = cfg
    # Canvas Options: figure size in cm (converted to inch in draw())
    self.canvasSize = kitutils.extractList(cfg['Canvas','CanvasSize'], 'float')
    # Pad Options: [left, bottom, width, height] of the axes
    self.grid = True
    self.gridOptions = ('w', '-', '0.5')
    self.padSize = kitutils.extractList(cfg['Canvas','PadSize'], 'float')
    # Title options
    self.title = cfg['Title','Title']
    self.titleFont = cfg['Title','Font']
    self.titleFontSize = cfg['Title','FontSize']
    self.titleFontStyle = cfg['Title','FontStyle']
    # offset is given in percent above the default title position
    self.titleOffset = 1 + cfg['Title','Offset']/100.
    # Axis Options
    self.labelX = cfg['XAxis','Title']
    self.labelY = cfg['YAxis','Title']
    self.rangeX = kitutils.extractList(cfg['XAxis','Range'], "float")
    self.rangeY = kitutils.extractList(cfg['YAxis','Range'], "float")
    self.fontX = cfg['XAxis','Font']
    self.fontY = cfg['YAxis','Font']
    self.fontSizeX = cfg['XAxis','FontSize']
    self.fontSizeY = cfg['YAxis','FontSize']
    self.fontStyleX = cfg['XAxis','FontStyle']
    self.fontStyleY = cfg['YAxis','FontStyle']
    self.absX = cfg['XAxis','Abs']
    self.absY = cfg['YAxis','Abs']
    self.logX = cfg['XAxis','Log']
    self.logY = cfg['YAxis','Log']
    self.tickX = cfg['XAxis','SciTick']
    self.tickY = cfg['YAxis','SciTick']
    # Marker Options
    self.markerSize = cfg['Marker','Size']
    self.markerSet = kitutils.extractList(cfg['Marker','Set'])
    # list of graph indices drawn with a white (hollow) marker face
    self.hollowMarker = kitutils.extractList(cfg['Marker','HollowMarker'])
    # Line options
    self.colorPalette = cfg['Line','ColorPalette']
    self.colorSet = kitutils.extractList(cfg['Line','Color'])
    self.lineWidth = cfg['Line','Width']
    self.lineStyle = kitutils.extractList(cfg['Line','Style'])
    self.err = cfg['Line','ErrorBars']
    # KITPlot specific options
    self.norm = kitutils.extractList(cfg['Misc','Normalization'])
    self.splitGraph = cfg['Misc','SplitGraph']
    # legend options
    self.__entryDict = cfg['Legend','EntryList']
    self.legPosition = cfg['Legend','Position']
    # marker shorthand -> matplotlib marker name
    self.markers = {'s': 'square', 'v': 'triangle_down', '^': 'triangle_up',
                    '<': 'triangle_left', '>': 'triangle_right',
                    '8': 'octagon', 'p': 'pentagon', '*': 'star',
                    'h': 'hexagon1', 'H': 'hexagon2',
                    'D': 'diamond', 'd': 'thin_diamond', 'P': 'plus_filled',
                    'X': 'x_filled'}
    # index 0 means "no line"
    self.lines = ['None', '-', '--', '-.', ':']
    self.colors = self.__initColor()
    return True
def __initColor(self):
    """Return the ordered list of color keys to cycle through, based on
    the 'ColorPalette' config value ('std' matplotlib colors or the
    KITcolor palette)."""
    # standard mpl colorSet
    mpl_std = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
    # KITcolor dictionary
    self.KITcolor = kitutils.get_KITcolor()
    if self.colorPalette == "std":
        # reorder the base colors according to the user-given color set
        return [item for _, item in sorted(zip(self.colorSet, mpl_std))]
    if self.colorPalette != "KIT":
        print("Warning:::Invalid 'ColorPalette' value. Using KITcolor as default")
    return list(self.KITcolor.keys())
def addGraph(self, arg):
    """ Converts data of KITData objects or lists into a respective format
    and writes them into .__graphs. Lodgers are separated and written into
    .__lodgers.

    Args: x, y or KITData
    """
    x = []
    y = []
    dx = []
    dy = []
    if isinstance(arg, KITData):
        if KITData().getRPunchDict() == None:
            # toggle absolute mode
            if self.absX:
                x = list(np.absolute(arg.getX()))
            else:
                x = arg.getX()
            if self.absY:
                y = list(np.absolute(arg.getY()))
            else:
                y = arg.getY()
            # get error bars if present
            if arg.getdX() != [] and arg.getdY() != []:
                dx = arg.getdX()
                dy = arg.getdY()
            elif arg.getdX() == [] and arg.getdY() == []:
                pass
            else:
                raise ValueError("Check data table. Only 2 (x,y) or "
                                 "4 (x,y,dx,dy) coordinates are allowed.")
            # create graph list
            if dx == [] and dy == []:
                self.__graphs.append([x, y])
            elif dx != [] and dy != []:
                self.__graphs.append([x, y, dx, dy])
            else:
                raise ValueError("z-error not implemented yet")
        # Rpunch
        else:
            raise ValueError("Dictionary error")
    elif isinstance(arg, list) and len(arg) in [2, 4]:
        if self.absX:
            x = list(np.absolute(arg[0]))
        else:
            # BUG FIX: was 'x = arg', which stored the whole
            # [x, y(, dx, dy)] bundle as the x coordinates.
            x = arg[0]
        if self.absY:
            y = list(np.absolute(arg[1]))
        else:
            y = arg[1]
        # BUG FIX: was 'len(args)' — undefined name; the parameter is 'arg'.
        if len(arg) == 4:
            dx = arg[2]
            dy = arg[3]
        # create graph list
        if dx == [] and dy == []:
            self.__graphs.append([x, y])
        elif dx != [] and dy != []:
            self.__graphs.append([x, y, dx, dy])
        else:
            raise ValueError("z-error not implemented yet")
    # add lodger
    elif isinstance(arg, KITLodger):
        self.__lodgers.append(arg)
    else:
        raise ValueError("Cant add following graph: " + str(arg))
    return True
def draw(self, fileList):
    """Build the matplotlib figure: convert every dataset in *fileList*
    into a graph, apply all configured style options and return the
    finished figure.
    """
    # create self.__graphs list
    for i, dset in enumerate(fileList):
        self.addGraph(dset)
    # read and adjust .__entryDict before drawing
    self.readEntryDict(len(self.__graphs),self.getDefaultEntryDict(fileList))
    # interpret all entries in a single file as separate graphs
    if self.splitGraph is True and len(self.__graphs) == 1:
        self.__graphs = [list(item) for item in zip(*self.__graphs[0])]
        # adjust entryDict to the new number of graphs
        newLength = len(self.__graphs)
        if len(self.__entryDict) != newLength:
            self.__entryDict = OrderedDict([])
            for i in range(0,newLength):
                self.__entryDict.update({str(i) : "Data"+str(i)})
            self.cfg["Legend","EntryList"] = self.__entryDict
    elif self.splitGraph is True and len(self.__graphs) != 1:
        print("Warning::Can only split single graph. Request rejected")
    # apply user defined normalization or manipulation of y values
    kitutils.manipulate(self.__graphs, self.norm)
    # create an empty canvas with canvas size in [inch]: 1 inch = 2.54 cm
    fig = plt.figure(figsize=list(map(lambda x: x/2.54, self.canvasSize)))
    # specify (nrows, ncols, axnum)
    ax = fig.add_subplot(1, 1, 1)
    # adjust pad size: [left, bottom, width, height]
    ax.set_position(self.padSize)
    # scientific notation on the requested axes
    if self.tickX:
        plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
    if self.tickY:
        plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
    # draw every graph; hollow markers get a white face
    for i, table in enumerate(self.__graphs):
        if isinstance(self.hollowMarker, list) and i in self.hollowMarker:
            markerface = 'white'
        else:
            markerface = self.getColor(i)
        ax.plot(table[0],                   # x-axis
                table[1],                   # y-axis
                color=self.getColor(i),     # line color
                marker=self.getMarker(i),   # marker style
                markersize=self.markerSize,
                markerfacecolor=markerface,
                linewidth=self.lineWidth,
                linestyle=self.getLineStyle(i),
                label=self.getLabel(i))
    # overlay error bars for 4-column ([x, y, dx, dy]) tables
    for i, table in enumerate(self.__graphs):
        if len(table) == 4 and self.err == True:
            ax.errorbar(table[0],table[1],xerr=table[2],yerr=table[3],
                        color=self.getColor(i),
                        elinewidth=1)
        elif len(table) != 4 and self.err == True:
            print("Warning::Can't find x- and y-errors in file. Request "
                  "rejected.")
    # set titles and axis labels
    ax.set_title(self.title,
                 fontsize=self.titleFontSize,
                 y=self.titleOffset,
                 fontweight=self.titleFontStyle)
    ax.set_xlabel(self.labelX,
                  fontsize=self.fontSizeX,
                  fontweight=self.fontStyleX)
    ax.set_ylabel(self.labelY,
                  fontsize=self.fontSizeY,
                  fontweight=self.fontStyleY)
    # set log styles
    if self.logX:
        ax.semilogx()
    if self.logY:
        ax.semilogy()
    # set grid
    if self.grid == True:
        ax.grid()
    # set axis range manually ('auto' keeps matplotlib's default)
    if self.rangeX != 'auto':
        ax.set_xlim(self.rangeX)
    if self.rangeY != 'auto':
        ax.set_ylim(self.rangeY)
    self.setLegend(ax)
    return fig
def setLegend(self, obj):
    """Attach the legend to axes *obj*, with handles and labels reordered
    according to the 'EntryList' section of the cfg and placed according
    to the 'Position' option."""
    # get names from cfg and lodger labels
    graphEntries = [items[1] for items in list(self.__entryDict.items())]
    total_len = len(self.__graphs)
    # reorder legend items according to 'EntryList'
    handles, labels = obj.get_legend_handles_labels()
    handles = kitutils.adjustOrder(handles, self.__entryDict, total_len)
    labels = kitutils.adjustOrder(labels, self.__entryDict, total_len)
    # simple corner placements share one code path
    corner_loc = {"TL": 'upper left', "BL": 'lower left',
                  "TR": 'upper right', "BR": 'lower right'}
    if self.legPosition == "auto":
        obj.legend(handles, labels)
    elif self.legPosition in corner_loc:
        obj.legend(handles, labels, loc=corner_loc[self.legPosition])
    elif self.legPosition == "test2":
        obj.legend(handles, labels, bbox_to_anchor=(0., 1.17, 1., .102),
                   loc='upper right', ncol=3, mode="expand", borderaxespad=0.)
    elif self.legPosition == "test":
        obj.legend(handles, labels, bbox_to_anchor=(0., 0., 1., 1.),
                   loc='lower left', ncol=3, mode="expand", borderaxespad=0.)
    elif self.legPosition == "below":
        obj.legend(handles, labels, bbox_to_anchor=(0., -0.32, 1., .102),
                   loc='lower center', ncol=3, mode="expand", borderaxespad=0.)
    return True
def getLabel(self, index):
label = [items[1] for items in list(self.__entryDict.items())]
return label[index]
def getMarker(self, index):
""" Returns a valid marker value for matplotlib's plot() function. If
'MarkerSet' is a list, the method will cycle the list's items until all
graphs are taken care of.
Args:
index (int): represents an iterator marking a certain graph in
.__graphs
"""
try:
# assign same marker to all graphs
if isinstance(self.markerSet, int):
return list(self.markers.keys())[self.markerSet]
# cycle list of strings
elif all(isinstance(item, str) for item in self.markerSet):
for i, item in enumerate(itertools.cycle(self.markerSet)):
if index == i:
return item
# cycle list of integers
elif all(isinstance(item, int) for item in self.markerSet):
for i, item in enumerate(itertools.cycle(self.markerSet)):
if index == i:
return list(self.markers.keys())[item]
except:
print("Warning:::Invalid value in 'MarkerSet'. Using default instead.")
return list(self.markers.keys())[index]
def getColor(self, index):
    """Return an RGB color for the graph at *index*.

    'ColorSet' may be a list of ints (indices into ``self.colors``, the
    KITcolor color keys) or a list of color-key strings present in the
    ``self.KITcolor`` dict. Lists shorter than the number of graphs are
    cycled. Invalid input falls back to cycling ``self.colors``.
    """
    try:
        # self.colors represents color_keys in KITcolor
        if isinstance(self.colorSet, list) \
                and all(isinstance(item, int) for item in self.colorSet):
            key = self.colors[self.colorSet[index % len(self.colorSet)]]
            return self.KITcolor[key][0][1]
        # if colors in 'ColorSet' are strings and correspond to entries
        # in KITcolor dict
        elif isinstance(self.colorSet, list) \
                and all(isinstance(item, str) for item in self.colorSet):
            # in case there are less entries in colorSet than needed we
            # need to cycle that list (cycling == modulo indexing)
            color = self.colorSet[index % len(self.colorSet)]
            # search for RGB values in KITcolor dict for given color key
            for colorDict in self.KITcolor.values():
                try:
                    return colorDict[color]
                except Exception:
                    pass
            # key found nowhere -> fall through to the default below
            # (removes the original dead, bare 'color' expression)
            raise KeyError(color)
    except Exception:
        pass
    print("Warning:::Invalid input in 'ColorSet'. Using default instead.")
    fallback = self.colors[index % len(self.colors)]
    return list(self.KITcolor[fallback].values())[0]
def getLineStyle(self, index):
    """Return a matplotlib line style for the graph at *index*.

    'LineStyle' may be an int (same style for all graphs), a list of
    style strings or a list of ints indexing ``self.lines``; lists are
    cycled. Any invalid value falls back to ``self.lines[1]``.
    """
    try:
        if isinstance(self.lineStyle, int):
            return self.lines[self.lineStyle]
        elif isinstance(self.lineStyle, list) \
                and all(isinstance(item, str) for item in self.lineStyle):
            # validate the cycled entries up to *index*, as the original did
            for i, item in enumerate(itertools.cycle(self.lineStyle)):
                if item not in self.lines:
                    raise ValueError
                if index == i:
                    return item
        elif isinstance(self.lineStyle, list) \
                and all(isinstance(item, int) for item in self.lineStyle):
            for i, item in enumerate(itertools.cycle(self.lineStyle)):
                if index == i:
                    return self.lines[item]
    except Exception:
        # narrowed from a bare 'except:' so Ctrl-C is not swallowed
        print("Warning:::Invalid value in 'LineStyle'. Using default instead.")
    return self.lines[1]
def getGraphList(self):
    """Accessor for the internal list of graph objects."""
    graphs = self.__graphs
    return graphs
def readEntryDict(self, exp_len, def_list):
    """'EntryList' makes the names and order of all graphs accessible. This
    subsection is read every time KITPlot is executed. An empty value ("")
    can be used to reset the entry to its default value (the original order
    and names given by .__files).

    Args:
        exp_len (int): expected number of entries in 'EntryList'.
        def_list: default entry dict used when the cfg value is "".

    Raises:
        KeyError: if the entry count does not match *exp_len* (unless
            graphs are split).
    """
    # writes entry dict to cfg and sets it back to default if value is ""
    if self.cfg['Legend','EntryList'] == "":
        self.cfg['Legend','EntryList'] = def_list
        self.__entryDict = def_list
        if self.__is_cfg_new == False:
            print("EntryDict was set back to default!")
    # calculate expected number of entries in 'EntryList'
    if len(self.__entryDict) != exp_len and self.splitGraph == False:
        # fix: the original message concatenated to "more thanonce"
        raise KeyError("Unexpected 'EntryList' value! Number of graphs and "
                       "entries does not match or a key is used more than "
                       "once. Adjust or reset 'EntryList'.")
    return True
def fixEntryDict(self):
    """Re-key the legend 'EntryList' so its keys become 0..n-1 while the
    user-given relative order of the entries is preserved."""
    # get key list from 'EntryList'
    keys = [int(key) for key in self.__entryDict.keys()]
    # key list should start at 0 and should have a length of len(keys)
    straight_list = list(range(len(keys)))
    # print("fix", straight_list)
    # get reference list in respect to the original order of key list
    ref_list = [y for (x,y) in sorted(zip(keys, straight_list))]
    # reorder reference list so that values stay in the same order as before
    fixed_order = [y for (x,y) in sorted(zip(ref_list, straight_list))]
    values = list(self.__entryDict.values())
    new = OrderedDict(zip(fixed_order, values))
    # NOTE(review): writes through self.__cfg while readEntryDict uses
    # self.cfg -- confirm both attributes exist and refer to the same cfg
    self.__cfg['Legend','EntryList'] = new
def getDefaultEntryDict(self,List):
    """ Loads default names and order in respect to the KITData objects
    in 'self.__files' list. Both keys and values of the dictionary must be
    strings.
    """
    # map each graph's position to its (stringified) name
    return OrderedDict(
        (position, str(graph.getName())) for position, graph in enumerate(List))
| SchellDa/KITPlot | kitmatplotlib.py | kitmatplotlib.py | py | 19,336 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "Utils.kitutils.extractList",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "Utils... |
34766663711 | import os
import io
import imageio
import logging
import cv2
import numpy as np
from PIL import Image
import torch
from ts.torch_handler.base_handler import BaseHandler
logger = logging.getLogger(__name__)
def test_resize(img, size=640, pad=False):
    """Scale *img* so that its longer side equals *size* (aspect kept).

    With pad=True the resized image is pasted into the top-left corner of
    a zero-filled size x size canvas; otherwise the bare resize is returned.
    """
    h, w, c = img.shape
    scale = min(size / w, size / h)
    h = int(h * scale)
    w = int(w * scale)
    if pad:
        canvas = np.zeros((size, size, c), img.dtype)
        canvas[:h, :w] = cv2.resize(img, (w, h))
        return canvas
    return cv2.resize(img, (w, h))
def test_preprocess(img,
                    mean=[103.939, 116.779, 123.68],
                    to_tensor=True,
                    pad=False):
    """Resize *img* to 640, subtract the per-channel BGR *mean* and
    optionally convert to an NCHW torch tensor.

    NOTE: the mutable default for *mean* is kept for interface
    compatibility; it is never mutated here.
    """
    resized = test_resize(img, size=640, pad=pad).astype(np.float32)
    resized[..., 0] -= mean[0]
    resized[..., 1] -= mean[1]
    resized[..., 2] -= mean[2]
    batched = np.expand_dims(resized, axis=0)
    if not to_tensor:
        return batched
    # HWC -> CHW with a leading batch axis
    return torch.Tensor(batched.transpose(0, 3, 1, 2))
class DBTextDetectionHandler(BaseHandler):
    """TorchServe handler for a DB text-detection TorchScript model."""

    def __init__(self):
        self.model = None
        self.device = None
        self.initialized = False

    def initialize(self, ctx):
        """Load the TorchScript model named in the manifest onto the CPU.

        Raises:
            FileNotFoundError: if the serialized model file is missing
                (the original used `assert`, which is stripped under -O).
        """
        self.manifest = ctx.manifest
        properties = ctx.system_properties
        self.device = torch.device('cpu')
        model_dir = properties.get("model_dir")
        serialized_file = self.manifest['model']['serializedFile']
        model_pt_path = os.path.join(model_dir, serialized_file)
        if not os.path.exists(model_pt_path):
            raise FileNotFoundError(model_pt_path)
        self.model = torch.jit.load(model_pt_path)
        self.model.to(self.device)
        self.model.eval()
        # lazy %-args avoid formatting work when DEBUG logging is off
        logger.debug('Model file %s loaded successfully', model_pt_path)
        self.initialized = True

    def preprocess(self, request):
        """Decode every request image and stack them into one NCHW batch."""
        tensor_imgs = []
        for data in request:
            image = data.get("data")
            if image is None:
                image = data.get("body")
            input_image = Image.open(io.BytesIO(image))
            input_image = np.array(input_image)
            tensor_img = test_preprocess(input_image, pad=False)
            tensor_imgs.append(tensor_img)
        return torch.cat(tensor_imgs)

    def inference(self, img):
        """Run the model's forward pass on the preprocessed batch."""
        return self.model(img)

    def postprocess(self, data):
        """Convert raw predictions into JSON-serializable masks.

        Assumes channel 0 is the probability map and channel 1 the
        threshold map (per the DB architecture) -- TODO confirm.
        """
        res = []
        data = data.detach().cpu().numpy()
        for pred in data:
            prob_mask = (pred[0] * 255).astype(np.uint8).tolist()
            thresh_mask = (pred[1] * 255).astype(np.uint8).tolist()
            res.append({"prob_mask": prob_mask, "thresh_mask": thresh_mask})
        return res
# Module-level singleton handler used by the TorchServe entry point below.
_service = DBTextDetectionHandler()


def handle(data, context):
    """TorchServe entry point: lazily initialize, then run the pipeline."""
    if not _service.initialized:
        _service.initialize(context)
    if data is None:
        return None
    batch = _service.preprocess(data)
    predictions = _service.inference(batch)
    return _service.postprocess(predictions)
| huyhoang17/DB_text_minimal | src/db_handler.py | db_handler.py | py | 3,114 | python | en | code | 34 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_numbe... |
20382572134 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Add-on registration metadata read by Blender's add-on manager.
bl_info = {
    "name" : "SurfaceAdaptiveRTI",
    "author" : "Ramamoorthy Luxman",
    "description" : "Virtual surface adaptive RTI (NBLP)",
    "blender" : (3, 1, 2),
    "version" : (0, 0, 1),
    "location" : "3D View > Tools > SurfaceAdaptiveRTI",
    "warning" : "",
    "category" : "3D View"
}
import os
from random import sample
import bpy
import numpy as np
import math
import time
import cv2
import numpy as np
from mathutils import Vector
import matplotlib.pyplot as plot
import yaml
from itertools import islice
from bpy.props import (StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
FloatVectorProperty,
EnumProperty,
PointerProperty,
)
from bpy.types import (Panel,
Operator,
PropertyGroup,
)
#################################### Global vars { ################################
# Cartesian light positions handed from the NBLP driver to the
# 'rti.create_lights' operator, which reads them when rebuilding lights.
updated_lps = []
#################################### Global vars } ################################
#################################### Helpers { ####################################
def cart2sph(*args):
    """Convert cartesian coordinates to spherical coordinates.

    Args:
        *args: x and y, optionally followed by z (defaults to 1.0).

    Returns:
        (r, az, el): radius, azimuth and elevation in radians.
    """
    x = args[0]
    y = args[1]
    # z defaults to 1.0 when only (x, y) are given
    z = args[2] if len(args) > 2 else 1.0
    # fix: removed the leftover debug print that fired on every call
    hxy = np.hypot(x, y)
    r = np.hypot(hxy, z)
    el = np.arctan2(z, hxy)
    az = np.arctan2(y, x)
    return r, az, el
def sph2cart(az, el, r):
    """Convert spherical (azimuth, elevation, radius) to cartesian x, y, z."""
    xy_radius = r * np.cos(el)
    return (xy_radius * np.cos(az),
            xy_radius * np.sin(az),
            r * np.sin(el))
def read_lp_file(file_path, dome_radius):
    """Read a standard .lp file and extract the light positions.

    Only the light *directions* (theta/phi) are used; the radius stored in
    the file is ignored and every position is re-projected onto a dome of
    the given *dome_radius*.

    Returns:
        list of (x, y, z) tuples; an empty list if *file_path* does not
        exist, or {'ERROR'} if the file cannot be opened.
    """
    light_positions = []
    if not os.path.exists(file_path):
        return light_positions
    try:
        # fix: 'with' guarantees the handle is closed even if reading fails
        # NOTE(review): open() raises OSError, not RuntimeError; this
        # handler is kept for interface compatibility but likely never fires
        with open(file_path) as file:
            rows = file.readlines()
    except RuntimeError as ex:
        error_report = "\n".join(ex.args)
        print("Caught error:", error_report)
        return {'ERROR'}
    numLights = int(rows[0].split()[0])
    for idx in range(1, numLights + 1):
        cols = rows[idx].split()
        x = float(cols[1])
        y = float(cols[2])
        z = float(cols[3])
        # keep the direction, replace the radius with dome_radius
        r, long, lat = cart2sph(x, y, z)
        x, y, z = sph2cart(long, lat, dome_radius)
        light_positions.append((float(x), float(y), float(z)))
    return light_positions
############################ Generate n evenly spaced hemispherical points { ##################################
def generate_n_evenly_spaced_hemispherical_points(samples = 45):
    """Generate *samples* approximately evenly spaced points on the upper
    unit hemisphere using the Fibonacci-sphere (golden-angle) construction.

    Returns:
        (polar_points, cartesian_points): lists of (azimuth, elevation)
        tuples in radians and (x, y, z) tuples.
    """
    # build 2*samples points over the whole sphere, then keep the upper half
    samples = 2*samples
    phi = math.pi * (3. - math.sqrt(5.))  # golden angle in radians
    cartesian_points = []
    polar_points = []
    for i in range(int(samples/2), samples):
        z = -1 + (i / float(samples - 1)) * 2  # z goes from 0 to 1
        radius = math.sqrt(1 - z * z)  # radius at z
        theta = phi * i  # golden angle increment
        y = math.cos(theta) * radius
        x = math.sin(theta) * radius
        r, az, el = cart2sph(x,y,z)
        # wrap both angles into [-pi, pi)
        az = (az + np.pi) % (2 * np.pi) - np.pi
        el = (el + np.pi) % (2 * np.pi) - np.pi
        polar_points.append((float(az),float(el)))
        cartesian_points.append((x, y, z))
    # plot.figure().add_subplot(111, projection='3d').scatter([p[0] for p in cartesian_points], [p[1] for p in cartesian_points], [p[2] for p in cartesian_points]);
    # plot.show()
    return polar_points, cartesian_points
############################ Generate n evenly spaced hemispherical points } ##################################
def generate_homogenous_points_along_theta(samples = 10, dome_radius=1.0, phi=45.0):
    """Place *samples* lights on a ring of constant elevation *phi*
    (given in degrees) on a dome of radius *dome_radius*.

    An odd *samples* count is rounded up to the next even number.

    Returns:
        (polar, cartesian): lists of (theta, phi) tuples (radians) and
        (x, y, z) tuples.
    """
    light_positions_cartesian = []
    light_positions_polar = []
    # fix: removed the no-op self-assignment 'dome_radius = dome_radius'
    phi = math.radians(phi)
    if samples % 2 != 0:
        samples = samples + 1
    # spread the azimuth evenly over the full circle
    for i in range(0, samples):
        theta = i * (math.radians(360.0) / samples)
        x, y, z = sph2cart(theta, phi, dome_radius)
        light_positions_cartesian.append((x, y, z))
        light_positions_polar.append((theta, phi))
    return light_positions_polar, light_positions_cartesian
#################################### Helpers } ####################################
#################################### NBLP algorithms { ####################################
#################################### NBLP Basic { ####################################
class Nblp:
    """Basic NBLP (Next Best Light Position) acquisition bookkeeping."""

    # NOTE(review): class attribute -> shared by *all* Nblp instances and
    # persists across re-instantiation; confirm this is intended.
    iterations = []

    class iteration:
        """One NBLP iteration: light positions plus derived file names."""

        def __init__(self, lps_polar, lps_cartesian, iteration_nb):
            self.lps_polar = lps_polar
            self.nb_images = len(lps_polar)
            self.lps_cartesian = lps_cartesian
            self.filenames_subtext = "nblp_iteration_" + str(iteration_nb) + "_"
            self.iteration_nb = iteration_nb

        def plot_lps(self):
            """Plot the light positions on a polar grid; returns pyplot."""
            fig, ax = plot.subplots(subplot_kw={'projection': 'polar'})
            for i in range(0, self.nb_images):
                theta = self.lps_polar[i][0]
                radius = math.degrees(self.lps_polar[i][1])
                ax.plot(theta, radius, "o")
            ax.set_rmax(2)
            ax.set_rticks([90, 60, 30, 0])
            ax.set_rlabel_position(-22.5)
            ax.grid(True)
            ax.set_title("Light positions projected to 2D plane")
            return plot

        def rename_files(self, path):
            """Append '_theta_<deg>_phi_<deg>' (2 decimals) to each frame name."""
            for i in range(0, self.nb_images):
                theta = str(math.floor(100 * math.degrees(self.lps_polar[i][0])) / 100)
                phi = str(math.floor(100 * math.degrees(self.lps_polar[i][1])) / 100)
                old_name = path + "nblp_iteration_" + str(self.iteration_nb) + "_" + str(i + 1) + ".png"
                new_name = path + "nblp_iteration_" + str(self.iteration_nb) + "_" + str(i + 1) + "_theta_" + theta + "_phi_" + phi + ".png"
                os.rename(old_name, new_name)

    def __init__(self):
        """Start with one iteration of 45 homogeneous positions at phi=45deg."""
        iteration_nb = len(self.iterations)
        lps_polar, lps_cartesian = self.generate_homogenous_points_along_theta(
            n=45, dome_radius=1, phi=math.radians(45.0), iteration_nb=iteration_nb)
        step = self.iteration(lps_polar, lps_cartesian, iteration_nb)
        self.iterations.append(step)

    def dense_acquisition(self):
        """Append another iteration with the same homogeneous layout."""
        iteration_nb = len(self.iterations)
        lps_polar, lps_cartesian = self.generate_homogenous_points_along_theta(
            n=45, dome_radius=1, phi=math.radians(45.0), iteration_nb=iteration_nb)
        step = self.iteration(lps_polar, lps_cartesian, iteration_nb)
        self.iterations.append(step)

    def generate_homogenous_points_along_theta(self, n, dome_radius, phi, iteration_nb):
        """Return (polar, cartesian) positions evenly spaced in azimuth at
        elevation *phi* (radians) on a dome of *dome_radius*."""
        light_positions_cartesian = []
        light_positions_polar = []
        self.dome_radius = dome_radius
        if n % 2 != 0:
            n = n + 1
        for i in range(0, n):
            theta = i * (math.radians(360.0) / n)
            x, y, z = sph2cart(theta, phi, dome_radius)
            light_positions_cartesian.append((x, y, z))
            light_positions_polar.append((theta, phi))
        return light_positions_polar, light_positions_cartesian

    def calculate_entropies(self, iteration_nb, file_path):
        """Write per-image absolute-difference maps against a running blend
        of the iteration's images (a proxy for per-light information).

        NOTE(review): '\\'-separated paths are Windows-specific.
        """
        print("Calculating entropies")
        img_path = file_path + "\\..\\" + self.iterations[iteration_nb].filenames_subtext + str(1) + ".png"
        img_sum = cv2.imread(img_path)
        for i in range(0, self.iterations[iteration_nb].nb_images):
            img_path = file_path + "\\..\\" + self.iterations[iteration_nb].filenames_subtext + str(i + 1) + ".png"
            print(img_path)
            img = cv2.imread(img_path)
            # running 50/50 blend (not a true mean over all images)
            img_sum = cv2.addWeighted(img_sum, 0.5, img, 0.5, 0)
        for i in range(0, self.iterations[iteration_nb].nb_images):
            img_path = file_path + "\\..\\" + self.iterations[iteration_nb].filenames_subtext + str(i + 1) + ".png"
            img_diff = cv2.absdiff(img_sum, cv2.imread(img_path))
            # stretch the diff to the full 0..255 range for inspection
            # (fix: removed the dead np.zeros store that was overwritten)
            min_val = img_diff.min()
            max_val = img_diff.max()
            normalized_img_diff = img_diff * (255 / (max_val - min_val))
            if not os.path.exists(file_path):
                os.makedirs(file_path)
            cv2.imwrite(file_path + self.iterations[iteration_nb].filenames_subtext + str(i) + ".png", img_diff)
            cv2.imwrite(file_path + self.iterations[iteration_nb].filenames_subtext + str(i) + "_normalized.png", normalized_img_diff)

    def generate_lp_file(self, iteration_nb, file_path):
        """Write the iteration's light positions in standard .lp format."""
        data = str(self.iterations[iteration_nb].nb_images)
        for i in range(0, self.iterations[iteration_nb].nb_images):
            step = self.iterations[iteration_nb]
            data = data + "\n" + step.filenames_subtext + str(i + 1) + ".png\t" + str(step.lps_cartesian[i][0]) + "\t" + str(step.lps_cartesian[i][1]) + "\t" + str(step.lps_cartesian[i][2])
        with open(file_path, 'w') as f:
            f.write(data)

    def write_log(self, path):
        """Dump all iterations to a YAML log file at *path*.

        Fix: the original built a *set literal* of one-entry dicts per
        iteration, which raises TypeError (dicts are unhashable); a single
        mapping per iteration is written instead, mirroring Nblp_2.
        """
        with open(path, 'w') as file:
            iterations = []
            for i in range(0, len(self.iterations)):
                iteration = {'iteration nb': i,
                             'lps_polar': self.iterations[i].lps_polar,
                             'nb_images': self.iterations[i].nb_images,
                             'lps_cartesian': self.iterations[i].lps_cartesian,
                             'filenames_subtext': self.iterations[i].filenames_subtext,
                             'iteration_nb': self.iterations[i].iteration_nb}
                iterations.append(iteration)
            yaml.dump(iterations, file)
#################################### NBLP Basic } ####################################
#################################### NBLP Basic approach 2 = Relighting loss gradient descent approach { ####################################
class Nblp_2:
    """NBLP approach 2: relighting-loss / gradient-descent driven acquisition.

    Each construction renders one iteration in Blender; the iteration
    history is persisted to '<output>/log.yaml' and an externally produced
    'next_iteration.lp' supplies the light positions for the next run.
    """

    # NOTE(review): class attribute -> shared by all Nblp_2 instances.
    iterations = []

    class iteration:
        """One acquisition iteration (positions, file names, Blender context)."""

        def __init__(self, lps_polar, lps_cartesian, iteration_nb, context):
            self.lps_polar = lps_polar
            self.nb_images = len(lps_polar)
            self.lps_cartesian = lps_cartesian
            self.filenames_subtext = "nblp_iteration_"+str(iteration_nb)+"_"
            self.iteration_nb = iteration_nb
            self.context = context
            self.generate_file_names()
            # self.loss = 1.0

        def plot_lps(self):
            """Plot the light positions on a polar grid; returns pyplot."""
            fig, ax = plot.subplots(subplot_kw={'projection': 'polar'})
            for i in range(0,self.nb_images):
                theta = self.lps_polar[i][0]
                radius = math.degrees(self.lps_polar[i][1])
                ax.plot(theta, radius,"o")
            ax.set_rmax(2)
            ax.set_rticks([90, 60, 30, 0])
            ax.set_rlabel_position(-22.5)
            ax.grid(True)
            ax.set_title("Light positions projected to 2D plane")
            return plot

        def rename_files(self, path):
            """Rename rendered frames to the theta/phi-encoded file names."""
            for i in range(0, self.nb_images):
                theta=str(math.floor(100*math.degrees(self.lps_polar[i][0]))/100)
                phi=str(math.floor(100*math.degrees(self.lps_polar[i][1]))/100)
                old_name = path+"nblp_iteration_"+str(self.iteration_nb)+"_"+str(i+1)+".png"
                new_name = path+self.file_names[i]
                os.rename(old_name, new_name)

        def generate_lp_file(self, file_path):
            """Write this iteration's light positions in .lp format."""
            data = str(self.nb_images)
            for i in range(0, self.nb_images):
                data = data+"\n"+self.file_names[i]+"\t"+str(self.lps_cartesian[i][0])+"\t"+str(self.lps_cartesian[i][1])+"\t"+str(self.lps_cartesian[i][2])
            with open(file_path, 'w') as f:
                f.write(data)

        def calculate_entropies(self,iteration_nb, file_path):
            """Write per-image abs-diff maps against a running 50/50 blend.

            NOTE(review): duplicates Nblp.calculate_entropies; '\\'-joined
            paths are Windows-specific.
            """
            print("Calculating entropies")
            img_path = file_path+"\\..\\"+self.file_names[0]
            img_sum = cv2.imread(img_path)
            for i in range(0, self.nb_images):
                img_path = file_path+"\\..\\"+self.file_names[i]
                print(img_path)
                img = cv2.imread(img_path)
                # running 50/50 blend (not a true mean over all images)
                img_sum = cv2.addWeighted(img_sum,0.5,img,0.5,0)
            for i in range(0, self.nb_images):
                img_path = file_path+"\\..\\"+self.file_names[i]
                img_diff = cv2.absdiff(img_sum,cv2.imread(img_path))
                normalized_img_diff = np.zeros(img_diff.shape)
                # min_val = img_diff[..., 0].min()
                # max_val = img_diff[..., 0].max()
                min_val = img_diff.min()
                max_val = img_diff.max()
                # stretch to 0..255 for inspection (overwrites the zeros above)
                normalized_img_diff = img_diff * (255/(max_val-min_val))
                # cv2.normalize(img_diff, normalized_img_diff, min_val, max_val, cv2.NORM_MINMAX)
                if not os.path.exists(file_path):
                    os.makedirs(file_path)
                cv2.imwrite(file_path+self.file_names[i], img_diff)
                cv2.imwrite(file_path+self.file_names[i].split(".")[0]+"_normalized.png", normalized_img_diff)

        def generate_file_names(self):
            """Build 'nblp_iteration_<n>_<i>_theta_<deg>_phi_<deg>.png' names."""
            self.file_names = []
            for i in range(0, self.nb_images):
                theta=str(math.floor(100*math.degrees(self.lps_polar[i][0]))/100)
                phi=str(math.floor(100*math.degrees(self.lps_polar[i][1]))/100)
                self.file_names.append("nblp_iteration_"+str(self.iteration_nb)+"_"+str(i+1)+"_theta_" + theta + "_phi_" + phi + ".png")

        def execute_acq(self):
            """Render this iteration in Blender and post-process the frames."""
            global updated_lps
            # hand the positions to the 'rti.create_lights' operator
            updated_lps = self.lps_cartesian
            bpy.context.scene.render.filepath = self.context.scene.acquisition_panel.output_path+self.filenames_subtext+"#.png"
            bpy.ops.rti.create_lights()
            bpy.context.scene.frame_end = len(self.context.scene.light_panel.light_list)
            bpy.ops.rti.set_animation()
            bpy.ops.render.render(animation=True, use_viewport = True, write_still=True)
            bpy.ops.render.play_rendered_anim()
            self.plot_lps().savefig(self.context.scene.acquisition_panel.output_path+"iteration_"+str(self.iteration_nb)+"_"+str(self.nb_images)+".png")
            self.rename_files(self.context.scene.acquisition_panel.output_path)
            self.generate_lp_file(file_path=self.context.scene.acquisition_panel.output_path+"iteration_"+str(self.iteration_nb)+".lp")
            self.calculate_entropies(self.iteration_nb,self.context.scene.acquisition_panel.output_path+"\\entropies\\")

    def __init__(self, context):
        """Resume from log.yaml if present, otherwise render iteration 0."""
        path = context.scene.acquisition_panel.output_path
        self.context = context
        if not os.path.exists(path +"log.yaml"):
            # first run: start with a homogeneous ring of lights
            # lps_polar, lps_cartesian = generate_n_evenly_spaced_hemispherical_points(samples=25)
            lps_polar, lps_cartesian = generate_homogenous_points_along_theta(samples=10)
            step = self.iteration(lps_polar=lps_polar, lps_cartesian=lps_cartesian, iteration_nb=0, context=context)
            step.execute_acq()
            self.iterations.append(step)
        else:
            print(path + "log.yaml")
            data = None
            # reload the previous iterations from the YAML log
            with open(path + "log.yaml") as f:
                data = yaml.load(f.read(), yaml.loader.Loader)
            for i in range(0, len(data)):
                step = self.iteration(data[i]['lps_polar'], data[i]['lps_cartesian'], len(data)-1, context=context)
                self.iterations.append(step)
            # an externally computed 'next_iteration.lp' drives the next step
            if os.path.exists(path + "next_iteration.lp"):
                lps_cartesian = read_lp_file(path + "next_iteration.lp", 1.0)
                print(len(lps_cartesian))
                lps_polar = []
                for i in range(0, len(lps_cartesian)):
                    r, az, el = cart2sph(lps_cartesian[i][0], lps_cartesian[i][1], lps_cartesian[i][2])
                    lps_polar.append((float(az),float(el)))
                step = self.iteration(lps_polar=lps_polar, lps_cartesian=lps_cartesian, iteration_nb=len(data), context=context)
                step.execute_acq()
                self.iterations.append(step)
        # for i in range(0,len(self.iterations)):
        #     data = str(self.nb_images)
        #     for i in range(0, self.nb_images):
        #         data = data+"\n"+self.file_names[i]+"\t"+str(self.lps_cartesian[i][0])+"\t"+str(self.lps_cartesian[i][1])+"\t"+str(self.lps_cartesian[i][2])
        #     with open(file_path, 'w') as f:
        #         f.write(data)

    def dense_acquisition(self):
        """Append a dense homogeneous iteration.

        NOTE(review): Nblp_2 defines no generate_homogenous_points_along_theta
        method (only the module-level function exists, with a different
        signature), so this call would raise AttributeError -- confirm.
        """
        iteration_nb = len(self.iterations)
        lps_polar, lps_cartesian = self.generate_homogenous_points_along_theta(n=100, dome_radius=1, phi=math.radians(50.0), iteration_nb=iteration_nb)
        step = self.iteration(lps_polar, lps_cartesian, iteration_nb)
        self.iterations.append(step)

    def write_log(self, path):
        """Dump all iterations to a YAML log file at *path*."""
        open(path, "w").close()
        with open(path, 'w') as file:
            iterations = []
            for i in range(0, len(self.iterations)):
                iteration = {'lps_polar': list(self.iterations[i].lps_polar),
                             'nb_images': self.iterations[i].nb_images,
                             'lps_cartesian': list(self.iterations[i].lps_cartesian),
                             'filenames_subtext': self.iterations[i].filenames_subtext,
                             'iteration_nb':self.iterations[i].iteration_nb,
                             'file_names': self.iterations[i].file_names}
                iterations.append(iteration)
            yaml.dump(iterations, file)
#################################### NBLP Basic approach 2 = Relighting loss } ####################################
#################################### NBLP algorithms } ####################################
#################################### PropertyGroups { ####################################
class light(bpy.types.PropertyGroup):
    """PropertyGroup wrapping one light-source object."""
    # fix: annotation syntax (':') is required for bpy properties to be
    # registered on Blender 2.8+; plain '=' assignment is silently ignored
    light: bpy.props.PointerProperty(name="Light object",
                                     type=bpy.types.Object,
                                     description="A light source")
class camera(bpy.types.PropertyGroup):
    """PropertyGroup wrapping the scene camera object."""
    # fix: annotation syntax (':') is required for bpy properties to be
    # registered on Blender 2.8+; plain '=' assignment is silently ignored
    camera: bpy.props.PointerProperty(name="Camera object",
                                      type=bpy.types.Object,
                                      description="Camera")
class surface(bpy.types.PropertyGroup):
    """PropertyGroup wrapping the sample surface object."""
    # fix: annotation syntax (':') is required for bpy properties to be
    # registered on Blender 2.8+; plain '=' assignment is silently ignored
    surface: bpy.props.PointerProperty(name="Surface object",
                                       type=bpy.types.Object,
                                       description="Surface")
#################################### PropertyGroups } ####################################
#################################### Menu { ####################################
class lightSettings(PropertyGroup):
    """UI properties describing the light dome and light positions."""

    nblp: BoolProperty(
        name="NBLP",
        description="NBLP algorithm generated Light Positions",
        default=True,
    )
    lp_file_path : StringProperty(
        name="LP file",
        subtype="FILE_PATH",
        description="File path for light positions file (.lp)",
        default="",
        maxlen=1024,
    )
    dome_radius : FloatProperty(
        name="RTI dome radius",
        description="Radius of RTI dome [m]",
        default=1.0
    )
    light_strength : FloatProperty(
        name="Light strength",
        description="Strength of the light source",
        default=0.5
    )
    # NOTE(review): plain class attributes (not bpy properties) used as
    # scratch storage by the operators -- shared across all instances.
    light_positions = []
    light_list = []
class surfaceSettings(PropertyGroup):
    """UI properties of the sample surface (mesh, texture, material params)."""

    def update(self, context):
        """Push metallic/specularity/roughness to the surface's Principled BSDF."""
        mat = context.scene.objects[context.scene.surface_panel.surface[0]].active_material
        node_tree = mat.node_tree
        nodes = node_tree.nodes
        bsdf = nodes.get("Principled BSDF")
        # NOTE(review): numeric input indices (6=Metallic, 7=Specular,
        # 9=Roughness) depend on the Blender version -- confirm for 3.1
        bsdf.inputs[6].default_value=self.metallic
        bsdf.inputs[7].default_value=self.specularity
        bsdf.inputs[9].default_value=self.roughness

    mesh_file_path : StringProperty(
        name="Surface mesh file",
        subtype="FILE_PATH",
        description="File path for the surface mesh file",
        default="",
        maxlen=1024
    )
    texture_image_path : StringProperty(
        name="Surface texture image file",
        subtype="FILE_PATH",
        description="File path for the surface texture image file",
        default="",
        maxlen=1024
    )
    metallic : FloatProperty(
        name="Metallic",
        description="Surface metallic",
        default=0.0,
        min=0.0,
        max=1.0,
        update=update
    )
    roughness : FloatProperty(
        name="Roughness",
        description="Surface roughness",
        default=0.0,
        update=update,
        min=0.0,
        max=1.0,
    )
    specularity : FloatProperty(
        name="Specularity",
        description="Specularity",
        default=0.5,
        update=update,
        min=0.0,
        max=1.0,
    )
    # NOTE(review): shared class-attribute scratch lists (not bpy properties).
    surface = []
    surface_bbox = []
class cameraSettings(PropertyGroup):
    """UI properties of the acquisition camera."""

    def update(self, context):
        """Push focal length / aspect ratio / sensor settings to the camera."""
        cameras_obj= [cam for cam in bpy.data.objects if cam.type == 'CAMERA']
        # data = cameras_obj[0].data
        data = bpy.data.objects[context.scene.camera_panel.camera[0]].data
        data.lens = self.focal_length
        # render Y resolution follows X via the aspect ratio
        bpy.context.scene.render.resolution_y = int(self.aspect_ratio*bpy.context.scene.render.resolution_x)
        data.display_size = self.view_port_size
        data.sensor_width = self.sensor_size

    def update_camera_height(self,context):
        """Move the single scene camera to the configured height."""
        cameras_obj = [cam for cam in bpy.data.objects if cam.type == 'CAMERA']
        if len(cameras_obj) != 1:
            # NOTE(review): PropertyGroup has no report(); this line would
            # raise AttributeError if ever reached -- confirm
            self.report({'ERROR'}, "Camera doesn't exist in scene or there is more than 1 camera.")
            return {'FINISHED'}
        cameras_obj[0].location[2] = self.camera_height

    camera_height : FloatProperty(
        name="Camera height",
        description="Camera position height",
        default=1,
        min=0,
        max=4.0,
        update=update_camera_height
    )
    aspect_ratio : FloatProperty(
        name="Aspect ratio",
        description="Aspect ratio of the sensor",
        default=1,
        min=0.000,
        max=2,
        update=update
    )
    focal_length : FloatProperty(
        name="Focal length",
        description="Focal length",
        default=199.6,
        min=0.000,
        max=1000,
        step=1,
        update=update
    )
    subject : PointerProperty(
        name="Focus on",
        description="Subject to focus",
        type=bpy.types.Object,
    )
    resolution_factor : FloatProperty(
        name="Resolution",
        description="Resolution scale factor",
        min=0,
        max=5,
        default=1.0,
        update=update
    )
    view_port_size : FloatProperty(
        name="View port size",
        description="View port size",
        min=0,
        max=100,
        default=0.04,
        update=update
    )
    sensor_size : FloatProperty(
        name="Sensor size",
        description="Sensor size",
        min=0,
        max=1000,
        default=8,
        update=update
    )
    # NOTE(review): shared class-attribute scratch list (not a bpy property).
    camera = []
class acquisitionSettings(PropertyGroup):
    """UI properties for the acquisition output location."""

    output_path : StringProperty(
        name="Output path",
        subtype="FILE_PATH",
        description="File path for saving the rti acquisition",
        default="",
        maxlen=1024
    )
    # NOTE(review): shared class-attribute scratch list (not a bpy property).
    csvOutputLines = []
#################################### Menu } ####################################
#################################### Operators { ####################################
class reset_scene(Operator):
    """Operator: remove every light datablock and every object from the file."""
    bl_label = "Reset all"
    bl_idname = "rti.reset_scene"

    def execute(self, context):
        # fix: iterate snapshots -- removing datablocks while iterating the
        # live bpy collection invalidates the iterator
        for current_light in list(bpy.data.lights):
            bpy.data.lights.remove(current_light)
        for current_object in list(bpy.data.objects):
            bpy.data.objects.remove(current_object)
        return {"FINISHED"}
class createLights(Operator):
    """Operator: (re)create one spot light per entry in the global
    ``updated_lps`` position list, parented under 'light_sources'."""
    bl_label = "Create lights"
    bl_idname = "rti.create_lights"

    def execute(self, context):
        global updated_lps
        scene = context.scene
        mytool = scene.light_panel
        mytool.light_list.clear()
        # fix: iterate a snapshot -- removing while iterating the live
        # bpy collection invalidates the iterator
        for current_light in list(bpy.data.lights):
            current_light.animation_data_clear()
            bpy.data.lights.remove(current_light)
        mytool.light_positions = updated_lps
        if not os.path.isfile(mytool.lp_file_path) and not mytool.nblp:
            # fix: report() requires a message (the original call raised
            # TypeError); also abort instead of continuing without lights
            self.report({"ERROR"}, "Invalid light positions (.lp) file path")
            return {'CANCELLED'}
        light_sources = bpy.context.scene.objects.get("light_sources")
        if not light_sources:
            # empty parent object grouping all RTI lights
            light_sources = bpy.data.objects.new(name="light_sources", object_data=None)
            scene.collection.objects.link(light_sources)
        for idx in range(0, len(mytool.light_positions)):
            lightData = bpy.data.lights.new(name="RTI_light"+str(idx), type="SPOT")
            lightData.spot_size = 0.1
            current_light = bpy.data.objects.new(name="Light_{0}".format(idx), object_data=lightData)
            (x, y, z) = mytool.light_positions[idx]
            current_light.location = (x, y, z)
            scene.collection.objects.link(current_light)
            # aim the spot light at the origin
            current_light.rotation_mode = 'QUATERNION'
            current_light.rotation_quaternion = Vector((x, y, z)).to_track_quat('Z', 'Y')
            current_light.parent = light_sources
            mytool.light_list.append(current_light.name)
        return {"FINISHED"}
class createCamera(Operator):
    """Operator: create the single scene camera at the top of the dome."""
    bl_idname = "rti.create_camera"
    bl_label = "Create camera"

    def execute(self, context):
        scene = context.scene
        cameras_obj = [cam for cam in bpy.data.objects if cam.type == 'CAMERA']
        scene.camera_panel.camera.clear()
        if len(cameras_obj) != 0:
            # drop all existing camera datablocks first
            # fix: iterate a snapshot -- removing while iterating the live
            # bpy collection invalidates the iterator
            for cam in list(bpy.data.cameras):
                cam.animation_data_clear()
                bpy.data.cameras.remove(cam)
        camera_data = bpy.data.cameras.new("Camera")
        camera_data.dof.use_dof = True
        camera_data.type = "PERSP"
        camera_object = bpy.data.objects.new("Camera", camera_data)
        # Link camera to current scene
        scene.collection.objects.link(camera_object)
        # Move camera to default location at top of dome
        camera_object.location = (0, 0, scene.light_panel.dome_radius)
        scene.camera_panel.camera.append(camera_object.name)
        return {'FINISHED'}
class importSurface(Operator):
    """Operator: import a Wavefront .obj mesh as the sample surface."""
    bl_idname = "rti.import_surface"
    bl_label = "Import surface"

    def execute(self, context):
        scene = context.scene
        surfacetool = scene.surface_panel
        # NOTE(review): 'SUN' is a light *data* type, not an object type,
        # so this filter effectively only excludes cameras -- confirm intent
        objects = [obj for obj in bpy.data.objects if obj.type != 'CAMERA' and obj.type != "SUN"]
        if len(objects) != 0:
            self.report({'ERROR'}, "Surface already exist in scene. Delete the old surface to add new surface")
            return {'FINISHED'}
        # case-insensitive extension check (generalizes .OBJ/.obj to any case)
        if scene.surface_panel.mesh_file_path.lower().endswith(".obj"):
            bpy.ops.import_scene.obj(filepath=scene.surface_panel.mesh_file_path)
            ob = bpy.context.selected_objects[0]
            # ob.rotation_euler[0] = 0.0523599
            surfacetool.surface.append(ob.name)
        return {'FINISHED'}
class addTexture(Operator):
    """Operator: rebuild the surface material node tree as
    image texture -> Principled BSDF -> material output."""
    bl_idname = "rti.add_texture"
    bl_label = "Add texture"

    def execute(self, context):
        scene = context.scene
        mat = scene.objects[scene.surface_panel.surface[0]].active_material
        nodes = mat.node_tree.nodes
        nodes.clear()
        node_principled = nodes.new(type='ShaderNodeBsdfPrincipled')
        node_principled.location = 0,0
        node_tex = nodes.new('ShaderNodeTexImage')
        node_tex.image = bpy.data.images.load(scene.surface_panel.texture_image_path)
        node_tex.location = -400,0
        node_output = nodes.new(type='ShaderNodeOutputMaterial')
        node_output.location = 400,0
        links = mat.node_tree.links
        link = links.new(node_tex.outputs["Color"], node_principled.inputs["Base Color"])
        link = links.new(node_principled.outputs["BSDF"], node_output.inputs["Surface"])
        # NOTE(review): the block below creates a *second*, unlinked image
        # texture node at the same location -- looks like leftover code;
        # confirm whether it can be removed
        node_tex = nodes.new('ShaderNodeTexImage')
        node_tex.location = -400,0
        img = bpy.data.images.get(scene.surface_panel.texture_image_path)
        if img:
            node_tex.image = img
        return {'FINISHED'}
class SetAnimation(Operator):
    """Operator: keyframe the light energies so that exactly one light is
    on per animation frame (frame k+1 -> light k)."""
    bl_idname = "rti.set_animation"
    bl_label = "Create animation"

    def execute(self, context):
        scene = context.scene
        scene.timeline_markers.clear()
        scene.animation_data_clear()
        # fix: look the world up by name -- indexing bpy.data.worlds["World"]
        # raised KeyError if the default world had been renamed/removed
        world = bpy.data.worlds.get("World")
        if world is not None:
            bpy.data.worlds.remove(world, do_unlink=True)
        data = bpy.data.objects[context.scene.camera_panel.camera[0]].data
        if context.scene.camera_panel.subject is not None:
            data.dof.focus_object = context.scene.camera_panel.subject
        numLights = len(scene.light_panel.light_list)
        print("Setting new animation with", numLights)
        # start with every light switched off
        for i in range(0, numLights):
            current_light = bpy.data.objects[scene.light_panel.light_list[i]]
            current_light.data.energy = 0.0
        # on frame k+1 only light k carries energy
        for k in range(0, numLights):
            for i in range(0, numLights):
                current_light = bpy.data.objects[scene.light_panel.light_list[i]]
                if i != k:
                    current_light.data.energy = 0.0
                else:
                    current_light.data.energy = scene.light_panel.light_strength
                current_light.data.keyframe_insert(data_path="energy", frame=k+1)
        return {'FINISHED'}
class acquire(Operator):
bl_idname = "rti.acquire"
bl_label = "Acquire"
bl_use_preview = True
def execute(self, context):
global updated_lps
bpy.context.scene.render.engine = 'CYCLES'
bpy.context.scene.cycles.device = 'GPU'
bpy.context.scene.cycles.preview_samples = 100
bpy.context.scene.cycles.samples = 1000
bpy.context.scene.cycles.use_preview_denoising = True
bpy.context.scene.cycles.use_denoising = True
bpy.context.scene.render.resolution_x = int(1920*context.scene.camera_panel.resolution_factor)
bpy.context.scene.render.resolution_y = int(context.scene.camera_panel.aspect_ratio*bpy.context.scene.render.resolution_x)
bpy.context.scene.frame_start = 1
bpy.context.scene.render.filepath = context.scene.acquisition_panel.output_path
bpy.context.scene.render.image_settings.color_mode = 'BW'
bpy.context.scene.render.image_settings.color_depth = '8'
bpy.context.scene.render.use_overwrite = True
context.scene.camera = context.scene.objects[context.scene.camera_panel.camera[0]]
if not context.scene.light_panel.nblp:
bpy.context.scene.frame_end = len(context.scene.light_panel.light_list)
bpy.ops.rti.set_animation()
bpy.ops.render.render(animation=True, use_viewport = True, write_still=True)
bpy.ops.render.play_rendered_anim()
else:
print("Executing NBLP")
nblp = Nblp_2(context=context)
# updated_lps = nblp.iterations[0].lps_cartesian
# nblp.generate_lp_file(iteration_nb=0, file_path=context.scene.acquisition_panel.output_path+"iteration_"+str(len(nblp.iterations)-1)+".lp")
# bpy.context.scene.render.filepath = context.scene.acquisition_panel.output_path+nblp.iterations[0].filenames_subtext+"#.png"
# bpy.ops.rti.create_lights()
# bpy.context.scene.frame_end = len(context.scene.light_panel.light_list)
# bpy.ops.rti.set_animation()
# bpy.ops.render.render(animation=True, use_viewport = True, write_still=True)
# bpy.ops.render.play_rendered_anim()
# nblp.iterations[0].plot_lps().savefig(context.scene.acquisition_panel.output_path+"30.png")
# nblp.iterations[0].rename_files(context.scene.acquisition_panel.output_path)
# nblp.calculate_entropies(len(nblp.iterations)-1,context.scene.acquisition_panel.output_path+"\\entropies\\")
# nblp.dense_acquisition()
# updated_lps = nblp.iterations[len(nblp.iterations)-1].lps_cartesian
# nblp.generate_lp_file(iteration_nb=1, file_path=context.scene.acquisition_panel.output_path+"iteration_"+str(len(nblp.iterations)-1)+".lp")
# bpy.context.scene.render.filepath = context.scene.acquisition_panel.output_path+"dense_acquisition\\"+nblp.iterations[len(nblp.iterations)-1].filenames_subtext+"#.png"
# bpy.ops.rti.create_lights()
# bpy.context.scene.frame_end = len(context.scene.light_panel.light_list)
# bpy.ops.rti.set_animation()
# bpy.ops.render.render(animation=True, use_viewport = True, write_still=True)
# bpy.ops.render.play_rendered_anim()
# nblp.iterations[len(nblp.iterations)-1].plot_lps().savefig(context.scene.acquisition_panel.output_path+"dense_acquisition\\30.png")
# nblp.iterations[len(nblp.iterations)-1].rename_files(context.scene.acquisition_panel.output_path+"dense_acquisition\\")
nblp.write_log(context.scene.acquisition_panel.output_path+"log.yaml")
return {'FINISHED'}
#################################### Operators } ####################################
#################################### Panels { #######################################
class rti_panel(Panel):
bl_label = "Surface Adaptive RTI"
bl_idname = "VIEW3D_PT_surface_adaptive_rti"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Surface Adaptive RTI"
def draw(self, context):
layout = self.layout
scene = context.scene
class light_panel(Panel):
bl_label = "Light positions"
bl_parent_id = "VIEW3D_PT_surface_adaptive_rti"
bl_idname = "VIEW3D_PT_light_panel"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Surface Adaptive RTI"
def draw(self, context):
global updated_lps
layout = self.layout
scene = context.scene
lighttool = scene.light_panel
layout.label(text="Light positions")
layout.prop(lighttool, "nblp")
layout.prop(lighttool, "light_strength")
layout.prop(lighttool, "dome_radius")
if not lighttool.nblp:
layout.prop(lighttool,"lp_file_path")
row = layout.row(align = True)
updated_lps = read_lp_file(lighttool.lp_file_path, lighttool.dome_radius)
row = layout.row(align = True)
row.operator("rti.create_lights")
else:
for current_light in bpy.data.lights:
bpy.data.lights.remove(current_light)
class surface_panel(Panel):
bl_label = "Surface"
bl_parent_id = "VIEW3D_PT_surface_adaptive_rti"
bl_idname = "VIEW3D_PT_surface_panel"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Surface Adaptive RTI"
def draw(self, context):
layout = self.layout
scene = context.scene
surfacetool = scene.surface_panel
layout.label(text="Surface")
layout.prop(surfacetool, "mesh_file_path")
row = layout.row(align = True)
row.operator("rti.import_surface")
layout.label(text="Texture")
layout.prop(surfacetool, "texture_image_path")
row = layout.row(align = True)
row.operator("rti.add_texture")
layout.prop(surfacetool, "metallic", slider=True)
layout.prop(surfacetool, "roughness", slider=True)
layout.prop(surfacetool, "specularity", slider=True)
class camera_panel(Panel):
bl_label = "Camera"
bl_parent_id = "VIEW3D_PT_surface_adaptive_rti"
bl_idname = "VIEW3D_PT_camera_panel"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Surface Adaptive RTI"
def draw(self, context):
layout = self.layout
scene = context.scene
cameratool = scene.camera_panel
layout.label(text="Camera")
layout.prop(cameratool, "camera_height", slider=True)
layout.prop(cameratool, "aspect_ratio", slider=True)
layout.prop(cameratool, "focal_length", slider=True)
layout.prop(cameratool, "subject")
layout.prop(cameratool, "resolution_factor")
layout.prop(cameratool, "view_port_size", slider=True)
layout.prop(cameratool, "sensor_size", slider=True)
row = layout.row(align = True)
row.operator("rti.create_camera")
class acquisition_panel(Panel):
bl_label = "Acquisition"
bl_parent_id = "VIEW3D_PT_surface_adaptive_rti"
bl_idname = "VIEW3D_PT_acquisition_panel"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Surface Adaptive RTI"
def draw(self, context):
layout = self.layout
scene = context.scene
layout.label(text="Acquisition")
layout.prop(scene.acquisition_panel, "output_path")
# layout.operator("rti.set_animation")
layout.operator("rti.acquire")
row = layout.row(align = True)
row.operator("rti.reset_scene")
#################################### Panels } #######################################
#################################### Register classes { #######################################
classes = (reset_scene,
light,
camera,
surface,
lightSettings,
cameraSettings,
surfaceSettings,
acquisitionSettings,
createLights,
createCamera,
importSurface,
addTexture,
SetAnimation,
acquire,
rti_panel,
surface_panel,
light_panel,
camera_panel,
acquisition_panel)
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.Scene.light_panel = PointerProperty(type=lightSettings)
bpy.types.Scene.surface_panel = PointerProperty(type=surfaceSettings)
bpy.types.Scene.camera_panel = PointerProperty(type=cameraSettings)
bpy.types.Scene.acquisition_panel = PointerProperty(type=acquisitionSettings)
def unregister():
for cls in reversed(classes):
bpy.utils.unregister_class(cls)
del bpy.types.Scene.light_panel
if __name__ == "__main__":
register()
#################################### Register classes } ####################################### | ramamoorthyluxman/SurfaceAdaptiveRTI | __init__.py | __init__.py | py | 40,570 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.hypot",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.hypot",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"line_numbe... |
73876350504 | from gwibber.microblog import network, util
import cgi
from oauth import oauth
from gwibber.microblog.util import resources
from gettext import lgettext as _
import logging
logger = logging.getLogger("Twitter")
logger.debug("Initializing.")
PROTOCOL_INFO = {
"name": "Twitter",
"version": "1.0",
"config": [
"private:secret_token",
"access_token",
"username",
"color",
"receive_enabled",
"send_enabled",
],
"authtype": "oauth1a",
"color": "#729FCF",
"features": [
"send",
"receive",
"search",
"tag",
"reply",
"responses",
"private",
"public",
"delete",
"follow",
"unfollow",
"profile",
"retweet",
"like",
"send_thread",
"send_private",
"user_messages",
"sinceid",
"lists",
"list",
],
"default_streams": [
"receive",
"images",
"responses",
"private",
"lists",
],
}
URL_PREFIX = "https://twitter.com"
API_PREFIX = "https://api.twitter.com/1"
class Client ():
def __init__(self, acct):
self.service = util.getbus("Service")
if acct.has_key("secret_token") and acct.has_key("password"): acct.pop("password")
self.account = acct
if not acct.has_key("access_token") and not acct.has_key("secret_token"):
return [{"error": {"type": "auth", "account": self.account, "message": _("Failed to find credentials")}}]
self.sigmethod = oauth.OAuthSignatureMethod_HMAC_SHA1()
self.consumer = oauth.OAuthConsumer(*util.resources.get_twitter_keys())
self.token = oauth.OAuthToken(acct["access_token"], acct["secret_token"])
def _common(self, data):
m = {}
try:
m["mid"] = str(data["id"])
m["service"] = "twitter"
m["account"] = self.account["id"]
if data.has_key("created_at"):
m["time"] = util.parsetime(data["created_at"])
m["text"] = util.unescape(data["text"])
m["text"] = cgi.escape(m["text"])
m["content"] = m["text"]
# Go through the entities in the tweet and use them to linkify/filter tweeks as appropriate
if data.has_key("entities"):
#Get mention entries
if data["entities"].has_key("user_mentions"):
names = []
for mention in data["entities"]["user_mentions"]:
if not mention["screen_name"] in names:
try:
m["content"] = m["content"].replace("@" + mention["screen_name"], "@<a href='gwibber:/user?acct=" + m["account"] + "&name=@" + mention["screen_name"] + "'>" + mention["screen_name"] + "</a>")
except:
pass
names.append(mention["screen_name"])
#Get hashtag entities
if data["entities"].has_key("hashtags"):
hashtags = []
for tag in data["entities"]["hashtags"]:
if not tag["text"] in hashtags:
try:
m["content"] = m["content"].replace("#" + tag["text"], "#<a href='gwibber:/tag?acct=" + m["account"] + "&query=#" + tag["text"] + "'>" + tag["text"] + "</a>")
except:
pass
hashtags.append(tag["text"])
# Get url entities - These usually go in the link stream, but if they're picturesor videos, they should go in the proper stream
if data["entities"].has_key("urls"):
for urls in data["entities"]["urls"]:
url = cgi.escape (urls["url"])
expanded_url = url
if urls.has_key("expanded_url"):
if not urls["expanded_url"] is None:
expanded_url = cgi.escape(urls["expanded_url"])
display_url = url
if urls.has_key("display_url"):
display_url = cgi.escape (urls["display_url"])
if url == m["content"]:
m["content"] = "<a href='" + url + "' title='" + expanded_url + "'>" + display_url + "</a>"
else:
try:
startindex = m["content"].index(url)
endindex = startindex + len(url)
start = m["content"][0:startindex]
end = m["content"][endindex:]
m["content"] = start + "<a href='" + url + "' title='" + expanded_url + "'>" + display_url + "</a>" + end
except:
logger.debug ("Failed to set url for ID: %s", m["mid"])
m["type"] = "link"
images = util.imgpreview(expanded_url)
videos = util.videopreview(expanded_url)
if images:
m["images"] = images
m["type"] = "photo"
elif videos:
m["images"] = videos
m["type"] = "video"
else:
# Well, it's not anything else, so it must be a link
m["link"] = {}
m["link"]["picture"] = ""
m["link"]["name"] = ""
m["link"]["description"] = m["content"]
m["link"]["url"] = url
m["link"]["icon"] = ""
m["link"]["caption"] = ""
m["link"]["properties"] = {}
if data["entities"].has_key("media"):
for media in data["entities"]["media"]:
try:
url = cgi.escape (media["url"])
media_url_https = media["media_url_https"]
expanded_url = url
if media.has_key("expanded_url"):
expanded_url = cgi.escape(media["expanded_url"])
display_url = url
if media.has_key("display_url"):
display_url = cgi.escape (media["display_url"])
startindex = m["content"].index(url)
endindex = startindex + len(url)
start = m["content"][0:startindex]
end = m["content"][endindex:]
m["content"] = start + "<a href='" + url + "' title='" + expanded_url + "'>" + display_url + "</a>" + end
if media["type"] == "photo":
m["type"] = "photo"
m["photo"] = {}
m["photo"]["picture"] = media_url_https
m["photo"]["url"] = None
m["photo"]["name"] = None
except:
pass
else:
m["content"] = util.linkify(util.unescape(m["text"]),
((util.PARSE_HASH, '#<a href="gwibber:/tag?acct=%s&query=\\1">\\1</a>' % m["account"]),
(util.PARSE_NICK, '@<a href="gwibber:/user?acct=%s&name=\\1">\\1</a>' % m["account"])), escape=True)
m["html"] = m["content"]
m["to_me"] = ("@%s" % self.account["username"]) in data["text"] # Check if it's a reply directed at the user
m["favorited"] = data.get("favorited", False) # Check if the tweet has been favourited
except:
logger.error("%s failure - %s", PROTOCOL_INFO["name"], data)
return {}
return m
def _user(self, user):
return {
"name": user.get("name", None),
"nick": user.get("screen_name", None),
"id": user.get("id", None),
"location": user.get("location", None),
"followers": user.get("followers_count", None),
"friends": user.get("friends_count", None),
"description": user.get("description", None),
"following": user.get("following", None),
"protected": user.get("protected", None),
"statuses": user.get("statuses_count", None),
"image": user.get("profile_image_url", None),
"website": user.get("url", None),
"url": "/".join((URL_PREFIX, user.get("screen_name", ""))) or None,
"is_me": user.get("screen_name", None) == self.account["username"],
}
def _message(self, data):
if type(data) != dict:
logger.error("Cannot parse message data: %s", str(data))
return {}
n = {}
if data.has_key("retweeted_status"):
n["retweeted_by"] = self._user(data["user"] if "user" in data else data["sender"])
if data.has_key("created_at"):
n["time"] = util.parsetime(data["created_at"])
data = data["retweeted_status"]
else:
n["retweeted_by"] = None
if data.has_key("created_at"):
n["time"] = util.parsetime(data["created_at"])
m = self._common(data)
for k in n:
m[k] = n[k]
m["source"] = data.get("source", False)
if data.has_key("in_reply_to_status_id"):
if data["in_reply_to_status_id"]:
m["reply"] = {}
m["reply"]["id"] = data["in_reply_to_status_id"]
m["reply"]["nick"] = data["in_reply_to_screen_name"]
if m["reply"]["id"] and m["reply"]["nick"]:
m["reply"]["url"] = "/".join((URL_PREFIX, m["reply"]["nick"], "statuses", str(m["reply"]["id"])))
else:
m["reply"]["url"] = None
m["sender"] = self._user(data["user"] if "user" in data else data["sender"])
m["url"] = "/".join((m["sender"]["url"], "statuses", str(m.get("mid", None))))
return m
def _responses(self, data):
m = self._message(data)
m["type"] = None
return m
def _private(self, data):
m = self._message(data)
m["private"] = True
m["type"] = None
m["recipient"] = {}
m["recipient"]["name"] = data["recipient"]["name"]
m["recipient"]["nick"] = data["recipient"]["screen_name"]
m["recipient"]["id"] = data["recipient"]["id"]
m["recipient"]["image"] = data["recipient"]["profile_image_url"]
m["recipient"]["location"] = data["recipient"]["location"]
m["recipient"]["url"] = "/".join((URL_PREFIX, m["recipient"]["nick"]))
m["recipient"]["is_me"] = m["recipient"]["nick"] == self.account["username"]
m["to_me"] = m["recipient"]["is_me"]
return m
def _result(self, data):
m = self._common(data)
if data["to_user_id"]:
m["reply"] = {}
m["reply"]["id"] = data["to_user_id"]
m["reply"]["nick"] = data["to_user"]
m["sender"] = {}
m["sender"]["nick"] = data["from_user"]
m["sender"]["id"] = data["from_user_id"]
m["sender"]["image"] = data["profile_image_url"]
m["sender"]["url"] = "/".join((URL_PREFIX, m["sender"]["nick"]))
m["sender"]["is_me"] = m["sender"]["nick"] == self.account["username"]
m["url"] = "/".join((m["sender"]["url"], "statuses", str(m["mid"])))
return m
def _profile(self, data):
if "error" in data:
return {
"error": data["error"]
}
return {
"name": data.get("name", data["screen_name"]),
"service": "twitter",
"stream": "profile",
"account": self.account["id"],
"mid": data["id"],
"text": data.get("description", ""),
"nick": data["screen_name"],
"url": data.get("url", ""),
"protected": data.get("protected", False),
"statuses": data.get("statuses_count", 0),
"followers": data.get("followers_count", 0),
"friends": data.get("friends_count", 0),
"following": data.get("following", 0),
"favourites": data.get("favourites_count", 0),
"image": data["profile_image_url"],
"utc_offset": data.get("utc_offset", 0),
"id": data["id"],
"lang": data.get("lang", "en"),
"verified": data.get("verified", False),
"geo_enabled": data.get("geo_enabled", False),
"time_zone": data.get("time_zone", "")
}
def _list(self, data):
return {
"mid": data["id"],
"service": "twitter",
"account": self.account["id"],
"time": 0,
"text": data["description"],
"html": data["description"],
"content": data["description"],
"url": "/".join((URL_PREFIX, data["uri"])),
"sender": self._user(data["user"]),
"name": data["name"],
"nick": data["slug"],
"key": data["slug"],
"full": data["full_name"],
"uri": data["uri"],
"mode": data["mode"],
"members": data["member_count"],
"followers": data["subscriber_count"],
"kind": "list",
}
def _get(self, path, parse="message", post=False, single=False, **args):
url = "/".join((API_PREFIX, path))
request = oauth.OAuthRequest.from_consumer_and_token(self.consumer, self.token,
http_method=post and "POST" or "GET", http_url=url, parameters=util.compact(args))
request.sign_request(self.sigmethod, self.consumer, self.token)
if post:
headers = request.to_header()
data = network.Download(url, util.compact(args), post, header=headers).get_json()
else:
data = network.Download(request.to_url(), None, post).get_json()
resources.dump(self.account["service"], self.account["id"], data)
if isinstance(data, dict) and data.get("errors", 0):
if "authenticate" in data["errors"][0]["message"]:
logstr = """%s: %s - %s""" % (PROTOCOL_INFO["name"], _("Authentication failed"), data["errors"][0]["message"])
logger.error("%s", logstr)
return [{"error": {"type": "auth", "account": self.account, "message": data["errors"][0]["message"]}}]
else:
for error in data["errors"]:
logstr = """%s: %s - %s""" % (PROTOCOL_INFO["name"], _("Unknown failure"), error["message"])
return [{"error": {"type": "unknown", "account": self.account, "message": error["message"]}}]
elif isinstance(data, dict) and data.get("error", 0):
if "Incorrect signature" in data["error"]:
logstr = """%s: %s - %s""" % (PROTOCOL_INFO["name"], _("Request failed"), data["error"])
logger.error("%s", logstr)
return [{"error": {"type": "auth", "account": self.account, "message": data["error"]}}]
elif isinstance(data, str):
logstr = """%s: %s - %s""" % (PROTOCOL_INFO["name"], _("Request failed"), data)
logger.error("%s", logstr)
return [{"error": {"type": "request", "account": self.account, "message": data}}]
if parse == "follow" or parse == "unfollow":
if isinstance(data, dict) and data.get("error", 0):
logstr = """%s: %s - %s""" % (PROTOCOL_INFO["name"], _("%s failed" % parse), data["error"])
logger.error("%s", logstr)
return [{"error": {"type": "auth", "account": self.account, "message": data["error"]}}]
else:
return [["friendships", {"type": parse, "account": self.account["id"], "service": self.account["service"],"user_id": data["id"], "nick": data["screen_name"]}]]
if parse == "profile" and isinstance(data, dict):
return self._profile(data)
if parse == "list":
return [self._list(l) for l in data["lists"]]
if single: return [getattr(self, "_%s" % parse)(data)]
if parse: return [getattr(self, "_%s" % parse)(m) for m in data]
else: return []
def _search(self, **args):
data = network.Download("http://search.twitter.com/search.json", util.compact(args))
data = data.get_json()["results"]
if type(data) != list:
logger.error("Cannot parse search data: %s", str(data))
return []
return [self._result(m) for m in data]
def __call__(self, opname, **args):
return getattr(self, opname)(**args)
def receive(self, count=util.COUNT, since=None):
return self._get("statuses/home_timeline.json", include_entities=1, count=count, since_id=since)
def responses(self, count=util.COUNT, since=None):
return self._get("statuses/mentions.json", "responses", include_entities=1, count=count, since_id=since)
def private(self, count=util.COUNT, since=None):
private = self._get("direct_messages.json", "private", include_entities=1, count=count, since_id=since) or []
private_sent = self._get("direct_messages/sent.json", "private", count=count, since_id=since) or []
return private + private_sent
def public(self):
return self._get("statuses/public_timeline.json", include_entities=1)
def lists(self, **args):
following = self._get("%s/lists/subscriptions.json" % self.account["username"], "list") or []
lists = self._get("%s/lists.json" % self.account["username"], "list") or []
return following + lists
def list(self, user, id, count=util.COUNT, since=None):
return self._get("%s/lists/%s/statuses.json" % (user, id), include_entities=1, per_page=count, since_id=since)
def search(self, query, count=util.COUNT, since=None):
return self._search(include_entities=1, q=query, rpp=count, since_id=since)
def tag(self, query, count=util.COUNT, since=None):
return self._search(q="#%s" % query, count=count, since_id=since)
def delete(self, message):
return self._get("statuses/destroy/%s.json" % message["mid"], None, post=True, do=1)
def like(self, message):
return self._get("favorites/create/%s.json" % message["mid"], None, post=True, do=1)
def send(self, message):
return self._get("statuses/update.json", post=True, single=True,
status=message)
def send_private(self, message, private):
return self._get("direct_messages/new.json", "private", post=True, single=True,
text=message, screen_name=private["sender"]["nick"])
def send_thread(self, message, target):
return self._get("statuses/update.json", post=True, single=True,
status=message, in_reply_to_status_id=target["mid"])
def retweet(self, message):
return self._get("statuses/retweet/%s.json" % message["mid"], None, post=True, do=1)
def follow(self, screen_name):
return self._get("friendships/create.json", screen_name=screen_name, post=True, parse="follow")
def unfollow(self, screen_name):
return self._get("friendships/destroy.json", screen_name=screen_name, post=True, parse="unfollow")
def profile(self, id=None, count=None, since=None):
return self._get("users/show.json", screen_name=id, count=count, since_id=since, parse="profile")
def user_messages(self, id=None, count=util.COUNT, since=None):
profiles = [self.profile(id)] or []
messages = self._get("statuses/user_timeline.json", id=id, include_entities=1, count=count, since_id=since) or []
return messages + profiles
| thnguyn2/ECE_527_MP | mp4/SD_card/partition1/usr/share/gwibber/plugins/twitter/__init__.py | __init__.py | py | 18,053 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "gwibber.microblog.util.getbus",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "gwibber.microblog.util",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": ... |
6226481940 | import glob, os
import cv2
import numpy as np
def red_ch_zeros(foldername):
'''
Drop red channel.
:param foldername: string
:return: None
'''
dir_name = foldername + os.sep + "*"
image_files_list = list(glob.glob(dir_name))
for image_file in image_files_list:
src = cv2.imread(image_file, cv2.IMREAD_UNCHANGED)
src[:, :, 2] = np.zeros([src.shape[0], src.shape[1]])
# save image
cv2.imwrite(image_file, src)
print("모든 이미지에 대하여 Red channel Zeros 를 적용하였습니다.")
| Daeil-Jung/Fundus_Process | preproc/ch_reduction.py | ch_reduction.py | py | 563 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.sep",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_UNCHANGED",
"line_num... |
19405983770 | import time
from typing import List
class Solution:
'''
3.Longest Substring Without Repeating Characters
Given a string s, find the length of the longest substring without repeating characters.
'''
def basic(self, s):
n = len(s)
j = -1
mp = {}
res = 0
for i in range(n):
cur = s[i]
if cur in mp:
j = max(j, mp[cur])
res = max(i - j, res)
mp[cur] = i
return res
def lengthOfLongestSubstring(self, s: str) -> int:
'''
i is the fast pointer,
'''
i = j = length = 0
longest = [0]*128
while i < len(s):
current_char = s[i]
longest[ord(current_char)] += 1
while longest[ord(current_char)] > 1:
longest[ord(s[j])] -= 1
j += 1
length = max(length, i - j + 1)
i += 1
return length
def lengthOfLongestSubstring_hashmap(self, s: str) -> int:
n = len(s)
ans = j = 0
mp = {}
# j is the last time when the current char appears
# mp stores the position the key char appears last time
for i in range(n):
if s[i] in mp:
j = max(mp[s[i]], j)
ans = max(ans, i - j)
mp[s[i]] = i
return ans
'''
30. Substring with Concatenation of All Words
You are given a string s and an array of strings words of the same length.
Return all starting indices of substring(s) in s that is a concatenation of each word
in words exactly once, in any order, and without any intervening characters.
You can return the answer in any order.
Input: s = "barfoothefoobarman", words = ["foo","bar"]
Output: [0,9]
'''
def substringWithConcatenationofAllWords(self, s: str, words: List[str]) -> List[int]:
result = [0, 0]
ans = 0
mp = {}
for word in words:
mp[word] = -1
l = len(words[0])
i = j = 0
while (i < len(s)):
word = s[i:i+l]
if(word in mp and mp[word] > 0):
j = max(mp[word], j)
if(i+l-j > ans):
result = [j, i+l]
ans = i+l-j
mp[word] = i+l
i = i+l
return result
def minSubArrayLen(self, target: int, nums: List[int]) -> int:
'''
209
'''
n = len(nums)
j = 0
temp_sum = 0
res = float("inf")
for i in range(n):
cur = nums[i]
temp_sum += cur
while temp_sum >= target:
res = min(i - j + 1, res)
temp_sum -= nums[j]
j += 1
if res == float("inf"):
return 0
else:
return res
if __name__ == "__main__":
solution = Solution()
start_time = time.time()
# print(solution.lengthOfLongestSubstring_hashmap("abcbde"))
print(solution.substringWithConcatenationofAllWords("barfoofoobarthefoobarman", ["bar","foo","the"]))
print(time.time() - start_time)
while True:
text = input(">>")
print(solution.lengthOfLongestSubstring(text))
| Matthewow/Leetcode | slidingWindow/sliding_window.py | sliding_window.py | py | 3,335 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 12... |
19045144385 | #cog by @maxy_dev (maxy#2866)
import asyncio
import sys
import disnake as discord
import random
import os
from main import bot
from enum import Enum
import datetime, time
from disnake.ext import commands
from utils import db
if "debug" not in db:
db["debug"] = {}
class Required1(str, Enum):
true = "True"
false = ""
class Debug(commands.Cog):
def __init__(self, bot):
self.bot = bot
#when slash error
@commands.Cog.listener()
async def on_slash_command_error(self, inter, error):
if isinstance(error, commands.CommandNotFound):
return
if not isinstance(error, commands.CommandOnCooldown):
if not "Command raised an exception:" in str(error):
e = discord.Embed(title = "Error", description = f"```{str(error)}```", color = random.randint(0, 16777215))
else:
e = discord.Embed(title = "Error", description = f"```{str(error)[29:]}```", color = random.randint(0, 16777215))
else:
e = discord.Embed(title = "Error", description = f"{str(error)[:31]} <t:{int(time.time() + error.retry_after)}:R>", color = random.randint(0, 16777215))
await inter.send(embed = e, ephemeral = True)
#debug group
@commands.slash_command()
async def debug(self, inter):
pass
@debug.sub_command()
@commands.is_owner()
async def toggle(self, inter, toggler: Required1 = Required1.true):
'''
debug,,
Parameters
----------
text: None
'''
if str(inter.author.id) not in db["debug"] and toggler:
db["debug"][str(inter.author.id)] = "True"
e = discord.Embed(title = "Success", description = "Debug mode enabled", color = random.randint(0, 16777215))
await inter.send(embed = e, ephemeral = True)
return
if str(inter.author.id) in db["debug"] and not toggler:
del db["debug"][str(inter.author.id)]
e = discord.Embed(title = "Success", description = "Debug mode disabled", color = random.randint(0, 16777215))
await inter.send(embed = e, ephemeral = True)
#load extension command
@debug.sub_command()
@commands.is_owner()
async def load(self, inter, extension):
'''
Loads an extension
Parameters
----------
extension: Cog name
'''
bot.load_extension(f"cogs.{extension}")
await inter.send(f"cogs.{extension} is loaded", ephemeral = True)
#reload extension command
@debug.sub_command()
@commands.is_owner()
async def reload(self, inter, extension):
'''
Loads an extension
Parameters
----------
extension: Cog name
'''
bot.unload_extension(f"cogs.{extension}")
bot.load_extension(f"cogs.{extension}")
await inter.send(f"cogs.{extension} is reloaded", ephemeral = True)
#unload extension command
@debug.sub_command()
@commands.is_owner()
async def unload(self, inter, extension):
'''
Loads an extension
Parameters
----------
extension: Cog name
'''
bot.unload_extension(f"cogs.{extension}")
await inter.send(f"cogs.{extension} is unloaded", ephemeral = True)
#restart bot
# @debug.sub_command()
# @commands.is_owner()
# async def restart(self, inter):
# '''
# Restarts the bot
# '''
# await inter.send("Restarting", ephemeral = True)
# before = time.perf_counter()
# await bot.close()
# await bot.login(os.environ["DISCORD_TOKEN"])
# after = time.perf_counter()
# await inter.edit_original_message(f"Restarted, took `{round((after - before) * 1000)}`ms")
@debug.sub_command()
@commands.is_owner()
async def shutdown(self, inter):
'''
Shutdowns the bot
'''
await inter.send("Shutdown", ephemeral = True)
try:
await bot.close()
except Exception:
print("something went wrong")
sys.exit()
def setup(bot):
bot.add_cog(Debug(bot)) | 1randomguyspecial/pythonbot | cogs/debug.py | debug.py | py | 3,810 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "utils.db",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "utils.db",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "disnake.ext.commands.Cog",
"line_numb... |
22530319238 | import numpy as np
import pytest
import gym
from gym.wrappers import FrameStack
try:
import lz4
except ImportError:
lz4 = None
@pytest.mark.parametrize("env_id", ["CartPole-v1", "Pendulum-v1", "CarRacing-v2"])
@pytest.mark.parametrize("num_stack", [2, 3, 4])
@pytest.mark.parametrize(
"lz4_compress",
[
pytest.param(
True,
marks=pytest.mark.skipif(
lz4 is None, reason="Need lz4 to run tests with compression"
),
),
False,
],
)
def test_frame_stack(env_id, num_stack, lz4_compress):
env = gym.make(env_id, disable_env_checker=True)
shape = env.observation_space.shape
env = FrameStack(env, num_stack, lz4_compress)
assert env.observation_space.shape == (num_stack,) + shape
assert env.observation_space.dtype == env.env.observation_space.dtype
dup = gym.make(env_id, disable_env_checker=True)
obs, _ = env.reset(seed=0)
dup_obs, _ = dup.reset(seed=0)
assert np.allclose(obs[-1], dup_obs)
for _ in range(num_stack**2):
action = env.action_space.sample()
dup_obs, _, dup_terminated, dup_truncated, _ = dup.step(action)
obs, _, terminated, truncated, _ = env.step(action)
assert dup_terminated == terminated
assert dup_truncated == truncated
assert np.allclose(obs[-1], dup_obs)
if terminated or truncated:
break
assert len(obs) == num_stack
| openai/gym | tests/wrappers/test_frame_stack.py | test_frame_stack.py | py | 1,457 | python | en | code | 33,110 | github-code | 36 | [
{
"api_name": "gym.make",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "gym.wrappers.FrameStack",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "gym.make",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_... |
475267315 | from helpers.agent.multi_agent import MultiAgent
from helpers.env.env_utils import Init_Env
from itertools import count
import numpy as np
import torch
import glob
import time
import os
import gc
import sys
LEAVE_PRINT_EVERY_N_SECS = 300
class Env_Agent_Mix:
    """Glue between a Unity-style environment and a multi-agent learner.

    Drives a full training session: warm-up exploration, replay-buffer
    filling, per-step optimisation, periodic target-network updates,
    greedy evaluation, checkpointing and debug logging.
    """

    def __init__(self, filename:str, training_info:dict, policy_info:dict,
                 value_info:dict, buffer_info:dict, seed:int, optim_update:int,
                 checkpoints:int, storage_loc:str):
        """Build the environment, query its dimensions and create the agents.

        :param filename: path to the environment binary.
        :param training_info: dict with "update_every_steps",
            "n_warmup_batches" and "weight_mix_ratio".
        :param optim_update: optimisation passes per environment step.
        :param checkpoints: number of checkpoints kept after cleaning.
        :param storage_loc: root directory for checkpoint storage.
        """
        # create environment
        self.env_class = Init_Env(os.path.join(filename))
        # initialize environment
        self.env, self.brain_name = self.env_class.make_env_func(seed)
        # get state_size, number of agents, number of actions
        self.state_size, self.num_agents, \
        self.num_actions = self.env_class.reset(self.env, self.brain_name,
                                                train_mode=False, flag=True)
        # action-space bounds: every action component lives in [-1, 1]
        self.low_bounds = np.array([-1] * self.num_actions)
        self.high_bounds = np.array([1] * self.num_actions)
        self.seed = seed
        self.optim_update = optim_update
        # general training information
        self.update_steps = training_info["update_every_steps"]
        self.warm_up_batch = training_info["n_warmup_batches"]
        self.weight_mix_ratio = training_info["weight_mix_ratio"]
        # initialize the multi-agent wrapper
        self.multi_agent_init(policy_info, value_info, buffer_info)
        # storage information
        self.checkpoints = checkpoints
        self.storage_loc = storage_loc

    def multi_agent_init(self, policy_info:dict, value_info:dict, buffer_info:dict):
        """Create the MultiAgent container from the collected dimensions."""
        general_info = {"weight_mix_ratio": self.weight_mix_ratio,
                        "bounds": (self.low_bounds, self.high_bounds),
                        "state_size": self.state_size,
                        "num_agents": self.num_agents,
                        "num_actions": self.num_actions}
        self.multi_agents = MultiAgent(general_info, policy_info, value_info, buffer_info, self.seed)

    def env_agent_inter_step(self, states:np.ndarray, min_samples:int):
        """One environment step for all agents during training.

        Random actions are used until the buffer holds *min_samples*
        transitions (network warm-up); afterwards the stochastic policy
        is sampled.  The transition is stored in the shared buffer.

        :return: (next_states, rewards, dones) from the environment.
        """
        actions = []
        if self.multi_agents.buffer.storage_length() < min_samples:
            for (agent, state) in zip(self.multi_agents.agents, states):
                actions.append(agent.policy_model.select_random_action(state))
        else:
            for (agent, state) in zip(self.multi_agents.agents, states):
                actions.append(agent.policy_model.select_action(state))
        # take step and obtain rewards
        parameters = self.env_class.step(self.env, self.brain_name, np.array(actions))
        # NOTE(review): parameters["max_reached"] (time-limit truncation) is
        # not folded into the stored done flags — confirm that is intended.
        # store in buffer -> (states, actions, rewards, next_states, dones)
        self.multi_agents.buffer.store(states, np.array(actions),
                                       parameters["rewards"],
                                       parameters["next_states"],
                                       parameters["dones"])
        # per-episode tracking
        self.episode_timestep[-1] += 1
        self.episode_exploration[-1] += np.array([agent.policy_model.exploration_ratio
                                                  for agent in self.multi_agents.agents]).mean()
        return parameters["next_states"], parameters["rewards"], parameters["dones"]

    def env_agent_inter_step_eval(self, states:np.ndarray):
        """One greedy (deterministic) environment step; nothing is stored."""
        actions = []
        for (agent, state) in zip(self.multi_agents.agents, states):
            actions.append(agent.policy_model.select_greedy_action(state))
        parameters = self.env_class.step(self.env, self.brain_name, np.array(actions))
        return parameters["next_states"], parameters["rewards"], parameters["dones"]

    def evaluate(self, n_episodes=1, stage="train"):
        """Run greedy-policy episodes.

        :return: (mean, std) of the per-episode mean rewards when
            ``stage == "train"``; otherwise (per-episode means, std,
            per-agent reward matrix).
        """
        rs = []
        rs_per_agent = []
        for episode in range(n_episodes):
            states = self.env_class.reset(self.env, self.brain_name,
                                          train_mode=False, flag=False)
            reward_tracker = np.zeros(self.num_agents)
            for _ in count():
                states, rewards, dones = self.env_agent_inter_step_eval(states)
                reward_tracker += np.array(rewards)
                if True in dones:
                    break
            if stage.lower() == "train":
                rs.append(reward_tracker.mean())
            else:
                rs_per_agent.append(reward_tracker)
                rs.append(reward_tracker.mean())
        if stage.lower() == "train":
            return np.mean(rs), np.std(rs)
        else:
            return rs, np.std(rs), np.array(rs_per_agent)

    def train(self, env_info:dict):
        """Full training session.

        :param env_info: dict with "goal_mean_100_rewards", "max_minutes",
            "max_episodes" and "gamma".
        :return: (results, final_eval_score, agent_scores, training_time,
            wallclock_time, fin_episode)
        """
        # termination constants
        goal_mean_100_reward = env_info["goal_mean_100_rewards"]
        max_minutes = env_info["max_minutes"]
        max_episodes = env_info["max_episodes"]
        gamma = env_info["gamma"]
        # tracking containers
        self.evaluation_scores = []
        self.episode_timestep = []
        self.episode_seconds = []
        self.episode_reward = []
        self.episode_exploration = []
        self.reward_tracker = np.zeros((max_episodes, self.num_agents))
        training_start, last_debug_time = time.time(), float("-inf")
        training_time = 0
        total_steps = 0
        # per-episode result row: steps, mean/min/max 100-ep reward,
        # mean/min/max 100-ep evaluation score
        results = np.empty((env_info["max_episodes"] , 7))
        fin_episode = 0
        for episode in range(1, max_episodes + 1):
            episode_start = time.time()
            states = self.env_class.reset(self.env, self.brain_name,
                                          train_mode=True, flag=False)
            self.episode_timestep.append(0.0)
            self.episode_exploration.append(0.0)
            # transitions to collect before learning starts
            min_samples = self.warm_up_batch * self.multi_agents.batch_size
            for step in count():
                states, rewards, dones = self.env_agent_inter_step(states, min_samples)
                self.reward_tracker[episode-1] += np.array(rewards)
                if self.multi_agents.buffer.storage_length() > min_samples:
                    for _ in range(self.optim_update):
                        # sample (prioritised) batch and optimise every agent
                        idx_batch, memory_batch, ISweights = self.multi_agents.buffer.sample()
                        self.multi_agents.optimize(idx_batch, memory_batch, ISweights, gamma)
                # periodically mix online weights into the target networks
                if np.sum(self.episode_timestep) % self.update_steps == 0:
                    self.multi_agents.update_networks_multi()
                if True in dones:
                    self.episode_reward.append(np.max(self.reward_tracker[episode-1]))
                    gc.collect()
                    break
            # wall-clock bookkeeping
            episode_elapsed = time.time() - episode_start
            training_time += episode_elapsed
            self.episode_seconds.append(episode_elapsed)
            # greedy evaluation + checkpoint of every agent
            evaluation_score, _ = self.evaluate()
            for idx, agent in enumerate(self.multi_agents.agents):
                self.save_checkpoint(episode - 1, agent.policy_model, idx)
            total_steps = int(np.sum(self.episode_timestep))
            self.evaluation_scores.append(evaluation_score)
            # rolling statistics over the last 10 / 100 episodes
            mean_10_reward = np.mean(self.episode_reward[-10:])
            std_10_reward = np.std(self.episode_reward[-10:])
            mean_100_reward = np.mean(self.episode_reward[-100:])
            min_100_reward = np.min(self.episode_reward[-100:])
            max_100_reward = np.max(self.episode_reward[-100:])
            std_100_reward = np.std(self.episode_reward[-100:])
            mean_100_eval_score = np.mean(self.evaluation_scores[-100:])
            std_100_eval_score = np.std(self.evaluation_scores[-100:])
            min_100_eval_score = np.min(self.evaluation_scores[-100:])
            max_100_eval_score = np.max(self.evaluation_scores[-100:])
            lst_100_exp_rat = np.array(self.episode_exploration[-100:]) / np.array(self.episode_timestep[-100:])
            mean_100_exp_rat = np.mean(lst_100_exp_rat)
            std_100_exp_rat = np.std(lst_100_exp_rat)
            wallclock_elapsed = time.time() - training_start
            results[episode - 1] = total_steps, mean_100_reward,\
                                   min_100_reward, max_100_reward, \
                                   mean_100_eval_score, min_100_eval_score, \
                                   max_100_eval_score
            # NOTE(review): reached_debug_time is computed but the debug
            # message below is printed every episode regardless — confirm.
            reached_debug_time = time.time() - last_debug_time >= LEAVE_PRINT_EVERY_N_SECS
            # termination criteria
            reached_max_minutes = wallclock_elapsed >= max_minutes * 60
            reached_max_episodes = episode >= max_episodes
            reached_goal_mean_reward = mean_100_eval_score >= goal_mean_100_reward
            training_over = reached_max_minutes or reached_max_episodes or reached_goal_mean_reward
            self.debug_message(episode, total_steps, mean_10_reward, std_10_reward,
                               mean_100_reward, std_100_reward, mean_100_exp_rat,
                               std_100_exp_rat, mean_100_eval_score, std_100_eval_score,
                               training_start)
            if training_over:
                if reached_max_minutes: print(u'--> reached_max_minutes \u2715')
                if reached_max_episodes: print(u'--> reached_max_episodes \u2715')
                if reached_goal_mean_reward: print(u'--> reached_goal_mean_reward \u2713')
                fin_episode = episode
                break
        # final 100-episode evaluation with per-agent breakdown
        final_eval_score, score_std, agent_scores = self.evaluate(n_episodes=100, stage="fin")
        wallclock_time = time.time() - training_start
        self.final_message(np.mean(final_eval_score), score_std, training_time, wallclock_time)
        # prune stored checkpoints down to self.checkpoints per agent
        self.get_cleaned_checkpoints()
        self.env.close() ; del self.env
        return np.array(results), np.array(final_eval_score), np.array(agent_scores),\
               training_time, wallclock_time, fin_episode

    def final_message(self, final_eval_score, score_std, training_time, wallclock_time):
        """Print the post-training evaluation summary."""
        print('Final evaluation score {:.2f}\u00B1{:.2f} in {:.2f}s training time,'
              ' {:.2f}s wall-clock time.\n'.format(final_eval_score, score_std,
                                                   training_time, wallclock_time))

    def debug_message(self, episode, total_steps, mean_10_reward, std_10_reward,
                      mean_100_reward, std_100_reward, mean_100_exp_rat,
                      std_100_exp_rat, mean_100_eval_score, std_100_eval_score,
                      training_start):
        """Print one line of rolling training statistics."""
        elapsed_str = time.strftime("%H:%M:%S", time.gmtime(time.time() - training_start))
        debug_message = 'el {}, ep {:04}, ts {:07}, '
        debug_message += 'ar_10 ts {:05.1f} \u00B1 {:05.1f}, '
        debug_message += 'ar_100 ts {:05.1f} \u00B1 {:05.1f}, '
        debug_message += 'ex 100 {:02.1f} \u00B1 {:02.1f}, '
        debug_message += 'ev {:05.1f} \u00B1 {:05.1f}'
        debug_message = debug_message.format(elapsed_str, episode - 1, total_steps,
                                             mean_10_reward, std_10_reward,
                                             mean_100_reward, std_100_reward,
                                             mean_100_exp_rat, std_100_exp_rat,
                                             mean_100_eval_score, std_100_eval_score)
        print(debug_message)

    def save_checkpoint(self, episode_idx, model, rank):
        """Store the policy weights of agent *rank* for episode *episode_idx*."""
        root_dir = os.path.join(self.storage_loc, "checkpoints",
                                "{}".format(self.seed),
                                "model_{}".format(rank))
        if not os.path.isdir(root_dir):
            os.makedirs(root_dir)
        torch.save(model.state_dict(),
                   os.path.join(root_dir, 'ep_{}.tar'.format(episode_idx)))

    def get_cleaned_checkpoints(self):
        """Keep only `self.checkpoints` evenly spaced checkpoints per agent,
        deleting the rest from disk."""
        for rank in range(self.num_agents):
            checkpoint_paths = {}
            paths = glob.glob(os.path.join(self.storage_loc, "checkpoints",
                                           "{}".format(self.seed),
                                           "model_{}".format(rank),
                                           "*.tar"))
            paths_dic = {int(path.split('.')[0].split("_")[-1]): path for path in paths}
            if not paths_dic:
                # nothing saved for this agent; max() below would raise
                continue
            last_ep = max(paths_dic.keys())
            # BUGFIX: np.int was removed in NumPy >= 1.24; use builtin int.
            checkpoint_idxs = np.linspace(1, last_ep + 1, self.checkpoints,
                                          endpoint=True, dtype=int) - 1
            for idx, path in paths_dic.items():
                if idx in checkpoint_idxs:
                    checkpoint_paths[idx] = path
                else:
                    os.unlink(path)
| Oreoluwa-Se/MultiAgent-SAC-Tennis | helpers/env_agent_interact.py | env_agent_interact.py | py | 14,372 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "helpers.env.env_utils.Init_Env",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.array"... |
3123319458 | # ------------------------------------------------------------------------------
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.decomposition import KernelPCA
from sklearn.preprocessing import StandardScaler
import warnings
warnings.filterwarnings("ignore")
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Load the pre-processed signal tables (one row per time sample).
file_test = pd.read_csv('data_test_def.csv', dtype="float64")
file_train = pd.read_csv('data_train_def.csv', dtype="float64")

# Per-discharge accumulators, filled by _analyse_discharges below.
matrices = []    # feature-by-PC loadings DataFrames, one per discharge
nlpca_e_v = []   # explained-variance ratios, one array per discharge
nlpca_dens = []  # correlation of each PC with the density signal


def _analyse_discharges(signal_df, discharge_ids):
    """Run an 8-component RBF kernel PCA on every discharge in *signal_df*.

    One discharge spans the rows over which the 'time' column is
    non-decreasing; a time reset marks the start of the next discharge.
    Results are appended to the module-level accumulators.
    """
    start = 0
    for n, _ in enumerate(discharge_ids):
        print(n)
        # Count how many consecutive rows still belong to this discharge.
        time = signal_df.iloc[start:]['time'].tolist()
        span = 0
        for c, t in enumerate(time):
            if c > 0:
                if time[c - 1] <= t:
                    span += 1
                else:
                    break
        mat = signal_df.iloc[start:span + start + 1]
        dens = mat['Densidad2_'].tolist()
        mat2 = mat.drop(columns=['time', 'Densidad2_'])
        # Non-linear PCA: kernel PCA with an RBF kernel.
        nlpca = KernelPCA(n_components=8, kernel='rbf', copy_X=False)
        nlpca_features = nlpca.fit_transform(mat2)
        # corrcoef's last row holds corr(dens, PC_i) for every component.
        correlaciones = np.corrcoef(nlpca_features.T, dens)
        correlacion_principal = correlaciones[-1, :-1]
        # Project the eigenvectors back onto the original feature space.
        loadings = mat2.values.T.dot(nlpca.eigenvectors_)
        explained_variance_ = nlpca.eigenvalues_ / sum(nlpca.eigenvalues_)
        pc_labels = [f'PC{i}' for i in range(1, len(loadings) + 1)]
        loadings_df = pd.DataFrame.from_dict(dict(zip(pc_labels, loadings)))
        loadings_df.index.name = 'feature_names'
        loadings_df.index = ['ACTON275', 'BOL5', 'ECE7', 'GR', 'GR2',
                             'HALFAC3', 'IACCEL1', 'RX306']
        matrices.append(loadings_df)
        nlpca_e_v.append(np.array(explained_variance_))
        nlpca_dens.append(correlacion_principal)
        start += span + 1


# The discharge-id files are now closed automatically (they were leaked
# before), and the identical test/train loops share one helper.
print('test')
with open('descargas_test.txt', 'r') as descargas_test:
    _analyse_discharges(file_test, descargas_test)
print('train')
with open('descargas_train.txt', 'r') as descargas_train:
    _analyse_discharges(file_train, descargas_train)

# Element-wise average of the loadings over all discharges.
suma_dataframes = None
num_dataframes = 0
for df in matrices:
    suma_dataframes = df if suma_dataframes is None else suma_dataframes + df
    num_dataframes += 1
promedio_dataframe = suma_dataframes / num_dataframes
print(promedio_dataframe)

# Average explained variance / density correlations across discharges.
vector_promedio = np.mean(np.array(nlpca_e_v), axis=0)
vector_promedio_dens = np.mean(np.array(nlpca_dens), axis=0)
print(vector_promedio)
print("Correlaciones:")
for i, correlacion in enumerate(vector_promedio_dens):
    print(f"Componente Principal {i+1}: {correlacion}")

# Scree plot of the averaged explained variance.
plt.bar(range(1, len(vector_promedio) + 1), vector_promedio)
plt.plot(range(1, len(vector_promedio) + 1), np.cumsum(vector_promedio),
         c='red', label='Cumulative Explained Variance')
plt.legend(loc='upper left')
plt.xlabel('Number of components')
plt.ylabel('Explained variance (eignenvalues)')
plt.title('Scree plot')
plt.show()
| PauloAguayo/PUCV | TimeEncoder/nlpca.py | nlpca.py | py | 4,866 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.decomp... |
6508923868 | # -*- coding: utf-8 -*-
import numpy as np
import csv
import chainer
import chainer.functions as F
import chainer.links as L
import sys
import matplotlib.pyplot as plt
"""
データを実際に予測するときに利用するクラス
:param
predict_ary: csvからデータを取ってきて, 入力と正解をタプル形式で配列で持つクラス
plt_ary: matplotlibで表示するためにもつ配列
model: chainerでつくったモデルのインスタンスを格納
one_count: 正解データで"1"のラベルのもの
性能はrecall_rateで測定(TP/TP+TN)
"""
class PredictorClass(object):
    """Run a trained chainer classifier over a CSV of labelled samples.

    Attributes:
        predict_ary: list of (features, label) tuples loaded from CSV.
        plt_ary: (raw model output, predicted class, correct-flag) tuples
            collected during predict(), consumed by plot().
        model: the chainer model instance (set by load_model).
        one_count: number of samples whose true label is 1 (set by
            predict); denominator of the recall rate TP / (TP + FN).
    """

    def __init__(self):
        self.predict_ary = []
        self.plt_ary = []
        self.model = None
        self.one_count = None

    def load_model(self, m_cls, path):
        """Attach the model instance and load its weights from *path*."""
        self.model = m_cls
        chainer.serializers.load_npz(path, m_cls)

    def load_csv(self, path):
        """Load rows of '50 features + trailing integer label' from *path*."""
        with open(path, 'r') as f:
            self.predict_ary = [(np.array(row[:-1], dtype=np.float32), int(row[-1]))
                                for row in csv.reader(f)]

    def predict(self):
        """Classify every loaded sample and return the overall accuracy.

        Side effects: fills plt_ary with (raw output, prediction,
        correct-flag) tuples and records one_count.
        """
        ans_count = 0
        one_count = 0
        question_num = len(self.predict_ary)
        for data in self.predict_ary:
            reshape_np = data[0].reshape(1, 50)
            answer = data[1]
            # BUGFIX: the original referenced a global `model` (NameError
            # outside a script context); use the loaded self.model.  The
            # forward pass is also run once and reused instead of twice.
            output = self.model(reshape_np)
            y = np.argmax(F.softmax(output).data)
            if answer == 1:
                one_count += 1
            if y == answer:
                ans_count += 1
                self.plt_ary.append((output.data, y, 1))
            else:
                self.plt_ary.append((output.data, y, 0))
        accuracy = ans_count / question_num
        self.one_count = one_count
        return accuracy

    def plot(self):
        """Scatter-plot the 2-D raw outputs coloured by predicted class and
        correctness, and print confusion counts, recall and accuracy."""
        x_ary0 = []
        y_ary0 = []
        x_ary1 = []
        y_ary1 = []
        incorrect_x0 = []
        incorrect_y0 = []
        incorrect_x1 = []
        incorrect_y1 = []
        for dt in self.plt_ary:
            if dt[2] == 0:
                # misclassified sample, split by the *predicted* class
                if dt[1] == 0:
                    incorrect_x0.append(dt[0][0][0])
                    incorrect_y0.append(dt[0][0][1])
                else:
                    incorrect_x1.append(dt[0][0][0])
                    incorrect_y1.append(dt[0][0][1])
                continue
            if dt[1] == 0:
                x_ary0.append(dt[0][0][0])
                y_ary0.append(dt[0][0][1])
            else:
                x_ary1.append(dt[0][0][0])
                y_ary1.append(dt[0][0][1])
        np_plt_x0 = np.array(x_ary0, dtype=np.float32)
        np_plt_y0 = np.array(y_ary0, dtype=np.float32)
        # NOTE(review): incorrect_x0 holds wrong predictions *of class 0*
        # (true label 1) — the '0->1'/'1->0' labels may be swapped; kept
        # as-is to preserve output format.
        print('0->1: ', len(incorrect_x0))
        print('1->0: ', len(incorrect_x1))
        print('1->1: ', len(x_ary1))
        print('0->0: ', len(x_ary0))
        # guard against ZeroDivisionError when no positive labels exist
        recall = len(x_ary1) / self.one_count if self.one_count else 0.0
        print('recall rate: ', recall)
        print('accuracy: ', (len(x_ary1) + len(x_ary0)) / len(self.plt_ary))
        np_plt_x1 = np.array(x_ary1, dtype=np.float32)
        np_plt_y1 = np.array(y_ary1, dtype=np.float32)
        np_incorrectx0 = np.array(incorrect_x0, dtype=np.float32)
        np_incorrecty0 = np.array(incorrect_y0, dtype=np.float32)
        np_incorrectx1 = np.array(incorrect_x1, dtype=np.float32)
        np_incorrecty1 = np.array(incorrect_y1, dtype=np.float32)
        plt.plot(np_plt_x1, np_plt_y1, 'o', color='r', alpha=0.5)
        plt.plot(np_plt_x0, np_plt_y0, 'o', color='b', alpha=0.5)
        plt.plot(np_incorrectx0, np_incorrecty0, 'o', color='g', alpha=0.5)
        plt.plot(np_incorrectx1, np_incorrecty1, 'o', color='m', alpha=0.5)
        plt.show()
| awkrail/laugh_maker | validation_src/predictor.py | predictor.py | py | 3,639 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "chainer.serializers.load_npz",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "chainer.serializers",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "num... |
71699272744 | import numpy as np
import scipy.interpolate as sp_interp
from scipy.integrate import odeint, solve_ivp
import matplotlib.pyplot as plt
def odefun(t,x,params_log):
    """Right-hand side of the three-compartment troponin release model.

    :param t: current time.
    :param x: state [Cs, Cc, Cp] — troponin concentration in the source,
        central and plasma compartments.
    :param params_log: log10-scaled parameters; [0] inter-compartment rate
        exponent, [1] elimination rate exponent, [2] sigmoid mid-time
        exponent (extra entries are ignored).
    :return: list of the three concentration derivatives.
    """
    cs, cc, cp = x[0], x[1], x[2]
    a_log, b_log, tsc_log = params_log[0], params_log[1], params_log[2]
    # inter-compartment fluxes (rates converted back from log10 scale)
    flux_sc = cs - cc
    flux_cp = np.power(10, a_log) * (cc - cp)
    flux_pm = np.power(10, b_log) * cp
    # sigmoid gate: ramps the source->central flux up around t = 10**tsc_log
    t_cubed = np.power(t, 3)
    gate = t_cubed / (t_cubed + np.power(10, 3 * tsc_log))
    return [-flux_sc * gate,
            flux_sc * gate - flux_cp,
            flux_cp - flux_pm]
'''
def objective_func(params_init_log, data, time):
#params = 10 ** parameter_init
params = np.array(np.power(10, params_init_log))
x0 = np.array([params[-2], params[-1], 0])
t_vec = np.linspace(0, time[-1] * 1.6, 201)
res = solve_ivp(lambda t, x: odefun(t, x, params),[t_vec[0], t_vec[-1]], x0, 'RK23', t_eval=t_vec)
x1, x2, x3 = res.y
cTnT_sim = sp_interp.interp1d(t_vec, x3)(time)
obj = np.sum(((data - cTnT_sim) ** 2) * data)
return obj
'''
#FUNZIONE OBIETTIVO MIGLIORE!
def obj_func(params_init_log, data, time):
    """Weighted least-squares objective for fitting the troponin model.

    :param params_init_log: log10-scaled model parameters; the last two
        entries also serve as the initial state (see NOTE below).
    :param data: measured cTnT concentrations.
    :param time: acquisition times matching *data*.
    :return: sum of squared residuals, weighted by the measured values so
        that high concentrations dominate the fit.
    """
    # parameters are optimised in log10 space; convert back to linear scale
    params = np.array(np.power(10, params_init_log))
    # initial state [Cs, Cc, Cp]: no troponin in plasma yet
    x0 = np.array([params[-2], params[-1], 0])
    # simulate past the last measurement (60% margin) on a fine grid
    t_vec = np.linspace(0, time[-1] * 1.6, 201)
    res = odeint(lambda x,t: odefun(t, x, params), x0, t_vec)
    x1, x2, x3 = res.T
    # NOTE(review): params[-1] is used both as an initial concentration
    # above and as a time shift here — confirm this double use is intended.
    cTnT_sim= sp_interp.interp1d(t_vec+params[-1], x3, bounds_error=False, fill_value="extrapolate")(time)
    obj = np.sum(((data - cTnT_sim) ** 2) * data)
    return obj
if __name__=="__main__":
print("Test odefun non optimized.")
data = [1.4300, 1.0900, 0.9820, 1.2200, 1.2600, 0.5410] # array concentrazione troponina
time = [5.1333, 6.2833, 13.1833, 29.9167, 53.8500, 77.2167] # array tempi di acquisizione troponina
parameter_init = [0.005, 0.005, 30, 0.1, 1]
print("parameter_init: ",parameter_init)
params=np.log10(parameter_init)
print("log10_parameter_init: ",params)
x0=[params[-2],params[-1],0]
t_vec_stemi= np.linspace(0, max(time)*1.6, 201)
sol = solve_ivp(odefun, [t_vec_stemi[0], t_vec_stemi[-1]], x0, 'RK23', args=(params,), t_eval=t_vec_stemi)
x1, x2, x3 = sol.y
#Plot test
plt.figure()
plt.plot(t_vec_stemi, x3, label='Sol')
plt.xlabel('Time')
plt.ylabel('Concentration of troponin')
plt.legend()
plt.show()
print("Test odefun optimized")
best_params =[0.5941, 0.095959, 70.1804, 7.058, 3.2886]
print("best_params: ",best_params)
params_log = np.log10(best_params)
print("log10: ",params_log)
x0 = [params_log[-2], params_log[-1], 0]
t_vec_stemi = np.linspace(0, max(time) * 1.6, 201)
sol1 = solve_ivp(odefun, [t_vec_stemi[0], t_vec_stemi[-1]], x0, 'RK23', args=(params_log,), t_eval=t_vec_stemi)
x_1, x_2, x_3 = sol1.y
# Plot test
plt.figure()
plt.plot(t_vec_stemi, x_3, label='Sol1')
plt.xlabel('Time')
plt.ylabel('Concentration of troponin')
plt.legend()
plt.show()
'''
X = odeint(lambda x, t: odefun(t,x, params_log), x0, t_vec_stemi)
plt.plot(t_vec_stemi, X[:,2], label='Test plot')
plt.xlabel('Time')
plt.ylabel('Concentration of troponin')
plt.legend()
plt.show()
'''
print("Test local odefun optimized")
fmincon_params = [-0.8403, -1.7972, 1.8891, -0.3495, 0.6547]
print("best_params: ", fmincon_params)
# params_log = np.log10(fmincon_params)
params_log = fmincon_params
print("log10: ",params_log)
x0 = [params_log[-2], params_log[-1], 0]
t_vec_stemi = np.linspace(0, max(time) * 1.6, 201)
sol1 = solve_ivp(odefun, [t_vec_stemi[0], t_vec_stemi[-1]], x0, 'RK23', args=(params_log,), t_eval=t_vec_stemi)
x_1, x_2, x_3 = sol1.y
# Plot test
plt.figure()
plt.plot(t_vec_stemi, x_3, label='Sol1')
plt.xlabel('Time')
plt.ylabel('Concentration of troponin')
plt.legend()
plt.show()
#sol1 = solve_ivp(odefun, [t_vec_stemi[0], t_vec_stemi[-1]], x0, 'RK23', args=(params_log,), t_eval=t_vec_stemi)
#x_1, x_2, x_3 = sol1.y
# X = solve_ivp(lambda t,x: odefun(t, x, params_log), [t_vec_stemi[0], t_vec_stemi[-1]],x0,'RK23', t_eval=t_vec_stemi)
# x1_,x2_,x3_=X.y
# plt.figure()
# plt.plot(t_vec_stemi, x3, label='X')
# plt.xlabel('Time')
# plt.ylabel('Concentration of troponin')
# plt.legend()
# plt.show()
| FraViss/CBRApy_ | CODE/functions_repository.py | functions_repository.py | py | 4,668 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.power",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": ... |
3882480498 | """
aimall_utils.py v0.1
F. Falcioni, L. J. Duarte, P. L. A. Popelier
Library with function to submit job to AIMAll and get properties values from output
AIMAll version: 19.10.12
Check for updates at github.com/FabioFalcioni
For details about the method, please see XXXXXXX
"""
import numpy as np
from typing import List
def distance_A_B(xyz_file: str, atom_A: int, atom_B: int) -> float:
    """Return the Euclidean distance between two atoms of an XYZ-type file.

    :param xyz_file: path to the coordinates file; the first three lines
        are treated as header.
        NOTE(review): the original comment said "first 2 lines" while the
        code skips 3 — the files used here apparently carry a third header
        line; confirm before reusing with standard .xyz files.
    :param atom_A: 1-based index of the first atom.
    :param atom_B: 1-based index of the second atom.
    :return: distance value
    :rtype: float
    """
    with open(xyz_file) as handle:
        atom_lines = handle.readlines()[3:]
    # each remaining line: "<symbol> x y z [...]" -> keep numeric columns
    coords = [[float(c) for c in line.split()[1:]] for line in atom_lines]
    point_a = coords[atom_A - 1]
    point_b = coords[atom_B - 1]
    squared = sum((point_b[axis] - point_a[axis]) ** 2 for axis in range(3))
    return np.sqrt(squared)
def get_atom_list_wfn(wfn_file: str) -> List[str]:
    """Extract lowercase atom labels (e.g. "o1") from a .wfn wavefunction file.

    Labels are taken from every line containing "(CENTRE" as
    "<symbol><centre-number>".

    :raises ValueError: Atomic labels not found
    :return: list of atom labels
    :rtype: List
    """
    with open(wfn_file, "r") as handle:
        lines = handle.readlines()
    # a valid .wfn has its first centre line at index 2
    if "(CENTRE " not in lines[2]:
        raise ValueError(
            "Atomic labels not found"
        )
    labels = []
    for line in lines:
        if "(CENTRE" in line:
            parts = line.split()
            # element symbol (lowercased) + centre number
            labels.append(parts[0].lower() + str(parts[1]))
    return labels
def get_atom_list_wfx(wfx_file: str) -> List[str]:
    """Extract lowercase atom labels from a .wfx wavefunction file.

    Labels are read from the block between "<Nuclear Names>" and
    "</Nuclear Names>".

    :raises ValueError: Atomic labels not found
    :return: list of atom labels
    :rtype: List
    """
    with open(wfx_file, "r") as handle:
        lines = handle.readlines()
    # NOTE(review): a fixed line index is assumed for the section header,
    # matching AIMAll's .wfx layout.
    if "<Nuclear Names>" not in lines[33]:
        raise ValueError(
            "Atomic labels not found"
        )
    atom_list = []
    for idx, line in enumerate(lines):
        if "<Nuclear Names>" in line:
            cursor = idx + 1
            while "</Nuclear Names>" not in lines[cursor]:
                atom_list.append(lines[cursor].split()[0].lower())
                cursor += 1
    return atom_list
def get_aimall_wfn_energies(wfn_files: List[str]) -> List[float]:
    """Collect the total molecular energy from each .wfn file in a list.

    The value is read from the trailing "TOTAL ENERGY = ..." line.

    :raises ValueError: Energy values not found in wavefunction file
    :return: list of energies
    :rtype: List
    """
    energies = []
    for path in wfn_files:
        with open(path, "r") as handle:
            lines = handle.readlines()
        last_line = lines[-1]
        # a valid .wfn ends with the total-energy line
        if "TOTAL ENERGY " not in last_line:
            raise ValueError("Energy values not found in file: ", path)
        energies.append(float(last_line.split()[3]))
    return energies
def get_aimall_wfx_energies(wfx_files: List[str]) -> List[float]:
    """Collect the total molecular energy from each .wfx file in a list.

    The value sits on the 5th-to-last line, right below the
    "<Energy = T + Vne + Vee + Vnn>" tag.

    :raises ValueError: Energy values not found in wavefunction file
    :return: list of energies
    :rtype: List
    """
    energies = []
    for path in wfx_files:
        with open(path, "r") as handle:
            lines = handle.readlines()
        # the energy tag is expected 6 lines from the end of a valid .wfx
        if "<Energy = T + Vne + Vee + Vnn>" not in lines[-6]:
            raise ValueError("Energy values not found in file: ", path)
        energies.append(float(lines[-5].split()[0]))
    return energies
def intra_property_from_int_file(
    folders: List[str], prop: List[str], atom_list: List[str]
) -> List:
    """intra_property_from_int_file gets IQA intra-atomic energy values from .int files output from AIMAll

    :param folders: AIMAll output folders, one per geometry/point; each must
        contain one "<atom>.int" file per atom label.
    :param prop: names of the energy terms to extract (matched as "term ").
    :param atom_list: atom labels, used both for file names and output labels.
    :raises ValueError: File is empty or does not exist
    :return: Lists of intra-atomic energies with the corresponding intra-atomic labeling
    :rtype: List of floats and strings
    """
    # INTERNAL VARIABLES:
    temp1 = []  # raw values in (folder, atom, property) read order
    temp2 = []  # regrouped so each property's values are contiguous
    temp3 = []  # one value series per atom, spanning all folders
    intra_properties = []  # Output
    contributions_list = []  # Output
    # READ PROPERTIES FROM .INT FILES
    for folder in folders:
        for atom in atom_list:
            file = open(folder + "/" + atom + ".int", "r")
            lines = file.readlines()
            file.close()
            # locate the IQA energy-components section of the .int file
            # NOTE(review): if the markers are missing, start/end stay
            # unbound (first file) or stale (later files) — the guard
            # below can never trigger since lines.index() < len(lines);
            # confirm intended error handling.
            for i in lines:
                if 'IQA Energy Components (see "2EDM Note")' in i:
                    start = lines.index(i)
                elif "2EDM Note:" in i:
                    end = lines.index(i)
            if end >= len(lines):  # Checks the .int file.
                raise ValueError(
                    "File is empty or does not exist: " + folder + "/" + atom + ".int"
                )
            # keep only the lines between the two markers
            lines = [lines[i] for i in range(start + 1, end)]
            # the value is the last whitespace-separated token of the line
            for term in prop:
                for i in lines:
                    if (term + " ") in i:
                        temp1.append(float(i.split()[-1]))
    # ORGANIZE ARRAY ORDER: group by property, then by atom across folders
    for j in range(len(prop)):
        for i in range(j, len(temp1), len(prop)):
            temp2.append(temp1[i])
    for j in range(len(atom_list)):
        temp3.append([temp2[i] for i in range(j, len(temp2), len(atom_list))])
    start = 0
    for j in range(len(prop)):
        for atom_prop in temp3:
            intra_properties.append(
                [atom_prop[i] for i in range(start, (j + 1) * len(folders))]
            )
            start = (j + 1) * len(folders)
    # CREATE CONTRIBUTIONS LIST ARRAY: one "<prop>-<atom>" label per series
    for a in prop:
        for b in atom_list:
            contributions_list.append(a + "-" + b)
    return intra_properties, contributions_list
def inter_property_from_int_file(
    folders: List[str], prop: List[str], atom_list: List[str]
) -> List:
    """inter_property_from_int_file gets IQA inter-atomic energy values from .int files output from AIMAll

    :param folders: AIMAll output folders, one per geometry/point; each must
        contain one "<a>_<b>.int" file per unordered atom pair.
    :param prop: names of the energy terms to extract (matched as "term ").
    :param atom_list: atom labels; pairs are formed with the first atom
        preceding the second in this list.
    :raises ValueError: File is empty or does not exist
    :return: Lists of inter-atomic energies with the corresponding inter-atomic labeling
    :rtype: List of floats and strings
    """
    # INTERNAL VARIABLES:
    temp1 = []  # raw values in (folder, pair, property) read order
    temp2 = []  # regrouped so each property's values are contiguous
    temp3 = []  # one value series per atom pair, spanning all folders
    inter_properties = []  # Output
    contributions_list = []  # Output
    for path in folders:
        for i in range(len(atom_list)):
            atom1 = atom_list[i]
            for j in range(i + 1, len(atom_list)):
                atom2 = atom_list[j]
                file = open(path + "/" + atom1 + "_" + atom2 + ".int", "r")
                lines = file.readlines()
                file.close()
                # locate the energy-components section of the pair file
                # NOTE(review): the loop variable below shadows the outer
                # atom index `i`; harmless only because `i` is re-bound by
                # the outer range() before its next use — confirm when
                # modifying.  As in the intra variant, start/end may be
                # stale/unbound if the markers are missing, and the
                # end >= len(lines) guard can never trigger.
                for i in lines:
                    if ' Energy Components (See "2EDM Note"):' in i:
                        start = lines.index(i)
                    elif "2EDM Note:" in i:
                        end = lines.index(i)
                if end >= len(lines):  # Checks the .int file.
                    raise ValueError(
                        "File is empty or does not exist: "
                        + path
                        + "/"
                        + atom1
                        + "_"
                        + atom2
                        + ".int"
                    )
                # keep only the lines between the two markers
                lines = [lines[i] for i in range(start + 1, end)]
                for term in prop:
                    for i in lines:
                        if (term + " ") in i:
                            temp1.append(float(i.split()[-1]))
    # ORGANIZE ARRAY ORDER: group by property, then by pair across folders
    for j in range(len(prop)):
        for i in range(j, len(temp1), len(prop)):
            temp2.append(temp1[i])
    # number of unordered atom pairs: n * (n - 1) / 2
    for j in range(int(len(atom_list) * (len(atom_list) - 1) / 2)):
        temp3.append(
            [
                temp2[i]
                for i in range(
                    j, len(temp2), int(len(atom_list) * (len(atom_list) - 1) / 2)
                )
            ]
        )
    start = 0
    for j in range(len(prop)):
        for atom_prop in temp3:
            inter_properties.append(
                [atom_prop[i] for i in range(start, (j + 1) * len(folders))]
            )
            start = (j + 1) * len(folders)
    # CREATE CONTRIBUTIONS LIST ARRAY: one "<prop>-<a>_<b>" label per series
    for a in prop:
        for i in range(len(atom_list)):
            for j in range(i + 1, len(atom_list)):
                contributions_list.append(a + "-" + atom_list[i] + "_" + atom_list[j])
    return inter_properties, contributions_list
def charge_transfer_and_polarisation_from_int_file(
    folders: List[str],
    atom_list: List[str],
    inter_properties: List[str],
    xyz_files: List[str],
) -> List:
    """charge_transfer_and_polarisation_from_int_file gets IQA polarisation and charge-transfer energies
    values by computing them as Vct = qAqB/rAB and Vpl = Vcl - Vct

    :param folders: AIMAll output directories, one per geometry.
    :param atom_list: atom labels; each <atom>.int file holds that atom's net charge.
    :param inter_properties: inter-atomic energy series; the leading n*(n-1)/2
        entries are taken to be the classical (Vcl) terms.
    :param xyz_files: geometry files used by distance_A_B (defined elsewhere
        in this module) to compute inter-atomic distances.
    :raises ValueError: File is empty or does not exist
    :return: Returns lists of energies and labels for both Vct and Vpl
    :rtype: List of floats and strings
    """
    # INTERNAL VARIABLES:
    n = len(atom_list)
    f = len(folders)
    temp1 = []  # Temporary array: raw Vct values, folder-major
    temp2 = []  # Temporary array: Vct regrouped per atom pair
    temp3 = []  # Temporary array: flat Vpl values
    net_charges = []  # Per-folder lists of atomic net charges
    charge_transfer_properties = []  # Output
    polarisation_properties = []  # Output
    contributions_list_CT = []  # Output
    contributions_list_PL = []  # Output
    # CREATE CONTRIBTUIONS LIST ARRAY
    for i in range(len(atom_list)):
        for j in range(i + 1, len(atom_list)):
            contributions_list_PL.append(
                "Vpl_IQA(A,B)-" + atom_list[i] + "_" + atom_list[j]
            )
            contributions_list_CT.append(
                "Vct_IQA(A,B)-" + atom_list[i] + "_" + atom_list[j]
            )
    # READ NET-CHARGE PROPERTIES FROM .INT FILES
    for folder in folders:
        net_charge_group = []
        for i in range(0, len(atom_list)):
            file = open(folder + "/" + atom_list[i] + ".int", "r")
            lines = file.readlines()
            file.close()
            # Find the basin-integration section that contains the net charge.
            for line in lines:
                if "Results of the basin integration:" in line:
                    start = lines.index(line)
                elif "|Dipole|" in line:
                    end = lines.index(line)
            # NOTE(review): `end` can only exceed len(lines) via a stale value
            # from a previous, longer file; a missing marker in the very first
            # file raises NameError instead.
            if end >= len(lines):  # Checks the .int file.
                raise ValueError(
                    "File is empty or does not exist: "
                    + folder
                    + "/"
                    + atom_list[i]
                    + ".int"
                )
            lines = [lines[j] for j in range(start + 1, end)]
            # Net charge q(A): fourth-from-last column of the section's first line.
            Q = float(lines[0].split()[-4])
            net_charge_group.append(Q)
        net_charges.append(net_charge_group)
    # GET CHARGE TRANSFER TERMS
    # Vct(A,B) = q(A)q(B)/r(A,B); 1.8897259886 converts the distance from
    # Angstrom to bohr so the energy is in atomic units.
    for k in range(len(net_charges)):
        for i in range(len(atom_list)):
            for j in range(i + 1, len(atom_list)):
                temp1.append(
                    (net_charges[k][i] * net_charges[k][j])
                    / ((distance_A_B(xyz_files[k], i + 1, j + 1)) * 1.8897259886)
                )
    # ORGANIZE CHARGE TRANSFER ARRAY ORDER
    for j in range(int(n * (n - 1) / 2)):
        temp2.append([temp1[i] for i in range(j, len(temp1), int(n * (n - 1) / 2))])
    start = 0
    # NOTE(review): `start` is never advanced in this loop, so every pair takes
    # indices [0, f) — verify this matches the intended slicing.
    for atom_prop in temp2:
        charge_transfer_properties.append([atom_prop[i] for i in range(start, f)])
    # Isolate Vcl terms
    classical_properties = inter_properties[: len(charge_transfer_properties)]
    # OBTAIN POLARISATION TERMS AS Vpl = Vcl - Vct
    for i in range(len(classical_properties)):
        for j in range(len(classical_properties[i])):
            temp3.append(classical_properties[i][j] - charge_transfer_properties[i][j])
    # ORGANIZE POLARISATION ARRAY ORDER
    # Chunk the flat list into ceil(len/f) groups of length f.
    polarisation_properties = [
        temp3[i * f : (i + 1) * f] for i in range((len(temp3) + f - 1) // f)
    ]
    return (
        charge_transfer_properties,
        contributions_list_CT,
        polarisation_properties,
        contributions_list_PL,
    )
| popelier-group/REG | REG/aimall_utils.py | aimall_utils.py | py | 13,205 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "numpy.sqrt",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 1... |
71244448103 | import argparse
import os
import random
import tensorflow as tf
import re
import subprocess
import pyonmttok
import threading
import flask
from flask import request, jsonify
# TensorFlow Addons lazily loads custom ops. So we call the op with invalid inputs
# just to trigger the registration.
# See also: https://github.com/tensorflow/addons/issues/1151.
import tensorflow_addons as tfa
try:
    # Intentionally invalid call: it only serves to trigger lazy custom-op
    # registration (see the comment above); the error is expected and ignored.
    tfa.seq2seq.gather_tree(0, 0, 0, 0)
except tf.errors.InvalidArgumentError:
    pass
class EnDeTranslator(object):
    """Singleton wrapper around an exported EN->DE translation SavedModel.

    Loading the SavedModel and tokenizer is expensive, so a single shared
    instance is created lazily via ``getInstance`` and reused by handlers.
    """

    __shared_instance = None

    @staticmethod
    def getInstance(export_dir):
        """Static Access Method: return the shared translator, creating it on first use.

        Bug fix: the original assigned the new instance to a *local* variable
        named ``__shared_instance`` (a dead store) and only worked because the
        constructor registers itself as the shared instance as a side effect.
        """
        if EnDeTranslator.__shared_instance is None:
            EnDeTranslator(export_dir)  # constructor registers itself
        return EnDeTranslator.__shared_instance

    def __init__(self, export_dir):
        """Load the SavedModel and SentencePiece tokenizer from *export_dir*.

        :raises Exception: if the singleton instance already exists.
        """
        if EnDeTranslator.__shared_instance is not None:
            raise Exception("This class is a singleton class !")
        imported = tf.saved_model.load(export_dir)
        self._translate_fn = imported.signatures["serving_default"]
        sp_model_path = os.path.join(export_dir, "assets.extra", "wmtende.model")
        self._tokenizer = pyonmttok.Tokenizer("none", sp_model_path=sp_model_path)
        EnDeTranslator.__shared_instance = self

    def translate(self, texts):
        """Translates a batch of texts."""
        inputs = self._preprocess(texts)
        outputs = self._translate_fn(**inputs)
        return self._postprocess(outputs)

    def _preprocess(self, texts):
        """Tokenize *texts* and pad all rows to a common length for the model."""
        all_tokens = []
        lengths = []
        max_length = 0
        for text in texts:
            tokens, _ = self._tokenizer.tokenize(text)
            length = len(tokens)
            all_tokens.append(tokens)
            lengths.append(length)
            max_length = max(max_length, length)
        for tokens, length in zip(all_tokens, lengths):
            if length < max_length:
                # Pad with empty tokens so the batch is rectangular.
                tokens += [""] * (max_length - length)
        inputs = {
            "tokens": tf.constant(all_tokens, dtype=tf.string),
            "length": tf.constant(lengths, dtype=tf.int32)}
        return inputs

    def _postprocess(self, outputs):
        """Detokenize the first hypothesis of each batch entry."""
        texts = []
        for tokens, length in zip(outputs["tokens"].numpy(), outputs["length"].numpy()):
            # Index 0 selects the top hypothesis; trim to its true length.
            tokens = tokens[0][:length[0]].tolist()
            texts.append(self._tokenizer.detokenize(tokens))
        return texts
#parser = argparse.ArgumentParser(description="Translation client example")
#parser.add_argument("export_dir", help="Saved model directory")
#args = parser.parse_args()
# Random per-process id, served at "/" (presumably to tell replicas apart — confirm).
r = random.randrange(1, 100)
def get_my_ip():
    """Return the host's IP line as reported by ``hostname -I``.

    Note: the return value is ``str()`` of the raw bytes output (e.g.
    ``"b'1.2.3.4 \\n'"``); the caller strips the wrapper by slicing.
    """
    proc = subprocess.Popen(
        ['hostname', '-I'],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    raw_output, _ = proc.communicate()
    return str(raw_output)
ip = get_my_ip()
# Strip the "b'" prefix and the trailing " \n'" left by str() of raw bytes.
ip = str(ip[2:-3])

# Shared translator instance; populated by load_model() or lazily by handlers.
translator = None

def load_model():
    """Eagerly initialise the shared EnDeTranslator singleton."""
    global translator
    translator = EnDeTranslator.getInstance("averaged-ende-export500k-v2")

app = flask.Flask(__name__)
app.config["DEBUG"] = True
@app.route("/get_ip")
def get_ip():
return ip
# NOTE(review): this view shadows the built-in `dir`; renaming it would also
# rename the Flask endpoint ("dir"), so it is left as-is and only documented.
@app.route('/dir')
def dir():
    """Return a string listing of the current working directory's contents."""
    return str(os.listdir(os.getcwd()))
@app.route('/')
def index():
    """Return this process's random id (see `r` above)."""
    return str(r)
@app.route('/api/translate/<paragraph>', methods=['GET'])
def get_translation_english_to_german(paragraph):
    """Translate a single URL-path paragraph from English to German."""
    engine = EnDeTranslator.getInstance("averaged-ende-export500k-v2")
    print("API called:", paragraph)
    translated = engine.translate([paragraph])
    payload = {
        'output_val': str('\n'.join(translated)),
        'ip_address': ip,
    }
    return jsonify(payload)
@app.route('/api/translate/', methods=['POST'])
def post_translation_english_to_german():
    """Translate a form-posted paragraph, split on sentence ends and newlines."""
    paragraph = request.form['paragraph']
    print("API called:", paragraph)
    segments = re.split('\.|\n', paragraph)
    # Drop empty/whitespace-only fragments produced by the split.
    segments = [seg for seg in segments if seg not in ['', ' ']]
    print('Inputs:', segments)
    global translator
    translator = EnDeTranslator.getInstance("averaged-ende-export500k-v2")
    translated = translator.translate(segments)
    payload = {
        'output_val': str('\n'.join(translated)),
        'ip_address': ip,
    }
    return jsonify(payload)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080)
| bhavinkotak07/distributed_machine_translation | model_api.py | model_api.py | py | 4,096 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow_addons.seq2seq.gather_tree",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tensorflow_addons.seq2seq",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.errors",
"line_number": 17,
"usage_type": "attribute"
}... |
32994978761 | """
Executor class.
"""
from __future__ import unicode_literals
import yaml
import subprocess
from voluptuous import Schema
from contextlib import closing
from functools import partial
from six import PY2
from locale import getpreferredencoding
class BaseExecutor(object):
    """
    Base class for command executors.

    Subclasses implement ``execute_one`` to run a single command; this class
    provides option validation, naming and the batch-execution loop.
    """
    options_schema = Schema({})

    def __init__(self, options=None):
        # Validate (and default) the options through the schema.
        self.options = self.options_schema(options or {})

    @property
    def full_name(self):
        """
        Get the full name of the executor.

        :returns: The dotted-path name of this instance's class, including
            its defining module.
        """
        return '{module}.{name}'.format(
            module=self.__module__,
            name=self.__class__.__name__,
        )

    def execute(self, environment, commands, display):
        """
        Execute the specified commands in order, stopping at the first failure.

        :param environment: The environment variables dictionary.
        :param commands: A list of commands to execute.
        :param display: The display used for command output and reporting.
        :returns: True if every command succeeded, False otherwise.
        """
        display.set_context(commands=commands)

        for position, cmd in enumerate(commands):
            with display.command(
                position,
                cmd,
            ) as result:
                result.returncode = self.execute_one(
                    environment=environment,
                    command=cmd,
                    output=partial(display.command_output, position),
                )

                if result.returncode is None:
                    raise RuntimeError(
                        "No returncode specified for command execution "
                        "({})".format(
                            cmd,
                        ),
                    )

                if result.returncode != 0:
                    return False

        return True
def executor_representer(dumper, executor):
    """YAML representer: dump an executor as its dotted name, plus options if any."""
    if not executor.options:
        # No options: a bare scalar with the dotted class path suffices.
        return dumper.represent_scalar(
            'tag:yaml.org,2002:str',
            executor.full_name,
        )
    return dumper.represent_mapping(
        'tag:yaml.org,2002:map',
        {
            'name': executor.full_name,
            'options': executor.options,
        },
    )


yaml.add_multi_representer(BaseExecutor, executor_representer)
class ShellExecutor(BaseExecutor):
    """
    An executor that runs commands through the system shell.
    """

    def execute_one(self, environment, command, output):
        """Run *command* in a shell, streaming its output; return the exit code."""
        if PY2:
            # Python 2's subprocess doesn't deal well with unicode commands.
            command = command.encode(getpreferredencoding())

        process = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=True,
            env=environment,
        )

        with closing(process.stdout):
            # Stream combined stdout/stderr in 4 KiB chunks until EOF.
            for chunk in iter(lambda: process.stdout.read(4096), b''):
                output(chunk)

        process.wait()
        return process.returncode
| freelan-developers/plix | plix/executors.py | executors.py | py | 3,338 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "voluptuous.Schema",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "yaml.add_multi_representer",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "six.PY... |
5054739529 | from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
from .views import *
# NOTE: urlpatterns must be a *list*. It was previously a set literal, which
# makes pattern ordering non-deterministic — URL resolution is order-sensitive.
urlpatterns = [
    url(r'^$', default, name="default"),
    url(r'^profile/$', ProfileCreateView, name="profile"),
    url(r'^profile/(?P<pk>[0-9]+)/$', ProfileDetailsView, name="profile_details"),
    url(r'^session/$', SessionCreateView, name="session"),
    url(r'^session/(?P<pk>[0-9]+)/$', SessionDetailsView, name="session_details"),
    url(r'^question/$', QuestionCreateView, name="question"),
    url(r'^question/(?P<pk>[0-9]+)/$', QuestionDetailsView, name="question_details"),
    url(r'^response/$', UserResponseCreateView, name="response"),
    url(r'^response/(?P<pk>[0-9]+)/$', UserResponseDetailsView, name="response_details"),
    url(r'^savesession/$', SaveSession, name="save_session"),
    url(r'^saveresponse/(?P<filename>[^/]+)/$', SaveResponse, name="save_response"),
    url(r'^getresponse/$', GetResponse, name="get_response"),
]
urlpatterns = format_suffix_patterns(urlpatterns) | Willievuong/Stutter | backend/database/urls.py | urls.py | py | 1,032 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.co... |
41014917696 | import matplotlib.pyplot as plt
import matplotlib.animation as animation
from qbstyles import mpl_style
import numpy as np
import time
import os.path
import re
# Dark-themed figure for the live MCS (multichannel scaler) readout window.
fig = plt.figure(facecolor='#1a1e24',edgecolor='black')
fig.canvas.set_window_title('MCS Readings')
ax1 = fig.add_subplot(1,1,1)
mpl_style(dark=True)
# Caches of the last-plotted state; animate() uses them to skip redundant redraws.
data = []
old_avg_data = []
old_bin_size = 0
old_start_time = -1
old_stop_time = -1
old_plot_averages = -1
ax1.set_facecolor('#1a1e24')
plt.grid(b=True, which='minor', color='#666666', linestyle='-')
# Directory where per-shot cumulative-count files are dropped for averaging.
avg_path = 'C:/Users/sean/Documents/labscript/labscript_suite/logs/mcs_avg/'
old_list_of_files = os.listdir(avg_path)
# Regex matching any numeric constant (integer, float or scientific notation).
numeric_const_pattern = '[-+]? (?: (?: \d* \. \d+ ) | (?: \d+ \.? ) )(?: [Ee] [+-]? \d+ ) ?'
rx = re.compile(numeric_const_pattern, re.VERBOSE)
# The experiment name is the last word of the labconfig's second line.
with open("C:/Users/sean/Documents/labscript/labscript_suite/labconfig/RSPE-052096.ini", "r") as file:
    exp_name = file.readline().rstrip()
    exp_name = file.readline().rstrip().split(' ')[-1]
connection_table_path = "C:/Users/sean/Documents/labscript/labscript_suite/userlib/labscriptlib/"+exp_name+"/connectiontable.py"
def sample_no_to_time(s):
    """Convert a 1-based 10 kHz sample number to a time in seconds."""
    # Each sample is 0.1 ms; sample 1 corresponds to t = 0.0001 s.
    elapsed = (s - 1) / 10000
    return 0.0001 + elapsed
def cumulative2bin(arr, bin_size):  # bin_size in sample numbers
    """Convert cumulative counts into per-bin counts.

    Samples `arr` every `bin_size` entries (taking the last sample of each
    bin) and differences consecutive samples. The first two bins are zeroed
    whenever more than one bin exists.
    """
    result = []
    previous = 0
    for idx in range(bin_size - 1, len(arr), bin_size):
        current = int(arr[idx])
        result.append(current - previous)
        previous = current
    if len(result) > 1:
        result[0] = 0
        result[1] = 0
    return result
def floatfromstring(s):
    """Extract the first numeric constant in *s* (via the module regex `rx`),
    rounded to one decimal place; return None when no number is present."""
    match = rx.search(s)
    if match is None:
        return None
    return round(float(match.group(0)), 1)
def animate(i):
    """FuncAnimation callback: re-read MCS data and config, redraw if changed."""
    newdata = [line.rstrip('\n') for line in open('C:/Users/sean/Documents/labscript/labscript_suite/logs/mcs_temp.txt')]
    global data, old_bin_size, old_start_time, old_stop_time, old_plot_averages, old_avg_data, old_list_of_files
    # NOTE(review): this handle is never closed explicitly.
    file = open(connection_table_path)
    all_lines = file.readlines()
    start_time = 0
    stop_time = -1
    bin_size = 1
    plot_averages = 1
    list_of_files = os.listdir(avg_path)
    # Parse the plotting parameters out of the experiment's connection table.
    for line in all_lines:
        line=line.strip()
        if line:
            if(line[0:14]=="MCS_start_time"):
                start_time = floatfromstring(line)
            elif(line[0:13]=="MCS_stop_time"):
                stop_time = floatfromstring(line)
            elif(line[0:12]=="MCS_bin_size"):
                bin_size = floatfromstring(line)
            elif(line[0:13]=="plot_averages"):
                plot_averages = int(floatfromstring(line))
    # Validate: bin size must be a positive multiple of 0.1 ms, times sensible.
    if(bin_size<0.1 or bin_size*10!=int(bin_size*10) or start_time<0 or stop_time<=0 or plot_averages<=0):
        ax1.set_title('ERROR, CHECK YOUR MCS PARAMETERS\nMCS DATA NOT BEING SAVED', color = '#F35654', fontsize=7)
        ax1.plot([])
    # Only redraw when the data or any plotting parameter actually changed.
    elif(newdata != data or bin_size != old_bin_size or old_start_time!=start_time or old_stop_time != stop_time or plot_averages != old_plot_averages or list_of_files != old_list_of_files):
        ax1.clear()
        ax1.set_xlabel('Time (ms)', color = 'white', fontsize=7)
        plt.xlim(start_time*1000, stop_time*1000)
        plt.minorticks_on()
        # Remember the state we are about to plot.
        data = newdata
        old_bin_size=bin_size
        old_start_time = start_time
        old_stop_time = stop_time
        old_plot_averages = plot_averages
        if(plot_averages == 1):
            # Single-shot mode: bin the cumulative counts and plot them.
            ax1.set_title('Bin size: %.1f ms' %bin_size, color = '#F35654', fontsize=7)
            # ax1.plot(np.linspace(start=0.1,stop=sample_no_to_time(len(newdata)), num = len(newdata)), newdata, marker='.', linewidth=0.6, color='#96ad68')
            bin_array = cumulative2bin(newdata,int(bin_size*10))
            x = np.linspace(start=100+bin_size, stop=100+len(bin_array)*bin_size, num=len(bin_array))
            ax1.plot(x, bin_array, marker='.', linewidth=0.6, color='#96ad68')
        elif(plot_averages>1):
            # Averaging mode: sum the per-shot files, then plot the mean.
            avg_data=[]
            temp = []
            # list_of_files = os.listdir(avg_path)
            old_list_of_files = list_of_files
            if len(list_of_files) > plot_averages:
                # Too many shot files for the requested average size.
                # oldest_file = sorted(os.listdir(avg_path), key=lambda x:os.path.getctime(os.path.join(avg_path,x)))[0]
                # os.remove(avg_path+oldest_file)
                # list_of_files = os.listdir(avg_path)
                ax1.set_title('ERROR, RUN ANOTHER SHOT OR INCREASE plot_averages TO '+str(len(list_of_files)), color = '#F35654', fontsize=7)
                ax1.plot([0])
            elif len(list_of_files)==plot_averages:
                # Element-wise sum of all shot files, then divide by the count.
                for f in list_of_files:
                    f_path = avg_path+f
                    temp = [line.rstrip('\n') for line in open(f_path)]
                    if len(avg_data)==0:
                        avg_data = temp
                    else:
                        avg_data = list(map(lambda x,y:int(x)+int(y), temp, avg_data))
                avg_data[:] = [x/plot_averages for x in avg_data]
                avg_bin_data = cumulative2bin(avg_data,int(bin_size*10))
                x = np.linspace(start=100+bin_size, stop=100+len(avg_bin_data)*bin_size, num=len(avg_bin_data))
                ax1.set_title('PLOTTING AVERAGE OF '+str(plot_averages)+' SHOTS\nBin size: %.1f ms' %bin_size, color = '#F35654', fontsize=7)
                ax1.plot(x, avg_bin_data, marker='.', linewidth=0.6, color='#3366cc')
                # save_data = "\n".join(map(str, avg_data))
                # with open('C:/Users/sean/Documents/labscript/labscript_suite/logs/test.txt','w+') as savefile:
                #     savefile.write(save_data)
            else:
                # Still waiting for enough shot files to average over.
                ax1.set_title('PLOTTING AVERAGE OF '+str(plot_averages)+' SHOTS\n'+str(len(list_of_files))+' SHOTS RECEIVED\n'+'Bin size: %.1f ms' %bin_size, color = '#F35654', fontsize=7)
                ax1.plot([0])
# Keep a module-level reference: FuncAnimation is timer-driven and stops
# updating if the object is garbage-collected.
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()
| joymkj/labscript | labscript_suite/labscript_utils/mcs.py | mcs.py | py | 5,998 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "qbstyles.mpl_style",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplot... |
17895779950 | import os
import tempfile
from typing import List
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import data # local file import from experimental.shoshin
def _make_temp_dir() -> str:
return tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
def _make_serialized_image(size: int) -> bytes:
  """Return a random size x size RGB image encoded as PNG bytes."""
  pixels = np.random.randint(0, 255, size=(size, size, 3), dtype=np.uint8)
  encoded = tf.io.encode_png(pixels)
  return encoded.numpy()
def _make_example(
    example_id: str,
    longitude: float,
    latitude: float,
    encoded_coordinates: str,
    label: float,
    string_label: float,
    patch_size: int,
    large_patch_size: int,
) -> tf.train.Example:
  """Build a tf.train.Example with random pre/post images and the given metadata."""
  example = tf.train.Example()
  feature_map = example.features.feature

  def add_bytes(name, value):
    # Single bytes feature.
    feature_map[name].bytes_list.value.append(value)

  def add_floats(name, values):
    # Float feature with one or more values.
    feature_map[name].float_list.value.extend(values)

  add_bytes('example_id', example_id.encode())
  add_floats('coordinates', (longitude, latitude))
  add_bytes('encoded_coordinates', encoded_coordinates.encode())
  add_floats('label', (label,))
  add_bytes('string_label', string_label.encode())
  # Small and large patches for both the pre- and post-disaster images.
  add_bytes('pre_image_png', _make_serialized_image(patch_size))
  add_bytes('post_image_png', _make_serialized_image(patch_size))
  add_bytes('pre_image_png_large', _make_serialized_image(large_patch_size))
  add_bytes('post_image_png_large', _make_serialized_image(large_patch_size))
  return example
def _write_tfrecord(examples: List[tf.train.Example], path: str) -> None:
  """Serialize *examples* into a TFRecord file at *path*."""
  with tf.io.TFRecordWriter(path) as writer:
    for ex in examples:
      writer.write(ex.SerializeToString())
def _create_test_data():
  """Write labeled train/test and unlabeled TFRecord fixtures; return their paths."""
  examples_dir = _make_temp_dir()
  labeled_train_path = os.path.join(
      examples_dir, 'train_labeled_examples.tfrecord')
  labeled_test_path = os.path.join(
      examples_dir, 'test_labeled_examples.tfrecord')
  unlabeled_path = os.path.join(
      examples_dir, 'unlabeled_examples.tfrecord')

  train_examples = [
      _make_example('1st', 0, 0, 'A0', 0, 'no_damage', 64, 256),
      _make_example('2nd', 0, 1, 'A1', 0, 'no_damage', 64, 256),
      _make_example('3rd', 0, 2, 'A2', 1, 'major_damage', 64, 256),
  ]
  test_examples = [
      _make_example('4th', 1, 0, 'B0', 0, 'no_damage', 64, 256),
  ]
  # Unlabeled examples carry the sentinel label -1 / 'bad_example'.
  unlabeled_examples = [
      _make_example('5th', 2, 0, 'C0', -1, 'bad_example', 64, 256),
      _make_example('6th', 2, 1, 'C1', -1, 'bad_example', 64, 256),
      _make_example('7th', 2, 2, 'C2', -1, 'bad_example', 64, 256),
      _make_example('8th', 2, 3, 'C3', -1, 'bad_example', 64, 256),
  ]
  _write_tfrecord(train_examples, labeled_train_path)
  _write_tfrecord(test_examples, labeled_test_path)
  _write_tfrecord(unlabeled_examples, unlabeled_path)
  return labeled_train_path, labeled_test_path, unlabeled_path
class SkaiDatasetTest(tfds.testing.DatasetBuilderTestCase):
  """Tests for Skai dataset."""

  DATASET_CLASS = data.SkaiDataset
  # Expected example counts per split; must match _create_test_data().
  SPLITS = {
      'labeled_train': 3,
      'labeled_test': 1,
      'unlabeled': 4
  }
  EXAMPLE_DIR = _make_temp_dir()
  BUILDER_CONFIG_NAMES_TO_TEST = ['test_config']
  SKIP_TF1_GRAPH_MODE = True

  @classmethod
  def setUpClass(cls):
    """Generate the fixture TFRecords and point a builder config at them."""
    super().setUpClass()
    labeled_train_path, labeled_test_path, unlabeled_path = _create_test_data()
    cls.DATASET_CLASS.BUILDER_CONFIGS = [
        data.SkaiDatasetConfig(
            name='test_config',
            labeled_train_pattern=labeled_train_path,
            labeled_test_pattern=labeled_test_path,
            unlabeled_pattern=unlabeled_path)
    ]
if __name__ == '__main__':
tfds.testing.test_main()
| google/uncertainty-baselines | experimental/shoshin/data_test.py | data_test.py | py | 3,743 | python | en | code | 1,305 | github-code | 36 | [
{
"api_name": "tempfile.mkdtemp",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint"... |
7005651266 | """Added_TokenBlackList_table
Revision ID: c2d76eeeeb15
Revises: 703db21eb105
Create Date: 2021-11-20 00:13:10.270320
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c2d76eeeeb15'
down_revision = '703db21eb105'
branch_labels = None
depends_on = None
def upgrade():
    """Create the token_black_list table (jti + blacklist timestamp)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('token_black_list',
    sa.Column('id', sa.INTEGER(), nullable=False),
    sa.Column('jti', sa.String(length=36), nullable=False),
    sa.Column('blacklisted_at', sa.DateTime(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the token_black_list table, reverting this migration."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('token_black_list')
    # ### end Alembic commands ###
| nazarkohut/room_book | migrations/versions/c2d76eeeeb15_added_tokenblacklist_table.py | c2d76eeeeb15_added_tokenblacklist_table.py | py | 841 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.INTEG... |
5893250281 | #!/usr/bin/env python3
"""
hw2main.py
UNSW COMP9444 Neural Networks and Deep Learning
DO NOT MODIFY THIS FILE
"""
import torch
from torchtext import data
from config import device
import student
def main():
    """Train the student's network on train.json, then validate if a split is set."""
    print("Using device: {}"
          "\n".format(str(device)))

    # Load the training dataset, and create a dataloader to generate a batch.
    textField = data.Field(lower=True, include_lengths=True, batch_first=True,
                           tokenize=student.tokenise,
                           preprocessing=student.preprocessing,
                           postprocessing=student.postprocessing,
                           stop_words=student.stopWords)
    labelField = data.Field(sequential=False, use_vocab=False, is_target=True)

    dataset = data.TabularDataset('train.json', 'json',
                                  {'reviewText': ('reviewText', textField),
                                   'rating': ('rating', labelField),
                                   'businessCategory': ('businessCategory', labelField)})

    textField.build_vocab(dataset, vectors=student.wordVectors)

    # Allow training on the entire dataset, or split it for training and validation.
    if student.trainValSplit == 1:
        trainLoader = data.BucketIterator(dataset, shuffle=True,
                                          batch_size=student.batchSize,
                                          sort_key=lambda x: len(x.reviewText),
                                          sort_within_batch=True)
    else:
        train, validate = dataset.split(split_ratio=student.trainValSplit)

        trainLoader, valLoader = data.BucketIterator.splits((train, validate),
                                                            shuffle=True,
                                                            batch_size=student.batchSize,
                                                            sort_key=lambda x: len(x.reviewText),
                                                            sort_within_batch=True)

    # Get model and optimiser from student.
    net = student.net.to(device)
    lossFunc = student.lossFunc
    optimiser = student.optimiser

    # Train.
    for epoch in range(student.epochs):
        runningLoss = 0

        for i, batch in enumerate(trainLoader):
            # Get a batch and potentially send it to GPU memory.
            inputs = textField.vocab.vectors[batch.reviewText[0]].to(device)
            length = batch.reviewText[1].to(device)
            rating = batch.rating.to(device)
            businessCategory = batch.businessCategory.to(device)

            # PyTorch calculates gradients by accumulating contributions to them
            # (useful for RNNs). Hence we must manually set them to zero before
            # calculating them.
            optimiser.zero_grad()

            # Forward pass through the network.
            ratingOutput, categoryOutput = net(inputs, length)
            loss = lossFunc(ratingOutput, categoryOutput, rating, businessCategory)

            # Calculate gradients.
            loss.backward()

            # Minimise the loss according to the gradient.
            optimiser.step()

            runningLoss += loss.item()

            if i % 32 == 31:
                # Report the mean loss over the last 32 batches.
                print("Epoch: %2d, Batch: %4d, Loss: %.3f"
                      % (epoch + 1, i + 1, runningLoss / 32))
                runningLoss = 0

    # Save model.
    torch.save(net.state_dict(), 'savedModel.pth')
    print("\n"
          "Model saved to savedModel.pth")

    # Test on validation data if it exists.
    if student.trainValSplit != 1:
        net.eval()

        correctRatingOnlySum = 0
        correctCategoryOnlySum = 0
        bothCorrectSum = 0
        with torch.no_grad():
            for batch in valLoader:
                # Get a batch and potentially send it to GPU memory.
                inputs = textField.vocab.vectors[batch.reviewText[0]].to(device)
                length = batch.reviewText[1].to(device)
                rating = batch.rating.to(device)
                businessCategory = batch.businessCategory.to(device)

                # Convert network output to integer values.
                ratingOutputs, categoryOutputs = student.convertNetOutput(*net(inputs, length))

                # Calculate performance
                correctRating = rating == ratingOutputs.flatten()
                correctCategory = businessCategory == categoryOutputs.flatten()

                correctRatingOnlySum += torch.sum(correctRating & ~correctCategory).item()
                correctCategoryOnlySum += torch.sum(correctCategory & ~correctRating).item()
                bothCorrectSum += torch.sum(correctRating & correctCategory).item()

        correctRatingOnlyPercent = correctRatingOnlySum / len(validate)
        correctCategoryOnlyPercent = correctCategoryOnlySum / len(validate)
        bothCorrectPercent = bothCorrectSum / len(validate)
        neitherCorrectPer = 1 - correctRatingOnlyPercent \
                              - correctCategoryOnlyPercent \
                              - bothCorrectPercent

        # Weighted score: full credit when both outputs are correct, partial
        # credit when only one of the two is.
        score = 100 * (bothCorrectPercent
                       + 0.5 * correctCategoryOnlyPercent
                       + 0.1 * correctRatingOnlyPercent)

        print("\n"
              "Rating incorrect, business category incorrect: {:.2%}\n"
              "Rating correct, business category incorrect: {:.2%}\n"
              "Rating incorrect, business category correct: {:.2%}\n"
              "Rating correct, business category correct: {:.2%}\n"
              "\n"
              "Weighted score: {:.2f}".format(neitherCorrectPer,
                                              correctRatingOnlyPercent,
                                              correctCategoryOnlyPercent,
                                              bothCorrectPercent, score))
if __name__ == '__main__':
main()
| gakkistyle/comp9444 | ass2/hw2/hw2main.py | hw2main.py | py | 5,907 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "config.device",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "torchtext.data.Field",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torchtext.data",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "student.tokenis... |
73158859623 | from atexit import register
from unicodedata import category
from django import template
from ..models import Category
register = template.Library()
# @register.simple_tag
# def title(data="وبلاگ جنگویی"):
# return data
# @register.inclusion_tag("pages/partials/category_navbar.html")
# def category_navbar():
# return {
# "category": Category.objects.filter(status=True)
# }
@register.inclusion_tag("registration/partials/link.html")
def link(request, link_name, content, classes_a, classes_i):
    """Render an account-app nav link through the partial template.

    :param request: current request, passed through to the partial
        (presumably for active-link detection — confirm in the template)
    :param link_name: URL name inside the ``account`` namespace
    :param content: visible link text
    :param classes_a: CSS classes for the anchor element
    :param classes_i: CSS classes for the icon element
    """
    return {
        "request": request,
        "link_name": link_name,
        "link": f"account:{link_name}",
        "content": content,
        "classes_a": classes_a,
        "classes_i": classes_i,
} | AliNozhati/BlogProject | pages/templatetags/base_tags.py | base_tags.py | py | 741 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "atexit.register",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.template.Library",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "atexit.register.... |
74963737383 | # !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@FileName: EditAdmin
@Author : sky
@Date : 2023/2/8 11:08
@Desc : 修改用户界面设计与功能实现
"""
import sql_table
from PySide6.QtWidgets import QApplication, QMainWindow, QMessageBox, QInputDialog, QLineEdit, QPushButton, QLabel
from PySide6.QtCore import Qt, QRect, QMetaObject, QCoreApplication
from ComboCheckBox import QComboCheckBox
from db import DBSESSION, md5
class EditAdmin(object):
    """Dialog logic for editing an admin user: username, note, manageable classes."""

    # Build the dialog UI and pre-fill it with the admin's current data.
    def setup_ui(self, Form, adminId):
        """Construct the widgets on *Form* for the admin with id *adminId*."""
        Form.setObjectName("Form")
        Form.resize(360, 279)
        self.label = QLabel(Form)
        self.label.setGeometry(QRect(40, 40, 54, 12))
        self.label.setAlignment(Qt.AlignmentFlag.AlignRight | Qt.AlignmentFlag.AlignTrailing |
                                Qt.AlignmentFlag.AlignVCenter)
        self.label.setObjectName("label")
        admin = DBSESSION.query(sql_table.TAdmin).filter(sql_table.TAdmin.a_id == adminId).first()
        self.textEdit = QLineEdit(Form)
        self.textEdit.setGeometry(QRect(100, 30, 181, 31))
        self.textEdit.setText(admin.a_username)
        self.textEdit.setObjectName("textEdit")
        self.textEdit_2 = QLineEdit(Form)
        self.textEdit_2.setGeometry(QRect(100, 80, 181, 31))
        self.textEdit_2.setText(admin.a_mark)
        self.textEdit_2.setObjectName("textEdit_2")
        self.label_2 = QLabel(Form)
        self.label_2.setGeometry(QRect(40, 90, 54, 12))
        self.label_2.setAlignment(Qt.AlignmentFlag.AlignRight | Qt.AlignmentFlag.AlignTrailing |
                                  Qt.AlignmentFlag.AlignVCenter)
        self.label_2.setObjectName("label_2")
        self.textEdit_3 = QComboCheckBox(Form)
        class_list = []
        try:
            # a_classid '0' means "all classes"; otherwise it is a
            # comma-separated list of class ids.
            if admin.a_classid != '0':
                for data in admin.a_classid.split(','):
                    class_list.append(data)
        except Exception as e:
            pass
        # Fetch the class list (for the teacher to pick which classes to manage).
        temp_class_list = DBSESSION.query(sql_table.TClass).all()
        for item in temp_class_list:
            self.textEdit_3.add_item(f"{item.c_id}.{item.c_name}",
                                     flag=admin.a_classid == '0' or str(item.c_id) in class_list)
        self.textEdit_3.setGeometry(QRect(100, 130, 181, 31))
        self.textEdit_3.setObjectName("textEdit_3")
        self.label_3 = QLabel(Form)
        self.label_3.setGeometry(QRect(23, 140, 71, 20))
        self.label_3.setObjectName("label_3")
        self.pushButton = QPushButton(Form)
        self.pushButton.setGeometry(QRect(150, 220, 75, 23))
        self.pushButton.setObjectName("pushButton")
        # Submit button
        self.pushButton.clicked.connect(lambda: self.updateAdmin(adminId, Form))
        self.pushButton_2 = QPushButton(Form)
        self.pushButton_2.setGeometry(QRect(230, 220, 75, 23))
        self.pushButton_2.setObjectName("pushButton_2")
        # Cancel button
        self.pushButton_2.clicked.connect(lambda: Form.hide())
        self.pushButton_3 = QPushButton(Form)
        self.pushButton_3.setGeometry(QRect(10, 220, 75, 23))
        self.pushButton_3.setObjectName("pushButton_3")
        # Reset-password button
        self.pushButton_3.clicked.connect(lambda: self.resetPassw(adminId, Form))
        self.retranslateUi(Form)
        QMetaObject.connectSlotsByName(Form)

    # Assign all user-visible (Chinese) strings.
    def retranslateUi(self, Form):
        """Set window title, labels and button captions."""
        _translate = QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "编辑角色"))
        self.label.setText(_translate("Form", "用户名:"))
        self.label_2.setText(_translate("Form", "备注:"))
        self.label_3.setText(_translate("Form", "可管理班级:"))
        self.pushButton.setText(_translate("Form", "提交"))
        self.pushButton_2.setText(_translate("Form", "取消"))
        self.pushButton_3.setText(_translate("Form", "重置密码"))

    # Persist the edited user fields.
    def updateAdmin(self, adminId, Form):
        """Save username/note/class ids for *adminId*; roll back on failure."""
        username = self.textEdit.text()
        mark = self.textEdit_2.text()
        classids = self.textEdit_3.get_class_text()
        try:
            admin = DBSESSION.query(sql_table.TAdmin).filter(sql_table.TAdmin.a_id == adminId).update(
                {"a_username":f"{username}","a_mark":f"{mark}","a_classid":f"{classids}"})
            DBSESSION.commit()
            QMessageBox.about(Form, "成功", "编辑成功!请刷新数据列表。")
        except Exception as e:
            DBSESSION.rollback()
            QMessageBox.about(Form, "失败", "编辑失败!")
        Form.close()

    # Reset the user's password.
    def resetPassw(self, adminId, Form):
        """Prompt for a new password and store its md5 hash for *adminId*."""
        text, okPressed = QInputDialog.getText(Form, "重置密码", "新密码:", QLineEdit.EchoMode.Normal, '')
        if okPressed:
            try:
                DBSESSION.query(sql_table.TAdmin).filter(sql_table.TAdmin.a_id == adminId).update(
                    {"a_password":f"{md5(text)}"})
                DBSESSION.commit()
                QMessageBox.about(Form, "成功", "重置密码成功!")
            except Exception as e:
                DBSESSION.rollback()
                QMessageBox.about(Form, "失败", "重置密码失败!")
            Form.close()
def main():
    """Standalone entry point: open the edit-user dialog for admin id 1."""
    import sys
    application = QApplication(sys.argv)
    editor = EditAdmin()
    main_window = QMainWindow()
    editor.setup_ui(main_window, 1)
    main_window.show()
    main_window.setWindowTitle("学生管理系统-修改用户")
    sys.exit(application.exec())
if __name__ == "__main__":
main() | Bxiaoyu/NotesRep | studentms/EditAdmin.py | EditAdmin.py | py | 5,579 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PySide6.QtWidgets.QLabel",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PySide6.QtCore.QRect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PySide6.QtCore.Qt.AlignmentFlag",
"line_number": 25,
"usage_type": "attribute"
},
{
... |
70606068584 | import os
from os.path import (
abspath,
dirname,
isfile,
join as join_path,
)
from six.moves.configparser import ConfigParser
from pymud.utilities import ConsoleLogger
rel_path = abspath(join_path(dirname(__file__), 'pymud.conf'))
etc_path = join_path('/etc/pymud', 'pymud.conf')
CONFIG = ConfigParser()
if isfile(rel_path):
CONFIG.read(rel_path)
elif isfile(etc_path):
CONFIG.read(etc_path)
else:
CONFIG.add_section('general')
CONFIG.set(
'general',
'host',
os.getenv('HOST', '0.0.0.0')
)
CONFIG.set(
'general',
'port',
os.getenv('PORT', '7001')
)
LOGGER = ConsoleLogger()
| jzaleski/pymud | pymud/__init__.py | __init__.py | py | 675 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line... |
44141408017 | from torch.utils.data import Dataset
import numpy as np
from PIL import Image
import random
from torchvision import transforms
from options import utils_option
import os
import math
from lib import utils_image as util
class BaseDataset(Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
def get_parms(self):
# ------------------------------------
# Probability
# ------------------------------------
self.noise_p = self.opt["corruption"]["noise"]
self.rain_p = self.opt["corruption"]["rain"]
self.blur_p = self.opt["corruption"]["blur"]
self.impainting_p = self.opt["corruption"]["impainting"]
self.impainting_rate = self.opt["corruption"]["impainting_rate"]
self.superresolution_p = self.opt["corruption"]["superresolution"]
self.superresolution_scale = self.opt["corruption"]["superresolution_scale"]
self.scale = self.opt["upscale"]
self.L_size = self.image_size // self.scale
self.sigma = self.datasets_opt['sigma'] if self.datasets_opt['sigma'] else 25
self.sigma_test = self.datasets_opt['sigma_test'] if self.datasets_opt['sigma_test'] else self.sigma
self.noise_type = self.opt["corruption"]["noise_type"]
self.noise_num = self.opt["corruption"]["noise_num"]
self.resize_size = self.image_size * 8
self.DEBUG = False
def combine_datasets_path(self):
get_dataset = self.datasets_opt[self.dataset_type + "_dataset"] # dataset list
self.paths_H = []
self.paths_L = []
if isinstance(get_dataset, list):
for dataset_name in get_dataset:
self.get_datasets_path(dataset_name)
else:
self.get_datasets_path(get_dataset)
def get_datasets_path(self, get_dataset):
self.dataset_file = utils_option.json_parse(self.opt["dataset_file"])[get_dataset]
self.shift = self.datasets_opt["shift"]
self.n_channels = self.dataset_file['n_channels'] if self.dataset_file['n_channels'] else 3
self.image_size = self.datasets_opt['H_size'] if self.datasets_opt['H_size'] else 64
self.window_size = self.opt["netG"]["window_size"]
# ------------------------------------
# get path of H
# return None if input is None
# ------------------------------------
self.paths_H_filename = self.dataset_file['dataroot_H_filename']
self.paths_L_filename = self.dataset_file['dataroot_L_filename']
self.paths_H.extend(
util.get_image_paths(os.path.join(self.dataset_file['dataroot_H'], self.paths_H_filename[0])))
self.paths_H.sort()
if self.dataset_file['dataroot_L'] != None:
self.paths_L.extend(
util.get_image_paths(os.path.join(self.dataset_file['dataroot_L'], self.paths_L_filename[0]))
)
else:
self.paths_L.extend(self.paths_H)
self.paths_L.sort()
if self.DEBUG:
for i in zip(self.paths_L, self.paths_H):
print("data list:", i)
def get_image_from_path(self, path):
img = Image.open(path)
if self.n_channels < 3:
img = img.convert("L")
img = np.asarray(img)
return img
def get_img_by_index(self, index):
H_path = self.paths_H[index]
L_path = self.paths_L[index]
img_H = self.get_image_from_path(H_path)
if self.opt["task"] == "denoising" or self.opt["task"] == "med":
img_H = self.albumen_transform(image=img_H)['image']
img_L = img_H.copy()
else:
img_L = self.get_image_from_path(L_path)
return img_H, img_L
def get_patch_from_img(self, img_H, img_L):
# --------------------------------
# randomly crop the patch
# --------------------------------
if self.n_channels == 3:
H, W, _ = img_H.shape
else:
H, W = img_H.shape
rnd_h = random.randint(0, max(0, H - self.image_size))
rnd_w = random.randint(0, max(0, W - self.image_size))
patch_H = img_H[rnd_h:rnd_h + self.image_size, rnd_w:rnd_w + self.image_size]
patch_L = img_L[rnd_h:rnd_h + self.image_size, rnd_w:rnd_w + self.image_size]
return patch_H, patch_L
def img_fliiper(self, img, random_val, random_vertical):
# --------------------------------
# augmentation - flip, rotate
# --------------------------------
# randomly horizontal flip the image with probability of 0.5
if (random_val > 0.5):
img = img.transpose(Image.FLIP_LEFT_RIGHT)
# randomly vertically flip the image with probability 0.5
if (random_vertical > 0.5):
img = img.transpose(Image.FLIP_TOP_BOTTOM)
return img
def build_transform(self, image_size):
t = []
t.append(transforms.ToTensor()) # convert (B, H, W, C) from [0,255] to (B, C, H, W) [0. ,1.]
return transforms.Compose(t)
def rescale_tv(self, img, size, type):
if type == "BILINEAR":
type = Image.BILINEAR
elif type == "NEAREST":
type = Image.NEAREST
elif type == "BICUBIC":
type = Image.BICUBIC
elif type == "LANCZOS":
type = Image.LANCZOS
elif type == "HAMMING":
type = Image.HAMMING
return transforms.Resize(size, interpolation=type)(img)
def get_downsample_type(size=1):
return np.random.choice(
["NEAREST", "LANCZOS", "HAMMING", "BILINEAR", "BICUBIC"], size=size, replace=False)
| chqwer2/Multi-view-Self-supervised-Disentanglement-Denoising | data/dataset_base.py | dataset_base.py | py | 5,678 | python | en | code | 99 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "options.utils_option.json_parse",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "options.utils_option",
"line_number": 62,
"usage_type": "name"
},
{
"api... |
5347720873 | from operator import mul
from functools import reduce
def combinations_count(n, r):
r = min(r, n - r)
numer = reduce(mul, range(n, n - r, -1), 1)
denom = reduce(mul, range(1, r + 1), 1)
return numer // denom
N, L = map(int, input().split())
l = N%L
cnt = 0
for l in range(N//L+1):
x = N-L*l
cnt += combinations_count(x+l, x)
print(cnt%(10**9+7))
| hrkhrkhrk/Atom | kyopro_educational_90/50_StairJump.py | 50_StairJump.py | py | 371 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "functools.reduce",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "operator.mul",
"line_number": 5,
"usage_type": "argument"
},
{
"api_name": "functools.reduce",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "operator.mul",
"l... |
39826357335 | import logparser
import numpy as np
from matplotlib import pyplot as plt
access_log = open("access.log","r")
dt_counter = {}
for line in access_log:
logDict = logparser.parser(line)
dat = logDict['time'][:11]
if dat not in dt_counter:
dt_counter[dat] = 1
else:
dt_counter[dat] = dt_counter[dat] + 1
def hits(x):
return x[-1]
sort = sorted(dt_counter.items(),key=hits,reverse=True)[:15]
date = []
hits = []
for item in sort:
date.append(item[0])
hits.append(item[1])
access_log.close()
plt.figure(figsize=(12,7))
plt.xlabel("Date -->")
plt.ylabel("Hits -->")
plt.title("Hits per Date")
plt.xticks(rotation=90)
x = date
y = hits
plt.plot(x, y, color = 'red')
plt.yticks(np.arange(min(y)+12, 8000+250, 250))
plt.show()
| SaneshSabu/Visualisation-of-Log-file-using-Python-and-Matplotlib | hits_date_line_graph.py | hits_date_line_graph.py | py | 797 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logparser.parser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotli... |
40187237048 | from selenium import webdriver
from datetime import datetime
import time
browser=webdriver.Chrome(r"C:\Users\FZL\Desktop\Blast Betting Game Predictor\Dependencies\chromedriver.exe")
#go to site
browser.get("http://1fifa90.com/games/crash/index")
#login
time.sleep(5)
browser.find_element_by_id("mail").send_keys("onlyfbaccount@gmail.com")
browser.find_element_by_id("pass").send_keys("P566XN7xJCybsT9tgkSv")
time.sleep(2)
browser.find_element_by_class_name("action-button").click()
time.sleep(10)
browser.find_element_by_id("play_button").click()
time.sleep(5)
browser.find_element_by_class_name("lang_67").click()
time.sleep(3)
def writeToHistoryFile(recordList):
with open("C:\\Users\FZL\Desktop\Blast Betting Game Predictor\First things First (Data Collecting)\history.csv","a") as myfile:
for eachColumn in recordList:
myfile.write(eachColumn+",")
myfile.write("\n")
def checkForFirstLine(firstRecord):
aa=[]
while str(firstRecord.text).split("\n")[0]=="-":
time.sleep(2)
# if str(firstRecord.text).split("\n")[0]!="-":
return (list([str(datetime.now())]+str(firstRecord.text).split("\n")))
# else:
# time.sleep(2)
# return checkForFirstLine(firstRecord)
history=[]
aa=[]
recordRows=browser.find_elements_by_xpath("//div[contains(@class,'table-body')]/div[contains(@class,'crash-row')]")
for eachRow in recordRows[1:]:
if str(eachRow.text).split("\n")[0]!="-":
history.append([str(datetime.now())]+str(eachRow.text).split("\n"))
for eachRecrod in reversed(history):
writeToHistoryFile(eachRecrod)
while 1:
firstRecord = browser.find_element_by_xpath("//div[contains(@class,'table-body')]/div[contains(@class,'crash-row')]")
newRecord=checkForFirstLine(firstRecord)
print((newRecord))
writeToHistoryFile(newRecord)
time.sleep(5)
try:
browser.find_element_by_xpath('//*[@id="disconnect_screen"]/table/tbody/tr/td/center/a').click()
time.sleep(10)
browser.find_element_by_id("play_button").click()
time.sleep(5)
browser.find_element_by_class_name("lang_67").click()
time.sleep(3)
except:
pass
| alifzl/Blast-Betting-Game-Predictor | First things First (Data Collecting)/myBot.py | myBot.py | py | 2,122 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
18433473447 | """H1st IoT Maintenance Operations: URL configs."""
from django.conf.urls import include, url
from rest_framework.routers import DefaultRouter
from aito.iot_mgmt.maint_ops.views import (
EquipmentInstanceDailyRiskScoreViewSet,
EquipmentProblemTypeViewSet,
EquipmentInstanceAlarmPeriodViewSet,
EquipmentInstanceProblemDiagnosisViewSet,
AlertDiagnosisStatusViewSet,
EquipmentInstanceAlertPeriodViewSet,
)
ROUTER = DefaultRouter()
ROUTER.register(
'equipment-instance-daily-risk-scores',
EquipmentInstanceDailyRiskScoreViewSet)
ROUTER.register(
'equipment-problem-types',
EquipmentProblemTypeViewSet)
ROUTER.register(
'equipment-instance-alarm-periods',
EquipmentInstanceAlarmPeriodViewSet)
ROUTER.register(
'equipment-instance-problem-diagnoses',
EquipmentInstanceProblemDiagnosisViewSet)
ROUTER.register(
'alert-diagnosis-statuses',
AlertDiagnosisStatusViewSet)
ROUTER.register(
'equipment-instance-alert-periods',
EquipmentInstanceAlertPeriodViewSet)
URL_PATTERNS = [
# API URLs
url('iot/api/maint-ops/', include(ROUTER.urls)),
]
| aitomatic/contrib | src/aito/iot_mgmt/maint_ops/urls.py | urls.py | py | 1,124 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "aito.iot_mgmt.maint_ops.views.EquipmentInstanceDailyRiskScoreViewSet",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "aito.iot_mgmt.maint_ops.views.Equip... |
33808709044 | """added profile pict
Revision ID: d03756b7815b
Revises: 845e6def5277
Create Date: 2022-03-02 16:17:18.286475
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd03756b7815b'
down_revision = '845e6def5277'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('u_pict', sa.String(), nullable=True))
op.create_index(op.f('ix_user_u_pict'), 'user', ['u_pict'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_u_pict'), table_name='user')
op.drop_column('user', 'u_pict')
# ### end Alembic commands ###
| andyderis36/4p-flask | migrations/versions/d03756b7815b_added_profile_pict.py | d03756b7815b_added_profile_pict.py | py | 790 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String"... |
19842008557 | # 백준 4485. 녹색 옷 입은 애가 젤다지?
# 시간 제한 1초 / 메모리 제한 256MB
from collections import deque
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
def bfs():
global dist
queue = deque()
queue.append((0, 0))
dist[0][0] = graph[0][0]
while queue:
x, y = queue.popleft()
for d in range(4):
nx = x + dx[d]
ny = y + dy[d]
if not(0 <= nx < n and 0 <= ny < n):
continue
cost = graph[nx][ny]
if dist[nx][ny] > dist[x][y] + cost:
dist[nx][ny] = dist[x][y] + cost
queue.append((nx, ny))
index = 1
while True:
n = int(input())
if n == 0:
break
graph = [list(map(int, input().split())) for _ in range(n)]
dist = [[1e9 for _ in range(n)] for _ in range(n)]
bfs()
print("Problem {}: {}".format(index, dist[n-1][n-1]))
index += 1 | eundeok9/algorithm-study | 백준/Gold/4485. 녹색 옷 입은 애가 젤다지?/녹색 옷 입은 애가 젤다지?.py | 녹색 옷 입은 애가 젤다지?.py | py | 966 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 10,
"usage_type": "call"
}
] |
17849552367 | from ..config import Vector, ParameterName, ColorConfig, ZOrderConfig
from ..config import CompositeFigure, PathStep, PathOperation, PathShape, Rectangle, Ellipse, ellipse_arc_obj
class CulturedCellConfig(object):
common_z_order = ZOrderConfig.default_patch_z_order
z_order_increment = ZOrderConfig.z_order_increment
dish_outline_z_order = common_z_order + 2 * z_order_increment
media_z_order = common_z_order
cell_z_order = common_z_order - z_order_increment
cell_nucleus_z_order = cell_z_order + z_order_increment
media_color = ColorConfig.orange
media_side_alpha = ColorConfig.lower_alpha_value
media_top_alpha = ColorConfig.alpha_for_bar_plot
common_edge_width = 9.5
infusion_rect_edge_width = common_edge_width - 1
cell_alpha = ColorConfig.higher_alpha
cell_nucleus_alpha = 1
dish_outline_config = {
ParameterName.edge_width: common_edge_width,
ParameterName.edge_color: ColorConfig.black_color,
ParameterName.face_color: None,
ParameterName.z_order: dish_outline_z_order
}
media_config = {
ParameterName.edge_width: None,
ParameterName.z_order: media_z_order
}
media_top_config = {
**media_config,
ParameterName.face_color: media_color.add_transparency(media_top_alpha)
}
media_side_config = {
**media_config,
ParameterName.face_color: media_color.add_transparency(media_side_alpha)
}
cell_config = {
ParameterName.edge_width: None,
ParameterName.z_order: cell_z_order,
ParameterName.face_color: media_color.add_transparency(cell_alpha)
}
cell_nucleus_config = {
ParameterName.edge_width: None,
ParameterName.z_order: cell_nucleus_z_order,
ParameterName.face_color: media_color.add_transparency(cell_nucleus_alpha)
}
class CulturedCell(CompositeFigure):
total_width = 1
height_to_width_ratio = 0.4
def __init__(self, scale=1, bottom_left_offset=None, base_z_order=0, z_order_increment=1, **kwargs):
total_width = self.total_width
total_height = self.height_to_width_ratio * total_width
ellipse_height_to_width_ratio = self.height_to_width_ratio / 2
center_x = total_width / 2
ellipse_width = total_width
ellipse_height = ellipse_height_to_width_ratio * ellipse_width
dish_height = (self.height_to_width_ratio - ellipse_height_to_width_ratio) * total_width
bottom_ellipse_center_y = ellipse_height / 2
top_ellipse_center_y = bottom_ellipse_center_y + dish_height
media_height = dish_height * 0.6
media_top_ellipse_center_y = bottom_ellipse_center_y + media_height
cell_width = 0.15 * total_width
cell_height = 0.17 * total_height
cell_nucleus_width = 0.3 * cell_width
cell_nucleus_height = 0.3 * cell_height
cell_location_list = [
Vector(center_x - 0.1 * total_width, bottom_ellipse_center_y - 0.12 * media_height),
Vector(center_x + 0.254 * total_width, bottom_ellipse_center_y + 0.463 * media_height),
Vector(center_x + 0.1 * total_width, bottom_ellipse_center_y + 0.32 * media_height),
Vector(center_x - 0.34 * total_width, bottom_ellipse_center_y + 0.31 * media_height),
Vector(center_x - 0.2386 * total_width, bottom_ellipse_center_y - 0.083 * media_height),
Vector(center_x + 0.05 * total_width, bottom_ellipse_center_y - 0.33 * media_height),
Vector(center_x + 0.325 * total_width, bottom_ellipse_center_y - 0.062 * media_height),
]
cell_nucleus_relative_location = Vector(0, 0.1)
cell_obj_list = []
for cell_location in cell_location_list:
cell_obj = Ellipse(**{
ParameterName.center: cell_location,
ParameterName.width: cell_width,
ParameterName.height: cell_height,
ParameterName.name: 'cell',
**CulturedCellConfig.cell_config
})
cell_nucleus_location = cell_location + cell_nucleus_relative_location * Vector(cell_width, cell_height)
cell_nucleus_obj = Ellipse(**{
ParameterName.center: cell_nucleus_location,
ParameterName.width: cell_nucleus_width,
ParameterName.height: cell_nucleus_height,
ParameterName.name: 'cell_nucleus',
**CulturedCellConfig.cell_nucleus_config
})
cell_obj_list.extend([cell_obj, cell_nucleus_obj])
bottom_half_ellipse = ellipse_arc_obj.generator(
Vector(center_x, bottom_ellipse_center_y),
-180, 0, ellipse_width / 2, ellipse_height / 2
)
left_vertical_path = [
PathStep(PathOperation.moveto, Vector(center_x - ellipse_width / 2, top_ellipse_center_y)),
PathStep(PathOperation.lineto, Vector(center_x - ellipse_width / 2, bottom_ellipse_center_y))
]
right_vertical_path = [
# PathStep(PathOperation.moveto, Vector(center_x + ellipse_width / 2, bottom_ellipse_center_y)),
PathStep(PathOperation.lineto, Vector(center_x + ellipse_width / 2, top_ellipse_center_y))
]
media_left_vertical_path = [
PathStep(PathOperation.moveto, Vector(center_x - ellipse_width / 2, media_top_ellipse_center_y)),
PathStep(PathOperation.lineto, Vector(center_x - ellipse_width / 2, bottom_ellipse_center_y))
]
media_right_vertical_path = [
# PathStep(PathOperation.moveto, Vector(center_x + ellipse_width / 2, bottom_ellipse_center_y)),
PathStep(PathOperation.lineto, Vector(center_x + ellipse_width / 2, media_top_ellipse_center_y))
]
media_top_half_ellipse = ellipse_arc_obj.generator(
Vector(center_x, media_top_ellipse_center_y),
0, -180, ellipse_width / 2, ellipse_height / 2
)
dish_outline_obj_list = [
Ellipse(**{
ParameterName.center: Vector(center_x, top_ellipse_center_y),
ParameterName.width: ellipse_width,
ParameterName.height: ellipse_height,
ParameterName.name: 'top_outline_ellipse',
**CulturedCellConfig.dish_outline_config
}),
PathShape(**{
ParameterName.path_step_list: [*left_vertical_path, *bottom_half_ellipse, *right_vertical_path],
ParameterName.closed: False,
ParameterName.name: 'bottom_outline',
**CulturedCellConfig.dish_outline_config
})
]
media_obj_list = [
Ellipse(**{
ParameterName.center: Vector(center_x, media_top_ellipse_center_y),
ParameterName.width: ellipse_width,
ParameterName.height: ellipse_height,
ParameterName.name: 'media_top',
**CulturedCellConfig.media_top_config,
}),
PathShape(**{
ParameterName.path_step_list: [
*media_left_vertical_path, *bottom_half_ellipse, *media_right_vertical_path,
*media_top_half_ellipse],
ParameterName.name: 'media_side',
**CulturedCellConfig.media_side_config,
})
]
cultured_cell_dict = {
ParameterName.dish_outline: {
dish_outline_obj.name: dish_outline_obj for dish_outline_obj in dish_outline_obj_list},
ParameterName.media: {
media_obj.name: media_obj for media_obj in media_obj_list},
ParameterName.cell: {
cell_obj.name: cell_obj for cell_obj in cell_obj_list}
}
super().__init__(
cultured_cell_dict, bottom_left=Vector(0, 0), size=Vector(total_width, total_height),
scale=scale, bottom_left_offset=bottom_left_offset, base_z_order=base_z_order,
z_order_increment=z_order_increment)
| LocasaleLab/Automated-MFA-2023 | figures/figure_plotting/figure_elements/diagrams/diagram_elements/object_diagrams/cultured_cell.py | cultured_cell.py | py | 8,045 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "config.ZOrderConfig.default_patch_z_order",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "config.ZOrderConfig",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "config.ZOrderConfig.z_order_increment",
"line_number": 7,
"usage_type": ... |
12315654775 | import pygame
pygame.init()
default_display = 320, 240
max_display = pygame.display.Info().current_w, pygame.display.Info().current_h
scale = 1
fullscreen = False
draw_surface = pygame.Surface(default_display)
scale_surface = pygame.Surface(default_display)
game_display = pygame.display.set_mode(default_display)
clock = pygame.time.Clock()
# Functions
def set_display():
global game_display, scale_surface, draw_surface
scale_surface = pygame.Surface(get_resolution())
if fullscreen:
draw_surface = pygame.Surface(max_display)
gameDisplay = pygame.display.set_mode(max_display, pygame.FULLSCREEN)
else:
draw_surface = pygame.Surface(default_display)
gameDisplay = pygame.display.set_mode(default_display)
return
def get_resolution():
if fullscreen:
return max_display[0] * scale, max_display[1] * scale
else:
return default_display[0] * scale, default_display[1] * scale
# MainLoop
while True:
# EventHandle
for event in pygame.event.get():
# Quit
if event.type == pygame.QUIT:
pygame.quit()
quit()
# Buttons
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_z:
scale += 1
if scale == 4:
scale = 1
set_display()
if event.key == pygame.K_x:
fullscreen = not fullscreen
set_display()
if event.key == pygame.K_c:
pass
# Draw
draw_surface.fill((255, 255, 255))
pygame.draw.rect(draw_surface, (0, 0, 0), (50, 50, 50, 50))
pygame.transform.scale(draw_surface, get_resolution(), scale_surface)
game_display.blit(scale_surface, (0, 0))
pygame.display.update()
clock.tick(60)
| philorfa/FDTD_2D | pythonProject/Functions/delete later.py | delete later.py | py | 1,880 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pygame.display.Info",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
... |
29730548092 | import setuptools
# with open("README.md", "r") as fh:
# long_description = fh.read()
long_description = 'https://github.com/suckmybigdick/flask-wechat-utils'
setuptools.setup(
name = "flask-wechat-utils",
version="0.1.16",
auth="Huang Xu Hui",
author_email="13250270761@163.com",
description="flask-wechat-tuils for wechat-app-user's login/register/auth, and message_template",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/suckmybigdick/flask-wechat-utils",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 2",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"Flask==0.10.1",
"requests==2.9.1",
"cryptography",
"itsdangerous==0.24",
"Werkzeug==0.14.1",
"flask_restplus==0.12.1",
"flask_mongoengine==0.9.5",
],
)
| synctrust/flask-wechat-utils | setup.py | setup.py | py | 908 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 19,
"usage_type": "call"
}
] |
42238036553 | import requests
import json ,os,pprint
while True:
if os.path.exists("s_courses.json")==False:
a=requests.get('http://saral.navgurukul.org/api/courses')
pprint.pprint(a.text)
b=a.text
c=open('s_courses.json','w')
json.dump(b,c)
c.close()
c=open('s_courses.json','r')
jsl1=json.load(c)
jsl2=json.loads(jsl1)
print(type(jsl2))
c.close()
for i in jsl2:
for j in range(len(jsl2[i])):
if j<9:
x='0'+str(j+1)
print(x,'-',jsl2[i][j]['id'],' ','name -',jsl2[i][j]['name'])
else:
print(1+j,'-',jsl2[i][j]['id'],' ','name -',jsl2[i][j]['name'])
j=int(input('enter no.'))
print(j,'-',jsl2[i][j-1]['id'],' ','name -',jsl2[i][j-1]['name'],'\n')
x=jsl2[i][j-1]['id']
while True:
a1=requests.get(' http://saral.navgurukul.org/api/courses/'+str(x)+'/exercises')
b1=a1.text
if os.path.exists("'exercises_'+str(x)+'.json'")==False:
c1=open('exercises_'+str(x)+'.json','w+')
json.dump(b1,c1)
c1.close()
c1=open('exercises_'+str (x)+'.json','r')
js1=json.load(c1)
js2=json.loads(js1)
c.close()
for i in js2:
for j in range(len(js2[i])):
print('\t',j+1,'-',js2[i][j]['name'])
inp=input('''
*** YOU WANT TO
PRIVIOUS PAGE - THEN PRESS ("P")
NEXT PAGE - TNEN PRESS ("N")
STARTING PAGE - THEN PRESS ("S")''').lower()
if inp=='p':
x-=1
elif(inp=='n'):
x+=1
else:
break
if x<14:
x=14
print('have no PRIVIOUS PAGE ')
elif(x>92):
x=92
print('have no NEXT PAGE')
print('Id-',x)
inp=input('**you want to exit then press "exit"').lower()
if 'e' in inp:
break
| shabidkhan/API | API.py | API.py | py | 1,568 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_numbe... |
40367902920 | # importing required modules
import tkinter as tk
from tkinter import filedialog
import xlsxwriter
from init_row_data import init_row_data
from compute_B import compute_B
from compute_A import compute_A
# console interface
print("Power bills data extractor in txt\nver.: 1.0, 10/03/2022\nAuthor: Fellipe Filgueira")
input("Press ENTER to begin...")
print("\nProcessing...")
# load files paths
root = tk.Tk()
root.withdraw()
root.call('wm', 'attributes', '.', '-topmost', True)
filePath = filedialog.askopenfilename(multiple=False) # True to load multiple files
# creating .xlsx object
findPath = filePath[len(filePath)::-1]
findPath = findPath[findPath.find("/"):]
findPath = findPath[len(findPath)::-1]
workbook = xlsxwriter.Workbook(findPath + 'data.xlsx')
worksheet = workbook.add_worksheet('main')
data_dict = init_row_data()
data_headers = list(data_dict.keys())
for itr in range(len(data_headers)): worksheet.write(0, itr, data_headers[itr])
# reading txt file
file = open(filePath, 'r', encoding='utf-8')
file_items = file.readlines()
# set lists
text = [[]]
data = []
# set loop variables
count = 0
j = 0
row = 1
# convert str into list based in break points
for item in file_items:
if 'Federal' in item:
text[count].append(item)
if item != file_items[-1]:
text.append(list())
count += 1
if item[0:2] == '11' or item[0:2] == '12'or item[0:2] == '13' or item[0:2] == '14' and 'Federal' not in item:
text[count].append(item)
else:
pass
# algorithm loop
while j < len(text):
if 'B3' in text[j][1]:
data = init_row_data()
data = compute_B(j, text, data)
for i in range(len(list(data))): worksheet.write(row, i, data[list(data)[i]])
row += 1
if 'A4' in text[j][1] or 'A3' in text[j][1]:
data = init_row_data()
data = compute_A(j, text, data)
for i in range(len(list(data))): worksheet.write(row, i, data[list(data)[i]])
row += 1
if text[j][1][81:83] not in ['B3', 'A3', 'A4'] :
print(text[j][1][26:36] + " bill was not correctly identified")
else:
pass
j += 1
# close files
workbook.close()
file.close()
# console interface
print("\ndata.xlsx file has been successfully saved!")
input("Press ENTER to finish...") | fellipefilgueira/power-bill-data-txt-extractor | Source/main.py | main.py | py | 2,315 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tkinter.Tk",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog.askopenfilename",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "xlsx... |
709130702 | #!/usr/bin/env python
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "geweb",
version = "0.1.0",
author = "Artem Sazhin",
author_email = "arts@point.im",
description = ("Asyncronous micro web framework based on gevent."),
long_description=read("README.md"),
license = "BSD",
keywords = "web framework gevent jinja2",
url = "http://bitbucket.org/arts/geweb",
packages = ["geweb", "geweb.template", "geweb.session", "geweb.util", "geweb.db", "geweb.db.pgsql"],
install_requires = ["gevent", "jinja2", "argparse", "setproctitle"],
scripts = ["geweb/bin/geweb"],
include_package_data = True,
classifiers = [
"Development Status :: 3 - Alpha",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"License :: OSI Approved :: BSD License",
"Intended Audience :: Developers",
],
)
| artss/geweb | setup.py | setup.py | py | 1,350 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"lin... |
9273754357 | import os
import gym
import time
import argparse
import numpy as np
import matplotlib.pyplot as plt
from EA_components_OhGreat.EA import EA
from EA_components_OhGreat.Recombination import *
from EA_components_OhGreat.Mutation import *
from EA_components_OhGreat.Selection import *
from src.Evaluation import *
from src.Network import *
from src.utilities import *
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-exp_name', action='store',
dest='exp_name', type=str,
default='test_experiment',
help="Defines the name of the experiment.")
parser.add_argument('-b', action='store',
dest='budget', type=int,
default=10000,
help="Defines the total amount of evaluations.")
parser.add_argument('-min', action='store_true',
dest='minimize',
help="Use this flag if the problem is minimization.")
parser.add_argument('-r', action='store',
dest='recombination', type=str,
default=None,
help="Defines the recombination strategy.")
parser.add_argument('-m', action='store',
dest='mutation', type=str,
default='IndividualSigma',
help="Defines the mutation strategy.")
parser.add_argument('-s', action='store',
dest='selection', type=str,
default='PlusSelection',
help="Defines the selection strategy.")
parser.add_argument('-ps', action='store',
dest='parents_size', type=int,
default=2,
help="Defines the number of parents per generation.")
parser.add_argument('-os', action='store',
dest='offspring_size', type=int,
default=4,
help="Defines the number of offspring per generation.")
parser.add_argument('-mul', action='store',
dest='one_fifth_mul', type=float,
default=0.9,
help="Defines the multiplier for the one fifth success rule.")
parser.add_argument('-pat', action='store',
dest='patience', type=int,
default=None,
help="Defines the wait time before resetting sigmas.")
parser.add_argument('-exp_reps', action='store',
dest='exp_reps', type=int,
default=5,
help="Defines the number of experiments to average results.")
parser.add_argument('-train_reps', action='store',
dest='train_reps', type=int,
default=10,
help="Defines the number of evaluation repetitions to use during training.")
parser.add_argument('-eval_reps', action='store',
type=int, default=100,
help="Defines the number of evaluation repetitions to run after \
'training' our candidate individuals.")
parser.add_argument('-model', action='store',
dest='model', type=int,
default=0,
help="Defines the model architecture to use.")
parser.add_argument('-env', action='store', type=str,
dest='env', default='CartPole-v1')
parser.add_argument('-render_train', action='store_true',
help='use this flag to render the training process \
of the individuals')
parser.add_argument('-render_eval', action='store_true',
help='use this flag to render the evaluation process \
after training our individuals')
parser.add_argument('-virtual_display', action='store_true',
help='needed for headless servers when using render')
parser.add_argument('-plot_name', action='store', type=str,
default=None)
parser.add_argument('-env_threshold', action='store',
type=float, default=500,
help="Optimum value to set as horizontal line in plot")
parser.add_argument('-v', action='store',
dest='verbose', type=int, default=0,
help="Defines the intensity of debug prints.")
args = parser.parse_args()
if args.verbose > 0:
print(args)
# used to train on headless servers
if args.virtual_display:
os.environ["SDL_VIDEODRIVER"] = "dummy"
# create gym environment
env = gym.make(args.env)
if env is None:
exit("Please select an environment")
print()
# environment specific parameters
n_observations = np.sum([dim for dim in env.observation_space.shape])
if env.action_space.__class__.__name__ == "Discrete":
n_actions = env.action_space.n
elif env.action_space.__class__.__name__ == "Box":
n_actions = sum(env.action_space._shape)
else:
exit(f"{env.action_space.__class__.__name__} action space not yet implemented")
# create an instance of the model
if args.model == 0:
model = NN_regression_0(n_observations, 4, 4, n_actions).to("cpu")
elif args.model == 1:
model = NN_regression_1(n_observations, 4, 4, n_actions).to("cpu")
elif args.model == 2:
model = NN_regression_2(n_observations, 4, 4, n_actions).to("cpu")
elif args.model == 3:
model = NN_regression_3(n_observations, 4, 8, n_actions).to("cpu")
else:
exit("Choose a valid model")
# define es individual size
individual_size = model.total_params
print(f"Model architecture: {args.model}\nEnvironment: {env.unwrapped.spec.id}\nNumber of observations: {n_observations}\nNumber of actions: {n_actions}\nIndividual size: {individual_size}")
print()
# EA specific parameters
minimize = args.minimize
budget = args.budget
patience = args.patience
parents_size = args.parents_size
offspring_size = args.offspring_size
# Recombination specific controls
if args.recombination != None:
recombination = globals()[args.recombination]()
elif args.recombination == "GlobalIntermediary" and args.offspring_size > 1:
print("GlobalIntermediary recombination cannot be used with more than one offspring.")
print("Please use a valid configuration")
exit()
else: recombination = None
# Mutation specific controls
if args.mutation == "IndividualOneFifth":
mutation = globals()[args.mutation](args.one_fifth_mul)
else:
mutation = globals()[args.mutation]()
selection=globals()[args.selection]()
evaluation = RewardMaximizationNN(env, model, args.train_reps,
args.render_train)
# loop through experiment
best_results = []
data_for_plots = []
for i in range(args.exp_reps):
# define new ea istance
ea = EA(minimize=minimize, budget=budget, patience=patience,
parents_size=parents_size, offspring_size=offspring_size,
individual_size=individual_size, recombination=recombination,
mutation=mutation, selection=selection, evaluation=evaluation,
verbose=args.verbose)
# run the ea
start_time = time.time()
best_ind, all_best_evals = ea.run()
end_time = time.time()
# keep track of results
best_results.append([best_ind, max(all_best_evals)])
data_for_plots.append(all_best_evals)
print(f"Rep: {i+1} | average for {args.train_reps} evals: {max(all_best_evals)} | time: {np.round(end_time-start_time, 2)}")
# save plot if name has been defined
if args.plot_name != None:
save_plot(args.plot_name, args.env_threshold, np.array(data_for_plots))
# loop through final evalutation process for our best results
eval_results = []
for res in best_results:
model.update_weights(res[0])
curr_eval = eval(env, model,
args.eval_reps,
render=args.render_eval)
eval_results.append(curr_eval)
print("Evaluation results",np.round(eval_results, 2))
# if saving is enabled
if args.exp_name != None:
# initialize directory and save results
if not os.path.exists('model_weights'):
os.makedirs('model_weights')
# get the best individual
best_ind_idx = np.argmax(eval_results)
best_ind = best_results[best_ind_idx][0]
# update model weights to match best inidividual
model.update_weights(best_ind)
# Save best individual as model weights
torch.save(model.state_dict(), 'model_weights/'+args.exp_name)
# Also save it as a numpy array
np.save('model_weights/'+args.exp_name+'.npy', best_ind)
if __name__ == "__main__":
main() | OhGreat/es_for_rl_experimentation | train_model.py | train_model.py | py | 9,142 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "gym.make",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"lin... |
11231627750 | """
"""
# Email: Kun.bj@outlook.com
import collections
import copy
import json
import os
import pickle
import time
import traceback
from collections import Counter
from pprint import pprint
import numpy as np
from sklearn.preprocessing import StandardScaler
from fkm import vis
from fkm.cluster import centralized_minibatch_kmeans
from fkm.utils.utils_func import dump, obtain_true_centroids
from fkm.utils.utils_stats import evaluate2
from fkm.utils.utils_func import timer
# These options determine the way floating point numbers, arrays and
# other NumPy objects are displayed.
# np.set_printoptions(precision=3, suppress=True)
from fkm.vis.visualize import plot_2gaussian, plot_3gaussian
np.set_printoptions(precision=3, suppress=True, formatter={'float': '{:.3f}'.format}, edgeitems=120, linewidth=100000)
def save_history2txt(seed_history, out_file='.txt'):
"""
with open(seed_file + '.txt', 'w') as file:
file.write(json.dumps(seed_history)) # not working
Returns
-------
"""
def format(data):
res = ''
if type(data) == dict:
for k, v in data.items():
res += f'{k}: ' + format(v) + '\n'
elif type(data) == list:
res += f'{data} \n'
else:
res += f'{data} \n'
return res
with open(out_file, 'w') as f:
f.write('***Save data with pprint\n')
pprint(seed_history, stream=f, sort_dicts=False) # 'sort_dicts=False' works when python version >= 3.8
f.write('\n\n***Save data with recursion')
res = format(seed_history)
f.write(res)
# def normalize2(raw_x, raw_y, raw_true_centroids, splits, params, is_federated=False):
# """
# Only for diagonal covariance matrix:
# even for federated kmeans, we still can get global mean and std from each client data.
# Based on that, for centralized and federated kmeans, we can use the same standscaler.
#
# Parameters
# ----------
# raw_x
# raw_y
# raw_true_centroids
# splits
# params
# is_federated
#
# Returns
# -------
#
# """
# is_show = params['is_show']
# normalize_method = params['normalize_method']
# # do normalization
# if normalize_method == 'std':
# # collects all clients' data together
# x = copy.deepcopy(raw_x)
# y = copy.deepcopy(raw_y)
# new_true_centroids = copy.deepcopy(raw_true_centroids)
# for spl in splits: # train and test
# x[spl] = np.concatenate(x[spl], axis=0)
# global_stdscaler = StandardScaler()
# global_stdscaler.fit(x['train'])
# for spl in splits: # train and test
# new_true_centroids[spl] = global_stdscaler.transform(raw_true_centroids[spl])
#
# if is_federated == False: # for centralized normalization
# new_x = copy.deepcopy(raw_x)
# new_y = copy.deepcopy(raw_y)
# for i_, _ in enumerate(new_x['train']):
# new_x['train'][i_] = global_stdscaler.transform(new_x['train'][i_])
# new_x['test'][i_] = global_stdscaler.transform(new_x['test'][i_])
# params['stdscaler'] = global_stdscaler
#
# if is_show:
# if '3GAUSSIANS' in params['p0']: # for plotting
# plot_3gaussian(new_x['train'][0], new_y['train'], new_x['train'][1], new_y['train'][1],
# new_x['train'][2], new_y['train'][2], params, title='std')
# else: # federated kmeans
# # for each client, we can get mean and std and then use them to get global_std
# new_x = copy.deepcopy(raw_x)
# new_y = copy.deepcopy(raw_y)
# stds = [[]] * len(new_x['train']) # get number of clients
# N = 0
# for i_, _ in enumerate(new_x['train']): # for each client
# data = new_x['train'][i_]
# n = len(data)
# stds[i_] = (n, np.mean(data, axis=0), np.std(data, axis=0))
# N += n
#
# dim = new_x['train'][0].shape[1]
# global_mean = [[]] * dim
# global_std = [[]] * dim
# # the following method only works for diagonal covariance matrix
# for i_ in range(dim):
# # compute global mean and std given each client's mean and std
# # global_mean = 1/N * (\sum client1 + \sum client2 + \sum client3)
# # = 1/N * (n1 * mu1 + n2 * mu2 + n3 * mu3)
# global_mean[i_] = sum([n[i_] * mu[i_] for n, mu, s in stds]) / N
#
# # global_var = E(x-mu)**2 = (1/N * (\sum x**2)) - mu**2
# # = (1/N * (\sum client1**2 + \sum client2**2+ \sum client3**2)) - global_mean **2
# # = (1/N * (n1 * var1 + n2 * var2 + n3 * var3)) - global_mean**2
# # = (1/N * (n1 * std1**2 + n2 * std2** + n3 * std3**2)) - global_mean**2
# global_std[i_] = (sum([n[i_] * s[i_] ** 2 for n, mu, s in stds]) / N - global_mean[i_] ** 2) ** (1 / 2)
#
# # for each client, then normalize its data use the global mean and std
# global_stdscaler2 = StandardScaler()
# global_stdscaler2.mean_ = global_mean
# global_stdscaler2.scale_ = global_std
#
# for i_, _ in enumerate(new_x['train']):
# new_x['train'][i_] = global_stdscaler2.transform(new_x['train'][i_])
# new_x['test'][i_] = global_stdscaler2.transform(new_x['test'][i_])
# params['stdscaler'] = global_stdscaler2
#
# if is_show:
# if '3GAUSSIANS' in params['p0']: # for plotting
# plot_3gaussian(new_x['train'][0], new_y['train'], new_x['train'][1], new_y['train'][1],
# new_x['train'][2], new_y['train'][2], params, title='std')
#
# else:
# new_x, new_y, new_true_centroids = raw_x, raw_y, raw_true_centroids
#
# return new_x, new_y, new_true_centroids
def normalize(raw_x, raw_y, raw_true_centroids, splits, params):
"""
For federated kmeans, we still can get global mean and std from each client data.
Based on that, for centralized and federated kmeans, we can use the same standscaler.
Parameters
----------
raw_x
raw_y
raw_true_centroids
splits
params
Returns
-------
"""
try:
is_show = params['is_show']
except Exception as e:
is_show = params['IS_SHOW']
try:
normalize_method = params['normalize_method']
except Exception as e:
normalize_method = params['NORMALIZE_METHOD']
try:
algorithm_name = params['p0']
except Exception as e:
algorithm_name = params['ALGORITHM']['name']
# do normalization
if normalize_method == 'std':
# collects all clients' data together and get global stdscaler
x = copy.deepcopy(raw_x)
y = copy.deepcopy(raw_y)
new_true_centroids = copy.deepcopy(raw_true_centroids)
for spl in splits: # train and test
x[spl] = np.concatenate(x[spl], axis=0)
global_stdscaler = StandardScaler() # we can get the same global_stdscaler using each client mean and std.
global_stdscaler.fit(x['train'])
# print(global_stdscaler.mean_, global_stdscaler.scale_)
for spl in splits: # train and test
new_true_centroids[spl] = global_stdscaler.transform(new_true_centroids[spl])
# print(new_true_centroids[spl])
# normalize data
new_x = copy.deepcopy(raw_x)
new_y = copy.deepcopy(raw_y)
for spl in splits:
for i_ in range(len(new_x[spl])):
new_x[spl][i_] = global_stdscaler.transform(new_x[spl][i_]) # normalize data first
# for j_ in set(new_y[spl][i_]):
# new_true_centroids[spl][j_] = np.mean(new_x[spl][i_], axis=0) # get new centroids
# if is_show:
# if '2GAUSSIANS' in algorithm_name: # for plotting
# plot_2gaussian(new_x['train'][0], new_y['train'], new_x['train'][1], new_y['train'][1],
# params, title='std')
# elif '3GAUSSIANS' in algorithm_name: # for plotting
# plot_3gaussian(new_x['train'][0], new_y['train'], new_x['train'][1], new_y['train'][1],
# new_x['train'][2], new_y['train'][2], params, title='std')
# elif '4GAUSSIANS' in algorithm_name: # for plotting
# plot_2gaussian(new_x['train'][0], new_y['train'], new_x['train'][1], new_y['train'][1],
# params, title='std')
elif normalize_method == 'min_max':
raise NotImplementedError
else:
# new_x, new_y, new_true_centroids = raw_x, raw_y, raw_true_centroids
# # collects all clients' data together and get global stdscaler
# x = copy.deepcopy(raw_x)
# y = copy.deepcopy(raw_y)
# for spl in splits: # train and test
# x[spl] = np.concatenate(x[spl], axis=0)
#
# global_stdscaler = StandardScaler() # we can get the same global_stdscaler using each client mean and std.
# global_stdscaler.fit(x['train'])
return raw_x, raw_y, raw_true_centroids, None
return new_x, new_y, new_true_centroids, global_stdscaler
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
@timer
def run_model(args):
"""
Parameters
----------
params
KMeansFederated
verbose:
0 < verbose <= 5: info
5 < verbose <= 10: debug
Returns
-------
"""
np.random.seed(args['SEED']) # set the global seed for numpy
VERBOSE = args['VERBOSE']
SEPERTOR = args['SEPERTOR']
SPLITS = args['SPLITS']
dataset_name = args['DATASET']['name']
N_CLUSTERS = args['N_CLUSTERS']
N_CLIENTS = args['N_CLIENTS']
# args['DATASET']['detail'] = f'{SEPERTOR}'.join(args['DATASET']['detail'], f'M_{N_CLIENTS}|K_{N_CLUSTERS}')
# dataset_detail = args['DATASET']['detail']
# algorithm_py_name = args['ALGORITHM']['py_name']
# initial_method = args['ALGORITHM']['initial_method']
server_init_method = args['ALGORITHM']['server_init_method']
client_init_method = args['ALGORITHM']['client_init_method']
N_REPEATS = args['N_REPEATS']
TOLERANCE = args['TOLERANCE']
NORMALIZE_METHOD = args['NORMALIZE_METHOD']
# algorithm_detail = args['ALGORITHM']['detail']
# OUT_DIR = args['OUT_DIR']
# # if os.path.exists(OUT_DIR):
# # shutil.rmtree(OUT_DIR)
history_file = os.path.join(args['OUT_DIR'], 'history.dat')
args['history_file'] = history_file
# if os.path.exists(history_file):
# # here could be some issue for multi-tasks, please double-check before calling this function.
# return history_file
# settings
history = {'args': args} # Store all results and needed data
# data is fixed, however, the algorithm will have different initialization centroids with different seeds.
with open(args['data_file'], 'rb') as f:
raw_x, raw_y = pickle.load(f)
print(f'data_file: ', args['data_file'])
for split in SPLITS:
X_ = raw_x[split]
y_ = raw_y[split]
print(len(X_), N_CLIENTS)
for i_c in range(N_CLIENTS):
print(f'{split}:{i_c}-th client raw data info, where mean: {np.mean(X_[i_c], axis=0)}, '
f'std: {np.std(X_[i_c], axis=0)}, and y: {collections.Counter(y_[i_c])}')
if VERBOSE >= 1:
# print raw_x and raw_y distribution
for split in SPLITS:
print(f'{split} set:')
clients_x, clients_y = raw_x[split], raw_y[split]
if VERBOSE >= 5:
# print each client distribution
for c_i, (c_x, c_y) in enumerate(zip(clients_x, clients_y)):
print(f'\tClient_{c_i}, n_datapoints: {len(c_y)}, '
f'cluster_size: {sorted(Counter(c_y).items(), key=lambda kv: kv[0], reverse=False)}')
y_tmp = []
for vs in clients_y:
y_tmp.extend(vs)
print(f'n_{split}_clients: {len(clients_x)}, n_datapoints: {sum(len(vs) for vs in clients_y)}, '
f'cluster_size: {sorted(Counter(y_tmp).items(), key=lambda kv: kv[0], reverse=False)}')
# obtain the true centroids given raw_x and raw_y
raw_true_centroids = obtain_true_centroids(raw_x, raw_y, SPLITS, args)
if VERBOSE >= 3:
# print true centroids
for split in SPLITS:
true_c = raw_true_centroids[split]
print(f'{split}_true_centroids:')
print(true_c)
# if algorithm_name == 'FEMNIST':
# save_image2disk((raw_x, raw_y), out_dir_i, params)
# do normalization in global for kmeans and federated kmeans
raw_x, raw_y, raw_true_centroids, global_stdscaler = normalize(raw_x, raw_y, raw_true_centroids, SPLITS, args)
args['global_stdscaler'] = global_stdscaler
history['raw_true_centroids'] = raw_true_centroids
print(f'after normalization, true_centroids:\n{raw_true_centroids} \nwhen normalize_method = {NORMALIZE_METHOD}')
# history = {'x': raw_x, 'y': raw_y, 'results': []}
# SEEDS = [10 * v ** 2 for v in range(1, N_REPEATS + 1, 1)]
SEEDS = [42] # we fix the model seed; however, the data seed is different.
history['SEEDS'] = SEEDS
from fkm.cluster import centralized_kmeans, federated_server_init_first, federated_client_init_first, \
federated_greedy_kmeans
KMEANS2PY = {
'centralized_kmeans': centralized_kmeans.KMeans,
'federated_server_init_first': federated_server_init_first.KMeansFederated,
'federated_client_init_first': federated_client_init_first.KMeansFederated,
'federated_greedy_kmeans': federated_greedy_kmeans.KMeansFederated,
'centralized_minibatch_kmeans': centralized_minibatch_kmeans.KMeans,
}
for idx_seed, seed in enumerate(SEEDS): # repetitions: to obtain average and std score.
# if VERBOSE >= 2:
print(f'\n***{idx_seed}th repeat with seed: {seed}:')
X = copy.deepcopy(raw_x)
Y = copy.deepcopy(raw_y)
true_centroids = copy.deepcopy(raw_true_centroids)
if not args['IS_FEDERATED']:
# collects all clients' data together
for spl in SPLITS:
X[spl] = np.concatenate(X[spl], axis=0)
Y[spl] = np.concatenate(Y[spl], axis=0)
print(spl, X[spl].shape, Y[spl].shape)
t1 = time.time()
# for Centralized Kmeans, we use server_init_centroids as init_centroids.
kmeans = KMEANS2PY[args['ALGORITHM']['py_name']](
n_clusters=N_CLUSTERS,
# batch_size=BATCH_SIZE,
sample_fraction=1.0,
epochs_per_round=args['CLIENT_EPOCHS'],
max_iter=args['ROUNDS'],
server_init_method=server_init_method,
client_init_method=client_init_method,
true_centroids=true_centroids,
random_state=seed,
learning_rate=0,
adaptive_lr=0,
epoch_lr=0,
momentum=0,
reassign_min=0,
reassign_after=0,
verbose=VERBOSE,
tol=TOLERANCE,
params=args
)
if VERBOSE > 5:
# print all kmeans's variables.
pprint(vars(kmeans))
# During the training, we also evaluate the model on the test set at each iteration
kmeans.fit(X, Y, SPLITS, record_at=None)
# After training, we obtain the final scores on the train/test set.
scores = evaluate2(
kmeans=kmeans,
x=X, y=Y,
splits=SPLITS,
federated=args['IS_FEDERATED'],
verbose=VERBOSE,
)
# # To save the disk storage, we only save the first repeat results.
# if params['p0'] == 'FEMNIST' and s_i == 0:
# try:
# predict_n_saveimg(kmeans, x, y, SPLITS, SEED,
# federated=params['is_federated'], verbose=VERBOSE,
# out_dir=os.path.join(out_dir_i, f'SEED_{SEED}'),
# params=params, is_show=is_show)
# except Exception as e:
# print(f'Error: {e}')
# # traceback.print_exc()
t2 = time.time()
print(f'{idx_seed}th repeat with seed {seed} takes {(t2 - t1):.4f}s')
# for each seed, we will save the results.
history[seed] = {'initial_centroids': kmeans.initial_centroids,
'true_centroids': kmeans.true_centroids,
'final_centroids': kmeans.centroids,
'training_iterations': kmeans.training_iterations,
'history': kmeans.history,
'scores': scores, 'duration': t2 - t1}
if dataset_name != 'FEMNIST':
try:
# # save the current 'history' to disk before plotting.
seed_file = os.path.join(args['OUT_DIR'], f'SEED_{seed}', f'~history.dat')
dump(history[seed], out_file=seed_file)
save_history2txt(history[seed], out_file=seed_file + '.txt')
except Exception as e:
print(f'save_history2txt() fails when SEED={seed}, Error: {e}')
if VERBOSE >= 2:
pprint(f'seed:{seed}, '
f'scores:{scores}')
try:
results_avg = vis.visualize.stats_single(history)
except Exception as e:
traceback.print_exc()
results_avg = {}
# save the current 'history' to disk before plotting.
history['results_avg'] = results_avg
dump(history, out_file=history_file)
try:
vis.visualize.plot_single(history)
except Exception as e:
traceback.print_exc()
# out_file = os.path.join( args['OUT_DIR'], f'varied_clients-Server_{server_init_method}-Client_{client_init_method}')
# print(out_file)
# dump(stats, out_file=out_file + '.dat')
# # dump(histories, out_file=out_file + '-histories.dat')
# with open(history_file + '-histories.txt', 'w') as file:
# file.write(json.dumps(history, cls=NumpyEncoder)) # use `json.loads` to do the reverse
return history_file
| kun0906/fkm | fkm/_main.py | _main.py | py | 16,242 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.set_printoptions",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
... |
35401554885 | from django.conf.urls import url,include
from . import views as app_v
from django.contrib.auth import views
from app.forms import LoginForm
urlpatterns = [
url(r'^$',app_v.index, name='home'),
url(r'^login/$', views.login, {'template_name': 'app/login.html', 'authentication_form': LoginForm}, name='login'),
url(r'^logout/$', views.logout, {'next_page': '/app/'}, name='logout'),
url(r'^register/$', app_v.register, name="register"),
url(r'^success/', app_v.success, name="success"),
url(r'^success1/', app_v.success1, name="success1"),
url(r'^check/username/(?P<username>[-\w.]+)/$', app_v.check),
url(r'^create/$', app_v.createPromo, name="createPromo"),
url(r'^withdraw/$', app_v.withdrawPromo, name="withdrawPromo"),
url(r'^sendOtp/$', app_v.sendOtp),
url(r'^getToken/$', app_v.get_token),
url(r'^checkBalance/$', app_v.checkBalance),
url(r'^generateChecksum/$', app_v.generateChecksum),
url(r'^makeTransaction/$', app_v.makeTransaction),
url(r'^doTransfer/$', app_v.doTransfer),
] | vigzmv/LetsPay | app/urls.py | urls.py | py | 1,050 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.login",
"line_number": 11,
"usage_type": "attribute"
},
{
"ap... |
20407034990 | import os
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforoms
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} device'.format(device))
class NeuralNetwork(nn.Module):
def __init__(self):
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(28*28, 512),
nn.ReLu(),
nn.Linear(512, 512),
nn.Relu(),
nn.Linear(512, 10),
nn.Relu()
)
def forward(self, x):
x = self.flatten(x)
logits = self.linear_relu_stack(x)
return logits
model = NeuralNetwork.to(device)
# print(model)
x = torch.rand(1, 28, 28, device=device)
logits = model(X)
pred_probab = nn.Softmax(dim=1)(logits)
y_pred = pred_probab.argmax(1)
# print(f"Predicted class: {y_pred}")
loss_fn = nn.CrossEntropyLoss()
def train_loop(dataloader, model, loss_fn, optimizer):
size = len(dataloader.dataset)
for batch, (X, y) in enumerate(dataloader):
# Compute prediction and loss
pred= model(X)
loss = loss_fn(pred, y)
#Backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch % 100 == 0:
loss, current = loss.item(), batch * len(X)
print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def test_loop(dataloader, model, loss_fn):
size = len(dataloader.dataset)
test_loss, correct = 0, 0
with torch.no_grad():
for X, y in dataloader:
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= size
correct /= size
# print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:8f} \n")
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
epochs = 10
for t in range(epochs):
print(f"Epochs {t+1}\n-------------------")
traing_loop(traing_dataloader, model, loss_fn, optimizer)
test_loop(test_dataloader, mode, loss_fn)
print("Done!")
model = models.vgg16(pretrained=True)
torch.save(model.state_dict(), 'model_weigths.pth')
model = models.vgg16()
model.load_state_dict(torch.load('model_weights.pth'))
model.eval()
torch.save(model, 'model.pth')
model = torch.load('model.pth') | a1key/VisionAssignment2 | test/Module.py | Module.py | py | 2,569 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",... |
14566681748 | from django.urls import path, re_path
from django.contrib.auth.decorators import permission_required
from django.views.generic import RedirectView
from .feeds import PublishTrackFeed
from . import views
urlpatterns = [
path('', RedirectView.as_view(url='catalogue/', permanent=False)),
path('images/', views.image_list, name='documents_image_list'),
path('image/<slug:slug>/', views.image, name="documents_image"),
path('image/<slug:slug>/publish', views.publish_image,
name="documents_publish_image"),
path('catalogue/', views.document_list, name='documents_document_list'),
path('user/', views.my, name='documents_user'),
path('user/<username>/', views.user, name='documents_user'),
path('users/', views.users, name='documents_users'),
path('activity/', views.activity, name='documents_activity'),
re_path(r'^activity/(?P<isodate>\d{4}-\d{2}-\d{2})/$',
views.activity, name='documents_activity'),
path('upload/', views.upload, name='documents_upload'),
path('create/<slug:slug>/',
views.create_missing, name='documents_create_missing'),
path('create/', views.create_missing, name='documents_create_missing'),
path('book/<slug:slug>/publish', views.publish, name="documents_publish"),
path('book/<slug:slug>/', views.book, name="documents_book"),
path('book/<slug:slug>/gallery/',
permission_required('documents.change_book')(views.GalleryView.as_view()),
name="documents_book_gallery"),
path('book/<slug:slug>/xml', views.book_xml, name="documents_book_xml"),
path('book/dc/<slug:slug>/xml', views.book_xml_dc, name="documents_book_xml_dc"),
path('book/<slug:slug>/txt', views.book_txt, name="documents_book_txt"),
path('book/<slug:slug>/html', views.book_html, name="documents_book_html"),
path('book/<slug:slug>/epub', views.book_epub, name="documents_book_epub"),
path('book/<slug:slug>/mobi', views.book_mobi, name="documents_book_mobi"),
path('book/<slug:slug>/pdf', views.book_pdf, name="documents_book_pdf"),
path('book/<slug:slug>/pdf-mobile', views.book_pdf, kwargs={'mobile': True}, name="documents_book_pdf_mobile"),
path('chunk_add/<slug:slug>/<slug:chunk>/',
views.chunk_add, name="documents_chunk_add"),
path('chunk_edit/<slug:slug>/<slug:chunk>/',
views.chunk_edit, name="documents_chunk_edit"),
path('book_append/<slug:slug>/',
views.book_append, name="documents_book_append"),
path('chunk_mass_edit',
views.chunk_mass_edit, name='documents_chunk_mass_edit'),
path('image_mass_edit',
views.image_mass_edit, name='documents_image_mass_edit'),
path('track/<slug:slug>/', PublishTrackFeed()),
path('active/', views.active_users_list, name='active_users_list'),
path('active.csv', views.active_users_list, kwargs={'csv': True}, name='active_users_csv'),
path('mark-final/', views.mark_final, name='mark_final'),
path('mark-final-completed/', views.mark_final_completed, name='mark_final_completed'),
]
| fnp/redakcja | src/documents/urls.py | urls.py | py | 3,056 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.views.generic.RedirectView.as_view",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.views.generic.RedirectView",
"line_number": 9,
"usage_type": "name"
},
... |
15989239821 | """
Handlers related to "reports" about students' activity - badge evidence pages
and badge issuing routines.
"""
from controllers.utils import BaseHandler, ReflectiveRequestHandler, XsrfTokenManager
from common import prefetch
import pprint
from models.roles import Roles
import re
from collections import defaultdict
from models import transforms
from modules.badges.badge_models import Badge, BadgeAssertion
from report import UnitReport, PartReport, ExpertBadgeReport
from report import _parts as part_config
from models.models import Student
from models.models import EventEntity
from wiki_models import Annotation
from jinja2 import Markup
import urllib
import wtforms as wtf
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import deferred
import page_templates
from common.querymapper import LoggingMapper
import logging
import wtforms as wtf
def exam_display_choices(exam_info):
"Decide which display choices are available to a student, depending on their score."
choices = [
('blank', '(Blank)'),
]
default = 'blank'
if exam_info['completed']:
choices.append( ('completed', 'Submitted the exam') )
if exam_info['did_pass']:
choices.append( ('passed', 'Passed the exam, with at least (passing score) out of 100%') )
choices.append( ('scored', 'Passed the exam, scoring (your score) out of 100%') )
default = 'passed'
return locals()
class EvidenceHandler(BaseHandler, ReflectiveRequestHandler):
    """
    Handler to show badge evidence - both the 'top' page and the text of each unit's wikifolio.
    Also the display settings for each badge - whether to show the text of the wikifolio
    and how to display the exam.
    """
    # ReflectiveRequestHandler dispatches ?action=... to get_<action>/post_<action>.
    get_actions = ['view', 'settings']
    default_action = 'view'
    post_actions = ['save_settings']

    class SettingsForm(wtf.Form):
        """Per-report privacy/display settings, edited by the badge owner."""
        report_id = wtf.HiddenField()
        units_are_public = wtf.BooleanField(
            "Show my Wikifolio entries for this badge on the evidence page?")
        # Will set choices dynamically (they depend on the student's exam results).
        exam_display = wtf.SelectField(
            "How to display exam scores on the evidence page?")
        review_is_public = wtf.BooleanField(
            """Show the instructor's review of the paper on the evidence page? Only
            relevant for the term paper.""")

    def get_settings(self):
        """Render the settings form for one PartReport (?id=...); owner-only."""
        user = self.personalize_page_and_get_enrolled()
        if not user:
            return  # personalize_page_and_get_enrolled already produced a response
        try:
            report = PartReport.get_by_id(int(self.request.GET.get('id', -1)))
        except ValueError:
            report = None
        if not report:
            self.abort(404, "That evidence report was not found.")
        if not self.can_edit(user, report):
            self.abort(403)
        form = self.SettingsForm(
            report_id=report.key().id(),
            units_are_public=report.units_are_public,
            exam_display=report.exam_display,
            review_is_public=report.review_is_public)
        #if report.part != 4:
        #del form.review_is_public
        # Fallback choices used when the report has no recorded exam scores.
        display_field_params = {
            'choices': [('blank', '(Blank)')],
            'default': 'blank'
        }
        if report.assessment_scores:
            if len(report.assessment_scores) > 1:
                logging.warning("Evidence page settings assuming there's just one exam per part, but there is more than one")
            display_field_params = exam_display_choices(
                report.assessment_scores[0])
        form.exam_display.choices = display_field_params['choices']
        form.exam_display.default = display_field_params['default']
        self.template_value['report'] = report
        self.template_value['form'] = form
        self.template_value['xsrf_token'] = XsrfTokenManager.create_xsrf_token('save_settings')
        self.template_value['navbar'] = {}
        self.template_value['action_url'] = '/badges/evidence?action=save_settings'
        self.template_value['badge_name'] = report._config['name']
        self.render('wf_evidence_settings.html')

    def can_edit(self, user, report):
        """Only the report's own student may edit its display settings."""
        #Roles.is_course_admin(self.app_context)
        return report.student_email == user.key().name()

    def post_save_settings(self):
        """Validate and persist the settings form; records an EventEntity on success."""
        user = self.personalize_page_and_get_enrolled()
        if not user:
            return
        form = self.SettingsForm(self.request.POST)
        try:
            report = PartReport.get_by_id(int(form.report_id.data))
        except ValueError:
            report = None
        if not report:
            self.abort(404, "That evidence report was not found.")
        if not self.can_edit(user, report):
            self.abort(403, "You can't edit that user's report.")
        if report.assessment_scores:
            # Re-derive the legal choices from the stored score so a tampered
            # POST cannot select a display mode the student has not earned.
            display_field_params = exam_display_choices(
                report.assessment_scores[0])
            form.exam_display.choices = display_field_params['choices']
            form.exam_display.default = display_field_params['default']
        else:
            # No exam scores: nothing to choose, so drop the field entirely.
            del form.exam_display
        if not form.validate():
            self.redirect('/')
            return
        report.units_are_public = form.units_are_public.data
        report.review_is_public = form.review_is_public.data
        if report.assessment_scores:
            report.exam_display = form.exam_display.data
        report.put()
        EventEntity.record(
            'set-evidence-settings',
            users.get_current_user(),
            transforms.dumps({
                'part': report.part,
                'slug': report.slug,
                'review_is_public': report.review_is_public,
                'public': report.units_are_public,
                'exam_display': report.exam_display,
                'email': user.key().name()
            }))
        self.template_value['navbar'] = {}
        self.template_value['content'] = '''<div class="gcb-aside">OK, saved settings.<br>
<a href="/student/home">Back to your account page...</a></div>'''
        self.render('bare.html')

    def head(self):
        """HEAD support: 404 for an unknown report id, otherwise 200 (via abort)."""
        try:
            report = PartReport.get_by_id(int(self.request.GET.get('id', -1)))
        except ValueError:
            report = None
        if not report:
            self.abort(404)
        # abort(200) short-circuits the handler with an empty OK response.
        self.abort(200)

    def get_view(self):
        """Public evidence view: the badge 'top' page, or one unit's wikifolio."""
        try:
            report = PartReport.get_by_id(int(self.request.GET.get('id', -1)))
        except ValueError:
            report = None
        if not report:
            self.abort(404)
        if not report.exam_display:
            # Report saved before a display preference existed: fall back to
            # the default this student would have been offered (not persisted).
            if report.assessment_scores:
                display_info = exam_display_choices(report.assessment_scores[0])
                report.exam_display = display_info['default']
            else:
                report.exam_display = 'blank'
        self.report = report
        self.template_value['inline_save'] = lambda: ''
        self.template_value['navbar'] = {}
        self.template_value['author'] = self.report.student
        self.template_value['review_is_public'] = self.report.review_is_public
        if report.units_are_public:
            self.template_value['unit_link'] = self._unit_link
        else:
            self.template_value['unit_link'] = None
            self.template_value['no_unit_links'] = True
        self.template_value['unit_title'] = self._unit_title
        # Only honor the ?unit= parameter when the student has made units public.
        self.unit_num = None
        try:
            if report.units_are_public:
                self.unit_num = int(self.request.GET.get('unit', ''))
        except ValueError:
            self.unit_num = None
        if self.unit_num:
            self.unit = self.report.get_unit(self.unit_num)
            if self.unit:
                self.render_unit()
                return
            else:
                logging.warning('Could not find the right unit %d for PartReport %s',
                        self.unit_num, self.report.key())
        self.render_top()

    def _unit_link(self, unit):
        """Build a link back to this view for a specific unit number."""
        return self.request.path + "?" + urllib.urlencode({
            'id': self.request.GET['id'],
            'unit': unit,
            })

    def _unit_title(self, unit):
        """Unit title with any parenthesized suffix stripped off."""
        unit_obj = self.find_unit_by_id(unit)
        title = unit_obj.title
        title = re.sub(r'\(.*?\)', '', title)
        return title.strip()

    def render_unit(self):
        """Render one unit's wikifolio fields inside the evidence layout."""
        fields = {
                k: Markup(v) for k,v in self.unit.wiki_fields.iteritems()}
        # Reflections are always private; never shown on evidence pages.
        fields['reflection'] = Markup('<p><i>Removed from this view for peers\' privacy.<br>--BOOC Instructors and Tech Staff</i></p>')
        self.template_value['fields'] = fields
        self.template_value['unit'] = self.find_unit_by_id(self.unit_num)
        self.template_value['report'] = self.unit
        self.template_value['badge_slug'] = self.report.badge.key().name()
        # THIS is kinda magic - we render a page like wf_temp_u1.html, but have
        # it inherit from wf_evidence.html rather than wf_page.html. This
        # removes the comment section, for instance.
        self.template_value['layout_template'] = 'wf_evidence.html'
        self.template_value['review'] = Annotation.reviews(whose=self.report.student, unit=self.unit_num).get()
        self.render(page_templates.templates[self.unit_num])

    def render_top(self):
        """Render the badge-level evidence summary page."""
        self.template_value['part'] = self.report
        self.render('wf_evidence_top.html')
class ExpertEvidenceHandler(BaseHandler, ReflectiveRequestHandler):
    """
    Handler for the "expert" badge - issued at the end of the course.
    This badge has links to all the previous badges, rather than links
    to the internal units.
    """
    get_actions = ['view', 'settings']
    default_action = 'view'
    post_actions = ['save_settings']

    def can_edit(self, user, report):
        """Only the report's own student may edit its settings."""
        return report.student_email == user.key().name()

    class SettingsForm(wtf.Form):
        """Display settings for the expert badge (exam display only)."""
        report_id = wtf.HiddenField()
        # Will set choices dynamically.
        exam_display = wtf.SelectField(
            "How to display exam scores on the evidence page?")

    def get_settings(self):
        """Render the settings form for one ExpertBadgeReport (?id=...); owner-only."""
        user = self.personalize_page_and_get_enrolled()
        if not user:
            return
        try:
            report = ExpertBadgeReport.get_by_id(int(self.request.GET.get('id', -1)))
        except ValueError:
            report = None
        if not report:
            self.abort(404, "That evidence report was not found.")
        if not self.can_edit(user, report):
            self.abort(403)
        form = self.SettingsForm(
            report_id=report.key().id(),
            exam_display=report.exam_display)
        # NOTE(review): this fallback dict is immediately overwritten below,
        # so it is effectively dead code; kept as-is to preserve behavior.
        display_field_params = {
            'choices': [('blank', '(Blank)')],
            'default': 'blank'
        }
        display_field_params = exam_display_choices(
            report.final_exam_score)
        form.exam_display.choices = display_field_params['choices']
        form.exam_display.default = display_field_params['default']
        self.template_value['report'] = report
        self.template_value['form'] = form
        self.template_value['xsrf_token'] = XsrfTokenManager.create_xsrf_token('save_settings')
        self.template_value['navbar'] = {}
        self.template_value['badge_name'] = "Assessment Expert Badge"
        self.template_value['action_url'] = '/badges/expert_evidence?action=save_settings'
        self.render('wf_evidence_settings.html')

    def post_save_settings(self):
        """Validate and persist the exam-display setting; records an EventEntity."""
        user = self.personalize_page_and_get_enrolled()
        if not user:
            return
        form = self.SettingsForm(self.request.POST)
        try:
            report = ExpertBadgeReport.get_by_id(int(form.report_id.data))
        except ValueError:
            report = None
        if not report:
            self.abort(404, "That evidence report was not found.")
        if not self.can_edit(user, report):
            self.abort(403, "You can't edit that user's report.")
        # Re-derive the legal choices from the actual score before validating,
        # so a tampered POST cannot select an unearned display mode.
        display_field_params = exam_display_choices(
            report.final_exam_score)
        form.exam_display.choices = display_field_params['choices']
        form.exam_display.default = display_field_params['default']
        if not form.validate():
            self.redirect('/')
            return
        report.exam_display = form.exam_display.data
        report.put()
        EventEntity.record(
            'set-evidence-settings',
            users.get_current_user(),
            transforms.dumps({
                'slug': report.slug,
                'exam_display': report.exam_display,
                'email': user.key().name()
            }))
        self.template_value['navbar'] = {}
        self.template_value['content'] = '''<div class="gcb-aside">OK, saved settings.<br>
<a href="/student/home">Back to your account page...</a></div>'''
        self.render('bare.html')

    def get_view(self):
        """Public expert-badge evidence page listing the student's other badges."""
        try:
            report = ExpertBadgeReport.get_by_id(int(self.request.GET.get('id', -1)))
        except ValueError:
            report = None
        if not report:
            self.abort(404)
        # All non-revoked badge assertions for this student.
        all_assertions_q = BadgeAssertion.all()
        all_assertions_q.filter('recipient', report.student_key)
        all_assertions_q.filter('revoked', False)
        all_assertions = all_assertions_q.run(limit=10)
        if not report.exam_display:
            # Older reports without a saved preference: use the default choice.
            if report.final_exam_score:
                display_info = exam_display_choices(report.final_exam_score)
                report.exam_display = display_info['default']
            else:
                report.exam_display = 'blank'
        self.template_value['report'] = report
        self.template_value['navbar'] = {}
        self.template_value['author'] = report.student
        # TODO: links to the other badges
        all_assertions = prefetch.prefetch_refprops(
                list(all_assertions), BadgeAssertion.badge)
        # Keep one "best" assertion per course part; a dotted slug (e.g.
        # practices.expertise) overwrites the plain completion badge.
        course_parts = {'practices': None, 'principles': None, 'policies': None}
        for ass in all_assertions:
            name_parts = ass.badge_name.split('.')
            if name_parts[0] in course_parts:
                if (not course_parts[name_parts[0]]) or (len(name_parts) > 1):
                    course_parts[name_parts[0]] = ass
        self.template_value['part_assertions'] = course_parts
        self.render('wf_expert_evidence.html')
def combine_badge_slug_parts(*parts):
    """Join badge slug components (e.g. 'practices', 'expertise') with dots."""
    separator = '.'
    return separator.join(parts)
def choose_badge_version(slug, completion):
    """Pick the base badge slug (plain vs. 'expertise') from a completion dict.

    Leader vs. normal is decided separately, later in the pipeline.
    Returns None when the units are not complete.
    """
    if not completion['units']:
        return None
    if completion['assessments']:
        # Units AND assessments complete: the expertise variant of the badge.
        return slug + '.expertise'
    return slug
class SingleIssueHandler(BaseHandler):
    """
    Decide whether to issue a badge to one student, and optionally really issue it.
    """
    class Form(wtf.Form):
        """Admin form describing which student/part to reconsider and how."""
        part = wtf.IntegerField('Which part of the course to issue a badge for? (1,2,3)')
        really_save = wtf.BooleanField('Really issue the badge and freeze the scores?', default=False)
        re_run = wtf.BooleanField('Re-run all unit and part reports? Will delete old ones if you also choose Really freeze above.', default=False)
        email = wtf.StringField('The email of the student to reconsider')

    def get(self):
        """Admin-only: show the single-student reconsideration form."""
        if not users.is_current_user_admin():
            self.abort(403)
        form = self.Form()
        self.template_value['form'] = form
        self.template_value['xsrf_token'] = XsrfTokenManager.create_xsrf_token('post')
        self.template_value['action_url'] = self.request.url
        self.template_value['title'] = 'Reconsider a single participant'
        self.render('badge_bulk_issue.html')

    def post(self):
        """Admin-only: (re-)run one student's part report and optionally issue the badge."""
        if not users.is_current_user_admin():
            self.abort(403)
        if not XsrfTokenManager.is_xsrf_token_valid(self.request.POST.get('xsrf_token', ''), 'post'):
            self.abort(403, 'XSRF token failed.')
        form = self.Form(self.request.POST)
        if not form.validate():
            self.response.write('<br>'.join(form.errors))
            return
        student = Student.get_by_key_name(form.email.data)
        report = PartReport.on(student, course=self.get_course(),
                part=form.part.data,
                force_re_run=form.re_run.data,
                put=form.really_save.data)
        badge_version = choose_badge_version(part_config[form.part.data]['slug'], report.completion())
        if badge_version:
            badge = Badge.get_by_key_name(badge_version)
            if not badge:
                self.response.write(' There is no badge with key_name %s (so I cannot issue a badge)' % badge_version)
            if form.really_save.data and badge:
                # put=False so the evidence URL can be attached before saving.
                b = Badge.issue(badge, student, put=False)
                b.evidence = self.request.host_url + '/badges/evidence?id=%d' % report.key().id()
                b.put()
                self.response.write('Issued badge %s!' % badge_version)
            else:
                self.response.write('Would have issued badge %s!' % badge_version)
        else:
            self.response.write('Not issuing because at least one of: %s' % (', '.join(report.incomplete_reasons)))
class BulkIssueMapper(LoggingMapper):
    """
    Issue completion badges to many folks! This includes 'expertise' and 'knowledge' badges.
    """
    KIND = Student
    FILTERS = [('is_participant', True)]

    def __init__(self, really, course, part, host_url, re_run):
        # really: actually write badges; otherwise a logged dry run.
        # re_run: recompute unit/part reports instead of using stored ones.
        super(BulkIssueMapper, self).__init__()
        self.really = really
        self.course = course
        self.part = part
        self.host_url = host_url
        self.re_run = re_run
        self.num_issued = 0

    def map(self, student):
        """Evaluate one student; returns ([entities to put], [entities to delete])."""
        self.log.append('########## Student %s ##########' % student.key().name())
        report = PartReport.on(student, course=self.course, part=self.part,
                force_re_run=self.re_run, put=self.really)
        completion = report.completion()
        self.log.append(' Passed? %s.' % str(completion))
        badge_version = choose_badge_version(report.slug, completion)
        if badge_version:
            badge = Badge.get_by_key_name(badge_version)
            if not badge:
                self.log.append(' There is no badge with key_name %s (so I cannot issue a badge)' % badge_version)
            # NOTE(review): incremented even on dry runs and when the badge
            # entity is missing, so the final "Issued %d" total really means
            # "eligible students" -- confirm before relying on it.
            self.num_issued += 1
            if self.really and badge:
                b = Badge.issue(badge, student, put=False) # need to include evidence URL here somehow
                b.evidence = self.host_url + '/badges/evidence?id=%d' % report.key().id()
                b.put()
                self.log.append(' Issued badge, name=%s, assertion id=%d' % (
                    badge_version, b.key().id()))
                return ([b], [])
            else:
                self.log.append(' WOULD issue badge.')
        else:
            self.log.append('Not issuing because at least one of: %s' % (', '.join(report.incomplete_reasons)))
            ##TODO: this is not comprehensive: they could still have .expertise or .leader versions.
            #if self.really and badge:
            #Badge.ensure_not_issued(badge, student)
        return ([], [])

    def finish(self):
        """Write the final tally and flush buffered log lines."""
        self.log.append('DONE. Issued %d badges total.' % self.num_issued)
        self._batch_write()
def choose_expert_badge_version(completion):
    """Pick the expert badge slug from an ExpertBadgeReport completion dict.

    Returns None unless all part badges are earned; upgrades to the
    'expert.expertise' variant when the assessments are done and every
    earned part badge is itself an expertise variant.
    """
    if not completion['badges']:
        return None
    earned_all_expertise = all(
        'expertise' in part_slug for part_slug in completion['badge_slugs'])
    if completion['assessments'] and earned_all_expertise:
        return 'expert.expertise'
    return 'expert'
class BulkExpertBadgeIssueMapper(LoggingMapper):
    """
    Issue end-of-course badges to many folks.
    """
    KIND = Student
    FILTERS = [('is_participant', True)]

    def __init__(self, really, course, unused_part_num, host_url, force_re_run):
        # unused_part_num keeps the constructor signature uniform across all
        # issuer_mappers (they are constructed identically in BulkIssuanceHandler).
        LoggingMapper.__init__(self)
        self.really = really
        self.course = course
        self.host_url = host_url
        self.force_re_run = force_re_run
        self.num_issued = 0

    def map(self, student):
        """Evaluate one student for the expert badge; returns ([puts], [deletes])."""
        self.log.append('--------------- Student %s' % student.key().name())
        report = ExpertBadgeReport.on(student, self.course,
                force_re_run=self.force_re_run, put=self.really)
        completion = report.completion()
        self.log.append(' Passed? %s.' % str(completion))
        badge_version = choose_expert_badge_version(completion)
        if badge_version:
            badge = Badge.get_by_key_name(badge_version)
            if not badge:
                self.log.append('no such badge! %s' % badge_version)
            # NOTE(review): incremented even on dry runs and when the badge
            # entity is missing -- the final total means "eligible", not
            # literally "issued".
            self.num_issued += 1
            if self.really and badge:
                b = Badge.issue(badge, student, put=False) # need to include evidence URL here somehow
                b.evidence = self.host_url + '/badges/expert_evidence?id=%d' % report.key().id()
                b.put()
                self.log.append(' Issued badge, name=%s, assertion id=%d' % (
                    badge.key().name(), b.key().id()))
                return ([b], [])
            else:
                self.log.append(' WOULD issue badge %s' % badge_version)
        else:
            # NOTE(review): incomplete_reasons is *called* here but read as a
            # plain attribute in BulkIssueMapper/SingleIssueHandler -- one of
            # the two usages is likely wrong; verify against the report class.
            self.log.append('Incomplete, we are missing: %s' % (', '.join(report.incomplete_reasons())))
        return ([], [])

    def finish(self):
        """Write the final tally and flush buffered log lines."""
        self.log.append('DONE. Issued %d badges total.' % self.num_issued)
        self._batch_write()
# Sentinel marking "no student seen yet" in the per-group leaderboards.
NOBODY = object()


def default_dict_entry():
    """Seed value for best_by_group: a lone placeholder, score below any real count."""
    initial_winners = [NOBODY]
    return (initial_winners, -1)
class BulkLeaderIssueMapper(LoggingMapper):
    """
    Issue leader badges. In this one, the badges aren't issued during the main phase
    of the 'mapper' loop over all the students - instead, the badges are issued
    in the "finish" phase.
    """
    KIND = Student
    FILTERS = [('is_participant', True)]

    def __init__(self, really, course, part, host_url, re_run):
        # really: actually write badges; otherwise a logged dry run.
        # re_run: recompute part reports instead of using stored ones.
        super(BulkLeaderIssueMapper, self).__init__()
        self.really = really
        self.course = course
        self.part = part
        self.host_url = host_url
        self.re_run = re_run
        # group_id -> ([emails tied for best], promotion count)
        self.best_by_group = defaultdict(default_dict_entry)
        self.leader_badge_keys = (
            part_config[part]['slug'] + '.expertise.leader',
            part_config[part]['slug'] + '.leader',
        )
        leader_badge_e = Badge.get_by_key_name(self.leader_badge_keys[0])
        leader_badge = Badge.get_by_key_name(self.leader_badge_keys[1])
        # BUG FIX: the original condition was `not leader_badge and leader_badge_e`,
        # which -- because `not` binds tighter than `and` -- only fired when the
        # plain .leader badge was missing while .expertise.leader existed. The
        # warning text ("Missing a badge with one of") and finish(), which may
        # need either badge, both require complaining when EITHER is absent.
        if not (leader_badge and leader_badge_e):
            logging.warning('Missing a badge with one of: %s', str(self.leader_badge_keys))
            self.log.append('Missing a badge with one of: %s' % str(self.leader_badge_keys))
            if self.really:
                raise ValueError('Missing a badge with one of: %s' % str(self.leader_badge_keys))

    def map(self, student):
        """Tally one student's promotions; badge issuing happens in finish()."""
        self.log.append('######### Student %s ##########' % student.key().name())
        part_report = PartReport.on(student, course=self.course, part=self.part,
                force_re_run=self.re_run)
        completion = part_report.completion()
        base_badge_version = choose_badge_version(part_report.slug, completion)
        if not base_badge_version:
            self.log.append(' Skipping, since not complete.')
            return ([], [])
        self.log.append(' Part is complete, considering units.')
        unit_reports = part_report.unit_reports
        promotions = 0
        for ur in unit_reports:
            promotions += ur.promotions
        best_so_far = self.best_by_group[student.group_id][1]
        if promotions > best_so_far:
            # New sole leader for this group.
            self.best_by_group[student.group_id] = ([student.key().name()], promotions)
            self.log.append(' They have current best for group %s, with %d.' % (
                student.group_id, promotions))
        elif promotions == best_so_far:
            # Ties share the lead; every tied student gets the badge.
            self.best_by_group[student.group_id][0].append(student.key().name())
            self.log.append(' They ARE TIED FOR CURRENT BEST for group %s, with %d.' % (
                student.group_id, promotions))
        return ([], [])

    def finish(self):
        """Issue a leader badge to each group's best student(s), then flush logs."""
        if self.really:
            leader_badges = dict((key, Badge.get_by_key_name(key)) for key in self.leader_badge_keys)
        for group_id, (emails, count) in self.best_by_group.iteritems():
            self.log.append('Considering group %s, best score is %d' % (
                str(group_id), count))
            if count < 1:
                self.log.append('... Best score is too low, skipping.')
                continue
            if self.really:
                for email in emails:
                    report = PartReport.on(
                        db.Key.from_path(Student.kind(), email),
                        course=self.course, part=self.part,
                        force_re_run=self.re_run)
                    base_badge_version = choose_badge_version(report.slug, report.completion())
                    leader_badge_version = combine_badge_slug_parts(base_badge_version, 'leader')
                    leader_badge = leader_badges[leader_badge_version]
                    assert leader_badge, "Should be a badge called %s" % leader_badge_version
                    b = Badge.issue(leader_badge,
                        db.Key.from_path(Student.kind(), email), put=False)
                    b.evidence = self.host_url + '/badges/evidence?id=%d' % report.key().id()
                    b.put()
                    self.log.append('... ISSUED leader badge %s to %s, id=%d' % (
                        leader_badge_version, email, b.key().id()))
            else:
                self.log.append('... WOULD ISSUE leader badge to %s' % ' '.join(emails))
        self._batch_write()
class BulkExpertLeaderIssueMapper(LoggingMapper):
    """
    Expert badges go to people who pass all the parts of the course,
    and do the final exam, and do the survey. Leaders are calculated
    based on who gets the most Exemplaries in the entire course.
    """
    # This mapper iterates over ExpertBadgeReport entities, not Students.
    KIND = ExpertBadgeReport

    def __init__(self, really, course, part, host_url, re_run):
        # 'part' is unused here; kept so every mapper in issuer_mappers
        # shares one constructor signature (see BulkIssuanceHandler.post_start).
        super(BulkExpertLeaderIssueMapper, self).__init__()
        self.really = really
        self.course = course
        self.host_url = host_url
        self.re_run = re_run
        # group_id -> ([emails tied for best], exemplary count)
        self.best_by_group = defaultdict(default_dict_entry)

    def map(self, report):
        """Track the highest exemplary count per group (completed students only)."""
        student = report.student
        self.log.append('---------------- Student %s' % student.key().name())
        if self.re_run:
            report._run(self.course)
        best_so_far = self.best_by_group[student.group_id][1]
        promotions = report.exemplary_count
        completion = report.completion()
        if promotions > best_so_far:
            self.log.append('New best for group %s, %d promotions' % (
                student.group_id, promotions))
            # Only students whose badges are complete can take the lead.
            if completion['badges']:
                self.best_by_group[student.group_id] = ([student.key().name()], promotions)
            else:
                self.log.append('BUT, Skipping, not complete.')
        elif promotions == best_so_far:
            self.log.append('TIED best for group %s, %d promotions' % (
                student.group_id, promotions))
            if completion['badges']:
                self.best_by_group[student.group_id][0].append(student.key().name())
            else:
                self.log.append('BUT, Skipping, not complete.')
        return ([], [])

    def finish(self):
        """Issue expert leader badges to each group's winner(s), then flush logs."""
        leader_badge_slugs = ('expert.leader', 'expert.expertise.leader')
        leader_badges = dict((k, Badge.get_by_key_name(k)) for k in leader_badge_slugs)
        for group_id, (emails, count) in self.best_by_group.iteritems():
            self.log.append('Considering group %s, best score is %d' % (
                str(group_id), count))
            if count < 1:
                self.log.append('... Best score is too low, skipping.')
                continue
            for email in emails:
                report = ExpertBadgeReport.on(
                    db.Key.from_path(Student.kind(), email),
                    course=self.course,
                    force_re_run=self.re_run)
                base_badge_version = choose_expert_badge_version(report.completion())
                if not base_badge_version:
                    raise AssertionError('They should have passed, wat?')
                leader_badge_slug = base_badge_version + '.leader'
                if self.really:
                    leader_badge = leader_badges[leader_badge_slug]
                    b = Badge.issue(leader_badge,
                        db.Key.from_path(Student.kind(), email), put=False)
                    b.evidence = self.host_url + '/badges/expert_evidence?id=%d' % report.key().id()
                    b.put()
                    self.log.append('... ISSUED %s to %s, id=%d' % (
                        leader_badge_slug, email, b.key().id()))
                else:
                    self.log.append('... WOULD ISSUE %s to %s' % (
                        leader_badge_slug, email))
        self._batch_write()
# Maps the bulk-issuance form's radio choice to the mapper class implementing
# it; all four classes accept the same 5-argument constructor.
issuer_mappers = {
    'completion': BulkIssueMapper,
    'leader': BulkLeaderIssueMapper,
    'expert': BulkExpertBadgeIssueMapper,
    'expert-leader': BulkExpertLeaderIssueMapper,
}
class BulkIssuanceHandler(BaseHandler, ReflectiveRequestHandler):
    """
    Request handler for kicking off all these different kinds of badge issuing mappers.
    """
    default_action = 'prep'
    get_actions = ['prep', 'watch']
    post_actions = ['start']
    TITLE = 'Bulk Issue Badges'

    class IssueForm(wtf.Form):
        """Admin form selecting which issuer to run, and how."""
        part = wtf.IntegerField('Which part of the course to issue a badge for? (1,2,3)')
        really_save = wtf.BooleanField('Really issue the badges and freeze the scores?', default=False)
        leader_or_completion = wtf.RadioField('Do you want to issue completion badges, or leader badges?',
                choices=[(k, k) for k in issuer_mappers.keys()])
        force_re_run_reports = wtf.BooleanField('Re-run all unit and part reports? Will delete old ones if you also choose Really freeze above.', default=False)
        # StringField for consistency with SingleIssueHandler.Form (TextField
        # is the deprecated wtforms alias of StringField).
        one_email = wtf.StringField('Only consider one student? Enter their e-mail here.',
                validators=[wtf.validators.Optional()])

    def _action_url(self, action, **kwargs):
        """Build a URL for this handler with ?action=... plus extra params."""
        params = dict(kwargs)
        params['action'] = action
        return '?'.join((
            self.request.path,
            urllib.urlencode(params)))

    def get_prep(self):
        """Admin-only: show the empty bulk-issue form."""
        if not users.is_current_user_admin():
            self.abort(403)
        self.render_form(self.IssueForm())

    def render_form(self, form):
        """Render the (possibly invalid) form with a fresh XSRF token."""
        self.template_value['form'] = form
        self.template_value['xsrf_token'] = self.create_xsrf_token('start')
        self.template_value['action_url'] = self._action_url('start')
        self.template_value['title'] = self.TITLE
        self.render('badge_bulk_issue.html')

    def post_start(self):
        """Admin-only: validate the form and launch the chosen mapper as a deferred task."""
        if not users.is_current_user_admin():
            self.abort(403)
        form = self.IssueForm(self.request.POST)
        if not form.validate():
            self.render_form(form)
            return
        really = form.really_save.data
        part_num = form.part.data
        issuer = issuer_mappers[form.leader_or_completion.data]
        job = issuer(really, self.get_course(), part_num, self.request.host_url, form.force_re_run_reports.data)
        if form.one_email.data:
            # BUG FIX: the original used job.FILTERS.append(...), which mutated
            # the mapper CLASS attribute, so the one-student key filter leaked
            # into every later bulk run in the same process. Rebind an
            # instance-level copy instead (shadows the class attribute).
            job.FILTERS = job.FILTERS + [
                ('__key__', db.Key.from_path('Student', form.one_email.data))]
        logging.debug('Filters for issuing: %s', repr(job.FILTERS))
        job_id = job.job_id
        deferred.defer(job.run, batch_size=50)
        self.redirect(self._action_url('watch', job_id=job_id))

    def get_watch(self):
        """Admin-only: show the log lines written so far for ?job_id=..."""
        if not users.is_current_user_admin():
            self.abort(403)
        job_id = self.request.GET.get('job_id', None)
        if not job_id:
            self.abort(404)
        messages = BulkIssueMapper.logs_for_job(job_id)
        self.template_value['title'] = self.TITLE
        self.template_value['problems'] = []
        self.template_value['log'] = messages
        self.render('badge_bulk_issue_done.html')
class DammitMapper(LoggingMapper):
    """Sweep every stored PartReport and recompute its assessment grades."""

    KIND = PartReport

    def __init__(self, course):
        super(DammitMapper, self).__init__()
        self.course = course

    def map(self, report):
        """Re-run one report's assessments, logging whether scores existed before/after."""
        self.log.append("Working on report for %s, part %d" % (report.student_email, report.part))
        scores_present_before = bool(report.assessment_scores)
        report._run_assessments(self.course)
        scores_present_after = bool(report.assessment_scores)
        self.log.append("had before? %s. has after? %s" % (scores_present_before, scores_present_after))
        return ([report], [])
class DammitHandler(BaseHandler, ReflectiveRequestHandler):
    """Admin handler that kicks off DammitMapper and watches its progress."""
    get_actions = ['start', 'watch']
    default_action = 'watch'

    def _action_url(self, action, **kwargs):
        """Build a URL for this handler with ?action=... plus extra params."""
        params = dict(kwargs)
        params['action'] = action
        return '?'.join((
            self.request.path,
            urllib.urlencode(params)))

    def get_start(self):
        """Admin-only: launch the re-grading mapper as a deferred task."""
        if not users.is_current_user_admin():
            self.abort(403)
        course = self.get_course()
        job = DammitMapper(course)
        job_id = job.job_id
        deferred.defer(job.run, batch_size=50)
        self.redirect(self._action_url('watch', job_id=job_id))

    def get_watch(self):
        """Admin-only: display the buffered log lines for ?job_id=..."""
        if not users.is_current_user_admin():
            self.abort(403)
        job_id = self.request.GET.get('job_id', None)
        if not job_id:
            self.abort(404)
        messages = BulkIssueMapper.logs_for_job(job_id)
        self.template_value['title'] = "GRRRRRR"
        self.template_value['problems'] = []
        self.template_value['log'] = messages
        self.render('badge_bulk_issue_done.html')
| twiffy/eabooc | coursebuilder/modules/wikifolios/report_handlers.py | report_handlers.py | py | 34,512 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "controllers.utils.BaseHandler",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "controllers.utils.ReflectiveRequestHandler",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "wtforms.Form",
"line_number": 60,
"usage_type": "attribute"
},... |
16472183531 | import collections
import itertools
import json
import logging
import os
import pickle
import random
import numpy as np
from sklearn.model_selection import train_test_split
import config
from utils.utils import JsonlReader
# Module-wide logging configuration and logger.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)

# Seed both numpy and the stdlib RNG so the sampling/shuffling below is reproducible.
np.random.seed(config.SEED)
random.seed(config.SEED)
def get_groups_texts_from_umls_vocab(relation_text_to_groups, cui_to_entity_texts, fname_reltext_all_combos,
                                     load_existing=True):
    """Return every "src\\ttgt" text pair realizable from the UMLS relation groups.

    The result is cached on disk at ``fname_reltext_all_combos``; pass
    ``load_existing=False`` to force regeneration from the vocab mappings.
    """
    # Fast path: reuse the pickled combination set when present and allowed.
    if load_existing and os.path.isfile(fname_reltext_all_combos):
        logger.info('Found existing relation text combination file -- loading relations.')
        with open(fname_reltext_all_combos, 'rb') as f:
            return pickle.load(f)
    logger.info("Not loading relation text combination file. Generating new set:")
    # Union of all related-CUI pairs across every relation.
    groups = set()
    for rel_groups in relation_text_to_groups.values():
        groups.update(rel_groups)
    # Expand each CUI pair into all textual (src, tgt) combinations.
    logger.info("Collecting all possible textual combinations of CUI groups ...")
    groups_texts = set()
    total = len(groups)
    for idx, (cui_src, cui_tgt) in enumerate(groups):
        if idx % 100000 == 0 and idx != 0:
            logger.info("Parsed {} groups of {}".format(idx, total))
        src_texts = cui_to_entity_texts[cui_src]
        tgt_texts = cui_to_entity_texts[cui_tgt]
        for src_text in src_texts:
            groups_texts.update(
                "\t".join((src_text, tgt_text)) for tgt_text in tgt_texts)
    # NOTE: this consumes a LOT of memory (~18 GB)! (clearing up memory takes around half an hour)
    logger.info("Collected {} unique tuples of (src_entity_text, tgt_entity_text) type.".format(len(groups_texts)))
    # Cache the freshly built set for subsequent runs.
    with open(fname_reltext_all_combos, 'wb') as f:
        logger.info('Saving relation text combination file.')
        pickle.dump(groups_texts, f)
    return groups_texts
def align_groups_to_sentences(groups_texts, jsonl_fname, output_fname):
    """Match KG entity pairs to tagged sentences and write aligned examples.

    For each sentence in ``jsonl_fname`` (one JSON object per line, carrying a
    'matches' dict of entity texts found in the sentence), finds the entity
    pairs present in the KG (``groups_texts``: strings of the form
    "src\\ttgt") and builds hard negatives by corrupting one side of each
    positive pair with a co-occurring entity such that the corrupted pair is
    not in the KG. Sentences with at least one positive AND one negative pair
    are written to ``output_fname`` with a "groups" field added.

    Returns:
        (pos_groups, neg_groups): sets of "src\\ttgt" strings collected as
        positives / retained as negatives across all sentences.
    """
    jr = JsonlReader(jsonl_fname)
    logger.info("Aligning texts (sentences) to groups ...")
    pos_groups = set()
    neg_groups = set()
    # BUG FIX: the output file was opened without ever being closed (leak, and
    # buffered output could be lost on error); use a context manager instead.
    with open(output_fname, "w", encoding="utf-8", errors="ignore") as wf:
        for idx, jdata in enumerate(jr):
            if idx % 1000000 == 0 and idx != 0:
                logger.info("Processed {} tagged sentences".format(idx))
            # Ordered pairs of entities matched in this sentence
            matched_perms = set(itertools.permutations(jdata['matches'].keys(), 2))
            # Left-hand-side (lhs) <==> right-hand-side (rhs) co-occurrence maps
            lhs2rhs = collections.defaultdict(list)
            rhs2lhs = collections.defaultdict(list)
            for group in matched_perms:
                src, tgt = group
                lhs2rhs[src].append(tgt)
                rhs2lhs[tgt].append(src)
            # `groups_texts` holds every pair that exists in the UMLS KG for
            # some relation, so intersecting with the sentence's permuted
            # matches yields pairs that are both in the KG and co-mentioned.
            matched_perms = {"\t".join(m) for m in matched_perms}
            common = groups_texts.intersection(matched_perms)
            # Sentence-level noise: for each positive pair, with prob. 1/2
            # corrupt the left or right entity using co-occurring entities;
            # the corrupted pair must not itself be a KG pair (open-world
            # style hard negatives sharing the same text evidence).
            output = {"p": set(), "n": set()}
            for group in common:
                pos_groups.add(group)
                src, tgt = group.split("\t")
                output["p"].add(group)
                # Choose left or right side to corrupt
                lhs_or_rhs = random.choice([0, 1])
                if lhs_or_rhs == 0:
                    for corrupt_tgt in lhs2rhs[src]:
                        negative_group = "{}\t{}".format(src, corrupt_tgt)
                        if negative_group not in common:
                            output["n"].add(negative_group)
                else:
                    for corrupt_src in rhs2lhs[tgt]:
                        negative_group = "{}\t{}".format(corrupt_src, tgt)
                        if negative_group not in common:
                            output["n"].add(negative_group)
            if output["p"] and output["n"]:
                no = list(output["n"])
                random.shuffle(no)
                # Keep number of negative groups at most as positives
                no = no[:len(output["p"])]
                output["n"] = no
                output["p"] = list(output["p"])
                neg_groups.update(no)
                jdata["groups"] = output
                wf.write(json.dumps(jdata) + "\n")
    # There will be lot of negative groups, so we will remove them next!
    logger.info("Collected {} positive and {} negative groups.".format(len(pos_groups), len(neg_groups)))
    return pos_groups, neg_groups
def pruned_triples(cui_to_entity_texts, relation_text_to_groups, pos_groups, neg_groups, min_rel_group=10,
                   max_rel_group=1500):
    """Build the final relation triples after pruning rare / overly frequent relations.

    Args:
        cui_to_entity_texts: dict mapping a CUI to its list of entity surface texts.
        relation_text_to_groups: dict mapping a relation text to its (src CUI, tgt CUI) groups.
        pos_groups: set of tab-separated "src tgt" entity-text groups that have sentence evidence.
        neg_groups: set of tab-separated "src tgt" negative entity-text groups.
        min_rel_group: drop relations with fewer matched group texts than this.
        max_rel_group: drop relations with more matched group texts than this.

    Returns:
        List of (src_text, relation_text, tgt_text) triples, including "NA"
        (no-relation) triples drawn from a shuffled subsample of the negative groups.
    """
    logger.info("Mapping CUI groups to relations ...")
    group_to_relation_texts = collections.defaultdict(list)
    for idx, (relation_text, groups) in enumerate(relation_text_to_groups.items()):
        for group in groups:
            group_to_relation_texts[group].append(relation_text)  # can have multiple rel texts per group
    logger.info("Mapping relations to groups texts ...")
    relation_text_to_groups_texts = collections.defaultdict(set)
    for idx, (group, relation_texts) in enumerate(group_to_relation_texts.items()):
        if idx % 1000000 == 0 and idx != 0:
            logger.info("Mapped from {} groups".format(idx))
        cui_src, cui_tgt = group
        local_groups = set()
        cui_src_texts = cui_to_entity_texts[cui_src]
        cui_tgt_texts = cui_to_entity_texts[cui_tgt]
        # Cartesian product of src surface texts x tgt surface texts for this CUI pair.
        for l1i in cui_src_texts:
            local_groups.update(list(zip([l1i] * len(cui_tgt_texts), cui_tgt_texts)))
        # Keep only text pairs that actually have sentence-level evidence.
        for lg in local_groups:
            if "\t".join(lg) in pos_groups:
                for relation_text in relation_texts:
                    relation_text_to_groups_texts[relation_text].add("\t".join(lg))
    logger.info("No. of relations before pruning: {}".format(len(relation_text_to_groups_texts)))
    # Prune relations based on the group size
    relations_to_del = list()
    for relation_text, groups_texts in relation_text_to_groups_texts.items():
        if (len(groups_texts) < min_rel_group) or (len(groups_texts) > max_rel_group):
            relations_to_del.append(relation_text)
    logger.info("Relations not matching the criterion of min, max group sizes of {} and {}.".format(min_rel_group,
                                                                                                    max_rel_group))
    # Delete relations not meeting min and max counts
    for r in relations_to_del:
        del relation_text_to_groups_texts[r]
    logger.info("Relations deleted: {}".format(relations_to_del))
    logger.info("No. of relations after pruning: {}".format(len(relation_text_to_groups_texts)))
    # Update positive groups: re-derive the surviving positives and their entities.
    new_pos_groups = set()
    entities = set()
    for relation_text, groups_texts in relation_text_to_groups_texts.items():
        for group_text in groups_texts:
            new_pos_groups.add(group_text)
            entities.update(group_text.split("\t"))
    logger.info("Updated no. of positive groups after pruning: {}".format(len(new_pos_groups)))
    logger.info("No. of unique entities: {}".format(len(entities)))
    # Update negative groups
    # 1) We apply the constraint that the negative groups must have positive
    # triples entities only
    new_neg_groups = set()
    for negative_group in neg_groups:
        src, tgt = negative_group.split("\t")
        if (src in entities) and (tgt in entities):
            new_neg_groups.add(negative_group)
    logger.info("[1] Updated no. of negative groups after pruning groups that are not in positive entities: {}".format(
        len(new_neg_groups)))
    # 2) Negative examples are used for NA / Other relation, which is just another class.
    # To avoid training too much on NA relation, we make a simple choice randomly taking
    # the same number of groups as largest group size positive class.
    max_pos_group_size = max([len(v) for v in relation_text_to_groups_texts.values()])
    new_neg_groups = list(new_neg_groups)
    random.shuffle(new_neg_groups)
    # Using 70% of positive groups to form negative groups
    new_neg_groups = new_neg_groups[:int(max_pos_group_size * 0.7)]
    logger.info(
        'Len of new_pos_groups: {}, Len of max_pos_group_size: {}'.format(len(new_pos_groups), max_pos_group_size))
    logger.info(
        "Number of negative groups after taking 70 percent more than positive groups: {}".format(len(new_neg_groups)))
    relation_text_to_groups_texts["NA"] = new_neg_groups
    # Collect triples now
    triples = set()
    for r, groups in relation_text_to_groups_texts.items():
        for group in groups:
            src, tgt = group.split("\t")
            triples.add((src, r, tgt))
    triples = list(triples)
    logger.info(" *** No. of triples (including NA) *** : {}".format(len(triples)))
    return triples
def filter_triples_with_evidence(triples, max_bag_size=32):
    """Attach sentence evidence bags to triples and drop triples without evidence.

    Scans the previously written groups/sentences JSONL file, marks the src
    entity as $src$ and the tgt entity as ^tgt^ inside each matched sentence,
    and collects per-group sentence bags of exactly ``max_bag_size`` sentences
    (downsampled without replacement, or upsampled by repeating sentences).

    Args:
        triples: iterable of (src, relation, tgt) triples.
        max_bag_size: fixed size of every evidence bag.

    Returns:
        (filtered_triples, group_to_data) where group_to_data maps
        "src<TAB>tgt<TAB>0" to {"relations": set, "bag": list of sentences}.
    """
    group_to_relation_texts = collections.defaultdict(set)
    for ei, rj, ek in triples:
        group = "{}\t{}".format(ei, ek)
        group_to_relation_texts[group].add(rj)
    jr = JsonlReader(config.groups_linked_sents_file)
    group_to_data = collections.defaultdict(list)
    candid_groups = set(group_to_relation_texts.keys())
    for idx, jdata in enumerate(jr):
        if idx % 1000000 == 0 and idx != 0:
            logger.info("Processed {} lines for linking to triples".format(idx))
        # Both positive and negative groups of this sentence are candidates.
        common = candid_groups.intersection(jdata["groups"]["p"] + jdata["groups"]["n"])
        if not common:
            continue
        for group in common:
            src, tgt = group.split("\t")
            src_span = jdata["matches"][src]
            tgt_span = jdata["matches"][tgt]
            sent = jdata["sent"]
            # Strip any pre-existing marker characters before inserting ours.
            sent = sent.replace("$", "")
            sent = sent.replace("^", "")
            # src entity mentioned before tgt entity
            if src_span[1] < tgt_span[0]:
                sent = sent[:src_span[0]] + "$" + src + "$" + sent[src_span[1]:tgt_span[0]] + "^" + tgt + "^" + sent[
                    tgt_span[
                        1]:]
                rel_dir = 1
            # tgt entity mentioned before src entity
            elif src_span[0] > tgt_span[1]:
                sent = sent[:tgt_span[0]] + "^" + tgt + "^" + sent[tgt_span[1]:src_span[0]] + "$" + src + "$" + sent[
                    src_span[
                        1]:]
                rel_dir = -1
            # Should not happen, but to be on safe side
            else:
                continue
            if group not in group_to_data:
                group_to_data[group] = collections.defaultdict(list)
            group_to_data[group][rel_dir].append(sent)
    # Adjust bag sizes
    new_group_to_data = dict()
    for group in list(group_to_data.keys()):
        src, tgt = group.split("\t")
        bag = list()
        # Merge sentences from both mention orders (rel_dir 1 and -1).
        for rel_dir in group_to_data[group]:
            bag.extend(group_to_data[group][rel_dir])
        if len(bag) > max_bag_size:
            bag = random.sample(bag, max_bag_size)
        else:
            # Upsample by repeating randomly chosen sentences until the bag is full.
            idxs = list(np.random.choice(list(range(len(bag))), max_bag_size - len(bag)))
            bag = bag + [bag[i] for i in idxs]
        new_group_to_data["\t".join([src, tgt, "0"])] = {
            "relations": group_to_relation_texts[group],
            "bag": bag
        }
    group_to_data = new_group_to_data
    filtered_triples = set()
    for group in group_to_data:
        src, tgt, _ = group.split("\t")
        for relation in group_to_data[group]["relations"]:
            filtered_triples.add((src, relation, tgt))
    return filtered_triples, group_to_data
def remove_overlapping_sents(train_lines, dev_lines, test_lines):
    """Remove train sentences that also appear in the dev or test bags.

    Sentences are compared after stripping the $/^ entity markers, so the same
    underlying sentence with different markings still counts as overlapping.
    Train lines left with an empty bag are dropped; surviving bags are resized
    back to config.bag_size (downsampled or upsampled with repetition).

    Note: mutates the surviving entries of ``train_lines`` in place
    (``line["sentences"]`` is replaced).

    Returns:
        (new_train_lines, new_triples) — the filtered lines and the set of
        (src, relation, tgt) triples they still cover.
    """
    dev_test_sentences = set()
    for line in dev_lines:
        dev_test_sentences.update({s.replace("$", "").replace("^", "") for s in line["sentences"]})
    for line in test_lines:
        dev_test_sentences.update({s.replace("$", "").replace("^", "") for s in line["sentences"]})
    new_train_lines = list()
    for line in train_lines:
        new_sents = list()
        for sent in line["sentences"]:
            temp_sent = sent.replace("$", "").replace("^", "")
            if temp_sent not in dev_test_sentences:
                new_sents.append(sent)
        if not new_sents:
            continue
        bag = new_sents
        if len(bag) > config.bag_size:
            bag = random.sample(bag, config.bag_size)
        else:
            # Pad the bag with randomly repeated sentences up to config.bag_size.
            idxs = list(np.random.choice(list(range(len(bag))), config.bag_size - len(bag)))
            bag = bag + [bag[i] for i in idxs]
        line["sentences"] = bag
        new_train_lines.append(line)
    # Recompute the triples actually covered by the remaining train lines.
    new_triples = set()
    for line in new_train_lines:
        src, tgt = line["group"]
        relation = line["relation"]
        new_triples.add((src, relation, tgt))
    return new_train_lines, new_triples
def create_data_split(triples):
    """Split triples into train/dev/test with zero triple overlap.

    Splits are stratified on the relation label so every relation keeps
    roughly the same proportion in each split: 20% test first, then 10% of
    the remainder as dev. Both splits are seeded with config.SEED for
    reproducibility.

    Returns:
        (train_triples, dev_triples, test_triples) as lists of
        (src, relation, tgt) tuples.
    """
    triples = list(triples)
    inds = list(range(len(triples)))
    y = [relation for _, relation, _ in triples]
    # train_dev test split
    train_dev_inds, test_inds = train_test_split(inds, stratify=y, test_size=0.2, random_state=config.SEED)
    # Re-gather the labels of the remaining pool before the second split.
    y = [y[i] for i in train_dev_inds]
    train_inds, dev_inds = train_test_split(train_dev_inds, stratify=y, test_size=0.1, random_state=config.SEED)
    train_triples = [triples[i] for i in train_inds]
    dev_triples = [triples[i] for i in dev_inds]
    test_triples = [triples[i] for i in test_inds]
    logger.info(" *** Train triples : {} *** ".format(len(train_triples)))
    logger.info(" *** Dev triples : {} *** ".format(len(dev_triples)))
    logger.info(" *** Test triples : {} *** ".format(len(test_triples)))
    return train_triples, dev_triples, test_triples
def split_lines(triples, group_to_data):
    """Expand a triple split into one output record per (group, relation).

    Args:
        triples: iterable of (src, relation, tgt) tuples for one split.
        group_to_data: mapping "src<TAB>tgt<TAB>reldir" -> {"relations", "bag", ...}.

    Returns:
        List of dicts with keys group / relation / sentences / e1 / e2 / reldir.
    """
    unique_groups = {"{}\t{}".format(head, tail) for head, _, tail in triples}
    records = []
    for grp in unique_groups:
        head, tail = grp.split("\t")
        # Only the "0" direction key is generated for each pair.
        for key in ("\t".join([head, tail, "0"]),):
            if key not in group_to_data:
                continue
            payload = group_to_data[key]
            direction = int(key.split("\t")[2])
            records.extend(
                {
                    "group": (head, tail),
                    "relation": rel,
                    "sentences": payload["bag"],
                    "e1": payload.get("e1", None), "e2": payload.get("e2", None),
                    "reldir": direction
                }
                for rel in payload["relations"]
            )
    return records
def report_data_stats(lines, triples):
    """Log group, sentence, and triple counts for one data split."""
    summary = (
        ("num_of_groups", len(lines)),
        ("num_of_sents", sum(len(entry["sentences"]) for entry in lines)),
        ("num_of_triples", len(triples)),
    )
    for stat_name, stat_value in summary:
        logger.info(" *** {} : {} *** ".format(stat_name, stat_value))
def write_final_jsonl_file(lines, output_fname):
    """Serialize each record to ``output_fname`` as one JSON object per line (JSONL)."""
    with open(output_fname, "w") as out_fp:
        out_fp.writelines(json.dumps(record) + "\n" for record in lines)
if __name__ == "__main__":
# 0. Load UMLS vocab object
logger.info("Loading UMLS vocab object `{}` ...".format(config.umls_cui_to_txts))
with open(config.umls_cui_to_txts, "rb") as ctt, open(config.umls_reltxt_to_groups, "rb") as rttg:
cui_to_entity_texts = pickle.load(ctt)
relation_text_to_groups = pickle.load(rttg)
ctt.close(), rttg.close()
# 1. Collect all possible group texts from their CUIs
groups_texts = get_groups_texts_from_umls_vocab(relation_text_to_groups, cui_to_entity_texts,
config.reltext_all_combos)
# 2. Search for text alignment of groups (this can take up to 80~90 mins)
pos_groups, neg_groups = align_groups_to_sentences(groups_texts, config.medline_linked_sents_file,
config.groups_linked_sents_file)
# 3. From collected groups and pruning relations criteria, get final triples
triples = pruned_triples(cui_to_entity_texts, relation_text_to_groups, pos_groups, neg_groups, config.min_rel_group,
config.max_rel_group)
# 4. Collect evidences and filter triples based on sizes of collected bags
triples, group_to_data = filter_triples_with_evidence(triples, config.bag_size)
logger.info(" *** No. of triples (after filtering) *** : {}".format(len(triples)))
E, R = set(), set()
with open(config.triples_file_all, "w") as wf:
for ei, rj, ek in triples:
E.update([ei, ek])
R.add(rj)
line = "{}\t{}\t{}".format(ei, rj, ek)
wf.write(line + '\n')
with open(config.entities_file, "w") as wf:
for e in E:
wf.write("{}\n".format(e))
with open(config.relations_file, "w") as wf:
for r in R:
wf.write("{}\n".format(r))
logger.info(" *** No. of entities *** : {}".format(len(E)))
logger.info(" *** No. of relations *** : {}".format(len(R)))
# 5. Split into train, dev and test at triple level to keep zero triples overlap
train_triples, dev_triples, test_triples = create_data_split(triples)
train_lines = split_lines(train_triples, group_to_data)
dev_lines = split_lines(dev_triples, group_to_data)
test_lines = split_lines(test_triples, group_to_data)
# Remove any overlapping test and dev sentences from training
logger.info("Train stats before removing overlapping sentences ...")
report_data_stats(train_lines, train_triples)
train_lines, train_triples = remove_overlapping_sents(train_lines, dev_lines, test_lines)
# Write triples file and final combined file
logger.info("Train stats after removing dev + test overlapping sentences ...")
triples_splits = [
(config.complete_train, config.triples_file_train, train_triples, train_lines, "TRAIN"),
(config.complete_dev, config.triples_file_dev, dev_triples, dev_lines, "DEV"),
(config.complete_test, config.triples_file_test, test_triples, test_lines, "TEST")
]
for complete_file, trip_file, trips, lines, split_name in triples_splits:
print(split_name)
report_data_stats(lines, trips)
with open(trip_file, "w") as wf:
for ei, rj, ek in trips:
wf.write("{}\t{}\t{}\n".format(ei, rj, ek))
# 6. Write train, dev, test files with sentence, group and relation
logger.info("Creating training file at `{}` ...".format(complete_file))
write_final_jsonl_file(lines, complete_file)
| IBM/aihn-ucsd | amil/preprocess/_4_splits.py | _4_splits.py | py | 19,985 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.random.... |
19856854666 | """
autoencoder.py
Autoencoder-style image generation model.
"""
import glob
import os
import tensorflow as tf
from matplotlib import pyplot as plt
from tqdm import tqdm
import models
from utils import gauss_kernel
class AutoEncoderGAN():
    """Autoencoder-style image generator with an optional WGAN-GP critic.

    An encoder maps images to a z_dim latent vector, a decoder reconstructs
    them; when loss_weights['discriminator'] > 0 a critic adds an adversarial
    loss on top of the L2 reconstruction loss. Images are assumed in [-1, 1]
    (save_img maps them to [0, 255]).
    """
    def __init__(self,
                 batch_size,
                 z_dim,
                 enc_model,
                 dec_model,
                 disc_model,
                 loss_weights,
                 max_filters,
                 conv_init='he_normal',
                 verbose=True,
                 **kwargs):
        """Store configuration.

        Args:
            batch_size: images per training batch.
            z_dim: latent vector size.
            enc_model / dec_model / disc_model: names of constructor functions
                looked up on the `models` module.
            loss_weights: dict with keys 'discriminator', 'l2_gen',
                'l2_loss_size' weighting/configuring the losses.
            max_filters: max conv filter count passed to the model builders.
            conv_init: conv kernel initializer name.
            verbose: print model summaries when building.
            **kwargs: optional 'blur_kernel' size to pre-blur inputs.
        """
        self.batch_size = batch_size
        self.z_dim = z_dim
        self.enc_model = enc_model
        self.dec_model = dec_model
        self.disc_model = disc_model
        self.loss_weights = loss_weights
        self.max_filters = max_filters
        self.conv_init = conv_init
        self.verbose = verbose
        # Best (lowest) losses seen so far; None until the first step.
        self.best_gen_loss = None
        self.best_disc_loss = None
        # Per-step loss histories for the learning-curve plots.
        self.gen_loss_curve = []
        self.disc_loss_curve = []
        if 'blur_kernel' in kwargs.keys() and kwargs['blur_kernel']:
            self.gauss_kernel = gauss_kernel(kwargs['blur_kernel'])

    def init_models(self, in_size, out_size, disc_size, lr, b1, b2):
        """Define encoder, decoder, and discriminator models if using plus optimizers."""
        encoder_model = getattr(models, self.enc_model)
        decoder_model = getattr(models, self.dec_model)
        self.encoder = encoder_model(in_size, self.z_dim, max_filters=self.max_filters, conv_init=self.conv_init)
        self.decoder = decoder_model(out_size, self.z_dim, max_filters=self.max_filters, conv_init=self.conv_init)
        if self.loss_weights['discriminator'] > 0 and self.disc_model:
            discriminator_model = getattr(models, self.disc_model)
            self.discriminator = discriminator_model(disc_size, max_filters=self.max_filters, conv_init=self.conv_init)
        if self.verbose:
            print(self.encoder.summary())
            print(self.decoder.summary())
            if self.loss_weights['discriminator'] > 0:
                print(self.discriminator.summary())
        self.gen_optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=lr, beta1=b1, beta2=b2)
        if self.loss_weights['discriminator'] > 0:
            # Critic learns 4x faster than the generator.
            self.disc_optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=lr*4, beta1=b1, beta2=b2)

    def train_step(self, img_in, true_img, out_size, disc_size):
        """Make one update step.

        Returns (gen_loss, disc_loss, l2_loss) when the critic is enabled,
        otherwise just the scalar l2_loss (not a tuple).
        """
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            gen_z = self.encoder(img_in, training=True)
            gen_img = self.decoder(gen_z, training=True)
            self.tot_gen_loss = 0
            if self.loss_weights['discriminator'] > 0:
                # Crop both real and generated images when the critic input is smaller.
                if disc_size < out_size:
                    true_img_disc = tf.random_crop(true_img, [self.batch_size, disc_size, disc_size, 3])
                    gen_img_disc = tf.random_crop(gen_img, [self.batch_size, disc_size, disc_size, 3])
                else:
                    true_img_disc = true_img
                    gen_img_disc = gen_img
                real_disc = self.discriminator(true_img_disc, training=True)
                generated_disc = self.discriminator(gen_img_disc, training=True)
                gen_loss = self.generator_loss(generated_disc) * self.loss_weights['discriminator']
                disc_loss = self.discriminator_loss(true_img_disc, gen_img_disc, real_disc, generated_disc) * \
                    self.loss_weights['discriminator']
                self.tot_disc_loss = disc_loss
                self.tot_gen_loss += gen_loss
                self.disc_loss_curve.append(self.tot_disc_loss)
            l2_loss = self.l2_loss(gen_img, true_img) * self.loss_weights['l2_gen']
            self.tot_gen_loss += l2_loss
            self.gen_loss_curve.append(self.tot_gen_loss)
        # Apply generator (encoder + decoder) gradients.
        gradients_gen = gen_tape.gradient(
            self.tot_gen_loss, self.encoder.trainable_variables + self.decoder.trainable_variables)
        self.gen_optimizer.apply_gradients(
            zip(gradients_gen, self.encoder.trainable_variables + self.decoder.trainable_variables))
        # NOTE(review): truthiness check — a best loss of exactly 0 would be
        # treated as unset and overwritten; `is not None` would be stricter.
        self.best_gen_loss = min(self.best_gen_loss, self.tot_gen_loss) if self.best_gen_loss else self.tot_gen_loss
        if self.loss_weights['discriminator'] > 0:
            gradients_disc = disc_tape.gradient(self.tot_disc_loss, self.discriminator.trainable_variables)
            self.disc_optimizer.apply_gradients(zip(gradients_disc, self.discriminator.trainable_variables))
            self.best_disc_loss = min(self.best_disc_loss, self.tot_disc_loss) if self.best_disc_loss else self.tot_disc_loss
            return (gen_loss, disc_loss, l2_loss)
        else:
            return (l2_loss)

    def train(self, train_data, in_size, out_size, disc_size, iterations, lr, save_dir, b1, b2, save_int, **kwargs):
        """Train over specified number of iterations per settings.

        Resumes from the latest checkpoints in save_dir if present; saves
        sample images, learning curves, and model weights every save_int steps.
        """
        if not os.path.isdir(save_dir): os.mkdir(save_dir)
        self.init_models(in_size, out_size, disc_size, lr, b1, b2)
        gen_steps, disc_steps = self.load_saved_models(save_dir)
        source_imgs = None
        progress = tqdm(train_data.take(iterations))
        for iteration, true_img in enumerate(progress):
            # Optionally blur the input so the model learns from softened images.
            if kwargs['blur_kernel']:
                proc_img = tf.nn.depthwise_conv2d(true_img, self.gauss_kernel, strides=[1, 1, 1, 1], padding="SAME")
            else:
                proc_img = true_img
            if in_size < out_size:
                proc_img = tf.image.resize(proc_img, (in_size, in_size), align_corners=True)
            # Keep the first batch as a fixed visualization set.
            if source_imgs is None:
                source_imgs = proc_img
                self.save_img(source_imgs, save_dir, name='imgs_in_')
            losses = self.train_step(img_in=proc_img, true_img=true_img, out_size=out_size, disc_size=disc_size)
            if self.loss_weights['discriminator'] > 0:
                progress.set_postfix(best_gen_loss=self.best_gen_loss.numpy(), best_disc_loss=self.best_disc_loss.numpy(),
                                     gen_loss=losses[0].numpy(), disc_loss=losses[1].numpy(), l2_loss=losses[2].numpy())
            else:
                progress.set_postfix(l2_loss=losses.numpy())
            if iteration % save_int == 0 or iteration == iterations - 1:
                self.generate(source_imgs, iteration + 1 + gen_steps, save_dir)
                self.save_learning_curve(save_dir)
                self.save_models(save_dir, iteration, gen_steps, disc_steps)

    def generator_loss(self, generated_disc):
        """Gen loss from WGAN-GP loss: https://arxiv.org/pdf/1704.00028.pdf"""
        # Negative so that gradient descent maximizes critic score received by generated output
        return -tf.reduce_mean(generated_disc)

    def discriminator_loss(self, real_imgs, generated_imgs, real_disc, generated_disc, gp_lambda=10, epsilon=0.001):
        """Disc loss from WGAN-GP loss: https://arxiv.org/pdf/1704.00028.pdf"""
        # Difference between critic scores received by generated output vs real image
        # Lower values mean that the real image samples are receiving higher scores, therefore
        # gradient descent maximizes discriminator accuracy
        out_size = real_imgs.get_shape().as_list()
        d_cost = tf.reduce_mean(generated_disc) - tf.reduce_mean(real_disc)
        # Random interpolation weights for the gradient penalty term.
        alpha = tf.random.uniform(
            shape=[self.batch_size, out_size[2], out_size[2], 3],
            minval=0.,
            maxval=1.
        )
        diff = generated_imgs - real_imgs
        interpolates = real_imgs + (alpha * diff)
        with tf.GradientTape() as tape:
            tape.watch(interpolates)
            interpolates_disc = self.discriminator([interpolates], training=False)
        # Gradient of critic score wrt interpolated imgs
        gradients = tape.gradient(interpolates_disc, [interpolates])[0]
        # Euclidean norm of gradient for each sample
        norm = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
        # Gradient norm penalty is the average distance from 1
        gradient_penalty = tf.reduce_mean((norm - 1.) ** 2) * gp_lambda
        # Small drift term keeping real scores near zero.
        epsilon_penalty = tf.reduce_mean(real_disc) * epsilon
        return d_cost + gradient_penalty + epsilon_penalty

    def l2_loss(self, gen_img, true_img):
        """L2 loss, computed after resizing both images to l2_loss_size."""
        if self.loss_weights['l2_loss_size'] != gen_img.get_shape().as_list()[1]:
            gen_img = tf.image.resize(gen_img,
                                      (self.loss_weights['l2_loss_size'], self.loss_weights['l2_loss_size']),
                                      align_corners=True)
        if self.loss_weights['l2_loss_size'] != true_img.get_shape().as_list()[1]:
            true_img = tf.image.resize(true_img,
                                       (self.loss_weights['l2_loss_size'], self.loss_weights['l2_loss_size']),
                                       align_corners=True)
        return tf.reduce_mean(tf.squared_difference(true_img, gen_img))

    def generate(self, source_imgs, epoch, save_dir):
        """Inference pass to produce and save image."""
        generated_imgs = self.decoder(self.encoder(source_imgs, training=False), training=False)
        self.save_img(generated_imgs, save_dir, name=str(generated_imgs[0].shape[-2]) + '_' + str(epoch) + '_')

    def save_img(self, img_tensor, save_dir, name):
        """Save out image as JPG. Rescales [-1, 1] tensors to [0, 255] uint8."""
        img = tf.cast(255 * (img_tensor + 1)/2, tf.uint8)
        for i, ind_img in enumerate(img):
            encoded = tf.image.encode_jpeg(ind_img)
            tf.write_file(os.path.join(save_dir, name + str(i) + '.jpg'), encoded)

    def save_learning_curve(self, save_dir):
        """Save out losses as plot (generator always; critic when enabled)."""
        plt.plot(range(len(self.gen_loss_curve)), self.gen_loss_curve, color='g', linewidth='1')
        plt.xlabel("Iterations")
        plt.ylabel("Generator Loss")
        plt.savefig(os.path.join(save_dir, 'gen_loss.jpg'), bbox_inches='tight')
        plt.clf()
        if self.loss_weights['discriminator'] > 0:
            plt.plot(range(len(self.disc_loss_curve)), self.disc_loss_curve, color='g', linewidth='1')
            plt.xlabel("Iterations")
            plt.ylabel("Discriminator Loss")
            plt.savefig(os.path.join(save_dir, 'disc_loss.jpg'), bbox_inches='tight')
            plt.clf()

    def load_saved_models(self, save_dir):
        """Load models from checkpoints.

        Returns (max_gen, max_disc): the highest step numbers found for the
        generator and discriminator checkpoints (0 when none exist).
        """
        gen_checkpoints = glob.glob(os.path.join(save_dir, 'encoder-*.h5'))
        max_gen, max_disc = 0, 0
        if len(gen_checkpoints) > 0:
            # Checkpoint files are named '<model>-<step>.h5'; pick the latest step.
            all_steps = [int(os.path.basename(path).split('.')[0].split('-')[1]) for path in gen_checkpoints]
            max_gen = max(all_steps)
            self.encoder.load_weights(os.path.join(save_dir, 'encoder-' + str(max_gen) + '.h5'), by_name=True)
            self.decoder.load_weights(os.path.join(save_dir, 'decoder-' + str(max_gen) + '.h5'), by_name=True)
        if self.disc_model:
            disc_checkpoints = glob.glob(os.path.join(save_dir, 'disc-*.h5'))
            if len(disc_checkpoints) > 0:
                all_steps = [int(os.path.basename(path).split('.')[0].split('-')[1]) for path in disc_checkpoints]
                max_disc = max(all_steps)
                self.discriminator.load_weights(os.path.join(save_dir, 'disc-' + str(max_disc) + '.h5'), by_name=True)
        return max_gen, max_disc

    def save_models(self, save_dir, current_steps, gen_steps, disc_steps):
        """Save models as checkpoints, offsetting step counts by any resumed steps."""
        self.encoder.save(os.path.join(save_dir, 'encoder-' + str(gen_steps + current_steps) + '.h5'))
        self.decoder.save(os.path.join(save_dir, 'decoder-' + str(gen_steps + current_steps) + '.h5'))
        if self.disc_model:
            self.discriminator.save(os.path.join(save_dir, 'disc-' + str(disc_steps + current_steps) + '.h5'))
{
"api_name": "utils.gauss_kernel",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.train.AdamOptimizer",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 63,
"usage_type": "attribute"
},
{
... |
13161856150 | #coding: utf-8
import os
import time
import random
import jieba
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report
from sklearn import tree, metrics
from sklearn import feature_extraction, model_selection
# 导入文本特征向量转化模块
from sklearn.feature_extraction.text import TfidfTransformer,TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
import feature_selection
from sklearn.datasets import fetch_20newsgroups
def TextProcessing(folder_path, test_size=0.2):
    """Read every document under ``folder_path`` and split into train/test sets.

    Each sub-folder of ``folder_path`` is a class; each file inside it is one
    document. Blank lines and lines starting with '#' are dropped, remaining
    lines are joined with spaces into one string per document.

    Args:
        folder_path: root directory with one sub-folder per class.
        test_size: fraction of the corpus held out for testing.

    Returns:
        (train_data_list, test_data_list, train_class_list, test_class_list)
    """
    folder_list = os.listdir(folder_path)
    class_list = []
    data_list = []  # cleaned full text of every document
    # Loop over classes (sub-folders)
    for folder in folder_list:
        new_folder_path = os.path.join(folder_path, folder)
        files = os.listdir(new_folder_path)
        # Loop over the documents of this class
        for file in files:
            raw = " "
            with open(os.path.join(new_folder_path, file), 'r', encoding='UTF-8') as fp:
                for line in fp.readlines():  # read each line in turn
                    line = line.strip()  # trim surrounding whitespace
                    if not len(line) or line.startswith('#'):  # skip blank or comment lines
                        continue
                    raw += line + " "  # accumulate the cleaned text
            data_list.append(raw)
            class_list.append(folder)  # the folder name doubles as the class label
    # Split into train and test sets.
    # Bug fix: the caller-supplied `test_size` was previously ignored
    # (the split was hard-coded to test_size=0.2).
    train_data_list, test_data_list, train_class_list, test_class_list = model_selection.train_test_split(
        data_list, class_list, test_size=test_size, random_state=1)
    return train_data_list, test_data_list, train_class_list, test_class_list
#文本特征选择与向量化,不同的特征词选择方法,不同的特征词数目
def TextFeature(train_data_list, test_data_list, train_class_list, test_class_list, fs_method, fs_num):
    """Select feature terms and vectorize train/test documents with TF-IDF.

    Args:
        train_data_list / test_data_list: cleaned document strings.
        train_class_list / test_class_list: parallel class labels.
        fs_method: feature-selection method name (e.g. 'IG', 'MI') passed to
            the feature_selection module.
        fs_num: number of top-ranked terms to keep as the vocabulary.

    Returns:
        (weight_train, weight_test, train_class_list, test_class_list) — dense
        TF-IDF matrices for train and test plus the unchanged label lists.
    """
    # NOTE(review): feature_selection is called twice with the same arguments;
    # the first call is only used for the diagnostic prints below.
    term_set_fs1 = feature_selection.feature_selection(train_data_list, train_class_list, fs_method)
    term_set_fs = feature_selection.feature_selection(train_data_list, train_class_list, fs_method)[:fs_num]
    print('Feature selection...')
    print('fs method:' + fs_method, 'fs num:' + str(fs_num))
    # Map each selected term to a column index (fixed vocabulary).
    term_dict = dict(zip(term_set_fs, range(len(term_set_fs))))
    print(len(term_set_fs1))
    print(term_set_fs1)
    # Vectorize the train and test sets (term weights are normalized tf-idf).
    '''vectorizer = CountVectorizer()
    # 该类会将文本中的词语转换为词频矩阵,矩阵元素a[i][j] 表示j词在i类文本下的词频
    vectorizer.fixed_vocabulary = True
    vectorizer.vocabulary_ = term_dict
    transformer = TfidfTransformer()
    # 该类会统计每个词语的tf-idf权值
    tfidf_train = transformer.fit_transform(vectorizer.fit_transform(train_data_list))
    # 第一个fit_transform是计算tf-idf,第二个fit_transform是将文本转为词频矩阵
    tfidf_test = transformer.fit_transform(vectorizer.fit_transform(test_data_list))
    # 第一个fit_transform是计算tf-idf,第二个fit_transform是将文本转为词频矩阵
    weight_train = tfidf_train.toarray()
    weight_test = tfidf_test.toarray()
    print(weight_train)
    '''
    '''for i in range(len(weight_train)):
    # 打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重
    print(u"-------这里输出第", i, u"类文本的词语tf-idf权重------")
    for j in range(len(word)):
    print(word[j],weight_train[i][j])
    '''
    tv = TfidfVectorizer(sublinear_tf=True, max_df=0.5, vocabulary=term_dict)
    #tv = TfidfVectorizer(sublinear_tf=True, max_df=0.5)
    tfidf_train = tv.fit_transform(train_data_list)
    # The test vectorizer reuses the train vocabulary so columns line up.
    #tv2 = TfidfVectorizer(vocabulary=term_dict)
    tv2 = TfidfVectorizer(vocabulary=tv.vocabulary_)
    tfidf_test = tv2.fit_transform(test_data_list)
    word = tv.get_feature_names()
    ## word = vectorizer.get_feature_names()  # all terms of the bag-of-words model
    # print(word)
    # Densify the tf-idf matrices: element a[i][j] is the tf-idf weight of term j in document i.
    weight_train = tfidf_train.toarray()
    weight_test = tfidf_test.toarray()
    return weight_train, weight_test, train_class_list, test_class_list
# 利用朴素贝叶斯分类算法来构建文本分类模型,并计算分类准确率
def TextClassifyModelBuilding(weight_train, weight_test, train_class_list, test_class_list):
    """Train a multinomial Naive Bayes classifier and return test accuracy.

    Args:
        weight_train / weight_test: dense tf-idf matrices.
        train_class_list / test_class_list: parallel class labels.

    Returns:
        Fraction of correctly predicted test documents (float in [0, 1]).
    """
    # Create the model
    mnb = MultinomialNB(alpha=0.01)
    # Fit the classifier on the training matrix
    classifier = mnb.fit(weight_train, train_class_list)
    # Predict on the held-out test matrix and compute accuracy
    predict_class_list = classifier.predict(weight_test)
    acc = np.mean(predict_class_list == test_class_list)
    #test_accuracy = classifier.score(weight_test, test_class_list)
    #print('accuracy:')
    #print(test_accuracy)
    '''
    print(predict_class_list)
    print("准确率:", classifier.score(weight_test, test_class_list))
    print("其他指标:\n", classification_report(test_class_list, predict_class_list))
    print("finished")
    '''
    return acc
if __name__ == '__main__':
    print("start")
    ## 1.1 Data loading
    ## 1.2 Text preprocessing (tokenization, stop-word and digit removal, etc.)
    ##     then split the corpus into train and test sets
    folder_path = '../data/train_data'
    train_data_list, test_data_list, train_class_list, test_class_list = TextProcessing(folder_path, test_size=0.2)
    print("jieguo:")
    #print(train_data_list)
    '''
    # 1 下载新闻数据
    news = fetch_20newsgroups(subset="all")
    # 2 分割训练数据和测试数据
    train_data_list, test_data_list, train_class_list, test_class_list = model_selection.train_test_split(news.data,
    news.target,
    test_size=0.2,
    random_state=1)
    '''
    ## 2.1 Feature term selection: compare methods over a range of vocabulary sizes
    fs_method_list = ['IG', 'MI']
    #fs_method_list = ['IG']
    fs_num_list = range(10000, 88000, 10000)
    acc_dict = {}
    ## 2.2 Text vectorization + model evaluation per (method, vocab size)
    for fs_method in fs_method_list:
        acc_list = []
        for fs_num in fs_num_list:
            weight_train, weight_test, train_class_list, test_class_list = TextFeature(train_data_list, test_data_list, train_class_list, test_class_list, fs_method, fs_num)
            acc = TextClassifyModelBuilding(weight_train, weight_test, train_class_list, test_class_list)
            acc_list.append(acc)
        acc_dict[fs_method] = acc_list
        print('fs method:', acc_dict[fs_method])
    # Plot accuracy vs. vocabulary size, one curve per selection method.
    for fs_method in fs_method_list:
        plt.plot(fs_num_list, acc_dict[fs_method], '--^', label=fs_method)
    plt.title('feature selection')
    plt.xlabel('fs num')
    plt.ylabel('accuracy')
    plt.ylim((0.4, 0.8))
    plt.legend(loc='upper left', numpoints=1)
    plt.show()
{
"api_name": "os.listdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": ... |
5511081889 | import numpy as np
import pandas as pd
from pydub import AudioSegment
import librosa
from tqdm import tqdm
import os
'''
loudness:响度,表示音频信号的振幅大小。
pitch_0, pitch_1, ...:基频,表示音频信号中的主要频率成分。这些列包含了不同时间节点上的基频信息。
chroma_0, chroma_1, ...:音调,表示音频信号中的音高信息。这些列包含了不同时间节点上的音调信息。
mfcc_0, mfcc_1, ...:梅尔频率倒谱系数(MFCC),表示音频信号中的频谱特性。这些列包含了不同时间节点上的MFCC特征。
spectral_centroid:谱质心,表示音频信号的亮度或音色。这一列包含了不同时间节点上的谱质心信息。
spectral_rolloff:谱衰减,表示音频信号中的频率分布。这一列包含了不同时间节点上的谱衰减信息。
zero_crossing_rate:零交叉率,表示音频波形在零点上穿越的次数。这一列包含了不同时间节点上的零交叉率信息。
上述特征分别对应于不同的音频属性,如响度、音高、音色等。这些特征可以用于音频分析、音乐信息检索、音频分类等任务。
hop_length:512个样本(即每两个相邻帧之间的样本数)
frame_rate:音频的采样率(在load_audio_file()函数中获取)
要计算每帧的持续时间(以秒为单位),可以使用以下公式:
frame_duration = hop_length / frame_rate
要计算每秒钟的帧数,可以使用以下公式:
frames_per_second = frame_rate / hop_length
以一个具有44100Hz采样率的音频文件为例:
frame_duration = 512 / 44100 ≈ 0.0116秒
frames_per_second = 44100 / 512 ≈ 86帧
所以,在这个例子中,每帧持续时间约为0.0116秒,每秒钟有约86帧。这些值会根据音频的实际采样率和hop_length参数而有所不同。
要将每秒钟的帧数限制为60帧,您需要根据音频的采样率(frame_rate)调整hop_length参数。您可以使用以下公式计算所需的hop_length:
hop_length = frame_rate / frames_per_second
例如,对于具有44100Hz采样率的音频文件,要使每秒钟分成60帧,可以这样计算hop_length:
hop_length = 44100 / 60 ≈ 735
由于hop_length必须是整数,您可以将其四舍五入到最接近的整数,例如735。
然后,您可以在分析音频的analyze_audio()函数中更新hop_length参数
'''
def load_audio_file(file_path):
    """Decode an audio file and return (float32 sample array, sample rate)."""
    segment = AudioSegment.from_file(file_path)
    samples = np.array(segment.get_array_of_samples(), dtype=np.float32)
    return samples, segment.frame_rate
# Weighted average of the chroma (pitch-class) features
def calculate_weighted_chroma(chroma_features):
    """Collapse a (12, T) chroma matrix into one min-max-normalized curve.

    Each of the 12 pitch classes is weighted by its index (1..12), the
    weighted rows are summed per time frame, and the resulting curve is
    rescaled to the [0, 1] range.
    """
    # One weight per pitch class, shaped for row-wise broadcasting.
    pitch_weights = np.arange(1, 13).reshape(-1, 1)
    weighted_sum = (chroma_features * pitch_weights).sum(axis=0)
    # Min-max normalize so the curve spans [0, 1].
    lo, hi = weighted_sum.min(), weighted_sum.max()
    return (weighted_sum - lo) / (hi - lo)
def frequency_bands_pitch(pitches, low_freq=300, mid_freq=3000):
    """Average the pitch values falling inside the low/mid/high bands.

    Bands are [0, low_freq], (low_freq, mid_freq], and (mid_freq, inf).
    Returns a (low, mid, high) tuple of means; an empty band yields NaN.
    """
    in_low = (pitches >= 0) & (pitches <= low_freq)
    in_mid = (pitches > low_freq) & (pitches <= mid_freq)
    in_high = pitches > mid_freq
    return (np.mean(pitches[in_low], axis=0),
            np.mean(pitches[in_mid], axis=0),
            np.mean(pitches[in_high], axis=0))
# Compute per-frame intensity inside each frequency band
def frequency_bands_intensity(spectrum, frame_rate, low_freq=300, mid_freq=3000):
    """Average spectrogram magnitudes per frame within low/mid/high bands.

    Args:
        spectrum: magnitude spectrogram of shape (n_fft // 2, frames).
        frame_rate: sample rate used to map FFT bins to frequencies in Hz.
        low_freq / mid_freq: band boundaries in Hz.

    Returns:
        Tuple of three 1-D arrays (one value per frame) for the
        [0, low_freq], (low_freq, mid_freq], and (mid_freq, inf) bands.
    """
    bin_count = spectrum.shape[0]
    # Positive-frequency bin centers for an FFT of size 2 * bin_count.
    freqs = np.fft.fftfreq(bin_count * 2, 1 / frame_rate)[:bin_count]
    in_low = (freqs >= 0) & (freqs <= low_freq)
    in_mid = (freqs > low_freq) & (freqs <= mid_freq)
    in_high = freqs > mid_freq
    low_band_intensity = spectrum[in_low, :].mean(axis=0)
    mid_band_intensity = spectrum[in_mid, :].mean(axis=0)
    high_band_intensity = spectrum[in_high, :].mean(axis=0)
    return low_band_intensity, mid_band_intensity, high_band_intensity
# Downsampling helper
def downsample(array, factor):
    """Average consecutive groups of ``factor`` samples of a 1-D array.

    Any trailing samples that do not fill a complete group are discarded
    before averaging.
    """
    usable = len(array) - (len(array) % factor)
    trimmed = array[:usable] if usable != len(array) else array
    # Fold into (groups, factor) and average each group.
    return trimmed.reshape(-1, factor).mean(axis=1)
def analyze_audio(audio_samples, frame_rate, bvid):
    """Extract frame-level audio features and write them to a per-video CSV.

    Features: RMS loudness, chroma (plus a weighted-chroma summary), MFCC,
    spectral centroid, spectral rolloff, zero-crossing rate, and STFT band
    intensities (low/mid/high), concatenated column-wise and written to
    /Volumes/SSD/Data/video/<bvid>/<bvid>_sound_1fps.csv.

    Args:
        audio_samples: 1-D float array of audio samples.
        frame_rate: audio sample rate in Hz.
        bvid: video id used to build the output path.
    """
    # NOTE(review): FRAMES = 0.5 makes hop_length = 2 * frame_rate, i.e. one
    # analysis frame every TWO seconds despite the "_1fps" file name — confirm.
    FRAMES = 0.5
    hop_length = int(round(frame_rate / FRAMES))
    n_fft = 2048

    # RMS loudness per analysis frame; shape (1, T).
    loudness = librosa.feature.rms(y=audio_samples, frame_length=n_fft, hop_length=hop_length)

    # STFT magnitude with a half-second hop, used only for the band intensities.
    stft = np.abs(librosa.stft(audio_samples, n_fft=2048, hop_length=frame_rate // 2))
    low_band_intensity, mid_band_intensity, high_band_intensity = frequency_bands_intensity(stft,
                                                                                            frame_rate)

    # Downsample the half-second band intensities by 4 so they align with the
    # 2-second feature hop above.
    downsample_factor = 4
    low_band_intensity_downsampled = downsample(low_band_intensity, downsample_factor)
    mid_band_intensity_downsampled = downsample(mid_band_intensity, downsample_factor)
    high_band_intensity_downsampled = downsample(high_band_intensity, downsample_factor)

    # Chroma and its weighted scalar summary.  (Previously computed twice —
    # once here and once again before writing; the duplicate is removed.)
    chroma = librosa.feature.chroma_stft(y=audio_samples, sr=frame_rate, n_fft=n_fft,
                                         hop_length=hop_length)
    weighted_chroma = calculate_weighted_chroma(chroma)

    # Remaining spectral features, all on the same hop grid.
    mfcc = librosa.feature.mfcc(y=audio_samples, sr=frame_rate, n_fft=n_fft, hop_length=hop_length)
    spectral_centroid = librosa.feature.spectral_centroid(y=audio_samples, sr=frame_rate,
                                                          n_fft=n_fft, hop_length=hop_length)
    spectral_rolloff = librosa.feature.spectral_rolloff(y=audio_samples, sr=frame_rate, n_fft=n_fft,
                                                        hop_length=hop_length)
    zero_crossing_rate = librosa.feature.zero_crossing_rate(y=audio_samples, frame_length=n_fft,
                                                            hop_length=hop_length)

    # One column (or column group) per feature; rows are analysis frames.
    loudness_df = pd.DataFrame(loudness.T, columns=['loudness'])
    chroma_df = pd.DataFrame(chroma.T, columns=[f'chroma_{i}' for i in range(chroma.shape[0])])
    mfcc_df = pd.DataFrame(mfcc.T, columns=[f'mfcc_{i}' for i in range(mfcc.shape[0])])
    spectral_centroid_df = pd.DataFrame(spectral_centroid.T, columns=['spectral_centroid'])
    spectral_rolloff_df = pd.DataFrame(spectral_rolloff.T, columns=['spectral_rolloff'])
    zero_crossing_rate_df = pd.DataFrame(zero_crossing_rate.T, columns=['zero_crossing_rate'])
    data_df = pd.concat(
        [loudness_df, chroma_df, mfcc_df, spectral_centroid_df, spectral_rolloff_df,
         zero_crossing_rate_df], axis=1)

    # Append the STFT band-intensity columns.
    low_band_intensity_df = pd.DataFrame(low_band_intensity_downsampled, columns=['low_band_intensity'])
    mid_band_intensity_df = pd.DataFrame(mid_band_intensity_downsampled, columns=['mid_band_intensity'])
    high_band_intensity_df = pd.DataFrame(high_band_intensity_downsampled, columns=['high_band_intensity'])
    data_df = pd.concat(
        [data_df, low_band_intensity_df, mid_band_intensity_df, high_band_intensity_df], axis=1)

    # Append the weighted-chroma column.
    weighted_chroma_df = pd.DataFrame({'weighted_chroma': weighted_chroma})
    data_df = pd.concat([data_df, weighted_chroma_df], axis=1)

    # The frame index becomes a 'second' column, then everything is written out.
    data_df = data_df.reset_index()
    data_df.rename(columns={'index': 'second'}, inplace=True)
    data_df.to_csv('/Volumes/SSD/Data/video/{}/{}_sound_1fps.csv'.format(bvid, bvid), index=True)
if __name__ == "__main__":
    # Read the selected video ids (bvid) and comment ids (cid) from the CSV.
    video_list = pd.read_csv('/Volumes/SSD/Data/VideoinfoCntSelected.csv')
    bvid_list = video_list['bvid'].values.tolist()
    cid_list = video_list['cid'].values.tolist()
    # Analyze every video's MP3 that exists on disk; progress shown via tqdm.
    for bvid in tqdm(bvid_list):
        mp3_file_path = '/Volumes/SSD/Data/video/{}/{}.mp3'.format(bvid, bvid)
        if not os.path.exists(mp3_file_path):
            continue
        csv_file_path = '/Volumes/SSD/Data/video/{}/{}_sound_1fps.csv'.format(bvid, bvid)
        # if os.path.exists(csv_file_path):
        # print(f"File {bvid} already exists, skipping...")
        # continue
        print(bvid)
        audio_samples, frame_rate = load_audio_file(mp3_file_path)
        analyze_audio(audio_samples, frame_rate, bvid)
| indecreasy/Bilibili_Audiovisual_Danmuku | videoCap/soundCap_1fps.py | soundCap_1fps.py | py | 11,617 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "pydub.AudioSegment.from_file",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pydub.AudioSegment",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.flo... |
22971648619 | import os
import json
import boto3
from itertools import groupby
def pprint_time(secs):
    """Pretty-print a duration in whole seconds, e.g. '1 day 2 hrs 3 mins 4 secs'.

    Fixes over the previous version: a one-second remainder now reads
    '1 sec' (was '1 secs'), and an input of 0 returns '0 secs' (was '').
    """
    parts = []
    for unit_secs, singular, plural in ((86400, "day", "days"),
                                        (3600, "hr", "hrs"),
                                        (60, "min", "mins")):
        if secs >= unit_secs:
            count, secs = divmod(secs, unit_secs)
            parts.append(f"{count} {plural if count > 1 else singular}")
    # Always show seconds when they are non-zero, or when nothing else was
    # emitted (so pprint_time(0) is "0 secs" rather than an empty string).
    if secs > 0 or not parts:
        parts.append(f"{secs} {'sec' if secs == 1 else 'secs'}")
    return " ".join(parts)
def set_debug(event):
    """Enable verbose boto3/botocore stream logging and echo the raw event."""
    boto3.set_stream_logger()
    print(event)
def group_by(lst, keyfunc):
    """Group a list of dicts by keyfunc; returns a list of groups (lists),
    ordered by key. Within a group the original relative order is kept
    (sorted() is stable)."""
    ordered = sorted(lst, key=keyfunc)
    return [list(grp) for _, grp in groupby(ordered, keyfunc)]
def parse_user(user_id):
    """Keep only the digits of `user_id` and return the last ten of them
    (normalises phone-number-style ids)."""
    digits = [ch for ch in str(user_id) if ch.isdigit()]
    return "".join(digits)[-10:]
def table_init(table_name):
    """Return a boto3 DynamoDB Table resource handle for `table_name`."""
    # Grab the DynamoDB table based on table_name
    dynamodb = boto3.resource('dynamodb')
    return dynamodb.Table(table_name)
def crowdsourced_data_verify(dtable, card_name, p_data_path):
    """Return True iff at least one crowdsourcing row matches this
    card / data_path-prefix pair."""
    x = partial_query(dtable, card_name, p_data_path)
    return bool(x)
def user_init(utable, user_id):
    """Create an empty user record (crowdsourcing + social maps) if absent.

    `user_id` is normalised to its last ten digits via parse_user before the
    lookup, so phone-number-style ids map to a stable key.
    """
    user_id = parse_user(user_id)
    user = utable.get_item(Key={"user_id":user_id})
    if not "Item" in user:
        utable.put_item(
            Item = {
                'user_id':user_id,
                'crowdsourcing':{},
                'social':{},
            }
        )
def fetch_user_card_following(utable, user_id, card_name):
    """Return the user's saved data_path prefixes for `card_name` as a list.

    Stored in DynamoDB as one '|'-delimited string; split back here.
    Raises KeyError if the user or the card entry does not exist.
    """
    user_id = parse_user(user_id)
    following_list = utable.get_item(Key={"user_id":user_id})
    return following_list['Item']['crowdsourcing'][card_name].split("|")
def crowdsourced_data_init(dtable, card_name, data_path, metadata=None):
    """Create a crowdsourcing row for (card_name, data_path) if absent.

    No-op when the item already exists.  `metadata` defaults to an empty
    dict; the previous signature used a mutable default argument
    (`metadata={}`), which is an anti-pattern even though it was never
    mutated here.
    """
    if metadata is None:
        metadata = {}
    temp = dtable.get_item(
        Key={
            "card_name": card_name,
            "data_path": data_path,
        }
    )
    if "Item" in temp:
        return
    dtable.put_item(
        Item={
            "card_name": card_name,
            "data_path": data_path,
            "metadata": metadata,
            "crowdsourced_data": {}
        }
    )
def crowdsourced_data_update_metadata(dtable, card_name, data_path, metadata):
    """Replace the metadata attribute of one crowdsourcing row."""
    dtable.update_item(
        Key = {
            "card_name":card_name,
            "data_path":data_path,
        },
        UpdateExpression = "SET metadata=:m",
        ExpressionAttributeValues = {
            ":m":metadata,
        }
    )
def crowdsourced_data_update(dtable, card_name, data_path, user_id="", update=None, meta=False):
    """Update one crowdsourcing row at (card_name, data_path).

    Behaviour:
      - meta=True: replace the row's metadata with `update` (must be truthy).
      - meta=False, `update` truthy: set this user's vote to `update`.
      - meta=False, `update` falsy: delete this user's vote.

    At least one of `user_id` / `meta` must be truthy; when meta is True,
    `update` must be non-null.
    """
    assert meta or user_id
    if meta:
        assert update
        crowdsourced_data_update_metadata(dtable, card_name, data_path, update)
    elif not update:
        # BUG FIX: this branch previously called
        # crowdsourced_data_remove(dtable, card_name, data_path, user_id),
        # which does not match that function's
        # (utable, dtable, card_name, user_id) signature and would have
        # crashed.  Remove just this user's vote on this data_path directly.
        dtable.update_item(
            Key = {
                "card_name":card_name,
                "data_path":data_path,
            },
            UpdateExpression = "REMOVE crowdsourced_data.#u",
            ExpressionAttributeNames = {
                '#u':user_id,
            },
        )
    else:
        dtable.update_item(
            Key = {
                "card_name":card_name,
                "data_path":data_path,
            },
            UpdateExpression = "SET crowdsourced_data.#u=:d",
            ExpressionAttributeNames = {
                '#u':user_id,
            },
            ExpressionAttributeValues = {
                ':d':update,
            }
        )
def crowdsourced_data_remove(utable, dtable, card_name, user_id):
    """Delete this user's vote from every data_path of `card_name` they follow.

    Looks up all rows matching the user's followed prefixes via
    user_get_card, then issues one REMOVE per row.
    """
    # Remove the user's input from that particular crowdsourcing table
    info = user_get_card(utable, dtable, user_id, card_name)
    for item in info:
        data_path = item['data_path']
        print(f"Removing from {card_name}, {data_path}")
        dtable.update_item(
            Key = {
                "card_name":card_name,
                "data_path":data_path,
            },
            UpdateExpression = "REMOVE crowdsourced_data.#u",
            ExpressionAttributeNames = {
                '#u':user_id
            }
        )
def user_remove(utable, user_id):
    """Delete a user record entirely from the users table."""
    utable.delete_item(
        Key = {
            'user_id':user_id
        }
    )
    print(f"Removed User ID: {user_id}")
def user_follow(utable, user_id, card_name, partial_queries):
    """Save the user's followed query prefixes for a card.

    `partial_queries` is a single '|'-delimited string of data_path prefixes,
    stored under crowdsourcing.<card_name>.
    """
    # Save a partial query to a user's profile; the queries are a | delimited string
    utable.update_item(
        Key = {
            "user_id":user_id,
        },
        UpdateExpression = "SET crowdsourcing.#c=:i",
        ExpressionAttributeNames = {
            "#c":card_name,
        },
        ExpressionAttributeValues = {
            ":i":partial_queries
        }
    )
def user_social_add(utable, user_id, card_name, value):
    """Set the whole social.<card_name> attribute for a user to `value`."""
    utable.update_item(
        Key = {
            'user_id':user_id,
        },
        UpdateExpression = "SET social.#c = :v",
        ExpressionAttributeNames = {
            "#c":card_name,
        },
        ExpressionAttributeValues = {
            ":v":value
        }
    )
def user_social_modify(utable, user_id, card_name, data_path, data):
    """Set social.<card_name>.<data_path...> = data for the given user.

    `data_path` is a list of nested keys; each element is bound to an
    ExpressionAttributeName placeholder (#i0, #i1, ...) so reserved words
    and special characters are safe in the update expression.

    Fixes over the previous version: a dead initial `path` assignment is
    removed, and the placeholder path is built in index order instead of
    with sorted(), whose lexicographic ordering would scramble paths with
    ten or more components ('#i10' < '#i2').
    """
    names = {f"#i{j}": key for j, key in enumerate(data_path)}
    path = '.'.join(f"#i{j}" for j in range(len(data_path)))
    update_expr = f'SET social.{card_name}.{path} = :d'
    print(update_expr)
    utable.update_item(
        Key = {
            'user_id':user_id,
        },
        UpdateExpression = update_expr,
        ExpressionAttributeNames = names,
        ExpressionAttributeValues = {
            ":d":data
        }
    )
def partial_query(table, card_name, p_data_path):
    """Query rows whose hash key is `card_name` and whose range key
    `data_path` starts with the given prefix; returns the matched items."""
    response = table.query(
        KeyConditionExpression = "card_name = :c AND begins_with(data_path, :p)",
        ExpressionAttributeValues = {
            ":c": card_name,
            ":p": p_data_path,
        }
    )
    return response["Items"]
def user_get_card(utable, dtable, user_id, card_name):
    """Fetch all crowdsourcing rows for one user's followed prefixes on a card.

    Reads the user's '|'-delimited prefix list (consistent read), then runs
    one partial_query per prefix and concatenates the results.
    """
    # Get the raw database data for a specific card for a specific user
    user = utable.get_item(Key={'user_id':user_id}, ConsistentRead = True)
    p_data_paths = user['Item']['crowdsourcing'][card_name].split("|")
    items = []
    for p in p_data_paths:
        items.extend(partial_query(dtable, card_name, p))
    return items
| IrisHub/iris-3-backend-prototypes | fns/utils.py | utils.py | py | 5,768 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "boto3.set_stream_logger",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "itertools.groupby",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "boto3.resource",
"line_number": 56,
"usage_type": "call"
}
] |
39245610548 | from flask import Flask, request,Response
from config.settings import format_url
from flask_sqlalchemy import SQLAlchemy
from schema.models import db
def create_app():
    """Application factory: configure Flask + SQLAlchemy and create tables."""
    app = Flask(__name__)
    # Base settings module, then the PostgreSQL URL built by config.settings.format_url.
    app.config.from_object('config.settings')
    app.config['SQLALCHEMY_DATABASE_URI'] = format_url('postgresql')
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    # NOTE(review): pushing an app context inside the factory is unusual;
    # presumably needed so db.create_all() below has a bound app — confirm.
    app.app_context().push()
    db.init_app(app)
    db.create_all()
    return app
if __name__ == "__main__":
    entry = create_app()
    # Listen on all interfaces; debug=True is for development only.
    entry.run(host='0.0.0.0', port=5000, debug=True)
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "config.settings.format_url",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "schema.models.db.init_app",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sche... |
28938445312 | import os
import torch
import functools
import yaml
import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
from torch.utils import model_zoo
from networks.swinir import SwinIR
from networks.convnext import ConvNeXt
from networks.convnext import model_urls
import segmentation_models_pytorch as smp
from utils import dotdict
def save_model(model, filename):
    """Save model weights to `filename`, unwrapping (Distributed)DataParallel
    so the state-dict keys carry no 'module.' prefix.

    The previous check used torch.cuda.device_count() > 1 as a proxy for
    "model is wrapped", which crashes on multi-GPU hosts when the model is
    NOT wrapped (no `.module` attribute) and saves prefixed keys when a
    wrapped model runs on a single-GPU host.
    """
    if isinstance(model, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
        torch.save(model.module.state_dict(), filename)
    else:
        torch.save(model.state_dict(), filename)
def load_weight(model, weight_file, device):
    """Load trained weights from `weight_file` onto `model` (strict key match).

    You should put your weight file on the root directory with the name of
    `weight_file`.

    Raises:
        FileNotFoundError: if `weight_file` does not exist.  (The previous
        version executed ``raise f'...'``, and raising a plain str is a
        TypeError in Python 3, masking the real problem.)
    """
    if not os.path.isfile(weight_file):
        raise FileNotFoundError(f'weight file {weight_file} does not exist.')
    model.load_state_dict(torch.load(weight_file, map_location=device), strict=True)
    # model.load_state_dict(torch.load(weight_file), strict=False)
    print('load weight from {}.'.format(weight_file))
def select_generator(opt, device, rank):
    """Build the generator network selected by `opt.gene` and move it to `device`.

    Supported opt.gene values: 'unet', 'unet++', 'pan', 'fpn', 'deeplabv3+'
    (segmentation_models_pytorch architectures with an ImageNet-pretrained
    `opt.enc` encoder), 'swinir', and 'convnext'.  When opt.weight is set,
    the weights are loaded before returning.

    NOTE(review): `rank` is unused here.  `cfg` is only loaded when opt.cfg
    is truthy but is required by the 'swinir' branch, and an unrecognised
    opt.gene leaves `generator` unbound (NameError on return) — confirm
    callers always pass consistent options.
    """
    if opt.cfg:
        with open(opt.cfg, 'r') as f:
            cfg = dotdict(yaml.safe_load(f))
    if opt.gene == 'unet':
        generator = smp.Unet(encoder_name=opt.enc,
                             encoder_weights="imagenet",
                             in_channels=3,
                             classes=3,
                             activation='tanh'
                             )
    elif opt.gene == 'unet++':
        generator = smp.UnetPlusPlus(encoder_name=opt.enc,
                                     encoder_weights="imagenet",
                                     in_channels=3,
                                     classes=3,
                                     activation=opt.activation,
                                     )
    elif opt.gene == 'pan':
        generator = smp.PAN(encoder_name=opt.enc,
                            encoder_weights="imagenet",
                            in_channels=3,
                            classes=3,
                            activation=opt.activation,
                            )
    elif opt.gene == 'fpn':
        generator = smp.FPN(encoder_name=opt.enc,
                            encoder_weights="imagenet",
                            in_channels=3,
                            classes=3,
                            activation=opt.activation,
                            )
    elif opt.gene == 'deeplabv3+':
        generator = smp.DeepLabV3Plus(encoder_name=opt.enc,
                                      encoder_weights="imagenet",
                                      in_channels=3,
                                      classes=3,
                                      activation=opt.activation,
                                      )
    elif opt.gene == 'swinir':
        # NOTE(review): 'img_siz' looks like a typo for 'img_size' — confirm
        # against SwinIR's constructor signature.
        generator = SwinIR(in_chans=3,
                           upscale=cfg.upscale,
                           img_siz=cfg.image_size,
                           window_size=cfg.window_size,
                           depths=cfg.depths,
                           embed_dim=cfg.embed_dim,
                           num_heads=cfg.num_heads,
                           mlp_ratio=cfg.mlp_ratio,
                           upsampler=cfg.upsampler,
                           activation=opt.activation,
                           resi_connection=cfg.resi_connection)
    elif opt.gene == 'convnext':
        generator = ConvNeXt(in_chans=3,
                             depths=[6, 6, 6, 6],
                             # depths=[3, 3, 27, 3],
                             dims=[180, 180, 180, 180],
                             )
    if opt.weight is not None:
        load_weight(generator, opt.weight, device)
    return generator.to(device)
| thisisiron/dacon-sr | networks/__init__.py | __init__.py | py | 3,921 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "torch.cuda.device_count",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "torch.save",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.save",
"... |
debug = True

from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from Cython.Compiler.Options import get_directive_defaults

import numpy as np

numpy_include_dir = np.get_include()

# Enable Cython line tracing / binding globally so the compiled modules can be
# profiled and covered.  CYTHON_TRACE below is toggled by the `debug` flag.
get_directive_defaults()['linetrace'] = True
get_directive_defaults()['binding'] = True

my_extra_compile_args = ['-fopenmp', '-ffast-math', '-O3']
my_extra_link_args = ['-fopenmp']


def _make_extension(name, libraries=None):
    """Build one Extension with the shared NumPy/OpenMP/tracing configuration.

    (Replaces four copy-pasted Extension blocks that differed only in the
    module name and, for one, an extra `libraries` argument.)
    """
    kwargs = {'libraries': libraries} if libraries else {}
    return Extension(
        name,
        sources=[f"{name}.pyx"],
        include_dirs=[numpy_include_dir],
        define_macros=[('CYTHON_TRACE', '1' if debug else '0')],
        extra_compile_args=my_extra_compile_args,
        extra_link_args=my_extra_link_args,
        **kwargs,
    )


ext_modules = [
    _make_extension("fmm_tree_c"),
    _make_extension("tree_build_routines_c"),
    _make_extension("list_build_routines_c"),
    _make_extension("tree_compute_routines_c", libraries=["m"]),
]

setup(name="Tree_Compute_Routines_C",
      ext_modules=cythonize(ext_modules)
      )
{
"api_name": "numpy.get_include",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "Cython.Compiler.Options.get_directive_defaults",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "Cython.Compiler.Options.get_directive_defaults",
"line_number": 25,
"usa... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by i@BlahGeek.com at 2015-07-31

import yaml
import sys

if __name__ == '__main__':
    # Merge the body text (argv[2]) into the YAML data file (argv[1]) under
    # the 'body' key, rewriting argv[1] in place.  Files are now opened with
    # context managers (previously the read handles were never closed).
    with open(sys.argv[1]) as f:
        data = yaml.load(f.read(), Loader=yaml.FullLoader)
    with open(sys.argv[2]) as f:
        body = f.read()
    data['body'] = body
    with open(sys.argv[1], 'w') as f:
        f.write(yaml.dump(data, default_flow_style=False))
{
"api_name": "yaml.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "yaml.FullLoader",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number... |
import requests
import json
import sqlite3

# Fetch the Curiosity rover's Mars photos for a user-supplied Earth date via
# NASA's Mars Rover Photos API, save the raw response to a JSON file, and
# record each photo's date, camera name, and URL in a SQLite database.

# NOTE(review): the API key is hard-coded; consider reading it from an
# environment variable rather than committing it to source control.
key = '6A7O2ghhDhKe1fBnNVDuVglIz04poXa3c1VJDlm7'

date = input("Enter the date in YYYY-M-D format: ")
payload = {'earth_date': date, 'api_key': key}
url = 'https://api.nasa.gov/mars-photos/api/v1/rovers/curiosity/photos'
r = requests.get(url, params=payload)
print(r.headers)
print(r.status_code)

# Parse the response.  (Renamed from `dict`, which shadowed the builtin; the
# unused pretty-printed string copy was dropped.)
data = json.loads(r.text)

# Save the fetched payload to a JSON file.
with open("mars_data.json", 'w') as file:
    json.dump(data, file, indent=3)

# Connect to the database and create the table if needed.
db = sqlite3.connect("mars.sqlite")
c = db.cursor()
c.execute("""CREATE TABLE if not exists mars
            (id INTEGER PRIMARY KEY AUTOINCREMENT,
            date VARCHAR(50),
            rover VARCHAR(25),
            pic_link VARCHAR(150))
            """)

amount_of_pics = len(data["photos"])

# Print each photo's date and link, and insert a row per photo (parameterized
# SQL).  NOTE(review): the `rover` column actually stores the CAMERA name —
# confirm whether that is intended.
for i in range(amount_of_pics):
    earth_date = data["photos"][i]["earth_date"]
    pic_link = data["photos"][i]["img_src"]
    rover = data["photos"][i]["camera"]["name"]
    print("{} - {}".format(earth_date, pic_link))
    c.execute('INSERT INTO mars (date, rover, pic_link) VALUES (?,?,?)', (earth_date, rover, pic_link))
    db.commit()

db.close()
| AnaGagnidze/Quiz3 | Quiz3. AG.py | Quiz3. AG.py | py | 2,374 | python | ka | code | 1 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 19,... |
13000663213 | import os
import pytest
import yaml
@pytest.fixture
def zuul_data():
    """Inventory (and optional extra data) for testinfra checks on gate hosts."""
    data = {}
    with open('/home/zuul/src/github.com/opentelekomcloud-infra/system-config/inventory/base/gate-hosts.yaml') as f:
        inventory = yaml.safe_load(f)
    data['inventory'] = inventory

    # TESTINFRA_EXTRA_DATA may be unset; guard before os.path.exists, which
    # raises TypeError when handed None.
    zuul_extra_data_file = os.environ.get('TESTINFRA_EXTRA_DATA')
    if zuul_extra_data_file and os.path.exists(zuul_extra_data_file):
        with open(zuul_extra_data_file, 'r') as f:
            extra = yaml.safe_load(f)
        data['extra'] = extra
    return data
| opentelekomcloud-infra/system-config | testinfra/conftest.py | conftest.py | py | 531 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "yaml.safe_load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"l... |
9193917776 | import torch
import torch.nn as nn
def _get_simclr_projection_head(num_ftrs: int, out_dim: int):
"""Returns a 2-layer projection head.
Reference (07.12.2020):
https://github.com/google-research/simclr/blob/master/model_util.py#L141
"""
modules = [
nn.Linear(num_ftrs, num_ftrs),
#nn.BatchNorm1d(num_ftrs),
nn.ReLU(),
nn.Linear(num_ftrs, out_dim)
]
return nn.Sequential(*modules)
class SimCLR(nn.Module):
    """Implementation of the SimCLR[0] architecture.

    A backbone extracts features which are flattened and passed through a
    2-layer projection head (Linear -> ReLU -> Linear).

    Recommended loss: :py:class:`lightly.loss.ntx_ent_loss.NTXentLoss`

    [0] SimCLR, 2020, https://arxiv.org/abs/2002.05709

    Attributes:
        backbone:
            Backbone model to extract features from images.
        num_ftrs:
            Dimension of the embedding (before the projection head).
        out_dim:
            Dimension of the output (after the projection head).
    """

    def __init__(self,
                 backbone: nn.Module,
                 num_ftrs: int = 32,
                 out_dim: int = 128):
        super().__init__()
        self.backbone = backbone
        # Inlined equivalent of _get_simclr_projection_head(num_ftrs, out_dim).
        self.projection_head = nn.Sequential(
            nn.Linear(num_ftrs, num_ftrs),
            nn.ReLU(),
            nn.Linear(num_ftrs, out_dim),
        )

    def forward(self,
                x0: torch.Tensor,
                x1: torch.Tensor = None,
                return_features: bool = False):
        """Embed and project one or two batches of images.

        Each input is passed through the backbone, flattened from dim 1,
        and projected.  With ``return_features=True`` each result is a tuple
        ``(projection, backbone_features)``.  When ``x1`` is ``None`` only
        the result for ``x0`` is returned; otherwise a pair is returned.

        Args:
            x0:
                Tensor of shape bsz x channels x W x H.
            x1:
                Tensor of shape bsz x channels x W x H.
            return_features:
                Whether or not to return the intermediate features backbone(x).

        Examples:
            >>> # single input, single output
            >>> out = model(x)
            >>>
            >>> # single input with return_features=True
            >>> out, f = model(x, return_features=True)
            >>>
            >>> # two inputs, two outputs
            >>> out0, out1 = model(x0, x1)
            >>>
            >>> # two inputs, two outputs with return_features=True
            >>> (out0, f0), (out1, f1) = model(x0, x1, return_features=True)
        """
        out0 = self._embed(x0, return_features)
        if x1 is None:
            return out0
        return out0, self._embed(x1, return_features)

    def _embed(self, x: torch.Tensor, return_features: bool):
        # Backbone features are flattened before projection.
        features = self.backbone(x).flatten(start_dim=1)
        projected = self.projection_head(features)
        if return_features:
            return projected, features
        return projected
{
"api_name": "torch.nn.Linear",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": ... |
18924234195 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver import ActionChains
import time
class MouseHovering():
    """Demo: hover the mouse over an element and click an item in its hover menu."""

    def test1(self):
        """Open the practice page in Firefox, hover '#mousehover', click 'Top'."""
        baseUrl = "https://letskodeit.teachable.com/pages/practice"
        driver = webdriver.Firefox()
        driver.maximize_window()
        driver.get(baseUrl)
        driver.implicitly_wait(3)
        # Scroll down so the hover target is inside the viewport.
        driver.execute_script("window.scrollBy(0, 600);")
        time.sleep(2)
        element = driver.find_element(By.ID, "mousehover")
        itemToClickLocator = ".//div[@class='mouse-hover-content']//a[text()='Top']"
        try:
            actions = ActionChains(driver)
            actions.move_to_element(element).perform()
            print("Mouse Hovered on element")
            time.sleep(2)
            topLink = driver.find_element(By.XPATH, itemToClickLocator)
            actions.move_to_element(topLink).click().perform()
            print("Item Clicked")
        # NOTE(review): bare except swallows every error (including KeyboardInterrupt);
        # narrowing to selenium's WebDriverException would be safer.
        except:
            print("Mouse Hover failed on element")
# Instantiate and run the hover demo immediately when the module is executed.
ff = MouseHovering()
ff.test1()
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "selenium.web... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.