seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7921292233 | import pytest
from scripts.create_type_file import get_properties, create_connection_by_url, get_metadata_param
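# Each case pairs a public node websocket endpoint with the network name
# that get_properties() is expected to report for it.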
test_data = [
["wss://rpc.polkadot.io", "Polkadot"],
["wss://kusama-rpc.polkadot.io", "Kusama"],
["wss://westend-rpc.polkadot.io", "Westend"],
["wss://statemine.api.onfinality.io/public-ws", "Statemine"],
["wss://karura-rpc-0.aca-api.network", "Karura"],
["wss://rpc.shiden.astar.network", "Shiden"],
["wss://bifrost-rpc.liebi.com/ws", "Bifrost"],
["wss://heiko-rpc.parallel.fi", "Parallel Heiko"],
["wss://basilisk.api.onfinality.io/public-ws", "Basilisk"],
["wss://fullnode.altair.centrifuge.io", "Altair"],
["wss://spiritnet.kilt.io/", "KILT Spiritnet"],
["wss://kilt-peregrine-k8s.kilt.io", "KILT Peregrine"],
["wss://falafel.calamari.systems/", "Calamari Parachain"],
["wss://quartz.unique.network", "QUARTZ by UNIQUE"],
["wss://pioneer.api.onfinality.io/public-ws", "Pioneer Network"]
]
@pytest.mark.parametrize("url, expected", test_data)
def test_properties(url, expected):
network_property = get_properties(create_connection_by_url(url))
assert network_property.name == expected
@pytest.mark.parametrize("url, expected", test_data)
def test_type_building(url, expected):
network_types = get_metadata_param(create_connection_by_url(url))
assert isinstance(network_types.runtime_id, int) | zktony/nova-utils | tests/test_type_creation.py | test_type_creation.py | py | 1,386 | python | en | code | null | github-code | 36 | [
{
"api_name": "scripts.create_type_file.get_properties",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "scripts.create_type_file.create_connection_by_url",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 22,
"u... |
29698007156 | import pandas as pd
import ppscore as pps
import streamlit as st
import plotly.express as px
from utils.helpers import (plot_binary_feature,
plot_feature_distribution,
define_lr_pipeline,
get_cross_val_score)
from utils.constants import (TARGET_COL,
N_JOBS,
N_SPLITS,
RANDOM_STATE)
def introduction(df: pd.DataFrame):
st.write(f"""
For this problem we are going to use churn data, downloaded from Kaggle:
""")
st.dataframe(df.head())
st.write(f"""
    It consists of {df.shape[0]} rows and {df.shape[1]} columns
""")
st.write(f"""
# Analysis
Let's analyze each column:
""")
st.write(f"""
### _customerID_
This column seems to be an identifier. How many unique values does it have?
`{df['customerID'].nunique()}`
Same as number of rows! It's an identifier column. We should drop it for modelling
""")
binary_cols = [col for col in df.columns
if (df[col].nunique() == 2)
and (col != 'Churn')]
other_cols = list(set(df.columns).difference(set(binary_cols)))
other_cols = [col for col in other_cols if (col != 'Churn') and (col != 'customerID')]
[plot_binary_feature(df, col) for col in binary_cols]
[plot_feature_distribution(df, col) for col in other_cols]
st.write(f"""
### Null values
""")
st.write(df.isnull().any().any())
st.write(f"""
Predictive Power Score
""")
pps_churn = pd.concat([
pd.Series(pps.score(df, col, 'Churn').get('ppscore'), name=col)
for col in df.columns if col != 'Churn'
], axis=1).T.reset_index().rename(columns={'index': 'Feature', 0: 'Score'}).sort_values(by='Score')
st.dataframe(pps_churn)
st.plotly_chart(
px.bar(pps_churn, x='Feature', y='Score')
)
pps_matrix = pps.matrix(df)
st.plotly_chart(
px.imshow(pps_matrix,
x=pps_matrix.columns.tolist(),
y=pps_matrix.columns.tolist())
)
st.write(f"""
# Model
""")
st.write(f"""
## Logistic Regression
""")
metrics = ['accuracy', 'f1']
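    # Drop the identifier column and encode the target as 0/1 before fitting.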
df.drop(columns='customerID', inplace=True)
df['Churn'] = df['Churn'].replace({'No': 0,
'Yes': 1})
lr_pipeline = define_lr_pipeline(df, TARGET_COL, N_JOBS, RANDOM_STATE)
results_lr_pipeline = get_cross_val_score(lr_pipeline,
df, TARGET_COL,
N_SPLITS, metrics,
N_JOBS, RANDOM_STATE)
st.write(f"""
`{results_lr_pipeline["test_accuracy"].mean().round(2)}`
± `{results_lr_pipeline["test_accuracy"].std().round(2)}`
""")
st.write(f"""
`{results_lr_pipeline["test_f1"].mean().round(2)}`
± `{results_lr_pipeline["test_f1"].std().round(2)}`
""")
st.write(f"""
## Decision tree
""")
st.write(f"""
## Boosting trees
""")
| diegoglozano/interpretable-ml | modules/introduction.py | introduction.py | py | 3,220 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "streamlit.write",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "streamlit.dataframe",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "streamlit.w... |
34202592392 | #!/usr/bin/env python
import click
from intercom.models import Location
from intercom.repositories import CustomerRepository
from intercom.services import get_customers_close_to_office
@click.command()
@click.option('--dataset', default='dataset/customers.txt', help='Path to the dataset.')
@click.option('--max-distance', default=100.0, type=float, help='Maximum distance from the office.')
def command(dataset, max_distance):
office_location = Location(latitude=53.339428, longitude=-6.257664) # Dublin
customer_repository = CustomerRepository(dataset)
# Get the customers close to the office and sort them by `user_id`
customers = get_customers_close_to_office(customer_repository, office_location, max_distance)
customers = sorted(customers, key=lambda x: x.user_id)
for customer in customers:
click.echo(f'[user_id={customer.user_id}] {customer.name}')
if __name__ == '__main__':
command()
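# Example invocation (values are illustrative):
#   python cli.py --dataset dataset/customers.txt --max-distance 100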
| Wicker25/intercom-test | cli.py | cli.py | py | 941 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "intercom.models.Location",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "intercom.repositories.CustomerRepository",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "intercom.services.get_customers_close_to_office",
"line_number": 18,
"u... |
3321900706 | import numpy as np
import matplotlib.pyplot as plt
def f(x):
return x**2 + (np.sin(9*x))**2
n=8
xi = [i/n for i in range(n+1)]
yi = [f(xi[i]) for i in range(n+1)]
# table of divided differences:
T = [yi]
for j in range(n+1):
T += [[(T[j][i+1]-T[j][i])/(xi[i+1+j]-xi[i]) for i in range(n-j)]]
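# T[j][i] holds the j-th order divided difference f[x_i, ..., x_{i+j}].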
# interpolating polynomial (Newton form):
def p(x):
    result = T[0][0]
    factor = 1
    for k in range(1, n + 1):
        factor = factor * (x - xi[k - 1])
        result = result + T[k][0] * factor
    return result
# plots:
x = np.arange(start=0, stop=1.001, step=0.001)
y = p(x)
ff = f(x)
plt.plot(x,ff, label='f(x)')
plt.plot(x, y, 'r-', label='p(x)')
pontos = np.array([ [xi[i],yi[i]] for i in range(n+1) ])
xx, yy = pontos.T
plt.scatter(xx,yy, color='k')
plt.axis([0, 1, -1.6, 1.8])
#plt.axvline(0, color='k')
#plt.axhline(0, color='k')
plt.grid()
plt.legend()
plt.savefig('ex2-pol.pdf', bbox_inches='tight')
plt.show()
{
"api_name": "numpy.sin",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
30582925697 | # -*- coding: utf-8 -*-
"""
Group 8 Final Project - Automatic Image Colorization
Ahmed Nasrallah, Touseef Ali, Hitesh Kumar
"""
#%%
import keras
from keras.preprocessing import image
from keras.engine import Layer
from keras.layers import Conv2D, Conv3D, UpSampling2D, InputLayer, Conv2DTranspose, Input, Reshape, merge, concatenate
from keras.layers import Activation, Dense, Dropout, Flatten
from keras.layers.normalization import BatchNormalization
from keras.callbacks import TensorBoard
from keras.models import Sequential, Model
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.models import load_model
from skimage.color import rgb2lab, lab2rgb, rgb2gray, gray2rgb
from skimage.transform import resize, rescale
from skimage.io import imsave
from time import time
import numpy as np
import os
import random
import tensorflow as tf
from PIL import Image, ImageFile
import scipy
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
#%% Using VGG16 as feature extractor
vggmodel = keras.applications.vgg16.VGG16()
newmodel = Sequential()
num = 0
for i, layer in enumerate(vggmodel.layers):
if i<19:
newmodel.add(layer)
newmodel.summary()
for layer in newmodel.layers:
layer.trainable=False
#%% Getting Training Images and extracting features from training images
path = 'train_data/'
train_datagen = ImageDataGenerator(rescale=1. / 255)
train = train_datagen.flow_from_directory(path, target_size=(224, 224),batch_size=500,class_mode=None)
X =[]
Y =[]
for img in train[0]:
try:
lab = rgb2lab(img)
X.append(lab[:,:,0])
Y.append(lab[:,:,1:] / 128)
except:
print('error')
X = np.array(X)
Y = np.array(Y)
X = X.reshape(X.shape+(1,))
print(X.shape)
print(Y.shape)
vggfeatures = []
for i, sample in enumerate(X):
sample = gray2rgb(sample)
sample = sample.reshape((1,224,224,3))
prediction = newmodel.predict(sample)
prediction = prediction.reshape((7,7,512))
vggfeatures.append(prediction)
vggfeatures = np.array(vggfeatures)
print(vggfeatures.shape)
#%% Building Model
#Encoder
encoder_input = Input(shape=(7, 7, 512,))
#Decoder
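# Five 2x upsampling stages take the 7x7x512 VGG features back up to a
# 224x224x2 prediction of the a/b colour channels (7 * 2**5 = 224).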
decoder_output = Conv2D(256, (3,3), activation='relu', padding='same')(encoder_input)
decoder_output = Conv2D(128, (3,3), activation='relu', padding='same')(decoder_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)
decoder_output = Conv2D(64, (3,3), activation='relu', padding='same')(decoder_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)
decoder_output = Conv2D(32, (3,3), activation='relu', padding='same')(decoder_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)
decoder_output = Conv2D(16, (3,3), activation='relu', padding='same')(decoder_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)
decoder_output = Conv2D(2, (3, 3), activation='tanh', padding='same')(decoder_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)
model = Model(inputs=encoder_input, outputs=decoder_output)
#%% Training Model
tensorboard = TensorBoard(log_dir='Model/')
model.compile(optimizer='Adam', loss='mse' , metrics=['accuracy'])
history = model.fit(vggfeatures, Y, validation_split=0.1, verbose=1, epochs=50, batch_size=25)
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper right')
plt.savefig('train_loss_tempe.png')
plt.show()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper right')
plt.savefig('train_acc_tempe.png')
plt.show()
#%% Testing
testpath = 'testing_set/'
files = os.listdir(testpath)
for idx, file in enumerate(files):
test = img_to_array(load_img(testpath+file))
test = resize(test, (224,224), anti_aliasing=True)
    test *= 1.0 / 255  # scale pixel values into [0, 1] before rgb2lab
lab = rgb2lab(test)
l = lab[:,:,0]
L = gray2rgb(l)
L = L.reshape((1,224,224,3))
#print(L.shape)
vggpred = newmodel.predict(L)
ab = model.predict(vggpred)
#print(ab.shape)
ab = ab*128
cur = np.zeros((224, 224, 3))
cur[:,:,0] = l
cur[:,:,1:] = ab
p = lab2rgb(cur)
imsave('results/'+str(idx)+".jpg", p)
| hitsasu/image-colorization | code/color.py | color.py | py | 4,440 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "keras.applications.vgg16.VGG16",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "keras.applications",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "keras.models.Sequential",
"line_number": 33,
"usage_type": "call"
},
{
"ap... |
38802468088 | import numpy as np
import cv2
import os
import time
import crop
import sys
from PIL import Image
def mean(x):
return sum(x) / len(x)
def showimage(name, image):
cv2.imshow(name, image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def grouping(img):
print('the grouping part')
image = cv2.imread(img)
pos1 = np.where(image == [255, 0, 0])
prev_len_of_img = len(pos1[0])
print("original image blue pixels count",prev_len_of_img)
imagegray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
pos1 = np.where(imagegray > 128)
imagegray[pos1] = 255
    pos2 = np.where((imagegray > 80) & (imagegray < 120))
imagegray[pos2] = 128
pos3 = np.where(imagegray == 255)
imagegray[pos3] = 0
p = np.where(imagegray > 120)
a = list(filter(lambda x: x < 600, p[0]))
l = len(a)
b = p[1]
b = b[:len(a)]
for i in range(len(a) - 3):
x = a[i]
y = b[i]
try:
            for di in range(1, 5):
                for dj in range(1, 5):
                    if (imagegray[x + di][y] < 10 and imagegray[x - di][y] < 10
                            and imagegray[x][y + di] < 10 and imagegray[x][y - di] < 10
                            and imagegray[x - di][y - dj] < 10 and imagegray[x + di][y + dj] < 10
                            and imagegray[x - di][y + dj] < 10 and imagegray[x + di][y - dj] < 10):
image[x][y - 1] = [255, 0, 0]
image[x][y - 2] = [255, 0, 0]
image[x][y - 3] = [255, 0, 0]
image[x][y - 4] = [255, 0, 0]
except:
pass
return image, prev_len_of_img
def gradient(image):
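    # Flag pixels whose mean intensity differs by more than 50 from three of
    # their immediate neighbours at once, then mark those pixels blue.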
a = []
b = []
c = []
f = []
for i in range(len(image) - 2):
for j in range(len(image[i]) - 2):
if (all(image[i][j] == 0)):
continue
if (abs(mean(image[i][j]) - mean(image[i][j - 1]))) > 50: # vertical
# print("",abs(mean(image[i][j]) - mean(image[i][j - 1])))
a.append((i, j))
if (abs(mean(image[i][j]) - mean(image[i - 1][j]))) > 50: # vertical
b.append((i, j))
if (abs(mean(image[i][j]) - mean(image[i][j + 1]))) > 50: # vertical
c.append((i, j))
# if (abs(mean(image[i][j]) - mean(image[i + 1][j]))) > 50: # vertical
# f.append((i, j))
# if (abs(mean(image[i][j]) - mean(image[i - 1][j - 1]))) > 50: # vertical
# # print ('', abs(mean(image[i][j]) - mean(image[i + 1][j + 1])))
# f.append((i, j))
d = set(a).intersection(set(b))
e = set(d).intersection(set(c))
# e = set(g).intersection(set(f))
for i in e:
image[i[0]][i[1]] = [255, 0, 0]
showimage("", image)
step1 = image
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# showimage('',image_gray)
pos1 = np.where(image_gray >= 0)
image_gray[pos1] = 255
positionblue = np.where(step1 == [255, 0, 0])
a = positionblue[0]
b = positionblue[1]
for i in e:
image_gray[i[0]][i[1]] = 128
showimage('image_gray', image_gray)
return image_gray
if __name__ == "__main__":
start_time = time.time()
path = os.getcwd()
images_path_largedent = os.chdir('./largedentimages/')
arr=[]
for i in os.listdir('.'):
arr.append(i)
for i in arr:
if i.endswith(".jpg"):
image_name=i
print(i)
fname,fext=os.path.splitext(image_name)
image = cv2.imread(image_name)
crop1, _ = crop.crop_radial_arc_two_centres(image, centre_x1=340, centre_y1=-50, centre_x2=340,
centre_y2=-10,radius1=160, radius2=350, theta1=215, theta2=325)
# blur = cv2.medianBlur(image,5)
# blur = ndimage.gaussian_filter(crop1, sigma=3)
# showimage('',blur)
path = os.getcwd()
images_path_output = os.chdir('./output/')
image_cropped_name = fname + "cropped.jpg"
cv2.imwrite(image_cropped_name, crop1)
img = cv2.imread(image_cropped_name)
crop_img = img[190:530, 700:1300]
# showimage('', crop_img)
print("gradient start")
imagegrayed = gradient(crop1)
# showimage('', imagegrayed)
image1 = fname + '_gray_image.jpg'
cv2.imwrite(image1, imagegrayed)
"""
print("groupping start")
finalimage, pixels_cnt = grouping(image1)
final = fname + '_final_image.jpg'
cv2.imwrite(final, finalimage)
# image_final = cv2.imread(final)
# showimage("final_image", image)
pos1 = np.where(finalimage == [255, 0, 0])
print('Final length of image',len(pos1[0]))
sec_pixels_cound = len(pos1[0])
final_pixels_cnt = sec_pixels_cound - pixels_cnt
if final_pixels_cnt == 0:
print ("correct Part", final_pixels_cnt)
else:
print ("Faulty Part", final_pixels_cnt)
path = os.getcwd()
images_path_largedent1 = os.chdir('..')
"""
print("--- %s seconds ---" % (time.time() - start_time))
| kaustubhdeokar/FinalTrendzlinkImage | gradient.py | gradient.py | py | 5,348 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imshow",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_n... |
32413118307 | from flask import Flask, render_template, redirect, url_for
from flask_bootstrap import Bootstrap
from flask_ckeditor import CKEditor
from datetime import date
from flask_sqlalchemy import SQLAlchemy
import os
app = Flask(__name__)
ckeditor = CKEditor(app)
Bootstrap(app)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get("DATABASE_URL", "sqlite:///review.db")
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Review(db.Model):
__tablename__ = "reviews"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), nullable=False)
rating = db.Column(db.String(250), nullable=False)
review = db.Column(db.String(250), nullable=False)
img_url = db.Column(db.String(250), nullable=False)
db.create_all()
@app.route('/')
def home():
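    # Count the promo images in static/assets/promo; the -1 assumes one
    # non-image entry (e.g. a placeholder file) lives in that directory.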
No_of_files = len(os.listdir("static/assets/promo")) - 1
reviews = Review.query.all()
return render_template("index.html", n=No_of_files, reviews=reviews)
@app.route('/contact')
def contact():
return render_template("contact.html")
@app.route('/about')
def about():
return render_template("about.html")
if __name__ == "__main__":
app.run(debug=True)
| matic56789/TriBap | main.py | main.py | py | 1,194 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask_ckeditor.CKEditor",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_bootstrap.Bootstrap",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.enviro... |
4065634528 | """
Module to detect new parkrun events and coordinate the notification system.
Notifications are sent out via gmail, with credentials stored in a credentials.toml
file.
"""
import mimetypes
import smtplib
from datetime import datetime
from email.message import EmailMessage
from pathlib import Path
from typing import Optional, Union
import pandas as pd
import plotly.express as px
from utils import read_json, read_toml
DATE_TODAY = datetime.today().strftime("%Y-%m-%d")
MAP_SAVE_FNAME = Path("maps", DATE_TODAY + ".html")
def find_latest_n_jsons(uk_jsons: Union[Path, str], n: int = 2) -> list[str]:
"""Finds the latest n jsons saved in the saved json directory.
Deliberately uses the date in the filename instead of accessing
file date created data, as it's likely going to be written to a
data lake in the future.
Args:
        uk_jsons (Union[Path, str]): directory containing saved json files
n (int, optional): Quantity of latest files to return. Defaults to 2.
Returns:
list[str]: filenames (dates) of latest files
"""
files = Path(uk_jsons).glob("**/*")
file_dates = [x.stem for x in files]
return sorted(file_dates, reverse=True)[:n]
def read_latest_n_jsons(
json_load_dir: Union[Path, str], latest_files: list[str]
) -> list[dict]:
"""Loads in the latest jsons as specified by the latest_files list
Args:
        json_load_dir (Union[Path, str]): directory containing saved jsons
latest_files (list[str]): json filenames to load
Returns:
list[dict]: list containing loaded jsons
"""
newest_jsons = []
for filename in latest_files:
full_filename = Path(json_load_dir, filename + ".json")
newest_jsons.append(read_json(full_filename))
return newest_jsons
def find_new_parkruns(new_json: dict, prev_json: dict) -> list[str]:
"""Compare two json files and retrive events that exist in new_json
that do no exist in prev_json.
Args:
new_json (dict): newest parkrun json
prev_json (dict): previous parkrun json
Returns:
list[str]: event names found in new but not prev
"""
new_event_names = []
for event in new_json:
new_event_names.append(event["properties"]["EventLongName"])
prev_event_names = []
for event in prev_json:
prev_event_names.append(event["properties"]["EventLongName"])
return [i for i in new_event_names if i not in prev_event_names]
def find_parkun_locations(
parkrun_json: dict, event_name: str
) -> tuple[str, list[float]]:
"""Retrive location (lat,lon) data for
Args:
parkrun_json (dict): json containing event names and location data
event_name (str): events to find location data for
Returns:
tuple[str, list[float]]: tuple containing lat, lon for events specified
"""
for i in range(len(parkrun_json)):
if parkrun_json[i]["properties"]["EventLongName"] == event_name:
return event_name, parkrun_json[i]["geometry"]["coordinates"]
def create_map(
new_parkun_dict: dict, mapbox_token: str, save_fname: Union[Path, str]
) -> None:
"""Creates and saves scatter map containing events specified in the
new_parkrun_dict
Args:
new_parkun_dict (dict): dict containing event names and lat,lon data
mapbox_token (str): mapbox credentials
save_fname (Union[Path, str]): filename of saved scatter
"""
df = (
pd.DataFrame.from_dict(new_parkun_dict)
.T.reset_index()
.rename(columns={"index": "Event Name", 0: "lon", 1: "lat"})
)
px.set_mapbox_access_token(mapbox_token)
fig = px.scatter_mapbox(
df,
lat="lat",
lon="lon",
hover_name="Event Name",
size_max=15,
zoom=3.5,
title="New Parkrun Events",
mapbox_style="open-street-map",
)
fig.write_html(save_fname)
return None
def send_email(
sender_address: str,
sender_password: str,
recipient_address: str,
mail_content: str,
attachment: Optional[Path],
attachment_fname: str,
) -> None:
"""Coordinates the automatic email notification system.
Args:
sender_address (str): email address of the system sender
        sender_password (str): password associated with the email address.
Note, for gmail accounts, this must be an "app password",
not the typical login password.
recipient_address (str): email address of recipient
mail_content (str): text content of the email
attachment (Optional[Path]): attachment to include in email
attachment_fname (str): name of the attachment to appear in the sent email
"""
message = EmailMessage()
message["From"] = sender_address
message["To"] = recipient_address
message["Subject"] = "This weeks new parkrun(s)"
body = mail_content
message.set_content(body)
if attachment is not None:
mime_type, _ = mimetypes.guess_type(attachment)
mime_type, mime_subtype = mime_type.split("/")
with open(attachment, "rb") as file:
message.add_attachment(
file.read(),
maintype=mime_type,
subtype=mime_subtype,
filename=attachment_fname,
)
mail_server = smtplib.SMTP_SSL("smtp.gmail.com")
mail_server.login(sender_address, sender_password)
mail_server.send_message(message)
mail_server.quit()
print("Mail sent")
return None
def email_new_events_main() -> None:
"""main function to coordinate the detection of new events, generation of maps, and
emailing to recipients.
"""
# Load credentials
config = read_toml("credentials.toml")
uk_json_save_dir = config["json_directories"]["uk_save_dir"]
mapbox_token = config["mapbox_token"]["mapbox_token"]
sender_address = config["gmail"]["sender_address"]
gmail_password = config["gmail"]["password"]
recipient_addresses = config["gmail"]["recipient_addresses"]
# Find new parkruns
latest_jsons_fnames = find_latest_n_jsons(uk_json_save_dir)
latest_jsons = read_latest_n_jsons(uk_json_save_dir, latest_jsons_fnames)
new_json = latest_jsons[0]
prev_json = latest_jsons[1]
new_parkruns = find_new_parkruns(new_json, prev_json)
new_parkrun_loc_dict = {}
for run in new_parkruns:
new_parkrun_loc_dict[run] = find_parkun_locations(new_json, run)[1]
# Create new parkrun map
if len(new_parkrun_loc_dict.keys()) > 0:
create_map(new_parkrun_loc_dict, mapbox_token, MAP_SAVE_FNAME)
# Email new parkruns to recipients
new_parkruns_content = "\n".join([x for x in new_parkrun_loc_dict.keys()])
mail_content = f"This weeks new parkruns:\n {new_parkruns_content}"
attachment = MAP_SAVE_FNAME
if len(new_parkrun_loc_dict.keys()) == 0:
mail_content = "No new parkruns found this week!"
attachment = None
for recipient in recipient_addresses:
send_email(
sender_address,
gmail_password,
recipient,
mail_content,
attachment,
f"{DATE_TODAY}" + ".html",
)
return None
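# A minimal entry point (an assumption; the original may be driven by a
# scheduler instead) so the pipeline can also be run directly.
if __name__ == "__main__":
    email_new_events_main()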
| jmoro0408/parkrun | email_new_events.py | email_new_events.py | py | 7,229 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.today",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typing.Union",... |
20445071495 | import logging as l
import sys
import os
import json
logger = l.getLogger("Transformer")
logger.setLevel(l.DEBUG)
formatter = l.Formatter('%(asctime)s | %(levelname)s | %(message)s')
stdout_handler = l.StreamHandler(sys.stdout)
stdout_handler.setLevel(l.INFO)
stdout_handler.setFormatter(formatter)
fh = l.FileHandler("/home/devadmin/Pythons_Scripts/MS_DB/SCDashBoard/pipline/logs/Transformer.log", "w")
fh.mode = 'w'
fh.setFormatter(formatter)
logger.addHandler(stdout_handler)
logger.addHandler(fh)
# Transforms scraped SoundCloud page sources and JSON collections into row data.
class transformer:
def __init__(self, mode):
self.mode = mode
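        # mode == "wmf": read cached JSON responses from the tmp directory;
        # any other mode: transform the item list passed in directly.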
@staticmethod
    def fixstr(value):
        return value.replace("'", "''") if value else "null"
@staticmethod
def fixdate(date):
if date :
jour = date.split("T")[0]
heure = date.split("T")[1].split(".")[0]
return f'{jour} {heure}'
else :
return '0000-00-00 00:00:00'
@staticmethod
    def fixinteger(value):
# sourcery skip: last-if-guard, remove-redundant-slice-index, remove-unnecessary-else, simplify-negative-index, swap-if-else-branches
if value:
# determine multiplier
if value.find(",") != -1:
value = f"{value.split(',')[0]}.{value.split(',')[1]}"
multiplier = 1
if value.endswith('K'):
multiplier = 1000
value = value[0:len(value)-1] # strip multiplier character
elif value.endswith('M'):
multiplier = 1000000
value = value[0:len(value)-1] # strip multiplier character
return int(float(value) * multiplier)
else:
            logger.info(f'integer {value} = 0')
return 0
def transformPageSourceToRowData(data):
# sourcery skip: extract-duplicate-method, instance-method-first-arg-name
logger.info("")
logger.info("<<DEBUT TRANSFORMATION>>")
logger.info("Récupération des items contnenant les données...")
tracks = data.find_all("li", {"class": "soundList__item"})
logger.info("Transformation des données... ")
logger.info("")
datas = []
for track in tracks:
d_data = {
"artist": transformer.fixstr(track.find("span", {"class": "soundTitle__usernameText"}).get_text(strip=True)),
"date": transformer.fixdate(track.find("time", {"class": "relativeTime sc-text-secondary sc-text-captions"}).get('datetime')),
}
try:
d_data["title"] = transformer.fixstr(track.find("a", {"class": "sc-link-primary soundTitle__title sc-link-dark sc-text-h4"}).findChildren()[0].get_text(strip=True))
except(AttributeError):
d_data["title"] = transformer.fixstr(track.find("a", {"class": "sc-link-primary soundTitle__title g-opacity-transition-500 g-type-shrinkwrap-block g-type-shrinkwrap-primary theme-dark sc-text-h4"}).findChildren()[0].get_text(strip=True))
try:
d_data["num_likes"] = transformer.fixinteger(track.find("button", {"class": "sc-button-like sc-button-secondary sc-button sc-button-small sc-button-responsive"}).get_text())
except(ValueError):
d_data["num_likes"] = 0
if track.find("a", {"class": "sc-ministats sc-ministats-small sc-ministats-comments sc-link-secondary"}):
d_data["num_comments"] = transformer.fixinteger(track.find("a", {"class": "sc-ministats sc-ministats-small sc-ministats-comments sc-link-secondary"}).findChildren()[1].get_text())
else:
d_data["num_comments"] = 0
if track.find("span", {"class": "sc-ministats sc-ministats-small sc-ministats-plays sc-text-secondary"}):
d_data["num_streams"] = transformer.fixinteger(track.find("span", {"class": "sc-ministats sc-ministats-small sc-ministats-plays sc-text-secondary"}).findChildren()[1].get_text())
else:
d_data["num_streams"] = 0
if track.find("span", {"class": "sc-truncate sc-tagContent"}):
if transformer.fixstr(track.find("span", {"class": "sc-truncate sc-tagContent"}).get_text().lower())==data.find("h1", {"class": "tagsMain__title sc-py-1x sc-px-2x sc-mb-2x"}).get_text().split("#")[-1]:
d_data["main_tag"] = transformer.fixstr(track.find("span", {"class": "sc-truncate sc-tagContent"}).get_text())
else:
d_data["main_tag"] = transformer.fixstr(track.find("span", {"class": "sc-truncate sc-tagContent"}).get_text())
d_data["taglist"] = data.find("h1", {"class": "tagsMain__title sc-py-1x sc-px-2x sc-mb-2x"}).get_text().split("#")[-1]
else:
d_data["main_tag"] = "Null"
d_data["taglist"] = data.find("h1", {"class": "tagsMain__title sc-py-1x sc-px-2x sc-mb-2x"}).get_text().split("#")[-1]
if track.find("a", {"class": "sc-ministats sc-ministats-small sc-link-secondary sc-text-h4 sc-ministats-followers "}):
d_data["num_abonnes"] = transformer.fixinteger(track.find("a", {"class": "sc-ministats sc-ministats-small sc-link-secondary sc-text-h4 sc-ministats-followers "}).findChildren()[1].get_text())
else:
d_data["num_abonnes"] = 0
logger.info("Nouvelle ligne :")
logger.info(d_data)
logger.info("")
datas.append(d_data)
logger.info("<<FIN EXTRACTION>>")
return datas
def transformJSONCollectionToRowData(self, datalist):
        def fixTaglist(value):
            if value:
                return value.replace(r"\"", "").replace('"', '').replace(" ", ",").lower()
            else:
                return 'null'
        def fixint(value):
return 0 if value is None else value
        def buildRowData(item):
d_data = {
"title": transformer.fixstr(item["title"]),
"artist": transformer.fixstr(item["user"]["username"].lower()),
"num_abonne": item["user"]["followers_count"],
"date": transformer.fixdate(item["created_at"]),
"num_likes": fixint(item["likes_count"]),
"num_comments": fixint(item["comment_count"]),
"num_streams": fixint(item["playback_count"]),
"main_tag": transformer.fixstr(item["genre"]) if item["genre"] != "" else "null",
"taglist" : fixTaglist(transformer.fixstr(item["tag_list"])) if item["tag_list"] != "" else "null"
}
return d_data
n = 0
datas=[]
logger.info("<<DEBUT TRANSFORMATION>>")
if self.mode=="wmf":
for file in os.listdir("/home/devadmin/Pythons_Scripts/MS_DB/SCDashBoard/tmp"):
with open(f"/home/devadmin/Pythons_Scripts/MS_DB/SCDashBoard/tmp/{file}", "r") as rf :
data = json.load(rf)
for item in data["collection"]:
n = n + 1
logger.debug(f"data[collection] : {item}")
d_data = buildRowData(item)
logger.info(f"Track : {n}\n {d_data}")
datas.append(d_data)
logger.info(f"Nb d'item : {n}")
else:
data = datalist
for item in data:
n = n + 1
d_data = buildRowData(item)
logger.info(f"Track : {n}\n {d_data}")
datas.append(d_data)
logger.info(f"Nb d'item : {n}")
logger.info("")
logger.info("<<FIN TRANSFORMATION>>")
logger.info("")
return datas | BastienEstia/SCDashBoard | pipline/Transformer.py | Transformer.py | py | 7,956 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.StreamHan... |
18794196909 | """
Backend implementation for parsing the LCLS Questionnaire.
"""
import functools
import logging
import re
from typing import Optional
from psdm_qs_cli import QuestionnaireClient
from ..errors import DatabaseError
from .json_db import JSONBackend
logger = logging.getLogger(__name__)
class RequiredKeyError(KeyError):
"""Required key not found in questionnaire."""
...
def _create_motor_callable(name, beamline, info):
"""Create a motor entry."""
container = 'pcdsdevices.happi.containers.Motor'
class_name = None
kwargs = {'name': '{{name}}'}
prefix = info['pvbase']
if info.get('stageidentity') == 'Beckhoff':
class_name = 'pcdsdevices.device_types.BeckhoffAxis'
return create_entry(name, beamline, prefix, kwargs, container,
info, class_name)
def _create_trig_callable(name, beamline, info):
"""Create a trigger entry."""
container = 'pcdsdevices.happi.containers.Trigger'
kwargs = {'name': '{{name}}'}
prefix = info['pvbase']
return create_entry(name, beamline, prefix, kwargs, container, info)
def _create_ai_ao_callable(name, beamline, info):
"""Create an acrommag channel entry."""
container = 'pcdsdevices.happi.containers.Acromag'
class_name = 'pcdsdevices.device_types.AcromagChannel'
prefix = info['pvbase']
ch = info.get('channel')
if not ch:
raise RequiredKeyError('Unable to create an acromag input channel '
'entry without a channel')
name_prefix = 'ai_' if ':ai' in prefix else 'ao_'
name = f'{name_prefix}{ch}'
kwargs = {'name': '{{name}}', 'channel': ch}
return create_entry(name, beamline, prefix, kwargs, container,
info, class_name)
def create_entry(name, beamline, prefix, kwargs, container,
info, class_name=None):
"""
Create a happi_entry.
Parameters
----------
name : str
Item name.
beamline : str
The beamline with which to associate the entry.
prefix : str
Epics base PV.
kwargs : dict
Information to pass through to the device, upon initialization
class_name : str
The class name to report in the new entry.
container : str
The container name to report in the new entry.
info : dict
Device information from `_translate_items`.
"""
entry = {
'_id': name,
'active': True,
'args': ['{{prefix}}'],
'beamline': beamline,
'kwargs': kwargs,
'lightpath': False,
'name': name,
'prefix': prefix,
'type': container,
**info,
}
if class_name is not None:
entry['device_class'] = class_name
return entry
# Map of questionnaire table name to the factory function that builds
# the corresponding happi entry (choosing device class and container).
DEFAULT_TRANSLATIONS = {
'motors': _create_motor_callable,
'trig': _create_trig_callable,
'ao': _create_ai_ao_callable,
'ai': _create_ai_ao_callable,
}
class QuestionnaireHelper:
def __init__(self, client: QuestionnaireClient):
self._client = client
self._experiment = None
self.experiment_to_proposal = client.getExpName2URAWIProposalIDs()
def __repr__(self) -> str:
try:
return (
f'<{self.__class__.__name__} experiment={self.experiment} '
f'run_number={self.run_number} proposal={self.proposal} '
f'beamline={self.beamline}>'
)
except Exception:
return f'<{self.__class__.__name__} experiment={self.experiment}>'
@property
def experiment(self) -> str:
"""The experiment name."""
return self._experiment
@experiment.setter
def experiment(self, experiment: str):
self._experiment = experiment
# Proposals are per-experiment: clear the cache.
self.get_proposal_list.cache_clear()
self.get_run_details.cache_clear()
@property
def proposal(self):
"""Get the proposal number for the configured experiment."""
if self.experiment is None:
raise RuntimeError('Experiment unset')
try:
return self.experiment_to_proposal[self.experiment]
except KeyError:
# Rare case for debug/daq experiments, roll with it for now
return self.experiment
@property
def run_number(self):
"""Get the run number from the experiment."""
if self.experiment is None or len(self.experiment) <= 2:
raise RuntimeError(f'Experiment invalid: {self.experiment}')
run_number = self.experiment[-2:]
return f'run{run_number}'
@functools.lru_cache
def get_proposal_list(self) -> dict:
"""
Get the proposal list (a dict, really) for the configured experiment.
Raises
------
DatabaseError
"""
run_number = self.run_number
try:
logger.debug("Requesting list of proposals in %s", run_number)
return self._client.getProposalsListForRun(run_number)
except KeyError as ex:
# Invalid proposal id for this run
raise DatabaseError(
f'Unable to find proposal {self.proposal}'
) from ex
except Exception as ex:
# Find if our exception gave an HTTP status code and interpret it
status_code = ex.args[1] if len(ex.args) >= 2 else ''
if status_code == 500:
# No information found from run
reason = f'No run id found for {run_number}'
elif status_code == 401:
# Invalid credentials
reason = 'Invalid credentials'
else:
# Unrecognized error
reason = 'Unable to find run information'
raise DatabaseError(reason) from ex
@property
def beamline(self) -> str:
"""
Determine the beamline from a proposal + run_number.
Returns
-------
beamline : str
"""
proposals = self.get_proposal_list()
return proposals[self.proposal]['Instrument']
@functools.lru_cache
def get_run_details(self) -> dict:
"""Get details of the run in a raw dictionary."""
return self._client.getProposalDetailsForRun(
self.run_number, self.proposal
)
def as_happi_database(self, translations=None) -> dict:
"""
Based on the current experiment, generate a happi database.
Parameters
----------
translations : dict, optional
Translations to use when converting questionnaire items.
Returns
-------
db : dict
The happi JSON-backend-compatible dictionary.
"""
return self.to_database(
beamline=self.beamline,
run_details=self.get_run_details(),
translations=translations,
)
@staticmethod
def _translate_items(run_details: dict, table_name: str) -> dict:
"""
Translate flat questionnaire items into nested dictionaries.
Parameters
----------
run_details : dict
The run detail dictionary, from `get_run_details`.
table_name : str
The table name (e.g., 'motors' of 'pcdssetup-motors-1-name').
Returns
-------
device_info : dict
"""
pattern = re.compile(rf'pcdssetup-{table_name}-(\d+)-(\w+)')
devices = {}
for field, value in run_details.items():
match = pattern.match(field)
if match:
device_number, name = match.groups()
if device_number not in devices:
devices[device_number] = {}
# Add the key information to the specific device dictionary
devices[device_number][name] = value
return devices
@staticmethod
def _create_db_item(info: dict,
beamline: str,
method_call,
) -> dict:
"""
Create one database entry given translated questionnaire information.
Parameters
----------
info : dict
Device information from `_translate_items`.
beamline : str
The beamline with which to associate the entry.
        method_call : callable
            Factory function invoked as ``method_call(name, beamline, info)``
            to build the new entry.
Returns
-------
happi_entry : dict
"""
# Shallow-copy to not modify the original:
info = dict(info)
name = info.pop('name', None)
if not name:
raise RequiredKeyError('Unable to create an item without a name')
# There are some easy mistakes we can correct for, otherwise
# happi validation will fail.
# 1. A capitalized name:
name = name.lower()
entry = method_call(name, beamline, info)
# Empty strings from the Questionnaire make for invalid entries:
for key in {'prefix', 'name'}:
if not entry.get(key):
raise RequiredKeyError(
f"Unable to create an item without key {key}"
)
return entry
@staticmethod
def to_database(beamline: str,
run_details: dict,
*,
translations: Optional[dict] = None
) -> dict:
"""
Translate a set of run details into a happi-compatible dictionary.
Parameters
----------
run_details : dict
The run detail dictionary, from `get_run_details`.
beamline : str
The beamline with which to associate the entry.
translations : dict, optional
Translations to use when converting questionnaire items.
Returns
-------
db : dict
The happi JSON-backend-compatible dictionary.
"""
happi_db = {}
if translations is None:
translations = DEFAULT_TRANSLATIONS
for table_name, translation in translations.items():
devices = QuestionnaireHelper._translate_items(
run_details, table_name)
if not devices:
logger.info(
"No device information found under '%s'", table_name
)
continue
for device_number, item_info in devices.items():
logger.debug(
'[%s:%s] Found %s', table_name, device_number, item_info
)
try:
entry = QuestionnaireHelper._create_db_item(
info=item_info,
beamline=beamline,
method_call=translation
)
except RequiredKeyError:
logger.debug(
'Missing key for %s:%s', table_name, device_number,
exc_info=True
)
except Exception as ex:
logger.warning(
'Failed to create a happi database entry from the '
'questionnaire device: %s:%s. %s: %s',
table_name, device_number, ex.__class__.__name__, ex,
)
else:
identifier = entry['_id']
if identifier in happi_db:
logger.warning(
'Questionnaire name clash: %s (was: %s now: %s)',
identifier, happi_db[identifier], entry
)
happi_db[identifier] = entry
return happi_db
class QSBackend(JSONBackend):
"""
    Questionnaire Backend.
This backend connects to the LCLS questionnaire and looks at items with the
    key pattern pcdssetup-{}-{}-{}. These fields are then combined and turned
into proper happi items. The translation of table name to `happi.HappiItem`
is determined by the :attr:`.translations` dictionary. The beamline is
determined by looking where the proposal was submitted.
Unlike the other backends, this one is read-only. All changes to the device
information should be done via the web interface. Finally, in order to
avoid duplicating any code needed to search the device database, the
`QSBackend` inherits directly from `JSONBackend`. Many of the methods are
unmodified with exception being that this backend merely searches through
an in-memory dictionary whereas the `JSONBackend` reads from a file before
searches.
Parameters
----------
expname : str
The experiment name from the elog, e.g. xcslp1915.
url : str, optional
Provide a base URL for the Questionnaire. If left as None the
appropriate URL will be chosen based on your authentication method.
use_kerberos : bool, optional
Use a Kerberos ticket to login to the Questionnaire. This is the
default authentication method.
user : str, optional
A username for ws_auth sign-in. If not provided the current login name
is used.
pw : str, optional
A password for ws_auth sign-in. If not provided a password will be
requested.
cfg_path : str, optional
Path to the happi config.
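
    Examples
    --------
    A hypothetical read-only lookup (the experiment name is illustrative)::

        backend = QSBackend('xcslp1915')
        happi_entries = backend.load()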
"""
translations = DEFAULT_TRANSLATIONS
def __init__(self, expname: str, *, url=None, use_kerberos=True, user=None,
pw=None, cfg_path=None):
# Load cache is unused for this backend, but we have it here for
# API compatibility with the superclass JSONBackend.
self._load_cache = None
# Create our client and gather the raw information from the client
self._client = QuestionnaireClient(
url=url, use_kerberos=use_kerberos, user=user, pw=pw
)
self.db = self._initialize_database(expname)
def _initialize_database(self, experiment):
"""Initialize and convert the questionnaire."""
try:
self.experiment = experiment
self.helper = QuestionnaireHelper(self._client)
self.helper.experiment = experiment
return self.helper.as_happi_database(
translations=self.translations
)
except Exception:
logger.error('Failed to load the questionnaire', exc_info=True)
return {}
def initialize(self):
"""Can not initialize a new Questionnaire entry from API."""
raise NotImplementedError("The Questionnaire backend is read-only")
def load(self):
"""Return the structured dictionary of information."""
return self.db
def store(self, *args, **kwargs):
"""The current implementation of this backend is read-only."""
raise NotImplementedError("The Questionnaire backend is read-only")
def save(self, *args, **kwargs):
"""The current implementation of this backend is read-only."""
raise NotImplementedError("The Questionnaire backend is read-only")
def delete(self, _id):
"""The current implementation of this backend is read-only."""
raise NotImplementedError("The Questionnaire backend is read-only")
| pcdshub/happi | happi/backends/qs_db.py | qs_db.py | py | 15,490 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "psdm_qs_cli.QuestionnaireClient",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "errors.DatabaseError",
"line_number": 178,
"usage_type": "call"
},
{
"api_name... |
44008099598 | import pygame
from pygame.locals import *
from random import randint as rand
pygame.init()
screen = pygame.Surface((500, 100))
window = pygame.display.set_mode((screen.get_width() * 2, screen.get_height() * 2))
def constrain(val, lo, hi):
# Because these things are useful :)
if val <= lo:
return lo
elif val >= hi:
return hi
else:
return val
blank = pygame.Surface((20, 20))
blank.set_colorkey((255, 255, 255))
#logopath = "C:\\Users\\Charles Turvey\\PycharmProjects\\Stuff\\Simulator\\fakerobotapi\\Resources\\LogoSmall.png"
#srlogo = pygame.image.load_extended(logopath)
#srlogo = pygame.transform.scale(srlogo, (20, 20))
#srlogo.set_colorkey((0, 0, 0))
#srlogo.convert()
FIST = pygame.image.load_extended("D:\\Users\\Charles Turvey\\Documents\\Python\\FIST.png")
FIST = pygame.transform.scale(FIST, (20, 20))
FIST.set_colorkey((255, 255, 255))
FIST.convert()
rightFIST = pygame.transform.rotate(FIST, -90)
leftFIST = pygame.transform.flip(rightFIST, True, False)
leftDRAGON = pygame.image.load_extended("D:\\Users\\Charles Turvey\\Documents\\Python\\DRAGON.png")
leftDRAGON = pygame.transform.scale(leftDRAGON, (40*int(leftDRAGON.get_width()/leftDRAGON.get_height()), 40))
leftDRAGON.set_colorkey((255, 255, 255))
leftDRAGON.convert()
rightDRAGON = pygame.transform.flip(leftDRAGON, True, False)
pieces = []
grid = [[0 for j in range(screen.get_height())] for i in range(screen.get_width())]
players = dict()
class Piece:
def __init__(self, xin, yin, col=(100, 100, 100)):
self.x = xin
self.y = yin
self.col = col
self.target = (-1, -1)
self.focus = 0
pieces.append(self)
grid[xin][yin] = self
def show(self):
if self.focus > 0:
self.focus -= 1
if self.y != self.target[1]:
if self.y < self.target[1]:
if self.y + 1 < screen.get_height():
if isinstance(grid[self.x][self.y + 1], Piece):
if grid[self.x][self.y + 1].focus == 0:
grid[self.x][self.y + 1].settarget(self.target[0], self.target[1], self.focus)
self.focus = 0
elif grid[self.x][self.y + 1] != 0:
if players[grid[self.x][self.y + 1]].kick(0, 1):
self.move(0, 1)
else:
self.move(0, 1)
else:
if self.y > 0:
if isinstance(grid[self.x][self.y - 1], Piece):
if grid[self.x][self.y - 1].focus == 0:
grid[self.x][self.y - 1].settarget(self.target[0], self.target[1], self.focus)
self.focus = 0
elif grid[self.x][self.y - 1] != 0:
if players[grid[self.x][self.y - 1]].kick(0, -1):
self.move(0, -1)
else:
self.move(0, -1)
elif self.x != self.target[0]:
if self.x < self.target[0]:
if self.x + 1 < screen.get_width():
if isinstance(grid[self.x + 1][self.y], Piece):
if grid[self.x + 1][self.y].focus == 0:
grid[self.x + 1][self.y].settarget(self.target[0], self.target[1], self.focus)
self.focus = 0
elif grid[self.x + 1][self.y] != 0:
if players[grid[self.x + 1][self.y]].kick(1, 0):
self.move(1, 0)
else:
self.move(1, 0)
else:
if self.x > 0:
if isinstance(grid[self.x - 1][self.y], Piece):
if grid[self.x - 1][self.y].focus == 0:
grid[self.x - 1][self.y].settarget(self.target[0], self.target[1], self.focus)
self.focus = 0
elif grid[self.x - 1][self.y] != 0:
if players[grid[self.x - 1][self.y]].kick(-1, 0):
self.move(-1, 0)
else:
self.move(-1, 0)
elif self.y + 1 < screen.get_height():
if grid[self.x][self.y + 1] == 0:
self.move(0, 1)
screen.fill(self.col, (self.x, self.y, 1, 1))
def move(self, xmuch, ymuch):
grid[self.x][self.y] = 0
self.x += xmuch
self.y += ymuch
grid[self.x][self.y] = self
def destroy(self):
pieces.remove(self)
grid[self.x][self.y] = 0
def settarget(self, x, y, focin=100, replace=False):
x = constrain(x, 0, screen.get_width() - 1)
y = constrain(y, 0, screen.get_height() - 1)
self.target = (x, y)
self.focus = focin
if replace:
if self.x + 1 < screen.get_width():
if isinstance(grid[self.x + 1][self.y], Piece):
if grid[self.x + 1][self.y].focus == 0:
grid[self.x + 1][self.y].settarget(self.x, self.y, 10)
return 0
if self.x > 0:
if isinstance(grid[self.x - 1][self.y], Piece):
if grid[self.x - 1][self.y].focus == 0:
grid[self.x - 1][self.y].settarget(self.x, self.y, 10)
return 0
if self.y + 1 < screen.get_height():
if isinstance(grid[self.x][self.y + 1], Piece):
if grid[self.x][self.y + 1].focus == 0:
grid[self.x][self.y + 1].settarget(self.x, self.y, 10, True)
return 0
class Player:
def __init__(self, playerid, xin, yin, col=(100, 100, 100)):
self.x = xin
self.y = yin
self.vx = 0
self.vy = 0
self.col = col
if playerid == 0:
playerid = 1
self.id = playerid
grid[xin][yin] = playerid
grid[xin][yin - 1] = playerid
grid[xin + 1][yin] = playerid
grid[xin + 1][yin - 1] = playerid
players[self.id] = self
self.jumping = 0
def show(self):
if self.jumping > 0:
self.jumping -= 1
if grid[self.x][self.y - 2] == 0 and grid[self.x + 1][self.y - 2] == 0:
self.move(0, -1)
elif self.y + 1 < screen.get_height():
if grid[self.x][self.y + 1] == 0 and grid[self.x + 1][self.y + 1] == 0:
self.move(0, 1)
screen.fill(self.col, (self.x, self.y - 1, 2, 2))
def move(self, xmuch, ymuch):
grid[self.x][self.y] = 0
grid[self.x][self.y - 1] = 0
grid[self.x + 1][self.y] = 0
grid[self.x + 1][self.y - 1] = 0
self.x += xmuch
self.y += ymuch
grid[self.x][self.y] = self.id
grid[self.x][self.y - 1] = self.id
grid[self.x + 1][self.y] = self.id
grid[self.x + 1][self.y - 1] = self.id
def jump(self, ymuch=10):
if self.y + 1 == screen.get_height():
if grid[self.x][self.y - 2] == 0 and grid[self.x + 1][self.y - 2] == 0:
self.jumping = ymuch
elif grid[self.x][self.y + 1] != 0 or grid[self.x + 1][self.y + 1] != 0:
if grid[self.x][self.y - 2] == 0 and grid[self.x + 1][self.y - 2] == 0:
self.jumping = ymuch
def side(self, xmuch):
if xmuch < 0:
if self.x > 0:
if grid[self.x - 1][self.y] == 0:
self.move(-1, 0)
elif not isinstance(grid[self.x - 1][self.y], Piece):
if players[grid[self.x - 1][self.y]].kick(-1, 0):
self.move(-1, 0)
moved = True
elif xmuch > 0:
if self.x + 2 < screen.get_width():
if grid[self.x + 2][self.y] == 0:
self.move(1, 0)
elif not isinstance(grid[self.x + 2][self.y], Piece):
if players[grid[self.x + 2][self.y]].kick(1, 0):
self.move(1, 0)
moved = True
def kick(self, xmuch, ymuch):
moved = False
if xmuch < 0:
if self.x > 0:
if grid[self.x - 1][self.y] == 0:
self.move(-1, 0)
moved = True
elif not isinstance(grid[self.x - 1][self.y], Piece):
if players[grid[self.x - 1][self.y]].kick(-1, 0):
self.move(-1, 0)
moved = True
elif xmuch > 0:
if self.x + 2 < screen.get_width():
if grid[self.x + 2][self.y] == 0:
self.move(1, 0)
moved = True
elif not isinstance(grid[self.x + 2][self.y], Piece):
if players[grid[self.x + 2][self.y]].kick(1, 0):
self.move(1, 0)
moved = True
if ymuch < 0:
if self.y - 1 > 0:
if grid[self.x][self.y - 2] == 0:
self.move(0, -1)
moved = True
elif not isinstance(grid[self.x][self.y - 2], Piece):
if players[grid[self.x][self.y - 2]].kick(0, -1):
self.move(0, -1)
moved = True
elif ymuch > 0:
            if self.y + 1 < screen.get_height():
if grid[self.x][self.y + 1] == 0:
self.move(0, 1)
moved = True
self.jumping = 0
elif not isinstance(grid[self.x][self.y + 1], Piece):
if players[grid[self.x][self.y + 1]].kick(0, 1):
self.move(0, 1)
moved = True
return moved
def fist(self, x, y):
if x < -5:
generate(leftFIST, self.x - 10 + x, self.y + y)
elif x > 5:
generate(rightFIST, self.x - 10 + x, self.y + y)
else:
generate(FIST, self.x - 10 + x, self.y + y, 150)
def punch(self, x, y):
if x > 0:
chunk(rightFIST, self.x + 10, self.y - 20, self.x + x, self.y + y, abs(x + 10))
if x < 0:
chunk(leftFIST, self.x - 30, self.y - 20, self.x + x, self.y + y, abs(x + 10))
def unbury(self):
chunk(blank, self.x, self.y - 20, self.x + 10, self.y - 30)
chunk(blank, self.x - 20, self.y - 20, self.x - 40, self.y - 30)
def dragon(self, x, y):
if x < 0:
generate(leftDRAGON, self.x - int(leftDRAGON.get_width()/2) + x, self.y + y)
else:
generate(rightDRAGON, self.x - int(rightDRAGON.get_width()/2) + x, self.y + y)
def undertow(self, x):
chunk(blank, self.x + x - 10, self.y + 1, self.x - 10, self.y + 1, 50)
def raisin(self):
grid[self.x][self.y + 1].settarget(self.x, self.y, 10, True)
def generate(structure, x, y, focin=100):
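    # For every opaque pixel of the structure image, claim the topmost free
    # Piece in that column (skipping already-claimed ones) and retarget it.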
targets = pygame.surfarray.array_colorkey(structure)
used = [0 for i in range(screen.get_width())]
for i in range(len(targets)):
if x + i == constrain(x + i, 0, screen.get_width() - 1):
for j in range(len(targets[i])):
if y + j == constrain(y + j, 0, screen.get_height() - 1):
if targets[i][j] > 0:
p, q = x + i, y + j
no = used[p]
for k in range(y, screen.get_height()):
if isinstance(grid[p][k], Piece):
if no > 0:
no -= 1
else:
used[p] += 1
grid[p][k].settarget(p, q, focin)
break
def chunk(structure, origx, origy, targx, targy, focin=100):
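    # Retarget the Pieces lying under the structure's opaque pixels from the
    # origin region to the same offsets around the target position.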
targets = pygame.surfarray.array_colorkey(structure)
for i in range(len(targets)):
if origx + i == constrain(origx + i, 0, screen.get_width() - 1):
for j in range(len(targets[i])):
if origy + j == constrain(origy + j, 0, screen.get_height() - 1):
if isinstance(grid[origx + i][origy + j], Piece):
if targets[i][j] > 0:
grid[origx + i][origy + j].settarget(targx + i, targy + j, focin)
else:
grid[origx + i][origy + j].focus = 0
groundx = int(screen.get_width())
groundy = int(screen.get_height()/3)
for i in range(groundx):
for j in range(groundy):
Piece(i, (2 * groundy) + j, (0, 100, j * 7))
for i in range(100, 120):
for j in range(0, 50):
Piece(i, j, (255, 255, 0))
P1 = Player("George", 400, 10, (0, 255, 0))
P1go = 0
P2 = Player("Goatee", 498, 10, (200, 0, 0))
while True:
screen.fill((0, 0, 0))
for p in pieces:
p.show()
P1.side(P1go)
if abs(P1.x - P2.x) > 10:
P2.side(P1.x - P2.x)
P2.jump()
for P in players.values():
P.show()
window.blit(pygame.transform.scale2x(screen, window), (0, 0))
pygame.display.flip()
for e in pygame.event.get():
if e.type == QUIT:
exit()
elif e.type == KEYDOWN:
mousepos = pygame.mouse.get_pos()
mousex = int(mousepos[0] / 2)
mousey = int(mousepos[1] / 2)
if e.unicode == "w":
# generate(rightFIST, mousex, mousey)
P1.fist(0, -20)
elif e.unicode == "e":
# chunk(rightFIST, mousex, mousey, mousex + 250, mousey, 300)
P1.fist(20, -20)
elif e.unicode == "q":
# chunk(leftFIST, mousex, mousey, mousex - 250, mousey, 300)
P1.fist(-20, -20)
elif e.unicode == "s":
P1.unbury()
elif e.unicode == "d":
P1.punch(100, -20)
elif e.unicode == "a":
P1.punch(-100, -20)
elif e.unicode == "z":
P1.undertow(20)
elif e.unicode == "c":
            P1.undertow(-20)
elif e.unicode == "x":
P1.raisin()
elif e.key == K_UP:
P1.jump()
elif e.key == K_LEFT:
P1go -= 1
elif e.key == K_RIGHT:
P1go += 1
elif e.type == KEYUP:
if e.key == K_LEFT:
P1go += 1
elif e.key == K_RIGHT:
P1go -= 1
| ninjafrostpn/PythonProjects | Bending/Bending(old interface).py | Bending(old interface).py | py | 14,805 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.display",
... |
39725412461 | from app import db
import jinja2
from datetime import date
from flask import current_app, url_for
from flask_login import UserMixin, current_user, login_manager
class Admin(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(60))
password = db.Column(db.String(180))
name = db.Column(db.String(30))
role = db.Column(db.String(30))
xy = db.Column(db.String(50))
articles = db.relationship('Note_yet', lazy='dynamic')
articles_Note = db.relationship('Note')
@staticmethod
def query_all(name, stime, etime, matter, page=1):
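        # Filter the current admin's requests by student name, creation-date
        # range and request type (zt), then paginate the result.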
activites = current_user.articles
if name != '':
activites = activites.filter(Note_yet.xm.like('%' + name + '%'))
if stime != '' and etime == '':
# etime = stime
activites = activites.filter(Note_yet.created_date == stime)
elif stime != '' and etime != '':
activites = activites.filter(Note_yet.created_date.between(stime, etime))
else:
pass
if matter != 'info':
activites = activites.filter(Note_yet.zt == matter)
return activites.paginate(
page, per_page=current_app.config['POST_PER_PAGE']
)
class Note(db.Model):
id = db.Column(db.Integer, primary_key=True)
    xh = db.Column(db.String(128))  # student ID
    xm = db.Column(db.String(10))  # name
    nj = db.Column(db.String(6))  # grade/year
    xy = db.Column(db.String(50))  # college
    bj = db.Column(db.String(30))  # class
    ksh = db.Column(db.String(20))  # examinee number
    lqzy = db.Column(db.String(20))  # admitted major
    cc = db.Column(db.String(30))  # level (undergraduate / associate)
    xq = db.Column(db.String(30))  # campus
    xb = db.Column(db.String(10))  # sex
    mz = db.Column(db.String(10))  # ethnicity
    rxrq = db.Column(db.Date)  # enrollment date
    byrq = db.Column(db.Date)  # graduation date
admin_id = db.Column(db.Integer, db.ForeignKey(Admin.id))
status = db.Column(db.String(12))
# Table copied from Note once the student enters their name and account
class Note_yet(db.Model):
id = db.Column(db.Integer, primary_key=True)
xh = db.Column(db.String(128))
xm = db.Column(db.String(10))
xy = db.Column(db.String(50))
bj = db.Column(db.String(30))
zt = db.Column(db.String(30))
    zy = db.Column(db.String(20))  # admitted major
created_date = db.Column(db.Date, default=date.today)
reason = db.Column(db.String(256))
status = db.Column(db.String(12))
    home_address = db.Column(db.String(50))  # home address
    home_tel = db.Column(db.String(12))  # home contact number
    per_tel = db.Column(db.String(12))  # personal contact number
    identity = db.Column(db.String(18))  # national ID number
    school_sttime = db.Column(db.Date)  # date entered school
    school_endtime = db.Column(db.Date)  # date left school
    bl_date = db.Column(db.Date)  # deadline for retained student status
    dom_campus = db.Column(db.String(10))  # campus
    # dom_built = db.Column(db.String(5))  # dormitory building number
    dom_dorm = db.Column(db.String(20))  # dorm room number
    school = db.Column(db.String(50))  # school
    campus = db.Column(db.String(50))  # new department after major transfer
    code = db.Column(db.String(50))  # certificate number
    sex = db.Column(db.String(5))  # sex
    leng_school = db.Column(db.String(8))  # length of study
    discipline = db.Column(db.String(50))  # new major after transfer
    classgrade = db.Column(db.String(20))  # new class after transfer
dorm = db.Column(db.String(20))
admin_id = db.Column(db.Integer, db.ForeignKey(Admin.id))
def to_json(self):
# tentative
student = Note.query.filter(Note.xh == self.xh).first()
if self.zt == 'xueli':
sh = "<div><span class=\"layui-badge-rim\"><a href=" + url_for('print_all.xueli',
name_id=self.id) + " target=\"_blank\">学历证明</a></span></div>"
elif self.zt == 'xueji':
sh = "<div><span class=\"layui-badge-rim\"><a href=" + url_for('print_all.xueji',
name_id=self.id) + " target=\"_blank\">在校生学籍证明</a></span></div>"
elif self.zt == 'zhuxiao':
sh = "<div><span class=\"layui-badge-rim\"><a href=" + url_for('print_all.zhuxiaoxueji',
name_id=self.id) + " target=\"_blank\">注销学籍</a></span></div>"
elif self.zt == 'baoliuxueji':
sh = "<div><span class=\"layui-badge-rim\"><a href=" + url_for('print_all.xuejilixiaoqingdan',
name_id=self.id) + " target=\"_blank\">保留学籍离校清单</a></span><br>" \
"<span class=\"layui-badge-rim\"><a href=" + url_for(
'print_all.xuejishenqingbiao', name_id=self.id) + " target=\"_blank\">保留学籍申请表</a></span><br>" \
"<span class=\"layui-badge-rim\"><a href=" + url_for(
'print_all.xuejitongzhishu', name_id=self.id) + " target=\"_blank\">保留学籍通知书</a></span></div>"
elif self.zt == 'fuxue':
sh = "<div><span class=\"layui-badge-rim\"><a href=" + url_for('print_all.fuxueshenqing',
name_id=self.id) + " target=\"_blank\">复学申请</a></span><br>" \
"<span class=\"layui-badge-rim\"><a href=" + url_for(
'print_all.fuxueruban', name_id=self.id) + " target=\"_blank\">复学入班通知单</a></span><br></div>"
elif self.zt == 'tuixue':
sh = "<div><span class=\"layui-badge-rim\"><a href=" + url_for('print_all.tuixueshenqing',
name_id=self.id) + " target=\"_blank\">退学申请</a></span><br>" \
"<span class=\"layui-badge-rim\"><a href=" + url_for(
'print_all.tuixue_lixiaoqingdan', name_id=self.id) + " target=\"_blank\">退学离校清单</a></span><br>" \
"<span class=\"layui-badge-rim\"><a href=" + url_for(
'print_all.tuixue_tongzhi', name_id=self.id) + " target=\"_blank\">退学通知书</a></span></div>"
elif self.zt == 'xiuxue':
sh = "<div><span class=\"layui-badge-rim\"><a href=" + url_for('print_all.xiuxueshenqing',
name_id=self.id) + " target=\"_blank\">休学申请</a></span><br>" \
"<span class=\"layui-badge-rim\"><a href=" + url_for(
'print_all.xiuxuelixiaoqingdan', name_id=self.id) + " target=\"_blank\">休学离校清单</a></span><br>" \
"<span class=\"layui-badge-rim\"><a href=" + url_for(
'print_all.xiuxuetongzhi', name_id=self.id) + " target=\"_blank\">休学通知书</a></span></div>"
else:
sh = "<div><span class=\"layui-badge-rim\"><a href=" + url_for('print_all.zhuanyeshenqing',
name_id=self.id) + " target=\"_blank\">转专业申请</a></span><br>" \
"<span class=\"layui-badge-rim\"><a href=" + url_for(
'print_all.zhuanyeruban', name_id=self.id) + " target=\"_blank\">转专业入班</a></span><br></div>"
# tentative
if (self.zt == 'tuixue' or self.zt == 'xiuxue' or self.zt == 'zhuxiao' or self.zt == 'fuxue') and (
student.status == None or student.status == ''):
cz = "<div>" \
"<form method=\"post\" action=" + url_for('admin_edit.delete_stu', name_id=self.id) + ">" \
"<a class=\"layui-btn\" style=\"background: #9acfea\" href=\"" + url_for(
'admin_edit.confirm_tx', name_id=self.id) + "\">确认</a>" \
"<input class=\"layui-btn\" id=\"Submit\" name=\"Submit\" type=\"submit\" value=\"删除\">" \
"<a class=\"layui-btn layui-btn-primary\" href=\"" + url_for(
'admin_edit.edit_new', ZT=self.zt, name_id=self.id) + "\">编辑</a>" \
"</form>" \
"</div>"
else:
cz = "<div>" \
"<form method=\"post\" action=" + url_for('admin_edit.delete_stu', name_id=self.id) + ">" \
"<input class=\"layui-btn\" id=\"Submit\" name=\"Submit\" type=\"submit\" value=\"删除\">" \
"<a class=\"layui-btn layui-btn-primary\" href=\"" + url_for(
'admin_edit.edit_new', ZT=self.zt, name_id=self.id) + "\">编辑</a>" \
"</form>" \
"</div>"
if (self.zt == 'zhuanzhuanye' or self.zt == 'fuxue') and self.discipline is not None and self.classgrade is not None:
bj = self.classgrade
xy = self.campus
else:
bj = self.bj
xy = self.xy
return {
'xm': self.xm,
'xh': self.xh,
'xy': xy,
'bj': bj,
# 'xy': self.xy,
# 'bj': self.bj,
'sh': sh,
'created_date': str(self.created_date),
'reason': jinja2.escape(self.reason),
'cz': cz,
'discipline': self.discipline,
'classgrade': self.classgrade
}
@staticmethod # static callback
def query_all(name, stime, etime, matter, department, page, per_page):
activites = Note_yet.query
if name != '':
activites = activites.filter(Note_yet.xm.like('%' + name + '%'))
if stime != '' and etime == '':
# etime = stime
activites = activites.filter(Note_yet.created_date == stime)
elif stime != '' and etime != '':
activites = activites.filter(Note_yet.created_date.between(stime, etime))
else:
pass
if matter != 'info':
activites = activites.filter(Note_yet.zt == matter)
if department != 'admin':
activites = activites.filter(Note_yet.xy == department)
return activites.paginate(
page, per_page=per_page
)
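# Usage sketch (hypothetical arguments): Note_yet.query_all("", "", "", "info",
# "admin", page=1, per_page=20) applies no filters and returns a
# Flask-SQLAlchemy Pagination object over the first 20 rows.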
| run-nerver/student_info_system | app/models/pro.py | pro.py | py | 11,598 | python | en | code | 82 | github-code | 36 | [
{
"api_name": "app.db.Model",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask_login.UserMixin",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "app.db.Column",
"l... |
22354713255 | import base64
import os
import typing
import unittest
import deepdiff
import fastapi.testclient
import kubernetes
import pytest
import sqlalchemy.orm
import mlrun.common.schemas
import mlrun.errors
import mlrun.runtimes.pod
import server.api.utils.singletons.k8s
import tests.api.runtimes.base
from mlrun.datastore import ParquetTarget
from mlrun.feature_store import RunConfig
from mlrun.feature_store.retrieval.job import _default_merger_handler
class TestSpark3Runtime(tests.api.runtimes.base.TestRuntimeBase):
def custom_setup_after_fixtures(self):
self._mock_create_namespaced_custom_object()
def custom_setup(self):
self.image_name = "mlrun/mlrun:latest"
def _generate_runtime(
self, set_resources: bool = True
) -> mlrun.runtimes.Spark3Runtime:
runtime = mlrun.runtimes.Spark3Runtime()
runtime.spec.image = self.image_name
if set_resources:
runtime.with_executor_requests(cpu=1, mem="512m")
runtime.with_driver_requests(cpu=1, mem="512m")
return runtime
def _assert_java_options(
self,
body: dict,
expected_driver_java_options: str,
expected_executor_java_options: str,
):
if expected_driver_java_options:
assert body["spec"]["driver"]["javaOptions"] == expected_driver_java_options
else:
assert "javaOptions" not in body["spec"]["driver"]
if expected_executor_java_options:
assert (
body["spec"]["executor"]["javaOptions"]
== expected_executor_java_options
)
else:
assert "javaOptions" not in body["spec"]["executor"]
@staticmethod
def _assert_cores(body: dict, expected_cores: dict):
for resource in ["executor", "driver"]:
if expected_cores.get(resource):
assert body[resource]["cores"] == expected_cores[resource]
def _assert_custom_object_creation_config(
self,
expected_runtime_class_name="spark",
assert_create_custom_object_called=True,
expected_volumes: typing.Optional[list] = None,
expected_driver_volume_mounts: typing.Optional[list] = None,
expected_executor_volume_mounts: typing.Optional[list] = None,
expected_driver_java_options=None,
expected_executor_java_options=None,
expected_driver_resources: dict = None,
expected_executor_resources: dict = None,
expected_cores: dict = None,
expected_code: typing.Optional[str] = None,
):
if assert_create_custom_object_called:
server.api.utils.singletons.k8s.get_k8s_helper().crdapi.create_namespaced_custom_object.assert_called_once()
assert self._get_create_custom_object_namespace_arg() == self.namespace
body = self._get_custom_object_creation_body()
self._assert_labels(body["metadata"]["labels"], expected_runtime_class_name)
self._assert_volume_and_mounts(
body,
expected_volumes,
expected_driver_volume_mounts,
expected_executor_volume_mounts,
)
self._assert_java_options(
body, expected_driver_java_options, expected_executor_java_options
)
if expected_driver_resources:
self._assert_resources(body["spec"]["driver"], expected_driver_resources)
if expected_executor_resources:
self._assert_resources(
body["spec"]["executor"], expected_executor_resources
)
if expected_cores:
self._assert_cores(body["spec"], expected_cores)
if expected_code:
body = self._get_custom_object_creation_body()
code = None
for envvar in body["spec"]["driver"]["env"]:
if envvar["name"] == "MLRUN_EXEC_CODE":
code = envvar["value"]
break
if code:
code = base64.b64decode(code).decode("UTF-8")
assert code == expected_code
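# Sketch of the round-trip checked above: the runtime carries the function
# source as base64 in the MLRUN_EXEC_CODE env var, so decoding it should
# reproduce the submitted handler code exactly.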
def _assert_volume_and_mounts(
self,
body: dict,
expected_volumes: typing.Optional[list] = None,
expected_driver_volume_mounts: typing.Optional[list] = None,
expected_executor_volume_mounts: typing.Optional[list] = None,
):
if expected_volumes is not None:
sanitized_volumes = self._sanitize_list_for_serialization(expected_volumes)
assert (
deepdiff.DeepDiff(
body["spec"]["volumes"],
sanitized_volumes,
ignore_order=True,
report_repetition=True,
)
== {}
)
if expected_driver_volume_mounts is not None:
sanitized_driver_volume_mounts = self._sanitize_list_for_serialization(
expected_driver_volume_mounts
)
assert (
deepdiff.DeepDiff(
body["spec"]["driver"]["volumeMounts"],
sanitized_driver_volume_mounts,
ignore_order=True,
report_repetition=True,
)
== {}
)
if expected_executor_volume_mounts is not None:
sanitized_executor_volume_mounts = self._sanitize_list_for_serialization(
expected_executor_volume_mounts
)
assert (
deepdiff.DeepDiff(
body["spec"]["executor"]["volumeMounts"],
sanitized_executor_volume_mounts,
ignore_order=True,
report_repetition=True,
)
== {}
)
def _assert_resources(self, actual_resources, expected_values):
self._assert_limits(actual_resources, expected_values["limits"])
self._assert_requests(actual_resources, expected_values["requests"])
@staticmethod
def _assert_requests(actual: dict, expected: dict):
assert actual.get("coreRequest", None) == expected.get("cpu", None)
assert actual.get("memory", None) == expected.get("mem", None)
assert actual.get("serviceAccount", None) == expected.get(
"serviceAccount", "sparkapp"
)
@staticmethod
def _assert_limits(actual: dict, expected: dict):
assert actual.get("coreLimit", None) == expected.get("cpu", None)
assert actual.get("gpu", {}).get("name", None) == expected.get("gpu_type", None)
assert actual.get("gpu", {}).get("quantity", None) == expected.get("gpus", None)
def _assert_security_context(
self,
expected_driver_security_context=None,
expected_executor_security_context=None,
):
body = self._get_custom_object_creation_body()
if expected_driver_security_context:
assert (
body["spec"]["driver"].get("securityContext")
== expected_driver_security_context
)
else:
assert body["spec"]["driver"].get("securityContext") is None
if expected_executor_security_context:
assert (
body["spec"]["executor"].get("securityContext")
== expected_executor_security_context
)
else:
assert body["spec"]["executor"].get("securityContext") is None
def _assert_image_pull_secret(
self,
expected_image_pull_secret=None,
):
body = self._get_custom_object_creation_body()
if expected_image_pull_secret:
assert body["spec"].get("imagePullSecrets") == mlrun.utils.helpers.as_list(
expected_image_pull_secret
)
else:
assert body["spec"].get("imagePullSecrets") is None
def _sanitize_list_for_serialization(self, list_: list):
kubernetes_api_client = kubernetes.client.ApiClient()
return list(map(kubernetes_api_client.sanitize_for_serialization, list_))
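# Note (sketch): sanitize_for_serialization converts kubernetes V1* client
# models into plain dicts, so the deepdiff comparisons in
# _assert_volume_and_mounts can match them against the CRD body.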
def test_deploy_default_image_without_limits(
self, db: sqlalchemy.orm.Session, k8s_secrets_mock
):
mlrun.config.config.httpdb.builder.docker_registry = "test_registry"
runtime: mlrun.runtimes.Spark3Runtime = self._generate_runtime()
runtime.spec.image = None
runtime.spec.use_default_image = True
self.execute_function(runtime)
self._assert_custom_object_creation_config()
def test_run_without_runspec(self, db: sqlalchemy.orm.Session, k8s_secrets_mock):
runtime: mlrun.runtimes.Spark3Runtime = self._generate_runtime()
self.execute_function(runtime)
self._assert_custom_object_creation_config()
def test_run_with_default_resources(
self, db: sqlalchemy.orm.Session, k8s_secrets_mock
):
runtime: mlrun.runtimes.Spark3Runtime = self._generate_runtime(
set_resources=False
)
expected_executor_resources = {
"requests": {"cpu": "1", "mem": "5g"},
"limits": {"cpu": "2"},
}
expected_driver_resources = {
"requests": {"cpu": "1", "mem": "2g"},
"limits": {"cpu": "2"},
}
expected_cores = {
"executor": 1,
"driver": 1,
}
runtime.with_cores(expected_cores["executor"], expected_cores["driver"])
self.execute_function(runtime)
self._assert_custom_object_creation_config(
expected_driver_resources=expected_driver_resources,
expected_executor_resources=expected_executor_resources,
expected_cores=expected_cores,
)
def test_run_with_limits_and_requests(
self, db: sqlalchemy.orm.Session, k8s_secrets_mock
):
runtime: mlrun.runtimes.Spark3Runtime = self._generate_runtime(
set_resources=False
)
expected_executor_resources = {
"requests": {"cpu": "1", "mem": "1G", "serviceAccount": "executorsa"},
"limits": {"cpu": "2", "gpu_type": "nvidia.com/gpu", "gpus": 1},
}
expected_driver_resources = {
"requests": {"cpu": "2", "mem": "512m"},
"limits": {"cpu": "3", "gpu_type": "nvidia.com/gpu", "gpus": 1},
}
runtime.spec.service_account = "executorsa"
runtime.with_executor_requests(cpu="1", mem="1G")
runtime.with_executor_limits(cpu="2", gpus=1)
runtime.with_driver_requests(cpu="2", mem="512m")
runtime.with_driver_limits(cpu="3", gpus=1)
expected_cores = {
"executor": 8,
"driver": 2,
}
runtime.with_cores(expected_cores["executor"], expected_cores["driver"])
self.execute_function(runtime)
self._assert_custom_object_creation_config(
expected_driver_resources=expected_driver_resources,
expected_executor_resources=expected_executor_resources,
expected_cores=expected_cores,
)
def test_run_with_conflicting_limits_and_requests(
self, db: sqlalchemy.orm.Session, k8s_secrets_mock
):
runtime: mlrun.runtimes.Spark3Runtime = self._generate_runtime(
set_resources=False
)
runtime.spec.service_account = "executorsa"
runtime.with_executor_requests(cpu="1", mem="1G")
runtime.with_executor_limits(cpu="200m", gpus=1)
runtime.with_driver_requests(cpu="2", mem="512m")
runtime.with_driver_limits(cpu="3", gpus=1)
with pytest.raises(mlrun.errors.MLRunInvalidArgumentError):
self.execute_function(runtime)
def test_run_with_invalid_requests(
self, db: sqlalchemy.orm.Session, k8s_secrets_mock
):
runtime: mlrun.runtimes.Spark3Runtime = self._generate_runtime(
set_resources=False
)
with pytest.raises(ValueError):
# Java notation applies to spark-operator memory requests
runtime.with_driver_requests(mem="2Gi", cpu="3")
def test_run_with_invalid_limits(
self, db: sqlalchemy.orm.Session, k8s_secrets_mock
):
runtime: mlrun.runtimes.Spark3Runtime = self._generate_runtime(
set_resources=False
)
with pytest.raises(ValueError):
runtime.with_driver_limits(cpu="not a number", gpus=1)
def test_run_with_limits_and_requests_patch_true(
self, db: sqlalchemy.orm.Session, k8s_secrets_mock
):
runtime: mlrun.runtimes.Spark3Runtime = self._generate_runtime(
set_resources=False
)
runtime.with_executor_limits(cpu="3")
runtime.with_executor_requests(cpu="1", mem="1G")
runtime.with_executor_limits(gpus=1, patch=True)
expected_executor_resources = {
"requests": {"cpu": "1", "mem": "1G"},
"limits": {"cpu": "3", "gpu_type": "nvidia.com/gpu", "gpus": 1},
}
runtime.with_driver_requests(cpu="2")
runtime.with_driver_limits(cpu="3", gpus=1)
# patch = True
runtime.with_driver_requests(mem="512m", patch=True)
expected_driver_resources = {
"requests": {"cpu": "2", "mem": "512m"},
"limits": {"cpu": "3", "gpu_type": "nvidia.com/gpu", "gpus": 1},
}
expected_cores = {
"executor": 8,
"driver": 2,
}
runtime.with_cores(expected_cores["executor"], expected_cores["driver"])
self.execute_function(runtime)
self._assert_custom_object_creation_config(
expected_driver_resources=expected_driver_resources,
expected_executor_resources=expected_executor_resources,
expected_cores=expected_cores,
)
def test_run_with_limits_and_requests_patch_false(
self, db: sqlalchemy.orm.Session, k8s_secrets_mock
):
runtime: mlrun.runtimes.Spark3Runtime = self._generate_runtime(
set_resources=False
)
runtime.with_driver_requests(cpu="2")
runtime.with_driver_limits(cpu="3", gpus=1)
# default patch = False
runtime.with_driver_requests(mem="1G")
runtime.with_driver_limits(cpu="10")
expected_driver_resources = {
"requests": {"mem": "1G", "cpu": "1"},
"limits": {"cpu": "10"},
}
runtime.with_executor_requests(cpu="1", mem="1G")
runtime.with_executor_limits(cpu="3")
# default patch = False
runtime.with_executor_requests(mem="2G")
runtime.with_executor_limits(cpu="5")
expected_executor_resources = {
"requests": {"mem": "2G", "cpu": "1"},
"limits": {"cpu": "5"},
}
expected_cores = {
"executor": 8,
"driver": 2,
}
runtime.with_cores(expected_cores["executor"], expected_cores["driver"])
self.execute_function(runtime)
self._assert_custom_object_creation_config(
expected_driver_resources=expected_driver_resources,
expected_executor_resources=expected_executor_resources,
expected_cores=expected_cores,
)
def test_run_with_host_path_volume(
self, db: sqlalchemy.orm.Session, k8s_secrets_mock
):
runtime: mlrun.runtimes.Spark3Runtime = self._generate_runtime()
shared_volume = kubernetes.client.V1Volume(
name="shared-volume",
host_path=kubernetes.client.V1HostPathVolumeSource(
path="/shared-volume-host-path", type=""
),
)
shared_volume_driver_volume_mount = kubernetes.client.V1VolumeMount(
mount_path="/shared-volume-driver-mount-path", name=shared_volume.name
)
shared_volume_executor_volume_mount = kubernetes.client.V1VolumeMount(
mount_path="/shared-volume-executor-mount-path", name=shared_volume.name
)
driver_volume = kubernetes.client.V1Volume(
name="driver-volume",
host_path=kubernetes.client.V1HostPathVolumeSource(
path="/driver-volume-host-path", type=""
),
)
driver_volume_volume_mount = kubernetes.client.V1VolumeMount(
mount_path="/driver-mount-path", name=driver_volume.name
)
executor_volume = kubernetes.client.V1Volume(
name="executor-volume",
host_path=kubernetes.client.V1HostPathVolumeSource(
path="/executor-volume-host-path", type=""
),
)
executor_volume_volume_mount = kubernetes.client.V1VolumeMount(
mount_path="/executor-mount-path", name=executor_volume.name
)
runtime.with_driver_host_path_volume(
shared_volume.host_path.path,
shared_volume_driver_volume_mount.mount_path,
volume_name=shared_volume.name,
)
runtime.with_executor_host_path_volume(
shared_volume.host_path.path,
shared_volume_executor_volume_mount.mount_path,
volume_name=shared_volume.name,
)
runtime.with_driver_host_path_volume(
driver_volume.host_path.path,
driver_volume_volume_mount.mount_path,
volume_name=driver_volume.name,
)
runtime.with_executor_host_path_volume(
executor_volume.host_path.path,
executor_volume_volume_mount.mount_path,
volume_name=executor_volume.name,
)
self.execute_function(runtime)
self._assert_custom_object_creation_config(
expected_volumes=[shared_volume, driver_volume, executor_volume],
expected_driver_volume_mounts=[
shared_volume_driver_volume_mount,
driver_volume_volume_mount,
],
expected_executor_volume_mounts=[
shared_volume_executor_volume_mount,
executor_volume_volume_mount,
],
)
def test_java_options(self, db: sqlalchemy.orm.Session, k8s_secrets_mock):
runtime = self._generate_runtime()
driver_java_options = "-Dmyproperty=somevalue"
runtime.spec.driver_java_options = driver_java_options
executor_java_options = "-Dmyotherproperty=someothervalue"
runtime.spec.executor_java_options = executor_java_options
self.execute_function(runtime)
self._assert_custom_object_creation_config(
expected_driver_java_options=driver_java_options,
expected_executor_java_options=executor_java_options,
)
@pytest.mark.parametrize(
"executor_cores, driver_cores, expect_failure",
[
(4, None, False),
(3, 3, False),
(None, 2, False),
(None, None, False),
(0.5, None, True),
(None, -1, True),
],
)
def test_cores(
self,
executor_cores,
driver_cores,
expect_failure,
db: sqlalchemy.orm.Session,
k8s_secrets_mock,
):
runtime = self._generate_runtime()
if expect_failure:
with pytest.raises(mlrun.errors.MLRunInvalidArgumentError):
runtime.with_cores(
executor_cores=executor_cores, driver_cores=driver_cores
)
return
else:
runtime.with_cores(executor_cores=executor_cores, driver_cores=driver_cores)
# By default, if not specified otherwise, the cores are set to 1
expected_cores = {"executor": executor_cores or 1, "driver": driver_cores or 1}
self.execute_function(runtime)
self._assert_custom_object_creation_config(expected_cores=expected_cores)
@pytest.mark.parametrize(
["mount_v3io_to_executor", "with_igz_spark_twice"],
[(False, False), (True, False), (False, True), (True, True)],
)
def test_with_igz_spark_volume_mounts(
self,
mount_v3io_to_executor,
with_igz_spark_twice,
db: sqlalchemy.orm.Session,
k8s_secrets_mock,
):
runtime = self._generate_runtime()
orig = os.getenv("V3IO_USERNAME")
os.environ["V3IO_USERNAME"] = "me"
try:
runtime.with_executor_host_path_volume(
host_path="/tmp",
mount_path="/before",
volume_name="path-volume-before",
)
runtime.with_igz_spark(mount_v3io_to_executor=mount_v3io_to_executor)
if with_igz_spark_twice:
runtime.with_igz_spark(mount_v3io_to_executor=mount_v3io_to_executor)
runtime.with_executor_host_path_volume(
host_path="/tmp",
mount_path="/after",
volume_name="path-volume-after",
)
finally:
if orig:
os.environ["V3IO_USERNAME"] = orig
else:
os.unsetenv("V3IO_USERNAME")
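# Note (sketch): os.unsetenv does not update the os.environ mapping;
# `del os.environ["V3IO_USERNAME"]` would also remove the dict entry.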
self.execute_function(runtime)
user_added_executor_volume_mounts = [
kubernetes.client.V1VolumeMount(
mount_path="/before", name="path-volume-before"
),
kubernetes.client.V1VolumeMount(
mount_path="/after", name="path-volume-after"
),
]
common_volume_mounts = [
kubernetes.client.V1VolumeMount(mount_path="/dev/shm", name="shm"),
kubernetes.client.V1VolumeMount(
mount_path="/var/run/iguazio/dayman", name="v3iod-comm"
),
kubernetes.client.V1VolumeMount(
mount_path="/var/run/iguazio/daemon_health", name="daemon-health"
),
kubernetes.client.V1VolumeMount(
mount_path="/etc/config/v3io", name="v3io-config"
),
]
v3io_mounts = [
kubernetes.client.V1VolumeMount(
mount_path="/v3io", name="v3io", sub_path=""
),
kubernetes.client.V1VolumeMount(
mount_path="/User", name="v3io", sub_path="users/me"
),
]
expected_driver_mounts = common_volume_mounts + v3io_mounts
expected_executor_mounts = (
common_volume_mounts + user_added_executor_volume_mounts
)
if mount_v3io_to_executor:
expected_executor_mounts += v3io_mounts
self._assert_custom_object_creation_config(
expected_driver_volume_mounts=expected_driver_mounts,
expected_executor_volume_mounts=expected_executor_mounts,
)
def test_deploy_with_image_pull_secret(
self, db: sqlalchemy.orm.Session, k8s_secrets_mock
):
# no image pull secret
runtime: mlrun.runtimes.Spark3Runtime = self._generate_runtime()
self.execute_function(runtime)
self._assert_image_pull_secret()
# default image pull secret
mlrun.config.config.function.spec.image_pull_secret.default = "my_secret"
runtime: mlrun.runtimes.Spark3Runtime = self._generate_runtime()
self.execute_function(runtime)
self._assert_image_pull_secret(
mlrun.config.config.function.spec.image_pull_secret.default,
)
# override default image pull secret
runtime: mlrun.runtimes.Spark3Runtime = self._generate_runtime()
new_image_pull_secret = "my_new_secret"
runtime.spec.image_pull_secret = new_image_pull_secret
self.execute_function(runtime)
self._assert_image_pull_secret(new_image_pull_secret)
def test_get_offline_features(
self,
db: sqlalchemy.orm.Session,
client: fastapi.testclient.TestClient,
k8s_secrets_mock,
):
# TODO - this test needs to be moved outside of the api runtimes tests and into the spark runtime sdk tests
# once moved, the `watch=False` can be removed
import mlrun.feature_store as fstore
fv = fstore.FeatureVector("my-vector", features=[])
fv.save = unittest.mock.Mock()
runtime = self._generate_runtime()
# auto mount requires auth info but this test is supposed to run in the client
# re-enable when test is moved
runtime.spec.disable_auto_mount = True
runtime.with_igz_spark = unittest.mock.Mock()
self._reset_mocks()
mlrun.config.config.artifact_path = "v3io:///mypath"
runtime.with_driver_limits(cpu="1")
runtime.with_driver_requests(cpu="1", mem="1G")
runtime.with_executor_limits(cpu="1")
runtime.with_executor_requests(cpu="1", mem="1G")
# remote-spark is not a merge engine but a runtime
with pytest.raises(mlrun.errors.MLRunInvalidArgumentError):
fstore.get_offline_features(
fv,
with_indexes=True,
timestamp_for_filtering="timestamp",
engine="remote-spark",
run_config=RunConfig(local=False, function=runtime, watch=False),
target=ParquetTarget(),
)
self.project = "default"
self._create_project(client)
resp = fstore.get_offline_features(
fv,
with_indexes=True,
timestamp_for_filtering="timestamp",
engine="spark",
# setting watch=False, because we don't want to wait for the job to complete when running in API
run_config=RunConfig(local=False, function=runtime, watch=False),
target=ParquetTarget(),
)
runspec = resp.run.spec.to_dict()
expected_runspec = {
"parameters": {
"vector_uri": "store://feature-vectors/default/my-vector",
"target": {
"name": "parquet",
"kind": "parquet",
"partitioned": True,
"max_events": 10000,
"flush_after_seconds": 900,
},
"entity_timestamp_column": None,
"drop_columns": None,
"with_indexes": True,
"query": None,
"order_by": None,
"start_time": None,
"end_time": None,
"timestamp_for_filtering": "timestamp",
"engine_args": None,
},
"outputs": [],
"output_path": "v3io:///mypath",
"secret_sources": [],
"function": "None/my-vector-merger@349f744e83e1a71d8b1faf4bbf3723dc0625daed",
"data_stores": [],
"handler": "merge_handler",
"state_thresholds": mlrun.mlconf.function.spec.state_thresholds.default.to_dict(),
}
assert (
deepdiff.DeepDiff(
runspec,
expected_runspec,
# excluding the function attribute as it contains a hash of the object; any
# change in the structure of the run would require updating the function hash
exclude_paths=["root['function']"],
)
== {}
)
self.name = "my-vector-merger"
expected_code = _default_merger_handler.replace(
"{{{engine}}}", "SparkFeatureMerger"
)
self._assert_custom_object_creation_config(
expected_driver_resources={
"requests": {"cpu": "1", "mem": "1G"},
"limits": {"cpu": "1"},
},
expected_executor_resources={
"requests": {"cpu": "1", "mem": "1G"},
"limits": {"cpu": "1"},
},
expected_code=expected_code,
)
def test_run_with_source_archive_pull_at_runtime(
self, db: sqlalchemy.orm.Session, client: fastapi.testclient.TestClient
):
runtime: mlrun.runtimes.Spark3Runtime = self._generate_runtime()
with pytest.raises(
mlrun.errors.MLRunInvalidArgumentError,
match="pull_at_runtime is not supported for spark runtime, use pull_at_runtime=False",
):
runtime.with_source_archive(source="git://github.com/mock/repo")
runtime.with_source_archive(
source="git://github.com/mock/repo", pull_at_runtime=False
)
def test_run_with_load_source_on_run(
self, db: sqlalchemy.orm.Session, k8s_secrets_mock
):
# set default output path
mlrun.mlconf.artifact_path = "v3io:///tmp"
# generate runtime and set source code to load on run
runtime: mlrun.runtimes.Spark3Runtime = self._generate_runtime()
runtime.metadata.name = "test-spark-runtime"
runtime.metadata.project = self.project
runtime.spec.build.source = "git://github.com/mock/repo"
runtime.spec.build.load_source_on_run = True
# expect pre-condition error, not supported
with pytest.raises(mlrun.errors.MLRunPreconditionFailedError) as exc:
runtime.run(auth_info=mlrun.common.schemas.AuthInfo())
assert (
str(exc.value) == "Sparkjob does not support loading source code on run, "
"use func.with_source_archive(pull_at_runtime=False)"
)
| mlrun/mlrun | tests/api/runtimes/test_spark.py | test_spark.py | py | 29,207 | python | en | code | 1,129 | github-code | 36 | [
{
"api_name": "tests.api.runtimes.base.api",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "tests.api.runtimes.base",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "mlrun.common.schemas.runtimes.Spark3Runtime",
"line_number": 32,
"usage_type": "... |
4798482149 | import requests
from datetime import datetime
import smtplib
from dotenv import load_dotenv
import os
import time
load_dotenv(override=True)
EMAIL = os.environ.get("SENDER")
PWD = os.environ.get("PWD")
# From https://www.latlong.net/
MY_LAT = 6.320439
MY_LONG = -75.567467
# True when your position is within +/-5 degrees of the ISS position
def is_iss_close() -> bool:
response = requests.get(url="http://api.open-notify.org/iss-now.json")
response.raise_for_status()
data = response.json()
iss_latitude = float(data["iss_position"]["latitude"])
iss_longitude = float(data["iss_position"]["longitude"])
if abs(iss_latitude - MY_LAT) <= 5 and abs(iss_longitude - MY_LONG) <= 5:
return True
return False
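# Worked sketch of the +/-5 degree window (illustrative helper, not part of
# the original script; coordinates here are hypothetical):
def _demo_window(iss_lat=8.0, iss_long=-72.1):
# Passes only when both |lat - MY_LAT| and |long - MY_LONG| are at most 5.
return abs(iss_lat - MY_LAT) <= 5 and abs(iss_long - MY_LONG) <= 5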
def is_night():
parameters = {
"lat": MY_LAT,
"lng": MY_LONG,
"formatted": 0,
}
response = requests.get("https://api.sunrise-sunset.org/json", params=parameters)
response.raise_for_status()
data = response.json()
sunrise = int(data["results"]["sunrise"].split("T")[1].split(":")[0])
sunset = int(data["results"]["sunset"].split("T")[1].split(":")[0])
time_now = datetime.now().hour
if time_now >= sunset or time_now <= sunrise:
return True
return False
def send_email():
if is_iss_close() and is_night():
with smtplib.SMTP("smtp.gmail.com") as conn:
conn.starttls()
conn.login(user=EMAIL, password=PWD)
conn.sendmail(
from_addr=EMAIL,
to_addrs=EMAIL,
msg="Subject:Look up! ISS is close\n Look up, pal."
)
if __name__ == '__main__':
while True:
send_email()
time.sleep(60)
| andresmesad09/iss_overhead | main.py | main.py | py | 1,721 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
... |
39155415013 | import nltk
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
import numpy
import tflearn
import tensorflow
import random
import json
import pickle
with open('intents.json') as file:
data = json.load(file)
# If preprocessed data is already present, there is no need to redo it
try:
with open("data.pickle", "rb") as f:
words,labels,training,output = pickle.load(f)
except:
# Extracting data to lists
words = []
labels = []
docs_x = []
docs_y = []
# Tokenizing each pattern and collecting the words, tags and documents
for intent in data['intents']:
for pattern in intent['patterns']:
wrds = nltk.word_tokenize(pattern)
words.extend(wrds)
docs_x.append(wrds)
docs_y.append(intent["tag"])
if intent['tag'] not in labels:
labels.append(intent['tag'])
# stem words; then sort and de-duplicate words and labels
words = [stemmer.stem(w.lower()) for w in words if w != "?"] # reduce words to their roots
words = sorted(list(set(words))) # removing duplicates
labels = sorted(labels)
# training lists: bag-of-words inputs and one-hot label outputs for the model
training = []
output = []
out_empty = [0 for _ in range(len(labels))]
# convert each document into a bag-of-words vector to feed the model
for x, doc in enumerate(docs_x):
bag = []
wrds = [stemmer.stem(w.lower()) for w in doc]
for w in words:
if w in wrds:
bag.append(1)
else:
bag.append(0)
output_row = out_empty[:]
output_row[labels.index(docs_y[x])] = 1
training.append(bag) # binary bag-of-words over the root-word vocabulary, e.g. [1, 0, 1, ...]
output.append(output_row) # one-hot row marking the document's tag
# converting lists to numpy arrays
training = numpy.array(training)
output = numpy.array(output)
# saving model to pickle file
with open("data.pickle", "wb") as f:
pickle.dump((words,labels,training,output), f)
# building the model in tensorflow
tensorflow.compat.v1.reset_default_graph()
# input data
net = tflearn.input_data(shape=[None, len(training[0])])
# 2 hidden layers with 8 neurons
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
# fully connected layer and regression layer (probability calculation using an activation function)
# output layer with activation function (softmax)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)
# training model DNN is a type of neural network
model = tflearn.DNN(net)
# training and saving the DNN model
try:
model.load("model.tflearn")
except:
model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
model.save("model.tflearn")
# The bag_of_words function transforms a string input into a bag of words using the created words list.
def bag_of_words(s, words):
bag = [0 for _ in range(len(words))]
s_words = nltk.word_tokenize(s)
s_words = [stemmer.stem(word.lower()) for word in s_words]
for se in s_words:
for i, w in enumerate(words):
if w == se:
bag[i] = 1
return numpy.array(bag)
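# Hedged usage sketch (illustrative helper with a toy vocabulary, not taken
# from intents.json): a 1 is set for every stemmed vocabulary word that
# appears in the stemmed input sentence.
def _demo_bag_of_words():
toy_words = sorted({stemmer.stem(w.lower()) for w in ["hello", "how", "are", "you"]})
return bag_of_words("How are you?", toy_words)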
# The chat() function gets a prediction from the model and picks an appropriate response.
def chat():
print("You are now talking with the bot (type quit to stop)!")
while True:
inp = input("You: ")
if inp.lower() == "quit":
break
# results give the probability of each response tag
results = model.predict([bag_of_words(inp, words)])
results_index = numpy.argmax(results)
tag = labels[results_index]
for tg in data["intents"]:
if tg['tag'] == tag:
responses = tg['responses']
print(random.choice(responses))
# running the bot
chat() | docmhvr/AI_chatbot | main.py | main.py | py | 3,802 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "nltk.stem.lancaster.LancasterStemmer",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "nltk.word_t... |
38006333091 | # scp ./move_jetbot_17_11.py jetson@192.168.0.240:/home/jetson/Documents/jetbot-master/notebooks/collision_avoidance
# scp -r jetson@192.168.0.240:/home/jetson/Documents/jetbot-master/notebooks/collision_avoidance/images ./images
# scp -r ./images/dataset jetson@192.168.0.240:/home/jetson/Documents/jetbot-master/notebooks/collision_avoidance/dataset
# scp -r ./tf_model jetson@192.168.0.240:/home/jetson/Documents/jetbot-master/notebooks/collision_avoidance
# scp -r ./jetbot jetson@192.168.0.240:/home/jetson/Documents/jetbot-master/notebooks/collision_avoidance/jetbot
# jupyter notebook --no-browser --port=8888 --ip 0.0.0.0
# scp ./udp_client.py jetson@192.168.0.240:/home/jetson/Documents/jetbot-master/notebooks/collision_avoidance
# Fix for stupid errors:
# rm -rf ~/.cache/gstreamer-1.0
# source ~/.bashrc
# export LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1
import os
import cv2
import time
import nanocamera as nano
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dropout, Dense
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import img_to_array
from jetbot import Robot
def load_model(model_path=os.path.join('tf_model', 'best_model.ckpt')):
model_loaded = tf.keras.models.Sequential()
model_loaded.add(
tf.keras.layers.Conv2D(16, kernel_size=(3, 3), activation='relu', input_shape=(224, 224, 1), padding='same'))
# model.add(LeakyReLU(alpha=0.1))
model_loaded.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), padding='same'))
model_loaded.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu', padding='same'))
# model.add(tf.keras.activations.relu(alpha=0.1))
model_loaded.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), padding='same'))
model_loaded.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'))
# model.add(tf.keras.activations.relu(alpha=0.1))
model_loaded.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), padding='same'))
model_loaded.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'))
# model.add(tf.keras.activations.relu(alpha=0.1))
model_loaded.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), padding='same'))
model_loaded.add(tf.keras.layers.Flatten())
model_loaded.add(tf.keras.layers.Dropout(0.5))
model_loaded.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
# model.add(tf.keras.activations.relu(alpha=0.1))
model_loaded.add(Dropout(0.5))
model_loaded.add(tf.keras.layers.Dense(256, activation=tf.nn.relu))
model_loaded.add(tf.keras.layers.Dense(1, activation='sigmoid'))
# model_loaded.summary()
# print(model_loaded)
model_loaded.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model_loaded.load_weights(model_path)
return model_loaded
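# Note (sketch): the layer stack above must match the checkpoint that produced
# best_model.ckpt, since load_weights() restores tensors into the freshly
# built architecture rather than rebuilding it from the file.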
# def preprocess(camera_value):
# global device, normalize
# x = camera_value
# x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)
# x = x.transpose((2, 0, 1))
# x = torch.from_numpy(x).float()
# x = normalize(x)
# x = x.to(device)
# x = x[None, ...]
# return x
def classify_image(frame):
image = cv2.resize(frame, (224, 224))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
img_arr = img_to_array(image)
img_arr = img_arr / 255.
np_image = np.expand_dims(img_arr, axis=0)
pred_value = model.predict(np_image)[0][0]
return pred_value
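# Shape sketch for the preprocessing above: frame (H, W, 3) -> resized
# (224, 224, 3) -> grayscale (224, 224) -> img_to_array (224, 224, 1)
# -> scaled to [0, 1] -> expand_dims (1, 224, 224, 1), matching the model's
# input_shape=(224, 224, 1).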
def move_robot(prob_free, speed=0.3, sleep_time=0.1):
if prob_free > 0.5:
robot.forward(speed)
else:
robot.backward(speed)
time.sleep(sleep_time)
robot.left(speed)
# time.sleep(sleep_time)
# robot.stop()
def save_frame(frame):
cv2.imwrite(f'./images/image_{i}.jpg', frame)
return 1
if __name__ == '__main__':
# 224
i = 697
print('start')
camera = nano.Camera(flip=0, width=224, height=224, fps=30)
print('start camera')
model = load_model()
print('load model')
robot = Robot()
print('CSI Camera ready? - ', camera.isReady())
while camera.isReady():
try:
print('Next frame', end='')
frame = camera.read()
i += save_frame(frame)
prob_free = classify_image(frame)
is_free = prob_free > 0.9
print(f': {"Free" if is_free else "Block"}: {prob_free * 100:.2f}%')
move_robot(prob_free)
except: # any error or Ctrl+C breaks the loop so the robot is stopped below
break
print('Robot.stop()')
robot.stop()
print('robot stopped')
camera.release()
del camera
print('end')
| panjacob/brainbot_movement | move_jetbot_na_samych_obrazach.py | move_jetbot_na_samych_obrazach.py | py | 4,770 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.models.Sequential",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tensorfl... |
26402106184 | import os
import sys
import random
import pygame
import sqlite3
from PyQt5.QtWidgets import QWidget, QApplication, QPushButton
from PyQt5.QtWidgets import QInputDialog
pygame.init()
db = sqlite3.connect('clicker.db')
cur = db.cursor()
clock = pygame.time.Clock()
display_width = 1920
display_height = 1080
size = width, height = 1920, 1080
screen = pygame.display.set_mode(size)
white = (255, 255, 255)
black = (0, 0, 0)
grey = (128, 128, 128)
light_grey = (224, 224, 224)
light_blue = (173, 216, 230)
blue = (0, 100, 250)
red = (255, 0, 0)
green1 = (0, 50, 0)
green2 = (0, 100, 0)
green3 = (0, 150, 0)
green4 = (0, 200, 0)
nado = 0
green5 = (0, 250, 0)
yel = (255, 255, 0)
foto = "111.jpg"
# Start auto clicker
MYEVENTTYPE = pygame.USEREVENT + 1
pygame.time.set_timer(MYEVENTTYPE, 1000)
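# Sketch: set_timer posts a MYEVENTTYPE event on the queue every 1000 ms;
# the event loop in main_loop() handles it by adding `autog` coins once per
# tick, which implements the passive income.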
screen_rect = (0, 0, display_width, display_height)
pygame.mixer.music.load('fon.mp3')
pygame.mixer.music.set_volume(0.07)
pygame.mixer.music.play()
pygame.mixer.music.play(loops=-1)
sound1 = pygame.mixer.Sound('monnetka.mp3')
sound2 = pygame.mixer.Sound('upgr.mp3')
sound3 = pygame.mixer.Sound('oshibka.mp3')
sound4 = pygame.mixer.Sound('game-won.mp3')
gameDisplay = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption("Кликальный клик")
user = ''
def circle(display, color, x, y, radius):
pygame.draw.circle(display, color, [x, y], radius)
def autominer():
global coins
global autog
coins = coins + autog
def DrawText(text, Textcolor, Rectcolor, x, y, fsize):
font = pygame.font.Font('freesansbold.ttf', fsize)
text = font.render(text, True, Textcolor, Rectcolor)
textRect = text.get_rect()
textRect.center = (x, y)
gameDisplay.blit(text, textRect)
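# Sketch: DrawText renders `text` with a fresh font, centers the resulting
# rect at (x, y) and blits it onto the shared gameDisplay surface.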
def create_particles(position, nado):
q = random.randint(5, 15)
particle_count = q
numbers = range(-5, 6)
for _ in range(particle_count):
Particle(position, random.choice(numbers), random.choice(numbers), nado)
def load_image(name, colorkey=None):
fullname = os.path.join(name)
if not os.path.isfile(fullname):
print(f"Файл с изображением '{fullname}' не найден")
sys.exit()
image = pygame.image.load(fullname)
if colorkey is not None:
image = image.convert()
if colorkey == -1:
colorkey = image.get_at((0, 0))
image.set_colorkey(colorkey)
else:
image = image.convert_alpha()
return image
all_sprites = pygame.sprite.Group()
class Particle(pygame.sprite.Sprite):
# generate particles of different sizes
fire = []
fiire = []
star1 = 'starr.png'
star2 = 'starrr.png'
star3 = 'starrrr.png'
star4 = 'starrrrr.png'
for scale in (15, 20, 30):
fire.append(pygame.transform.scale(load_image(star1), (scale, scale)))
fiire.append(pygame.transform.scale(load_image(star2), (scale, scale)))
fiire.append(pygame.transform.scale(load_image(star3), (scale, scale)))
fiire.append(pygame.transform.scale(load_image(star4), (scale, scale)))
def __init__(self, pos, dx, dy, nado):
super().__init__(all_sprites)
if nado != 0:
self.image = random.choice(self.fire)
else:
self.image = random.choice(self.fiire)
self.rect = self.image.get_rect()
# each particle has its own velocity (a vector)
self.velocity = [dx, dy]
# and its own coordinates
self.rect.x, self.rect.y = pos
# gravity is the same for all particles (a constant)
self.gravity = 0.2
def update(self):
# apply the gravity effect:
# movement accelerates under gravity
self.velocity[1] += self.gravity
# move the particle
self.rect.x += self.velocity[0]
self.rect.y += self.velocity[1]
# kill the particle once it leaves the screen
if not self.rect.colliderect(screen_rect):
self.kill()
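# Per-frame Euler step used above (sketch): with g = 0.2,
# vy <- vy + g; x <- x + vx; y <- y + vy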
def rectangle(display, color, x, y, w, h):
pygame.draw.rect(display, color, (x, y, w, h))
def terminate():
pygame.quit()
sys.exit()
def start_screen():
app = QApplication(sys.argv)
ex = Example()
intro_text = ["ВЫ начинаете играть в лучшую игру 22 века", "",
"Правила игры:",
"Кликай",
"И",
"Очень много кликай"]
fon = pygame.transform.scale(load_image('menu1.jpg'), (1920, 1080))
screen.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 250
for line in intro_text:
DrawText(line, black, white, 950, text_coord, 50)
text_coord += 50
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:
return # start the game
pygame.display.flip()
clock.tick(90)
def pobeda(clicks):
intro_text = ["вы прошли игру",
"вы молодец",
f"для победы вам понадобилось {str(clicks)} кликов"]
fon = pygame.transform.scale(load_image('menu1.jpg'), (1920, 1080))
screen.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 250
for line in intro_text:
DrawText(line, black, white, 950, text_coord, 50)
text_coord += 50
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
clock.tick(90)
def final_screen(clicks, session):
intro_text = ["вы вышли из игры",
"за данный сеанс/всё время вы сделали",
f"{str(session)}/{str(clicks)}",
"клик(ов)(а)",
"моргенштерн доволен",
"нажмите ЛКМ или ПКМ для выхода",
"или иную баттен для возврата"]
fon = pygame.transform.scale(load_image('lilmorgen.jfif'), (1920, 1080))
screen.blit(fon, (0, 0))
font = pygame.font.Font(None, 30)
text_coord = 500
for line in intro_text:
DrawText(line, black, white, 950, text_coord, 50)
text_coord += 50
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
pygame.quit()
quit()
sys.exit()
elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
clock.tick(90)
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 150, 150)
self.setWindowTitle('Диалоговые окна')
self.run()
def run(self):
global user
name, ok_pressed = QInputDialog.getText(self, "Введите имя",
"Как тебя зовут?")
while name == '':
name, ok_pressed = QInputDialog.getText(self, "Введите имя",
"Как тебя зовут?")
r = cur.execute('select player from users where player = ?', (name,)).fetchone()
plrs = cur.execute('select player from users').fetchall()
if r is None:
cur.execute('insert into users ("player", "id") values(?, ?)', (name, len(plrs) + 1))
db.commit()
user = name
def main_loop():
global user
global clock
global autog
global ver
global color1
global color2
global color3
global clicks
sleva_first_lvl = ['деДский сад', 'дошик', 'шаверма', 'вода', 'пицца с ананасами', 'пюрешка', 'чёрный хлеб', 'суси',
'плесневелый сыр', 'мойва', 'паштет', 'виноград', 'овощи', 'нож', 'воздух']
sleva_second_lvl = ['шкИла', 'ролтон', 'шаурма', 'чай', 'норм пицца', 'макарошки', 'хороший хлеб',
'роллы', 'сыр', 'красная икра', 'сосисоны', 'сок', 'фрукты', 'ложка', 'вакуум']
sleva_third_lvl = ['уник', 'лапша', 'гирос', 'кофеёк', 'пепперони', 'котлетки', 'нарезной хлеб', 'рыбка с икрой',
'президент', 'чёрная икра', 'колбаса', 'вино', 'мясо', 'вилка', 'пространство']
sprava_first_lvl = ['хухл', 'мышь', 'клава', 'моник', 'крутая мышь', 'тубаретка', 'лампа', 'питхон', 'пучарм',
'консоль', 'сапёр', 'мелки', 'скайп', 'голуби', 'йандикс']
sprava_second_lvl = ['мэелру', 'кот', 'кока', 'монитор', 'зачётная мышь', 'стул', 'подсветка', 'питон v.3.9', 'пайчарм', 'цээмдэ', 'пасьянс', 'пэинт', 'зум',
'почтальон', 'яндекс']
sprava_third_lvl = ['рамблер', 'собака', 'к$&%', 'прожектор', 'лютая мышь', 'кресло', 'освещение', 'python v.3.9',
'pycharm', 'cmd', 'змейка', 'фотошоп', 'дискорд', 'эл.почта', 'Yandex', ]
grom = 0.07
groom = 0.5
kk = random.randint(0, 1720)
jj = random.randint(0, 980)
kkk = random.randint(10, 150)
jjj = random.randint(10, 150)
sound1.set_volume(groom)
sound2.set_volume(groom)
sound3.set_volume(groom)
sound4.set_volume(groom - 0.3)
clicks_session = 0
clicks = cur.execute('select clicks from users where player = ?', (user,)).fetchone()[0]
coins = cur.execute('select coins from users where player = ?', (user,)).fetchone()[0]
autog = cur.execute('select auto from users where player = ?', (user,)).fetchone()[0]
up1 = cur.execute('select up1 from users where player = ?', (user,)).fetchone()[0]
up2 = cur.execute('select up2 from users where player = ?', (user,)).fetchone()[0]
up3 = cur.execute('select up3 from users where player = ?', (user,)).fetchone()[0]
up4 = cur.execute('select up4 from users where player = ?', (user,)).fetchone()[0]
up5 = cur.execute('select up5 from users where player = ?', (user,)).fetchone()[0]
up6 = cur.execute('select up6 from users where player = ?', (user,)).fetchone()[0]
up7 = cur.execute('select up7 from users where player = ?', (user,)).fetchone()[0]
up8 = cur.execute('select up8 from users where player = ?', (user,)).fetchone()[0]
up9 = cur.execute('select up9 from users where player = ?', (user,)).fetchone()[0]
up10 = cur.execute('select up10 from users where player = ?', (user,)).fetchone()[0]
up11 = cur.execute('select up11 from users where player = ?', (user,)).fetchone()[0]
up12 = cur.execute('select up12 from users where player = ?', (user,)).fetchone()[0]
up13 = cur.execute('select up13 from users where player = ?', (user,)).fetchone()[0]
up14 = cur.execute('select up14 from users where player = ?', (user,)).fetchone()[0]
up15 = cur.execute('select up15 from users where player = ?', (user,)).fetchone()[0]
uup1 = cur.execute('select uup1 from users where player = ?', (user,)).fetchone()[0]
uup2 = cur.execute('select uup2 from users where player = ?', (user,)).fetchone()[0]
uup3 = cur.execute('select uup3 from users where player = ?', (user,)).fetchone()[0]
uup4 = cur.execute('select uup4 from users where player = ?', (user,)).fetchone()[0]
uup5 = cur.execute('select uup5 from users where player = ?', (user,)).fetchone()[0]
uup6 = cur.execute('select uup6 from users where player = ?', (user,)).fetchone()[0]
uup7 = cur.execute('select uup7 from users where player = ?', (user,)).fetchone()[0]
uup8 = cur.execute('select uup8 from users where player = ?', (user,)).fetchone()[0]
uup9 = cur.execute('select uup9 from users where player = ?', (user,)).fetchone()[0]
uup10 = cur.execute('select uup10 from users where player = ?', (user,)).fetchone()[0]
uup11 = cur.execute('select uup11 from users where player = ?', (user,)).fetchone()[0]
uup12 = cur.execute('select uup12 from users where player = ?', (user,)).fetchone()[0]
uup13 = cur.execute('select uup13 from users where player = ?', (user,)).fetchone()[0]
uup14 = cur.execute('select uup14 from users where player = ?', (user,)).fetchone()[0]
uup15 = cur.execute('select uup15 from users where player = ?', (user,)).fetchone()[0]
mong = cur.execute('select click from users where player = ?', (user,)).fetchone()[0]
cost1 = cur.execute('select cost1 from users where player = ?', (user,)).fetchone()[0]
cost2 = cur.execute('select cost2 from users where player = ?', (user,)).fetchone()[0]
cost3 = cur.execute('select cost3 from users where player = ?', (user,)).fetchone()[0]
cost4 = cur.execute('select cost4 from users where player = ?', (user,)).fetchone()[0]
cost5 = cur.execute('select cost5 from users where player = ?', (user,)).fetchone()[0]
cost6 = cur.execute('select cost6 from users where player = ?', (user,)).fetchone()[0]
cost7 = cur.execute('select cost7 from users where player = ?', (user,)).fetchone()[0]
cost8 = cur.execute('select cost8 from users where player = ?', (user,)).fetchone()[0]
cost9 = cur.execute('select cost9 from users where player = ?', (user,)).fetchone()[0]
cost10 = cur.execute('select cost10 from users where player = ?', (user,)).fetchone()[0]
cost11 = cur.execute('select cost11 from users where player = ?', (user,)).fetchone()[0]
cost12 = cur.execute('select cost12 from users where player = ?', (user,)).fetchone()[0]
cost13 = cur.execute('select cost13 from users where player = ?', (user,)).fetchone()[0]
cost14 = cur.execute('select cost14 from users where player = ?', (user,)).fetchone()[0]
cost15 = cur.execute('select cost15 from users where player = ?', (user,)).fetchone()[0]
ccost1 = cur.execute('select ccost1 from users where player = ?', (user,)).fetchone()[0]
ccost2 = cur.execute('select ccost2 from users where player = ?', (user,)).fetchone()[0]
ccost3 = cur.execute('select ccost3 from users where player = ?', (user,)).fetchone()[0]
ccost4 = cur.execute('select ccost4 from users where player = ?', (user,)).fetchone()[0]
ccost5 = cur.execute('select ccost5 from users where player = ?', (user,)).fetchone()[0]
ccost6 = cur.execute('select ccost6 from users where player = ?', (user,)).fetchone()[0]
ccost7 = cur.execute('select ccost7 from users where player = ?', (user,)).fetchone()[0]
ccost8 = cur.execute('select ccost8 from users where player = ?', (user,)).fetchone()[0]
ccost9 = cur.execute('select ccost9 from users where player = ?', (user,)).fetchone()[0]
ccost10 = cur.execute('select ccost10 from users where player = ?', (user,)).fetchone()[0]
ccost11 = cur.execute('select ccost11 from users where player = ?', (user,)).fetchone()[0]
ccost12 = cur.execute('select ccost12 from users where player = ?', (user,)).fetchone()[0]
ccost13 = cur.execute('select ccost13 from users where player = ?', (user,)).fetchone()[0]
ccost14 = cur.execute('select ccost14 from users where player = ?', (user,)).fetchone()[0]
ccost15 = cur.execute('select ccost15 from users where player = ?', (user,)).fetchone()[0]
ulta = cur.execute('select ulta from users where player = ?', (user,)).fetchone()[0]
voz = cur.execute('select voz from users where player = ?', (user,)).fetchone()[0]
kart = 0
bonus = 0
nado = 0
udv = 1
vrem = 400
nomer = cur.execute('select nomer from users where player = ?', (user,)).fetchone()[0]
game_running = True
while game_running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_running = False
if event.type == MYEVENTTYPE:
coins = coins + autog
if event.type == pygame.MOUSEBUTTONDOWN and (event.button == 1 or event.button == 3):
color = gameDisplay.get_at(pygame.mouse.get_pos())
mopos = pygame.mouse.get_pos()
if color[0] == 255 and color[1] == 255 and color[2] == 0 and udv == 1:
udv = 2
if mopos[0] >= 625 and mopos[1] >= 130:
if mopos[0] <= 885 and mopos[1] <= 360:
sound1.play()
coins += mong * udv * voz
clicks += 1
clicks_session += 1
if udv != 2:
create_particles(pygame.mouse.get_pos(), 1)
else:
create_particles(pygame.mouse.get_pos(), 0)
if mopos[0] <= 1350 and mopos[1] <= 50:
if mopos[0] >= 1150 and mopos[1] >= 0:
if coins >= cost1:
sound2.play()
coins = coins - cost1
cost1 = cost1 * 1.3
mong = mong + 1
cost1 = round(cost1, 0)
else:
sound3.play()
if mopos[0] <= 1350 and mopos[1] <= 110:
if mopos[0] >= 1150 and mopos[1] >= 60:
if coins >= cost2:
sound2.play()
coins = coins - cost2
cost2 = cost2 * 1.3
mong = mong + 5
cost2 = round(cost2, 0)
else:
sound3.play()
if mopos[0] <= 1350 and mopos[1] <= 170:
if mopos[0] >= 1150 and mopos[1] >= 120:
if coins >= cost3:
sound2.play()
coins = coins - cost3
cost3 = cost3 * 1.3
mong = mong + 15
cost3 = round(cost3, 0)
else:
sound3.play()
if mopos[0] <= 1350 and mopos[1] <= 230:
if mopos[0] >= 1150 and mopos[1] >= 180:
if coins >= cost4:
sound2.play()
coins = coins - cost4
cost4 = cost4 * 1.3
mong = mong + 50
cost4 = round(cost4, 0)
else:
sound3.play()
if mopos[0] <= 1350 and mopos[1] <= 290:
if mopos[0] >= 1150 and mopos[1] >= 240:
if coins >= cost5:
sound2.play()
coins = coins - cost5
cost5 = cost5 * 1.3
mong = mong + 250
cost5 = round(cost5, 0)
else:
sound3.play()
if mopos[0] <= 1350 and mopos[1] <= 350:
if mopos[0] >= 1150 and mopos[1] >= 300:
if coins >= cost6:
sound2.play()
coins = coins - cost6
cost6 = cost6 * 1.3
mong = mong + 500
cost6 = round(cost6, 0)
else:
sound3.play()
if mopos[0] <= 1350 and mopos[1] <= 410:
if mopos[0] >= 1150 and mopos[1] >= 360:
if coins >= cost7:
sound2.play()
coins = coins - cost7
cost7 = cost7 * 1.3
mong = mong + 1000
cost7 = round(cost7, 0)
else:
sound3.play()
if mopos[0] <= 1350 and mopos[1] <= 470:
if mopos[0] >= 1150 and mopos[1] >= 420:
if coins >= cost8:
sound2.play()
coins = coins - cost8
cost8 = cost8 * 1.3
mong = mong + 2500
cost8 = round(cost8, 0)
else:
sound3.play()
if mopos[0] <= 1350 and mopos[1] <= 530:
if mopos[0] >= 1150 and mopos[1] >= 480:
if coins >= cost9:
sound2.play()
coins = coins - cost9
cost9 = cost9 * 1.3
mong = mong + up9
cost9 = round(cost9, 0)
else:
sound3.play()
if mopos[0] <= 1350 and mopos[1] <= 590:
if mopos[0] >= 1150 and mopos[1] >= 540:
if coins >= cost10:
sound2.play()
coins = coins - cost10
cost10 = cost10 * 1.3
mong = mong + up10
cost10 = round(cost10, 0)
else:
sound3.play()
if mopos[0] <= 1350 and mopos[1] <= 650:
if mopos[0] >= 1150 and mopos[1] >= 600:
if coins >= cost11:
sound2.play()
coins = coins - cost11
cost11 = cost11 * 1.3
mong = mong + up11
cost11 = round(cost11, 0)
else:
sound3.play()
if mopos[0] <= 1350 and mopos[1] <= 710:
if mopos[0] >= 1150 and mopos[1] >= 660:
if coins >= cost12:
sound2.play()
coins = coins - cost12
cost12 = cost12 * 1.3
mong = mong + up12
cost12 = round(cost12, 0)
else:
sound3.play()
if mopos[0] <= 1350 and mopos[1] <= 770:
if mopos[0] >= 1150 and mopos[1] >= 720:
if coins >= cost13:
sound2.play()
coins = coins - cost13
cost13 = cost13 * 1.3
mong = mong + up13
cost13 = round(cost13, 0)
else:
sound3.play()
if mopos[0] <= 1350 and mopos[1] <= 830:
if mopos[0] >= 1150 and mopos[1] >= 780:
if coins >= cost14:
sound2.play()
coins = coins - cost14
cost14 = cost14 * 1.3
mong = mong + up14
cost14 = round(cost14, 0)
else:
sound3.play()
if mopos[0] <= 1350 and mopos[1] <= 890:
if mopos[0] >= 1150 and mopos[1] >= 840:
if coins >= cost15:
sound2.play()
coins = coins - cost15
cost15 = cost15 * 1.3
mong = mong + up15
cost15 = round(cost15, 0)
else:
sound3.play()
if mopos[0] >= 130 and mopos[1] >= 0:
if mopos[0] <= 330 and mopos[1] <= 50:
if coins >= ccost1:
sound2.play()
coins = coins - ccost1
ccost1 = ccost1 * 1.4
autog = autog + (uup1 * voz)
ccost1 = round(ccost1, 0)
else:
sound3.play()
if mopos[0] >= 130 and mopos[1] >= 60:
if mopos[0] <= 330 and mopos[1] <= 110:
if coins >= ccost2:
sound2.play()
coins = coins - ccost2
ccost2 = ccost2 * 1.4
autog = autog + (uup2 * voz)
ccost2 = round(ccost2, 0)
else:
sound3.play()
if mopos[0] >= 130 and mopos[1] >= 120:
if mopos[0] <= 330 and mopos[1] <= 170:
if coins >= ccost3:
sound2.play()
coins = coins - ccost3
ccost3 = ccost3 * 1.4
autog = autog + (uup3 * voz)
ccost3 = round(ccost3, 0)
else:
sound3.play()
if mopos[0] >= 130 and mopos[1] >= 180:
if mopos[0] <= 330 and mopos[1] <= 230:
if coins >= ccost4:
sound2.play()
coins = coins - ccost4
ccost4 = ccost4 * 1.4
autog = autog + (uup4 * voz)
ccost4 = round(ccost4, 0)
else:
sound3.play()
if mopos[0] >= 130 and mopos[1] >= 240:
if mopos[0] <= 330 and mopos[1] <= 290:
if coins >= ccost5:
sound2.play()
coins = coins - ccost5
ccost5 = ccost5 * 1.4
autog = autog + (uup5 * voz)
ccost5 = round(ccost5, 0)
else:
sound3.play()
if mopos[0] >= 130 and mopos[1] >= 300:
if mopos[0] <= 330 and mopos[1] <= 350:
if coins >= ccost6:
sound2.play()
coins = coins - ccost6
ccost6 = ccost6 * 1.4
autog = autog + (uup6 * voz)
ccost6 = round(ccost6, 0)
else:
sound3.play()
if mopos[0] >= 130 and mopos[1] >= 360:
if mopos[0] <= 330 and mopos[1] <= 410:
if coins >= ccost7:
sound2.play()
coins = coins - ccost7
ccost7 = ccost7 * 1.4
autog = autog + (uup7 * voz)
ccost7 = round(ccost7, 0)
else:
sound3.play()
if mopos[0] >= 130 and mopos[1] >= 420:
if mopos[0] <= 330 and mopos[1] <= 470:
if coins >= ccost8:
sound2.play()
coins = coins - ccost8
ccost8 = ccost8 * 1.4
autog = autog + (uup8 * voz)
ccost8 = round(ccost8, 0)
else:
sound3.play()
if mopos[0] >= 130 and mopos[1] >= 480:
if mopos[0] <= 330 and mopos[1] <= 530:
if coins >= ccost9:
sound2.play()
coins = coins - ccost9
ccost9 = ccost9 * 1.4
autog = autog + (uup9 * voz)
ccost9 = round(ccost9, 0)
else:
sound3.play()
if mopos[0] >= 130 and mopos[1] >= 540:
if mopos[0] <= 330 and mopos[1] <= 590:
if coins >= ccost10:
sound2.play()
coins = coins - ccost10
ccost10 = ccost10 * 1.4
autog = autog + (uup10 * voz)
ccost10 = round(ccost10, 0)
else:
sound3.play()
if mopos[0] >= 130 and mopos[1] >= 600:
if mopos[0] <= 330 and mopos[1] <= 650:
if coins >= ccost11:
sound2.play()
coins = coins - ccost11
ccost11 = ccost11 * 1.4
autog = autog + (uup11 * voz)
ccost11 = round(ccost11, 0)
else:
sound3.play()
if mopos[0] >= 130 and mopos[1] >= 660:
if mopos[0] <= 330 and mopos[1] <= 710:
if coins >= ccost12:
sound2.play()
coins = coins - ccost12
ccost12 = ccost12 * 1.4
autog = autog + (uup12 * voz)
ccost12 = round(ccost12, 0)
else:
sound3.play()
if mopos[0] >= 130 and mopos[1] >= 720:
if mopos[0] <= 330 and mopos[1] <= 770:
if coins >= ccost13:
sound2.play()
coins = coins - ccost13
ccost13 = ccost13 * 1.4
autog = autog + (uup13 * voz)
ccost13 = round(ccost13, 0)
else:
sound3.play()
if mopos[0] >= 130 and mopos[1] >= 780:
if mopos[0] <= 330 and mopos[1] <= 830:
if coins >= ccost14:
sound2.play()
coins = coins - ccost14
ccost14 = ccost14 * 1.4
autog = autog + (uup14 * voz)
ccost14 = round(ccost14, 0)
else:
sound3.play()
if mopos[0] >= 130 and mopos[1] >= 840:
if mopos[0] <= 330 and mopos[1] <= 890:
if coins >= ccost15:
sound2.play()
coins = coins - ccost15
ccost15 = ccost15 * 1.4
autog = autog + (uup15 * voz)
ccost15 = round(ccost15, 0)
else:
sound3.play()
if mopos[0] <= 850 and mopos[1] <= 1080:
if mopos[0] >= 650 and mopos[1] >= 830:
if coins >= ulta:
voz += 0.2
if voz >= 1.5:
pobeda(clicks)
return
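                    # Prestige reset: upgrade prices for tiers 1-10 are restored, the ult price grows tenfold, income and coins drop to zero.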
mong = 1
cost1 = 5
cost2 = 50
cost3 = 250
cost4 = 1000
cost5 = 25000
cost6 = 100000
cost7 = 500000
cost8 = 1500000
cost9 = 3000000
cost10 = 5000000
ccost1 = 10
ccost2 = 100
ccost3 = 500
ccost4 = 2000
ccost5 = 50000
ccost6 = 200000
ccost7 = 1000000
ccost8 = 3000000
ccost9 = 6000000
ccost10 = 10000000
ulta = ulta * 10
autog = 0
nomer += 1
coins = 0
sound4.play()
if mopos[0] >= 1820 and mopos[1] >= 980:
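            # Exit button: persist the full game state to the users table before leaving.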
cur.execute('''update users set clicks = ?,
coins = ?,
ulta = ?,
voz = ?,
auto = ?,
click = ?,
up1 = ?,
up2 = ?,
up3 = ?,
up4 = ?,
up5 = ?,
up6 = ?,
up7 = ?,
up8 = ?,
up9 = ?,
up10 = ?,
up11 = ?,
up12 = ?,
up13 = ?,
up14 = ?,
up15 = ?,
uup1 = ?,
uup2 = ?,
uup3 = ?,
uup4 = ?,
uup5 = ?,
uup6 = ?,
uup7 = ?,
uup8 = ?,
uup9 = ?,
uup10 = ?,
uup11 = ?,
uup12 = ?,
uup13 = ?,
uup14 = ?,
uup15 = ?,
cost1 = ?,
cost2 = ?,
cost3 = ?,
cost4 = ?,
cost5 = ?,
cost6 = ?,
cost7 = ?,
cost8 = ?,
cost9 = ?,
cost10 = ?,
cost11 = ?,
cost12 = ?,
cost13 = ?,
cost14 = ?,
cost15 = ?,
ccost1 = ?,
ccost2 = ?,
ccost3 = ?,
ccost4 = ?,
ccost5 = ?,
ccost6 = ?,
ccost7 = ?,
ccost8 = ?,
ccost9 = ?,
ccost10 = ?,
ccost11 = ?,
ccost12 = ?,
ccost13 = ?,
ccost14 = ?,
ccost15 = ? where player = ?''', (clicks, coins, ulta, voz, autog, mong, up1, up2, up3, up4, up5, up6, up7, up8, up9, up10, up11, up12, up13, up14, up15,
uup1, uup2, uup3, uup4, uup5, uup6, uup7, uup8, uup9, uup10, uup11, uup12, uup13, uup14, uup15,
cost1, cost2, cost3, cost4, cost5, cost6, cost7, cost8, cost9, cost10, cost11, cost12, cost13, cost14,
cost15, ccost1, ccost2, ccost3, ccost4, ccost5, ccost6, ccost7, ccost8, ccost9, ccost10, ccost11, ccost12,
ccost13, ccost14, ccost15, user,))
db.commit()
final_screen(clicks, clicks_session)
return
if mopos[0] >= 1500 and mopos[1] >= 200:
if mopos[0] <= 1600 and mopos[1] <= 300:
kart = 0
pygame.mixer.music.load('fon.mp3')
pygame.mixer.music.set_volume(grom)
                pygame.mixer.music.play(loops=-1)  # loop the background track
if mopos[0] >= 1660 and mopos[1] >= 200:
if mopos[0] <= 1760 and mopos[1] <= 300:
kart = 1
pygame.mixer.music.load('fonn2.mp3')
pygame.mixer.music.set_volume(grom)
                pygame.mixer.music.play(loops=-1)  # loop the background track
if mopos[0] >= 1820 and mopos[1] >= 200:
if mopos[0] <= 1920 and mopos[1] <= 300:
kart = 2
pygame.mixer.music.load('morgenn.mp3')
pygame.mixer.music.set_volume(grom)
                pygame.mixer.music.play(loops=-1)  # loop the background track
if mopos[0] >= 1560 and mopos[1] >= 470:
if mopos[0] <= 1660 and mopos[1] <= 500:
grom -= 0.01
if grom <= 0:
grom = 0
grom = min(grom, 10.0)
pygame.mixer.music.set_volume(grom)
if mopos[0] >= 1790 and mopos[1] >= 437:
if mopos[0] <= 1890 and mopos[1] <= 537:
grom += 0.01
if grom < 0.01:
grom = 0
grom = min(grom, 10.0)
pygame.mixer.music.set_volume(grom)
if mopos[0] >= 1560 and mopos[1] >= 670:
if mopos[0] <= 1660 and mopos[1] <= 700:
groom -= 0.01
if groom <= 0:
groom = 0
groom = min(groom, 10.0)
sound1.set_volume(groom)
sound2.set_volume(groom)
sound3.set_volume(groom)
if mopos[0] >= 1790 and mopos[1] >= 637:
if mopos[0] <= 1890 and mopos[1] <= 737:
groom += 0.01
if groom <= 0:
groom = 0
groom = min(groom, 10.0)
sound1.set_volume(groom)
sound2.set_volume(groom)
sound3.set_volume(groom)
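        # Rendering: redraw the background, shop panels and HUD every frame.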
gameDisplay.fill(blue)
rectangle(gameDisplay, red, 750, 0, 750, 1080)
rectangle(gameDisplay, green4, 1500, 0, 420, 1080)
if kart != 2:
rectangle(gameDisplay, red, 1820, 980, 100, 1000)
else:
image2 = load_image('абанк.png')
blocksred = pygame.transform.scale(image2, (100, 100))
screen.blit(blocksred, (1820, 980))
if udv == 2:
gameDisplay.fill(yel)
vrem -= 1
nado = 0
bonus = 0
if vrem == 0:
udv = 1
vrem = 200
DrawText(f"Вы - {user}", black, light_blue, 1500, 1050, 25)
DrawText(f"Количество кликов: {clicks_session}/{clicks}", black, light_blue, 200, 1050, 25)
if kart == 0:
rectangle(gameDisplay, black, 1500, 200, 100, 100)
rectangle(gameDisplay, blue, 1660, 200, 100, 100)
rectangle(gameDisplay, blue, 1820, 200, 100, 100)
foto = "111.jpg"
image = load_image(foto)
image1 = pygame.transform.scale(image, (260, 225))
screen.blit(image1, (625, 130))
if kart == 1:
rectangle(gameDisplay, blue, 1500, 200, 100, 100)
rectangle(gameDisplay, black, 1660, 200, 100, 100)
rectangle(gameDisplay, blue, 1820, 200, 100, 100)
foto = "222.jpg"
image = load_image(foto)
image1 = pygame.transform.scale(image, (260, 225))
screen.blit(image1, (625, 130))
DrawText("Настройки:", black, green4, 1720, 100, 35)
DrawText("Громкость Музыки:", black, green4, 1720, 400, 35)
if grom < 0.02 and grom > 0.01:
grom = 0.02
if grom < 0.11 and grom > 0.10:
grom = 0.11
        DrawText(str(round(autog, 2)) + " coins per second", black, light_blue, 750, 415, 35)
        DrawText('and ' + str(round(mong * udv * voz, 2)) + " per click", black, light_blue, 750, 455, 35)
        DrawText("You are earning ", black, light_blue, 750, 375, 35)
rectangle(gameDisplay, red, 1560, 470, 100, 30)
rectangle(gameDisplay, red, 1790, 470, 100, 30)
rectangle(gameDisplay, red, 1825, 437, 30, 100)
DrawText("Громкость эффектов:", black, green4, 1720, 600, 35)
DrawText(str(int(groom * 100)), black, green4, 1730, 685, 35)
rectangle(gameDisplay, red, 1560, 670, 100, 30)
rectangle(gameDisplay, red, 1790, 670, 100, 30)
rectangle(gameDisplay, red, 1825, 637, 30, 100)
DrawText("Выход", black, red, 1875, 1030, 20)
DrawText("Кликальный клик", black, light_blue, 750, 100, 50)
DrawText("у вас есть " + str(f'{coins:.2f}') + " монет", black, light_blue, 750, 55, 20)
DrawText("Обнуление: " + str(nomer), black, light_blue, 750, 27, 20)
DrawText(str(int(grom * 100)), black, green4, 1730, 485, 35)
for i in range(0, 900, 60):
rectangle(gameDisplay, blue, 1150, i, 200, 50)
rectangle(gameDisplay, red, 145, i, 200, 50)
t = 0
ccosty = [ccost1, ccost2, ccost3, ccost4, ccost5, ccost6, ccost7, ccost8, ccost9, ccost10, ccost11, ccost12,
ccost13, ccost14, ccost15]
uups = [uup1, uup2, uup3, uup4, uup5, uup6, uup7, uup8, uup9, uup10, uup11, uup12, uup13, uup14, uup15]
costy = [cost1, cost2, cost3, cost4, cost5, cost6, cost7, cost8, cost9, cost10, cost11, cost12, cost13, cost14,
cost15]
ups = [up1, up2, up3, up4, up5, up6, up7, up8, up9, up10, up11, up12, up13, up14, up15]
for i in range(27, 927, 60):
if nomer == 0:
DrawText(f'{sleva_first_lvl[t]}(+{round(ups[t] * voz, 2)})', black, light_blue, 470, i, 20)
DrawText(f'{sprava_first_lvl[t]}(+{round(uups[t] * voz, 2)})', black, light_blue, 1030, i, 20)
elif nomer == 1:
DrawText(f'{sleva_second_lvl[t]}(+{round(ups[t] * voz, 2)})', black, light_blue, 470, i, 20)
DrawText(f'{sprava_second_lvl[t]}(+{round(uups[t] * voz, 2)})', black, light_blue, 1030, i, 20)
else:
DrawText(f'{sleva_third_lvl[t]}(+{round(ups[t] * voz, 2)})', black, light_blue, 470, i, 20)
DrawText(f'{sprava_third_lvl[t]}(+{round(uups[t] * voz, 2)})', black, light_blue, 1030, i, 20)
DrawText("цена:" + str(int(costy[t])), black, light_blue, 1425, i, 18)
DrawText("цена:" + str(int(ccosty[t])), black, light_blue, 73, i, 18)
t += 1
if kart == 2:
foto = "333.jpg"
image = load_image(foto)
image1 = pygame.transform.scale(image, (260, 225))
screen.blit(image1, (625, 130))
imagee = load_image('альфабанк.png')
image2 = load_image('абанк.png')
bankblue = load_image('abankblue.jpg')
gest = load_image('жесткийбанк.png')
imagee1 = pygame.transform.scale(image2, (100, 30))
imagee11 = pygame.transform.scale(imagee, (30, 100))
blocksred = pygame.transform.scale(image2, (200, 50))
blocksblue = pygame.transform.scale(bankblue, (200, 50))
gestyanka = pygame.transform.scale(gest, (100, 100))
bluemusic = pygame.transform.scale(bankblue, (100, 100))
screen.blit(imagee11, (1825, 437))
screen.blit(imagee11, (1825, 637))
screen.blit(imagee1, (1560, 670))
screen.blit(imagee1, (1790, 670))
screen.blit(imagee1, (1560, 470))
screen.blit(imagee1, (1790, 470))
screen.blit(gestyanka, (1820, 200))
screen.blit(bluemusic, (1500, 200))
screen.blit(bluemusic, (1660, 200))
for i in range(0, 900, 60):
screen.blit(blocksred, (145, i))
screen.blit(blocksblue, (1150, i))
if coins >= ulta // 10000:
rectangle(gameDisplay, green1, 650, 1030, 200, 50)
if coins >= ulta // 1000:
rectangle(gameDisplay, green2, 650, 980, 200, 100)
if coins >= ulta // 100:
rectangle(gameDisplay, green3, 650, 930, 200, 150)
if coins >= ulta // 10:
rectangle(gameDisplay, green4, 650, 880, 200, 200)
if coins >= ulta:
rectangle(gameDisplay, green5, 650, 830, 200, 250)
DrawText("УЛЬТУЙ", red, green5, 750, 970, 45)
nado -= 1
nado = max(nado, 0)
bonus += 1
if nado >= 1:
rectangle(gameDisplay, yel, kk, jj, kkk, jjj)
if bonus == 56:
bonus = 0
nado = 37
kk = random.randint(0, 1720)
jj = random.randint(0, 980)
kkk = random.randint(10, 150)
jjj = random.randint(10, 150)
all_sprites.update()
all_sprites.draw(screen)
pygame.display.update()
clock.tick(90)
start_screen()
main_loop()
pygame.quit()
quit()
| babysheesh/clicker | кликер2.0/main.py | main.py | py | 49,547 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.time.Clock",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_... |
14298819617 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/1/1 0:31
# @Author : lingxiangxiang
# @File : demonlogging.py
import logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S', filename='myapp.log', filemode='w')
logger = logging.getLogger(__name__)
def hello(name):
print("hello {0}".format(name))
hello("ajing")
logger.info("excute function hello")
logger.debug("debug")
logger.info("info")
logger.warning("warning")
logger.error("error")
| ajing2/python3 | Basics/module/demonlogging.py | demonlogging.py | py | 568 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
}
] |
16044333714 | from config import *
import numpy as np
from os import listdir
from os.path import join, isdir
from tqdm import tqdm
import cv2
from tensorflow.keras.utils import to_categorical
import random
def create_dataset(dataset_path: str, data_aug: bool):
"""
Parameters
----------
dataset_path
path to dataset each sub-folder being one class
data_aug
data augmentation by obtaining frames with different temporal stride
Returns
-------
numpy array
array of clips, array of labels, classes
"""
labels = []
clips = []
classes = []
class_idx = 0
# iterate through class folders, one folder per class
for f in sorted(listdir(dataset_path)):
class_path = join(dataset_path, f)
if isdir(class_path):
for vid in tqdm(listdir(class_path), desc="Extracting frames from " + f):
if vid.endswith(VIDEO_EXTENSION[0]) or vid.endswith(VIDEO_EXTENSION[1]):
video_path = join(class_path, vid)
if ONE_CLIPXVIDEO:
clip = extract_frames_single_clip(video_path, BATCH_INPUT_LENGTH)
clips.append(clip)
labels.append(class_idx)
else:
stride_list = [x for x in range(2, 4)] if data_aug else [TEMPORAL_STRIDE]
for stride in stride_list:
video_clips = extract_frames_by_stride(video_path, stride)
clips.extend(video_clips)
labels.extend([class_idx for x in range(len(video_clips))])
class_idx += 1
classes.append(f)
    clips, labels = shuffle_two_lists(clips, labels)
return np.asarray(clips), to_categorical(np.asarray(labels)), classes
def extract_frames_single_clip(path: str, clip_size: int) -> np.ndarray:
"""
Parameters
----------
path
path to video
clip_size
number of frames per clip
Returns
-------
    numpy.ndarray
        a single clip of shape (clip_size, IMAGE_SIZE, IMAGE_SIZE, channels)
"""
channels = 1 if USE_GRAY else 3
clip = np.zeros(shape=(clip_size, IMAGE_SIZE, IMAGE_SIZE, channels))
# create video capture
vidcap = cv2.VideoCapture(path)
total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
temporal_stride = max(int(total_frames / clip_size), 1)
print('temporal stride: ' + str(temporal_stride))
for count in range(clip_size):
# move video to desired frame given stride
vidcap.set(cv2.CAP_PROP_POS_FRAMES, count * temporal_stride)
# read next frame
success, frame = vidcap.read()
if not success: break
clip[count, :, :, :] = preprocess_frame(frame)
return clip
def preprocess_frame(frame):
"""
Parameters
----------
frame
cv2 frame
Returns
-------
frame
preprocessed frame
"""
color_space = cv2.COLOR_BGR2GRAY if USE_GRAY else cv2.COLOR_BGR2RGB
channels = 1 if USE_GRAY else 3
# reshape and normalize frame
new_frame = cv2.cvtColor(frame, color_space)
new_frame = cv2.resize(new_frame, (IMAGE_SIZE, IMAGE_SIZE)) / 256.0
new_frame = np.reshape(new_frame, (IMAGE_SIZE, IMAGE_SIZE, channels))
return new_frame
def shuffle_two_lists(a: list, b: list):
c = list(zip(a, b))
random.shuffle(c)
a, b = zip(*c)
return a, b
def extract_frames_by_stride(path: str, stride: int) -> list:
"""
Parameters
----------
path
path to video
stride
temporal stride
Returns
-------
list
        list of clips, each of shape (BATCH_INPUT_LENGTH, IMAGE_SIZE, IMAGE_SIZE, channels)
"""
channels = 1 if USE_GRAY else 3
clip = np.zeros(shape=(BATCH_INPUT_LENGTH, IMAGE_SIZE, IMAGE_SIZE, channels))
# create video capture
vidcap = cv2.VideoCapture(path)
total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
# num_clips = int((total_frames / stride) / BATCH_INPUT_SHAPE)
list_clips = []
cnt = 0
# run through all video frames
for idx in range(total_frames):
# read next frame
success, frame = vidcap.read()
if not success: break
        # temporal subsampling: keep only every stride-th frame
if idx % stride == 0:
clip[cnt, :, :, :] = preprocess_frame(frame)
cnt += 1
if cnt == BATCH_INPUT_LENGTH:
list_clips.append(np.copy(clip))
cnt = 0
return list_clips
def plot_clip(clip: np.ndarray):
"""
Parameters
----------
clip
        numpy array of shape (BATCH_INPUT_LENGTH, IMAGE_SIZE, IMAGE_SIZE, channels)
"""
cv2.imshow(' ', np.hstack(clip))
cv2.waitKey(0)
def get_classes(dataset_path) -> list:
"""
It is expected that the dataset_path contains no more than one folder per class
"""
classes = []
# iterate through class folders, one folder per class
for f in sorted(listdir(dataset_path)):
class_path = join(dataset_path, f)
if isdir(class_path):
classes.append(f)
return classes
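# Minimal usage sketch (hypothetical paths; assumes the constants from config are defined):
#   clips, labels, classes = create_dataset("data/train", data_aug=True)
#   print(clips.shape, labels.shape, classes)
#   plot_clip(clips[0])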
| serginogues/cnn_lstm_activity_recognition | utils.py | utils.py | py | 5,139 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": ... |
74520952744 | """
Usage: import the module, or run from
the command line as such:
python3 process_util.py --input=/path/to/input/file --output=/path/to/output/file --chunksize=chunksize
"""
import os
import sys
import itertools
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
DEFAULT_TEMP_SQLITE_DB_NAME = 'temp.db'
DEFAULT_INPUT_FILE_PATH = 'data/test.tsv'
DEFAULT_OUTPUT_FILE_PATH = 'data/test_proc.tsv'
DEFAULT_CHUNK_SIZE = 500
DEFAULT_SQLITE_DB_TEMP_NAME = 'temp.db'
class Config:
"""Config class for settin input path, output path, chunk size, name of temp db
Derive from this class to override some values
"""
INPUT_FILE_PATH = DEFAULT_INPUT_FILE_PATH
OUTPUT_FILE_PATH = DEFAULT_OUTPUT_FILE_PATH
CHUNK_SIZE = DEFAULT_CHUNK_SIZE
SQLITE_DB_TEMP_NAME = DEFAULT_SQLITE_DB_TEMP_NAME
class Process:
"""Class for preprocessing.
Usage:
config = Config()
process = Process(config)
process.process_data()
"""
def __init__(self, config):
self._config = config
self._n = 0
## TODO: refactor this if we have new attribute type with different size
first_row = pd.read_csv(config.INPUT_FILE_PATH, nrows=1, sep='\t')
features_str = first_row['features'].values[0]
feature_size = len(features_str.split(','))-1
# track means of attrs for update by chunks
self._means = np.zeros(feature_size)
# track means of square attrs for update by chunks
self._sq_means =np.zeros(feature_size)
# track std of attrs for update by chunks
self._std = np.zeros(feature_size)
        ## TODO: extract feature types dynamically
self._groups = [features_str.split(',')[0]]
self._feature_names = []
self._featur_stat_names = []
def process_data(self):
"""This public method. Process file specified in config by chunks
"""
try:
#remove temp.db if exist
try:
os.remove(self._config.SQLITE_DB_TEMP_NAME)
except OSError:
pass
self._disk_engine = create_engine(f'sqlite:///{self._config.SQLITE_DB_TEMP_NAME}')
self._conn = self._disk_engine.connect()
rows_count = 0
for chunk_df in pd.read_csv(self._config.INPUT_FILE_PATH, chunksize=self._config.CHUNK_SIZE, sep='\t'):
self.process_chunk(chunk_df)
rows_count += len(chunk_df)
print(f'Processed {rows_count} rows')
self.db_to_csv()
finally:
self.dispose()
def process_chunk(self, chunk_df):
"""Method perform process logic on chunk dataframe
# Arguments:
chunk_df: pandas dataframe
"""
chunk_transformed = self.transform_columns(chunk_df)
self._feature_names = chunk_transformed.loc[:, chunk_transformed.columns != 'id_job'].columns.values
chunk_transformed = self.append_max_feature_atr_index(chunk_transformed)
new_statistics = self.find_new_statistics(chunk_transformed)
new_means, new_sq_means, new_std = new_statistics
chunk_transformed = self.append_abs_diff(chunk_transformed, new_statistics)
chunk_transformed = self.populate_with_z_score(chunk_transformed, new_statistics)
## TODO: extend with new type of scores
self.update_previous_data_with_new_statistics(new_statistics)
self.append_new_data(chunk_transformed)
self._means = new_means
self._sq_means = new_sq_means
self._std = new_std
self._n += len(chunk_df)
def append_max_feature_atr_index(self, df):
"""Find max feature index per row
# Arguments:
df: pandas dataframe
# Returns:
df: dataframe with new max_feature_{group}_index column
"""
for group in self._groups:
filter_col = [col for col in df if col.startswith(f'features_{group}')]
max_feature_col_name = df[filter_col].idxmax(axis=1)
max_feature_ind = list(map(lambda x: x[2], max_feature_col_name.str.split('_')))
df[f'max_feature_{group}_index'] = max_feature_ind
return df
def append_abs_diff(self, df, new_statistics):
"""Find diff between max element in atrribut with corresponding attr mean value
# Arguments:
df: pandas dataframe
new_statistics: tuple that contains means for attrs from start to current chunk included,
                            means of squared elements for attrs from start to current chunk included,
std for attrs from start to current chunk included
# Returns:
df: dataframe with max_feature_{group}_abs_mean_diff column
"""
new_means = new_statistics[0]
for group in self._groups:
filter_col = [col for col in df if col.startswith(f'features_{group}')]
max_feature_ind = df[f'max_feature_{group}_index']
max_feature = df[filter_col].max(axis=1)
mean_values = np.take(new_means, max_feature_ind)
            # abs() is deliberately deferred to the very end (see db_to_csv): the signed value
            # must stay recoverable so older chunks can be re-adjusted with updated means.
df[f'max_feature_{group}_abs_mean_diff'] = max_feature - mean_values
return df
def transform_columns(self, df):
"""Transform features column (2,324,423,423,234, ...)
to multiple columns 'features_2_{i}'
# Arguments:
df: pandas dataframe
# Returns:
df: dataframe with new columns
"""
df_transformed = pd.DataFrame()
df_transformed['id_job'] = df['id_job']
## TODO: refactor in case of new attr types
        ## (It is unclear how a new attribute type would arrive - as new rows, new columns or inside the same cell - so this is kept as simple as possible)
temp = df['features'].str.split(",", expand = True)
for col_ind in range(temp.shape[1]):
col_name = f'features_{temp.iloc[0, 0]}_{col_ind - 1}'
if col_ind > 0:
df_transformed[col_name] = temp.iloc[: , col_ind]
df_transformed[col_name] = df_transformed[col_name].astype(str).astype('int64')
return df_transformed
def find_new_statistics(self, df):
"""This methods finds mean of attrs, mean of square attrs, std of attr
based on previous values _means, _sq_means, _std
# Arguments:
df: pandas dataframe
# Returns
new_statistics: tuple that contains means for attrs from start to current chunk included,
                            means of squared elements for attrs from start to current chunk included,
std for attrs from start to current chunk included
"""
n_cur = len(df)
features = df[self._feature_names].values
new_means = self._means + (features.mean(axis=0) - self._means)/((self._n + n_cur)/n_cur)
new_sq_means = self._sq_means + (np.square(features).mean(axis=0) - self._sq_means)/((self._n + n_cur)/n_cur)
new_std = np.sqrt(new_sq_means - np.square(new_means))
new_statistics = (new_means, new_sq_means, new_std)
return new_statistics
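    # Worked example of the chunked update (hypothetical numbers): with a running mean of 2.0
    # over n=100 rows and a new chunk of n_cur=50 rows whose mean is 5.0, the update gives
    # 2.0 + (5.0 - 2.0) / ((100 + 50) / 50) = 3.0, identical to (100*2.0 + 50*5.0) / 150.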
def populate_with_z_score(self, df, new_statistics):
"""Find z score for given feature attrs and replace it
# Arguments:
df: pandas dataframe
new_statistics: tuple that contains means for attrs from start to current chunk included,
                            means of squared elements for attrs from start to current chunk included,
std for attrs from start to current chunk included
# Returns:
df: dataframe with 'features_{group}_stand_{attr}' columns
"""
features = df[self._feature_names].values
new_means, new_sq_means, new_std = new_statistics
z_score_for_new = (features - new_means)/new_std
def transform_name(name):
parts = name.split('_')
return f'{parts[0]}_{parts[1]}_stand_{parts[2]}'
new_names = map(transform_name, self._feature_names)
self._featur_stat_names = list(new_names)
z_score_df = pd.DataFrame(z_score_for_new, columns=self._featur_stat_names, index=df.index)
df = pd.concat([df, z_score_df], axis=1)
df = df.drop(self._feature_names, axis=1)
return df
def update_previous_data_with_new_statistics(self, new_statistics):
"""Update previously processed chunks that stored in sqlite db with new zcore and
diff between max value in the attribute and corresponding mean
# Arguments: new_statistics: tuple that contains means for attrs from start to current chunk included,
means of squared elemetns for attrs from start to current chunk included,
std for attrs from start to current chunk included
"""
if os.path.isfile(self._config.SQLITE_DB_TEMP_NAME) and self._disk_engine.has_table('temp'):
new_means, new_sq_means, new_std = new_statistics
for chunk_df in pd.read_sql_query('SELECT * FROM temp', con=self._conn, chunksize=self._config.CHUNK_SIZE):
# restore value using std and mean on previous step
chunk_df[self._featur_stat_names] = chunk_df[self._featur_stat_names] * self._std + self._means
# find new z score with new mean and std
chunk_df[self._featur_stat_names] = (chunk_df[self._featur_stat_names] - new_means)/new_std
#update abs diff between max value in the attribute and corresponding mean
for group in self._groups:
max_feature_ind = chunk_df[f'max_feature_{group}_index']
mean_values = np.take(self._means, max_feature_ind)
new_mean_values = np.take(new_means, max_feature_ind)
#restore previous value
chunk_df[f'max_feature_{group}_abs_mean_diff'] = chunk_df[f'max_feature_{group}_abs_mean_diff'] + pd.Series(mean_values)
#update with new mean. Do not calculate abs because it is imposible to restore then.
#Abs operation will be in the end
chunk_df[f'max_feature_{group}_abs_mean_diff'] = chunk_df[f'max_feature_{group}_abs_mean_diff'] - pd.Series(new_mean_values)
                # Workaround: the updated chunk is written to a buffer table ('temp_2') because
                # there is no easy way to run many UPDATE statements at once; the matching old
                # rows are then deleted from the origin table.
chunk_df.to_sql('temp_2', con=self._conn, if_exists='append', index=False)
ids = ','.join(map(str, chunk_df['id_job'].values))
sql = f'DELETE from temp WHERE id_job in ({ids});'
self._conn.execute(sql)
# after we process all chunks there will be empty origin table. So remove it.
# And rename our buffer table as origin
self._conn.execute('DROP TABLE temp')
self._conn.execute('ALTER TABLE temp_2 rename to temp')
def append_new_data(self, df):
"""Append new chunk to sqlite db
# Arguments:
df: pandas dataframe
"""
df.to_sql('temp', self._disk_engine, if_exists='append', index=False)
def db_to_csv(self):
"""Convert sqlite db to tsv file and perform abs operation for max_feature_{group}_abs_mean_diff column
"""
for chunk_df in pd.read_sql_query('SELECT * FROM temp', con=self._conn, chunksize=self._config.CHUNK_SIZE):
            # perform the abs operation that was deferred during processing
for group in self._groups:
chunk_df[f'max_feature_{group}_abs_mean_diff'] = np.abs(chunk_df[f'max_feature_{group}_abs_mean_diff'])
if not os.path.isfile(self._config.OUTPUT_FILE_PATH):
chunk_df.to_csv(self._config.OUTPUT_FILE_PATH, header='column_names', index=False, sep='\t')
else: # else it exists so append without writing the header
chunk_df.to_csv(self._config.OUTPUT_FILE_PATH, mode='a', header=False, index=False, sep='\t')
def delete_temp_db(self):
"""Remove temprorary sqlite db
"""
try:
os.remove(self._config.SQLITE_DB_TEMP_NAME)
except OSError:
pass
def dispose(self):
"""Close sqlite connecton and remove temprorary db
"""
self._conn.close()
self.delete_temp_db()
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='preprocess tsv file')
parser.add_argument('--input', required=False,
metavar="/path/to/input/file",
help='Path to file to preprocess',
default=DEFAULT_INPUT_FILE_PATH)
parser.add_argument('--output', required=False,
metavar="/path/to/output/file",
help="Path to output file",
default=DEFAULT_OUTPUT_FILE_PATH)
parser.add_argument('--chunksize', required=False,
metavar="chunksize",
help="chunksize",
default=DEFAULT_CHUNK_SIZE)
args = parser.parse_args()
print("Input file path: ", args.input)
print("Output file path: ", args.output)
print("chunksize: ", args.chunksize)
class MyConfig(Config):
INPUT_FILE_PATH = args.input
OUTPUT_FILE_PATH = args.output
CHUNK_SIZE = int(args.chunksize)
my_config = MyConfig()
process = Process(my_config)
process.process_data()
print('Preprocess has finished')
| emaksOne/preprocess_data | process_util.py | process_util.py | py | 14,141 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_numbe... |
37069849941 | #!/usr/bin/python3
"""
Gather data from an API
"""
import json
import requests
import sys
def get_employee_name(employee_id):
"""
Function to get employee name
"""
base_url = "https://jsonplaceholder.typicode.com"
user_url = f"{base_url}/users/{employee_id}"
response = requests.get(user_url)
if response.status_code == 200:
user_data = response.json()
return user_data.get("name")
else:
print("Unable to fetch employee name")
sys.exit(1)
def get_employee_todo_list(employee_id):
"""
    Function to retrieve the to-do list and export it to JSON
"""
name = get_employee_name(employee_id)
if not name:
sys.exit(1)
todo_url = f"{base_url}/todos?userId={employee_id}"
response = requests.get(todo_url)
if response.status_code == 200:
todos = response.json()
user_data = {employee_id: []}
for todo in todos:
task_data = {
"task": todo["title"],
"completed": todo["completed"],
"username": name
}
user_data[employee_id].append(task_data)
json_filename = f"{employee_id}.json"
with open(json_filename, mode="w") as json_file:
json.dump(user_data, json_file)
print(f"Data exported to {json_filename}")
else:
print(f"Error: Unable to fetch data for employee {employee_id}")
if __name__ == "__main__":
"""
main function
"""
if len(sys.argv) != 2:
print("Usage: python employee_todo.py <employee_id>")
sys.exit(1)
employee_id = int(sys.argv[1])
base_url = "https://jsonplaceholder.typicode.com"
get_employee_todo_list(employee_id)
| wughangar/alx-system_engineering-devops | 0x15-api/2-export_to_JSON.py | 2-export_to_JSON.py | py | 1,733 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 37,
... |
75034720424 | """
URL configuration for config project.
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
schema_view = get_schema_view(
openapi.Info(
title="PWLService",
default_version='v1',
description="Учебный портал. Представляет из себя сервис в "
"котором каждый может разместить свои уроки и "
"учиться у других",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="u_alex90@mail.ru"),
license=openapi.License(name="GNU License"),
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
path('admin/', admin.site.urls),
path('user/', include('app_users.urls')),
path('', include('app_pwl.urls')),
path('payment/', include('app_payment.urls')),
path('subscribe/', include('app_subscriptions.urls')),
path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]
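# With these routes the interactive API docs are served at /swagger/ and /redoc/.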
| ualex90/PWLService | config/urls.py | urls.py | py | 1,870 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "drf_yasg.views.get_schema_view",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi.Info",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": ... |
40517757437 | import os
import glob
import time
from datetime import datetime
from argparse import ArgumentParser
import pandas as pd
import matplotlib.pyplot as plt
import torch
import numpy as np
from agents.PPO import PPO
from environment.drl_environment import DRLEnvironment
def train():
env_name = "DRL"
has_continuous_action_space = True # continuous action space; else discrete
max_ep_len = 30 # max timesteps in one episode
max_training_timesteps = 20000 # break training loop if timeteps > max_training_timesteps
print_freq = max_ep_len * 2 # print avg reward in the interval (in num timesteps)
log_freq = max_ep_len * 2 # log avg reward in the interval (in num timesteps)
save_model_freq = 200 # save model frequency (in num timesteps)
action_std = 0.60 # starting std for action distribution (Multivariate Normal)
action_std_decay_rate = 0.05 # linearly decay action_std (action_std = action_std - action_std_decay_rate)
min_action_std = 0.1 # minimum action_std (stop decay after action_std <= min_action_std)
action_std_decay_freq = 1000 # action_std decay frequency (in num timesteps)
update_timestep = max_ep_len * 2 # update policy every n timesteps
K_epochs = 40 # update policy for K epochs in one PPO update
eps_clip = 0.2 # clip parameter for PPO
gamma = 0.99 # Higher discount factor for continuos actions
lr_actor = 0.0001 # learning rate for actor network
lr_critic = 0.0001 # learning rate for critic network
random_seed = 47 # set random seed if required (0 = no random seed)
env = DRLEnvironment(viz_image_cv2=False, observation_type="lidar")
# state space dimension
state_dim = env.get_observation_space()[0]
    # action space dimension (identical lookup for continuous and discrete in this environment)
    action_dim = env.action_space[0]
log_dir = "logs"
if not os.path.exists(log_dir):
os.makedirs(log_dir)
log_dir = log_dir + '/' + env_name + '/'
if not os.path.exists(log_dir):
os.makedirs(log_dir)
#### get number of log files in log directory
run_num = 0
current_num_files = next(os.walk(log_dir))[2]
run_num = len(current_num_files)
#### create new log file for each run
log_f_name = log_dir + '/PPO_' + env_name + "_log_" + str(run_num) + ".csv"
run_num_pretrained = 8 #### change this to prevent overwriting weights in same env_name folder
continue_training = False
directory = "models"
if not os.path.exists(directory):
os.makedirs(directory)
directory = directory + '/' + env_name + '/'
if not os.path.exists(directory):
os.makedirs(directory)
checkpoint_path = directory + "PPO_{}_{}_{}.pth".format(env_name, random_seed, run_num_pretrained)
print("Archivo de pesos : " + checkpoint_path)
if random_seed:
torch.manual_seed(random_seed)
np.random.seed(random_seed)
# initialize a PPO agent
ppo_agent = PPO(state_dim, action_dim, lr_actor, lr_critic, gamma, K_epochs, eps_clip, has_continuous_action_space, action_std)
# track total training time
start_time = datetime.now().replace(microsecond=0)
# logging file
log_f = open(log_f_name,"w+")
log_f.write('episode,timestep,reward\n')
# printing and logging variables
print_running_reward = 0
print_running_episodes = 0
log_running_reward = 0
log_running_episodes = 0
time_step = 0
i_episode = 0
if continue_training:
print("Cargando modelo anterior : " + checkpoint_path)
ppo_agent.load(checkpoint_path)
env.init_race_environment()
# training loop
while time_step <= max_training_timesteps:
state = env.start_race()
current_ep_reward = 0
for t in range(1, max_ep_len+1):
# select action with policy
action = ppo_agent.select_action(state)
state, reward, done = env.step(action)
# saving reward and is_terminals
ppo_agent.buffer.rewards.append(reward)
ppo_agent.buffer.is_terminals.append(done)
time_step +=1
current_ep_reward += reward
# update PPO agent
if time_step % update_timestep == 0:
ppo_agent.update()
# if continuous action space; then decay action std of ouput action distribution
if has_continuous_action_space and time_step % action_std_decay_freq == 0:
ppo_agent.decay_action_std(action_std_decay_rate, min_action_std)
# log in logging file
if time_step % log_freq == 0:
# log average reward till last episode
log_avg_reward = log_running_reward / log_running_episodes
log_avg_reward = round(log_avg_reward, 4)
log_f.write('{},{},{}\n'.format(i_episode, time_step, log_avg_reward))
log_f.flush()
log_running_reward = 0
log_running_episodes = 0
# printing average reward
if time_step % print_freq == 0:
# print average reward till last episode
print_avg_reward = print_running_reward / print_running_episodes
print_avg_reward = round(print_avg_reward, 2)
print("Episodio : {} \t\t Timestep : {} \t\t Recompensa : {}".format(i_episode, time_step, print_avg_reward))
print_running_reward = 0
print_running_episodes = 0
# save model weights
if time_step % save_model_freq == 0:
ppo_agent.save(checkpoint_path)
print("Modelo guardado")
print("Tiempo de entrenamiento : ", datetime.now().replace(microsecond=0) - start_time)
print("--------------------------------------------------------------------------------------------")
# break; if the episode is over
if done:
break
print_running_reward += current_ep_reward
print_running_episodes += 1
log_running_reward += current_ep_reward
log_running_episodes += 1
i_episode += 1
env.reset()
time.sleep(3)
log_f.close()
def test():
env_name = "DRL"
has_continuous_action_space = True
max_ep_len = 300 # max timesteps in one episode
action_std = 0.10 # set same std for action distribution which was used while saving
total_test_episodes = 10 # total num of testing episodes
K_epochs = 80 # update policy for K epochs
eps_clip = 0.2 # clip parameter for PPO
gamma = 0.99 # discount factor
lr_actor = 0.0001 # learning rate for actor
lr_critic = 0.0001 # learning rate for critic
env = DRLEnvironment(viz_image_cv2=False, observation_type="lidar")
# state space dimension
state_dim = env.get_observation_space()[0]
    # action space dimension (identical lookup for continuous and discrete in this environment)
    action_dim = env.action_space[0]
# initialize a PPO agent
ppo_agent = PPO(state_dim, action_dim, lr_actor, lr_critic, gamma, K_epochs, eps_clip, has_continuous_action_space, action_std)
# preTrained weights directory
random_seed = 47 #### set this to load a particular checkpoint trained on random seed
run_num_pretrained = 5 #### set this to load a particular checkpoint num
directory = "models" + '/' + env_name + '/'
checkpoint_path = directory + "PPO_{}_{}_{}.pth".format(env_name, random_seed, run_num_pretrained)
print("Cargando modelo anterior : " + checkpoint_path)
ppo_agent.load(checkpoint_path)
print("--------------------------------------------------------------------------------------------")
test_running_reward = 0
env.init_race_environment()
for ep in range(1, total_test_episodes+1):
ep_reward = 0
state = env.start_race()
for t in range(1, max_ep_len+1):
action = ppo_agent.select_action(state)
state, reward, done = env.step(action)
ep_reward += reward
if done:
break
# clear buffer
ppo_agent.buffer.clear()
test_running_reward += ep_reward
        print('Episode: {} \t\t Reward: {}'.format(ep, round(ep_reward, 2)))
ep_reward = 0
env.reset()
time.sleep(3)
print("============================================================================================")
avg_test_reward = test_running_reward / total_test_episodes
avg_test_reward = round(avg_test_reward, 2)
print("Recompensa promedio : " + str(avg_test_reward))
print("============================================================================================")
def plot():
env_name = 'DRL'
fig_num = 0 #### change this to prevent overwriting figures in same env_name folder
plot_avg = False # plot average of all runs; else plot all runs separately
fig_width = 10
fig_height = 6
# smooth out rewards to get a smooth and a less smooth (var) plot lines
window_len_smooth = 20
min_window_len_smooth = 1
linewidth_smooth = 1.5
alpha_smooth = 1
window_len_var = 5
min_window_len_var = 1
linewidth_var = 2
alpha_var = 0.1
colors = ['red', 'blue', 'green', 'orange', 'purple', 'olive', 'brown', 'magenta', 'cyan', 'crimson','gray', 'black']
# make directory for saving figures
figures_dir = "plots"
if not os.path.exists(figures_dir):
os.makedirs(figures_dir)
# make environment directory for saving figures
figures_dir = figures_dir + '/' + env_name + '/'
if not os.path.exists(figures_dir):
os.makedirs(figures_dir)
fig_save_path = figures_dir + '/PPO_' + env_name + '_fig_' + str(fig_num) + '.png'
# get number of log files in directory
log_dir = "logs_to_plot" + '/' + env_name + '/'
current_num_files = next(os.walk(log_dir))[2]
num_runs = len(current_num_files)
all_runs = []
for run_num in range(num_runs):
log_f_name = log_dir + '/PPO_' + env_name + "_log_" + str(run_num) + ".csv"
data = pd.read_csv(log_f_name)
data = pd.DataFrame(data)
all_runs.append(data)
ax = plt.gca()
if plot_avg:
# average all runs
df_concat = pd.concat(all_runs)
df_concat_groupby = df_concat.groupby(df_concat.index)
data_avg = df_concat_groupby.mean()
# smooth out rewards to get a smooth and a less smooth (var) plot lines
data_avg['reward_smooth'] = data_avg['reward'].rolling(window=window_len_smooth, win_type='triang', min_periods=min_window_len_smooth).mean()
data_avg['reward_var'] = data_avg['reward'].rolling(window=window_len_var, win_type='triang', min_periods=min_window_len_var).mean()
data_avg.plot(kind='line', x='timestep' , y='reward_smooth',ax=ax,color=colors[0], linewidth=linewidth_smooth, alpha=alpha_smooth)
data_avg.plot(kind='line', x='timestep' , y='reward_var',ax=ax,color=colors[0], linewidth=linewidth_var, alpha=alpha_var)
# keep only reward_smooth in the legend and rename it
handles, labels = ax.get_legend_handles_labels()
ax.legend([handles[0]], ["reward_avg_" + str(len(all_runs)) + "_runs"], loc=2).remove()
else:
for i, run in enumerate(all_runs):
# smooth out rewards to get a smooth and a less smooth (var) plot lines
run['reward_smooth_' + str(i)] = run['reward'].rolling(window=window_len_smooth, win_type='triang', min_periods=min_window_len_smooth).mean()
run['reward_var_' + str(i)] = run['reward'].rolling(window=window_len_var, win_type='triang', min_periods=min_window_len_var).mean()
# plot the lines
run.plot(kind='line', x='timestep' , y='reward_smooth_' + str(i),ax=ax,color=colors[i % len(colors)], linewidth=linewidth_smooth, alpha=alpha_smooth)
run.plot(kind='line', x='timestep' , y='reward_var_' + str(i),ax=ax,color=colors[i % len(colors)], linewidth=linewidth_var, alpha=alpha_var)
# keep alternate elements (reward_smooth_i) in the legend
handles, labels = ax.get_legend_handles_labels()
new_handles = []
new_labels = []
for i in range(len(handles)):
if(i%2 == 0):
new_handles.append(handles[i])
new_labels.append(labels[i])
ax.legend(new_handles, new_labels, loc=2).remove()
ax.grid(color='gray', linestyle='-', linewidth=1, alpha=0.2)
ax.set_xlabel("Timesteps", fontsize=12)
ax.set_ylabel("Rewards", fontsize=12)
plt.title(env_name, fontsize=14)
fig = plt.gcf()
fig.set_size_inches(fig_width, fig_height)
plt.savefig(fig_save_path)
plt.show()
def main(args):
if args.mode == 'train':
train()
if args.mode == 'test':
test()
if args.mode == 'plot':
plot()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
"--mode",
type=str,
choices=[
"test",
"plot"
],
default="train",
)
args = parser.parse_args()
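    # Typical invocations (sketch): `python main.py` trains by default;
    # use `--mode test` or `--mode plot` for evaluation or plotting.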
main(args) | AgileCodeCO/airsim-drl-reinforcement-learning | main.py | main.py | py | 13,673 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "environment.drl_environment.DRLEnvironment",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name":... |
7504826492 | from datetime import datetime
import time
import json
from master import Master
import os
LOCAL_PATH = os.path.join(os.getcwd(), "filesys", "colonyData.json")
# ANSI color codes
RED = '\033[0;31m'
GREEN = '\033[0;32m'
YELLOW = '\033[0;33m'
NC = '\033[0m' # No color, to reset
class UI:
def __init__(self):
self.master = Master(1)
# Temperature limits
self.min_temperature = 15
self.max_temperature = 30
# Light intensity limits
self.min_light_intensity = 0
self.max_light_intensity = 100
# Daytime hours limits
self.min_daytime_hours = 1
self.max_daytime_hours = 24
# Observation interval limits
self.min_observation_interval = 1
self.max_observation_interval = 24
self.loadColonyData()
def clearScreen(self):
os.system('cls' if os.name == 'nt' else 'clear')
print("\033c")
print(f"{GREEN}")
print(r"""
$$$$$$$\ $$\ $$\ $$$$$$\ $$$$$$$\ $$\ $$\ $$$$$$$$\ $$$$$$\ $$\ $$\ $$$$$$\
$$ __$$\ $$ | $\ $$ |$$ __$$\ $$ __$$\ $$ | $$ |$$ _____|$$ __$$\ $$ | $$ |\_$$ _|
$$ | $$ |$$ |$$$\ $$ |$$ / $$ |$$ | $$ |$$ | $$ |$$ | $$ / \__| $$ | $$ | $$ |
$$ | $$ |$$ $$ $$\$$ |$$$$$$$$ |$$$$$$$ |\$$\ $$ |$$$$$\ \$$$$$$\ $$ | $$ | $$ |
$$ | $$ |$$$$ _$$$$ |$$ __$$ |$$ __$$< \$$\$$ / $$ __| \____$$\ $$ | $$ | $$ |
$$ | $$ |$$$ / \$$$ |$$ | $$ |$$ | $$ | \$$$ / $$ | $$\ $$ | $$ | $$ | $$ |
$$$$$$$ |$$ / \$$ |$$ | $$ |$$ | $$ | \$ / $$$$$$$$\ \$$$$$$ | \$$$$$$ |$$$$$$\
\_______/ \__/ \__|\__| \__|\__| \__| \_/ \________| \______/ \______/ \______|
Version 1.0
""")
def displayMainMenu(self):
self.clearScreen()
print("Options:")
print("1. Check Colonies")
print("2. Insert New Colony")
print("3. Withdraw Old Colony")
print("4. Check Out-of-bounds Settings")
print("5. Pause/Resume Experiments")
print("0. Quit")
def listColonies(self):
self.clearScreen()
self.clearInactiveEntries()
print("Currently active colonies")
with open(f'{LOCAL_PATH}', 'r') as file:
data = json.load(file)
colonyIDs = list(data.keys()) # Store the actual colony IDs
for i, colonyID in enumerate(colonyIDs, start=1):
print(f"{i}. {colonyID}")
print("0. Back to main menu")
# Return the list of colony IDs
return colonyIDs
def displayColonyData(self, colonyID):
self.master.updateColony(colonyID, None, True)
self.master.getObservationData(colonyID)
with open(f'{LOCAL_PATH}', 'r') as file:
data = json.load(file)
self.clearScreen()
print(f"Colony Data for colony{colonyID}:")
print(json.dumps(data[f"colony{colonyID}"], indent=4))
# Update the settings for the specified colony
def updateColonySettings(self, colonyID):
# Get the colony data from the Master instance
updated_colony = self.master.colonyStorage[colonyID]
while True:
self.clearScreen()
print(f"Changing settings for {updated_colony}:")
# Display daytime settings
print(f"Daytime Settings:")
print(f"1. Daytime temperature: {updated_colony.dayTemp} (Limit: {self.min_temperature}-{self.max_temperature}°C)")
print(f"2. Daytime red light: {updated_colony.redDay} (Limit: {self.min_light_intensity}-{self.max_light_intensity})")
print(f"3. Daytime blue light: {updated_colony.blueDay} (Limit: {self.min_light_intensity}-{self.max_light_intensity})")
print(f"4. Daytime hours: {updated_colony.dayInterval.hour} (Limit: {self.min_daytime_hours}-{self.max_daytime_hours} hours)")
# Display nighttime settings
print(f"Nighttime Settings:")
print(f"5. Nighttime temperature: {updated_colony.nightTemp} (Limit: {self.min_temperature}-{self.max_temperature}°C)")
print(f"6. Nighttime red light: {updated_colony.redNight} (Limit: {self.min_light_intensity}-{self.max_light_intensity})")
print(f"7. Nighttime blue light: {updated_colony.blueNight} (Limit: {self.min_light_intensity}-{self.max_light_intensity})")
# Display Observation interval setting
print(f"Observation Settings:")
print(f"8. Observation interval: {self.master.observationFrequency} (Limit: {self.min_observation_interval}-{self.max_observation_interval} hours)")
print("0. Back to main menu")
setting_choice = input("Enter the setting to change (or 0 to go back): ")
if setting_choice == '0':
break
elif setting_choice in ['1', '2', '3', '4', '5', '6', '7', '8']:
new_value = input("Enter the new value: ")
try:
new_value = int(new_value)
except ValueError:
print("Invalid input. Please enter a number.")
input("Press Enter to continue...")
continue
min_limit, max_limit = self.getLimits(setting_choice)
if min_limit <= new_value <= max_limit:
setting_key = self.getSettingKey(setting_choice)
# Update the colony settings directly
setattr(updated_colony, setting_key, new_value)
print(f"Setting updated. New value for {setting_key}: {getattr(updated_colony, setting_key)}")
                    # Debug aid: list the attribute names available on the Colony instance
print(f"Attributes in updated_colony: {dir(updated_colony)}")
# Update the JSON file with the new setting
with open(LOCAL_PATH, 'r') as f:
settings = json.load(f)
colony_settings = settings.get(f'colony{colonyID}', {})
colony_settings[setting_key] = new_value
settings[f'colony{colonyID}'] = colony_settings
with open(LOCAL_PATH, 'w') as f:
json.dump(settings, f, indent=4)
input("Press Enter to continue...")
else:
print(f"Invalid input. Value must be between {min_limit} and {max_limit}.")
input("Press Enter to continue...")
else:
print("Invalid choice. Please select a valid option.")
input("Press Enter to continue...")
# Save colony data to colonyData.json
def saveColonyData(self):
with open(LOCAL_PATH, 'r') as f:
settings = json.load(f)
        for colonyID, colony_instance in self.master.colonyStorage.items():
colony_data = settings.get(f'colony{colonyID}', {})
for attribute, value in colony_instance.__dict__.items():
colony_data[attribute] = value
settings[f'colony{colonyID}'] = colony_data
with open(LOCAL_PATH, 'w') as f:
json.dump(settings, f, indent=4)
# Return the limits for the specified setting
def getLimits(self, setting_choice):
if setting_choice == '1' or setting_choice == '5':
return self.min_temperature, self.max_temperature
        elif setting_choice in ['2', '3', '6', '7']:
            return self.min_light_intensity, self.max_light_intensity
        elif setting_choice == '4':
            return self.min_daytime_hours, self.max_daytime_hours
        elif setting_choice == '8':
            return self.min_observation_interval, self.max_observation_interval
else:
return None, None
# Add this method to your UI class
def getSettingKey(self, setting_choice):
setting_key_mapping = {
'1': 'dayTemp',
'2': 'redDay',
'3': 'blueDay',
'4': 'dayInterval',
'5': 'nightTemp',
'6': 'redNight',
'7': 'blueNight',
'8': 'observationInterval', # Adjusted to match your Colony class attribute
}
return setting_key_mapping.get(setting_choice)
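    # e.g. getSettingKey('1') -> 'dayTemp', getSettingKey('8') -> 'observationInterval'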
# Insert colony into the system, and add it to the colonyData.json file
def insertColony(self):
self.clearScreen()
# Check if no colonies are available
if self.master.getAvailability() == 0:
self.clearScreen()
print("All colonies are occupied. Withdraw an old colony first.")
input("Press Enter to continue...")
else:
self.clearScreen()
print(self.master.getAvailability())
colony_choice = input("Enter the number of the colony to insert (or 0 to go back): ")
if colony_choice != '0':
print(f'Inserting colony{colony_choice}')
colonyID = int(colony_choice)
self.master.insertColony(colonyID) # Insert the colony into the Master instance
self.master.getObservationData(colonyID) # Get observation data for the colony
time.sleep(1)
print("Colony inserted successfully!")
else:
print("Returning to main menu...")
# Extract colony from the system, and remove it from the colonyData.json file
def extractColony(self):
self.clearScreen()
self.listColonies()
colony_choice = input("Enter the number of the colony to extract (or 0 to go back): ")
if colony_choice != '0':
print(f'Extracting colony{colony_choice}')
colonyID = int(colony_choice)
# Extract the colony and remove it from the system
if self.master.extractColony(colonyID):
# Remove the colony from the colonyData.json file
self.clearInactiveEntries()
time.sleep(1)
print(f"Colony{colonyID} extracted successfully!")
else:
print(f"ERROR: Unable to extract Colony{colonyID}.")
else:
print("Returning to main menu...")
# Remove extracted entries from the colonyData.json
def clearInactiveEntries(self):
try:
with open(LOCAL_PATH, "r") as file:
try:
all_colonies_data = json.load(file)
except json.JSONDecodeError:
all_colonies_data = {}
except FileNotFoundError:
all_colonies_data = {}
active_colonyIDs = set(map(str, self.master.colonyStorage.keys()))
filtered_colonies_data = {key: value for key, value in all_colonies_data.items() if key[6:] in active_colonyIDs}
with open(LOCAL_PATH, "w") as file:
json.dump(filtered_colonies_data, file, indent=4)
# Load colony data from colonyData.json into the Master instance
def loadColonyData(self):
try:
with open(LOCAL_PATH, 'r') as file:
updated_colony = json.load(file)
for colonyID, values in updated_colony.items():
colony_number = int(colonyID.split("colony")[1]) # Extract colony number
self.master.insertColony(colony_number) # Insert colony into Master
self.master.updateColony(colony_number, values, True) # Update colony data in Master
except (FileNotFoundError, json.JSONDecodeError):
self.master.logMessage(f"ALL: Unable to load colony data from {LOCAL_PATH}")
# Check if settings in colonyData.json are out of bounds
def checkOutOfBoundsSettings(self):
# Check if settings in colonyData.json are out of bounds
self.clearScreen()
print("Checking out-of-bounds settings...")
# Loop through all current colonies and check if their settings are out of bounds. If they are, print a warning message if otherwise, print a success message
with open(f'{LOCAL_PATH}', 'r') as file:
data = json.load(file)
for colony, info in data.items():
out_of_bounds = False # Flag to track if any setting is out of bounds
colony_name = colony
# Check each setting
if not (self.min_temperature <= info['daytime_temperature'] <= self.max_temperature):
print(f"Colony {colony_name} has out-of-bounds daytime temperature: {info['daytime_temperature']} (Limit: {self.min_temperature}-{self.max_temperature}°C)")
out_of_bounds = True
if not (self.min_temperature <= info['nighttime_temperature'] <= self.max_temperature):
print(f"Colony {colony_name} has out-of-bounds nighttime temperature: {info['nighttime_temperature']} (Limit: {self.min_temperature}-{self.max_temperature}°C)")
out_of_bounds = True
if not (self.min_light_intensity <= info['daytime_red_light'] <= self.max_light_intensity):
print(f"Colony {colony_name} has out-of-bounds daytime red light: {info['daytime_red_light']} (Limit: {self.min_light_intensity}-{self.max_light_intensity})")
out_of_bounds = True
if not (self.min_light_intensity <= info['daytime_blue_light'] <= self.max_light_intensity):
print(f"Colony {colony_name} has out-of-bounds daytime blue light: {info['daytime_blue_light']} (Limit: {self.min_light_intensity}-{self.max_light_intensity})")
out_of_bounds = True
if not (self.min_light_intensity <= info['nighttime_red_light'] <= self.max_light_intensity):
print(f"Colony {colony_name} has out-of-bounds nighttime red light: {info['nighttime_red_light']} (Limit: {self.min_light_intensity}-{self.max_light_intensity})")
out_of_bounds = True
if not (self.min_light_intensity <= info['nighttime_blue_light'] <= self.max_light_intensity):
print(f"Colony {colony_name} has out-of-bounds nighttime blue light: {info['nighttime_blue_light']} (Limit: {self.min_light_intensity}-{self.max_light_intensity})")
out_of_bounds = True
try:
daytime_hours = int(info['daytime_hours'].split(':')[0])
if not (self.min_daytime_hours <= daytime_hours <= self.max_daytime_hours):
print(f"Colony {colony_name} has out-of-bounds daytime hours: {daytime_hours} (Limit: {self.min_daytime_hours}-{self.max_daytime_hours} hours)")
out_of_bounds = True
except ValueError:
print(f"Colony {colony_name} has an invalid format for daytime hours.")
out_of_bounds = True
if not (self.min_observation_interval <= int(info['observation_interval']) <= self.max_observation_interval):
print(f"Colony {colony_name} has out-of-bounds observation interval: {info['observation_interval']} (Limit: {self.min_observation_interval}-{self.max_observation_interval} hours)")
out_of_bounds = True
if out_of_bounds:
print()
else:
print(f"Colony {colony_name}: All settings are within bounds")
input("Press Enter to continue...")
# Main loop
def run(self):
while True:
self.displayMainMenu()
choice = input("Enter your choice: ")
if choice == '1':
colonyIDs = self.listColonies() # Get the list of colony IDs [colony1, colony2, ...]
colonyIDs = [colonyID[6:] for colonyID in colonyIDs] # Remove the 'colony' prefix [1, 2, ...]
colony_choice = input("Enter your choice: ")
if colony_choice != '0':
actual_colonyID = colonyIDs[int(colony_choice) - 1] # Subtract 1 to get the index
self.displayColonyData(actual_colonyID) # Display the colony data
action = input("1. Change settings \n0. Back to the main menu: ")
if action == '1':
self.updateColonySettings(actual_colonyID) # Update the colony settings
else:
print("Returning to the main menu...")
elif choice == '2':
# Insert colony into system
self.insertColony()
elif choice == '3':
# Extract colony from system
self.extractColony()
elif choice == '4':
# Check out-of-bounds settings
self.checkOutOfBoundsSettings()
elif choice == '5':
pass
elif choice == '0':
print("Quitting...")
break
else:
print("Invalid choice! Please enter a valid option.")
if __name__ == "__main__":
ui = UI()
ui.run()
| Olliyard/DWARVES | Master/Archived/ui - Copy.py | ui - Copy.py | py | 17,234 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "master.Master",
"line_number": 1... |
40657145010 | import tensorflow as tf
import tensorlayer as tl
import numpy as np
import scipy.spatial.distance # plain "import scipy" does not expose the scipy.spatial submodule
import time
import math
import argparse
import random
import sys
import os
import matplotlib.pyplot as plt
from model import *
from tensorlayer.prepro import *
from tensorlayer.layers import *
from termcolor import colored, cprint
parser = argparse.ArgumentParser(description="Run the NN for pokemon showdown replays")
parser.add_argument('pokemonpath')
parser.add_argument('-gpu', '--cuda-gpus')
parser.add_argument('-bs', '--batch-size', type = int, default = 16)
parser.add_argument('-maxl', '--name-max-length', type = int, default = 20, help = "Max length of pokemon name")
parser.add_argument('-hu', '--hidden-units', type = int, default = 128, help = "Hidden state vec length for RNN")
parser.add_argument('-pu', '--pkmn-units', type = int, default = 128, help = "Hidden state vec length for each pokemon")
parser.add_argument('-load', '--load', type = str, default = "None", help = "File to load to continue training")
args = parser.parse_args()
# Hyper parameters
batch_size = args.batch_size # batch size
n_inputs = 128 # ascii
n_steps = args.name_max_length # name length
n_hidden_units = args.hidden_units # hidden state tensor length
n_pkmn_units = args.pkmn_units # pkmn state tensor length
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_gpus
continue_loop = True
# Read the data
file_content = [line.rstrip('\n') for line in open(args.pokemonpath, 'r')]
dataCount = len(file_content)
pokemons = np.zeros((dataCount, n_steps))
pokemons_size = np.zeros((dataCount))
pokemon_vectors = np.zeros((dataCount, n_pkmn_units))
for i in range(len(file_content)):
name_vec = np.array(np.array(file_content[i][:n_steps], 'c').view(np.uint8))
name_len = name_vec.shape[0]
pokemons_size[i] = name_len
pokemons[i] = np.pad(name_vec, (0, n_steps - name_len), 'constant')
ph_pkmn = tf.placeholder('uint8', [None, n_steps], name = "p1pkmn")
ph_pkmn_size = tf.placeholder('int32', [None], name = "p1pkmn_size")
# cell = tf.contrib.rnn.BasicRNNCell(n_hidden_units)
cell = input_rnn_cell(n_hidden_units)
zero_state = cell.zero_state(batch_size, dtype = tf.float32)
# Collect pokemon latent vector
with tf.variable_scope("Pokemon_encode_RNN", reuse = tf.AUTO_REUSE):
_, latent_pkmn_tmp = tf.nn.dynamic_rnn(
cell = cell,
dtype = tf.float32,
sequence_length = ph_pkmn_size,
inputs = tf.one_hot(ph_pkmn[:, :], 128),
initial_state = zero_state)
latent_pkmn = pkmn_model(latent_pkmn_tmp[1], n_pkmn_units, is_train = False, reuse = tf.AUTO_REUSE).outputs
# Create session
saver = tf.train.Saver()
sess = tf.Session()
# sess.run(tf.local_variables_initializer())
# tl.layers.initialize_global_variables(sess)
if args.load != "None":
saver.restore(sess, args.load)
print("Model loaded")
# Collect pokemon vectors
batch_count = math.ceil(dataCount / batch_size)
print("Collecting pokemon vectors...")
for i in range(batch_count):
# Collect pokemon names
batch_pkmn = np.zeros((batch_size, n_steps))
batch_pkmn_size = np.zeros((batch_size))
# Calculate actual batch size
batch_start = i * batch_size
batch_size_real = batch_size
if batch_start + batch_size > dataCount:
batch_size_real = dataCount - i * batch_size
# Feed data
batch_pkmn[0 : batch_size_real, :] = pokemons[batch_start : batch_start + batch_size_real]
batch_pkmn_size[0 : batch_size_real] = pokemons_size[batch_start : batch_start + batch_size_real]
# Run the model
result = sess.run(latent_pkmn, feed_dict = {ph_pkmn : batch_pkmn, ph_pkmn_size : batch_pkmn_size})
pokemon_vectors[batch_start : batch_start + batch_size_real] = result[0 : batch_size_real]
print("%d / %d" % (batch_start + batch_size_real, dataCount))
print("Complete.")
def distance(a, b):
return scipy.spatial.distance.euclidean(a, b)
def cosine_distance(a, b):
return scipy.spatial.distance.cosine(a, b)
def by_distance(a):
return a['dist']
def by_cosine_distance(a):
return a['cosine_dist']
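# Hedged alternative sketch (not in the original script): the interactive loop
# below ranks neighbours by sorting a list of per-pair distances; the same
# ranking can be computed in one vectorized pass over the (dataCount,
# n_pkmn_units) array built above. `k` is an illustrative parameter.
def nearest_by_euclidean(target_id, k = 15):
    # Row-wise euclidean distances from the target vector to every pokemon
    dists = np.linalg.norm(pokemon_vectors - pokemon_vectors[target_id], axis = 1)
    order = np.argsort(dists)
    return [(int(i), float(dists[i])) for i in order[1 : k + 1]] # rank 0 is the pokemon itself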
while continue_loop:
pkmn_str = input("Please enter a pokemon name: ")
target_id = -1
for i in range(dataCount):
if file_content[i] == pkmn_str:
target_id = i
    if target_id == -1:
        print("Pokemon %s not found in %s" % (pkmn_str, args.pokemonpath))
        continue
pokemon_dict = []
for i in range(dataCount):
tmp = {}
tmp['idx'] = i
tmp['dist'] = distance(pokemon_vectors[target_id], pokemon_vectors[i])
tmp['cosine_dist'] = cosine_distance(pokemon_vectors[target_id], pokemon_vectors[i])
pokemon_dict.append(tmp)
# Sort by Distance
pokemon_dict.sort(key = by_distance)
print("\n Sort by Distance: \n====================================\nIndex\tDist\tName")
# The first one must be itself so start from 2nd
for i in range(1, 16):
print("%02d\t%02.2f\t%s" % (i, pokemon_dict[i]['dist'], file_content[pokemon_dict[i]['idx']]))
# Sort by cosine distance
pokemon_dict.sort(key = by_cosine_distance)
print("\n Sort by Cosine Distance: \n====================================\nIndex\tcDist\tName")
# The first one must be itself so start from 2nd
for i in range(1, 16):
print("%02d\t%02.2f\t%s" % (i, pokemon_dict[i]['cosine_dist'], file_content[pokemon_dict[i]['idx']]))
print("\n Copyable:\n[LIST=1]")
# The first one must be itself so start from 2nd
for i in range(1, 13):
print("[*]%01.2f %s" % (pokemon_dict[i]['cosine_dist'], file_content[pokemon_dict[i]['idx']]))
print("[/LIST]")
print("\n\n")
| betairylia/Pokemon-Showdown-Win-Rate-Prediction | PokemonVector.py | PokemonVector.py | py | 5,926 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
... |
29785722520 | from django.shortcuts import render,redirect
from django.views import View
from account.models import Contact,Staff
from home.models import Students
from .forms import Studentform
from django.contrib import messages
# Create your views here.
class Home(View):
def get(self,request):
return render(request,'home.html')
class Enquiry(View):
def get(self,request):
customer=Contact.objects.all()
return render(request,'enquiry.html',{'form':customer})
class StaffS(View):
def get(self,request):
customer=Staff.objects.all()
return render(request,'staff.html',{'form':customer})
class Showstudent(View):
def get(self,request):
student=Students.objects.all()
return render(request,'showstudent.html',{'form':student})
class Form(View):
def get(self,request):
std1=Studentform()
return render(request,'forms.html',{'form':std1})
def post(self,request):
if request.method == "POST":
std1=Studentform(request.POST)
if std1.is_valid():
std1.save()
student=Students.objects.all()
return render(request,'showstudent.html',{'form':student})
else:
print("Form not valid")
return redirect("showstudent")
class Show(View):
def get(self,request):
student=Students.objects.all()
return render(request,'showstudent.html',{'form':student})
class Profile(View):
def get(self,request):
        if request.session.get('email') is not None:
customer=Staff.objects.filter(email=request.session['email'])
return render(request,'profile.html',{'form':customer})
class Edit(View):
def get(self,request):
edit1=request.session['email']
edit=Staff.objects.filter(email=edit1)
return render(request,'edit.html',{'form':edit})
def post(self,request):
        edit1=request.session['email']
        if request.method=='POST':
            if Staff.objects.filter(email=edit1).exists():
                if request.POST['password']:
                    Staff.objects.filter(email=edit1).update(password=request.POST['password'])
                if request.POST['name']:
                    Staff.objects.filter(email=edit1).update(name=request.POST['name'])
                if request.POST['email']:
                    # Check the new address for uniqueness, not the current one
                    if Staff.objects.filter(email=request.POST['email']).exists():
                        edit=Staff.objects.filter(email=edit1)
                        messages.error(request,"EMAIL ALREADY EXISTS")
                        return render(request,'edit.html',{'form':edit})
                    else:
                        Staff.objects.filter(email=edit1).update(email=request.POST['email'])
                        request.session['email']=request.POST['email']
                        edit1=request.POST['email'] # keep filtering by the updated address
                if request.POST['phno']:
                    Staff.objects.filter(email=edit1).update(phno=request.POST['phno'])
customer=Staff.objects.filter(email=request.session['email'])
return render(request,'profile.html',{'form':customer})
| Afnas4/project1 | students/home/views.py | views.py | py | 3,323 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.views.View",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.views.View",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "account.m... |
# XGBoost is an optimized version of GBM built for speed and predictive performance; it is scalable and can be integrated into different platforms.
# It can be used with R, Python, Hadoop, Scala and Julia.
# It is scalable and fast.
# Its predictive accuracy is high, proven in many Kaggle competitions.
# In short: GBM with improved speed and performance, plus easy integration across platforms.
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score
import xgboost
from xgboost import XGBRegressor
from warnings import filterwarnings # Suppresses warning output.
filterwarnings('ignore')
df = pd.read_csv(r"C:\Users\PC\Desktop\VeriBilimiOkulu\MakineOgrenmesi\datasets\Hitters.csv")
df = df.dropna() # Drop missing values; handling them is outside the scope of this topic
dms = pd.get_dummies(df[['League', 'Division', 'NewLeague']]) # Convert the categorical variables to dummies with a one-hot-encoding approach
y = df["Salary"] # Our dependent (target) variable.
X_ = df.drop(['Salary','League', 'Division', 'NewLeague'], axis=1).astype('float64')
X = pd.concat([X_, dms[['League_N', 'Division_W', 'NewLeague_N']]], axis = 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.25, random_state=42)
############################## MODEL and PREDICTION ##############################
xgb = XGBRegressor().fit(X_train, y_train)
y_pred = xgb.predict(X_test)
print(np.sqrt(mean_squared_error(y_test, y_pred))) # baseline test RMSE: 355.465
############################## MODEL TUNING ##############################
xgb_params = {'learning_rate': [0.1, 0.01, 0.5], 'max_depth': [2,3,4,5,8], 'n_estimators': [100, 200, 500, 1000],
              'colsample_bytree': [0.4, 0.7, 1]}
# learning_rate: the learning rate; the shrinkage step size, used to prevent overfitting.
# colsample_bytree: the fraction of features sub-sampled for each tree that is built.
xgb_cv_model = GridSearchCV(xgb, xgb_params, cv = 10, n_jobs=-1, verbose=2).fit(X_train, y_train)
print(xgb_cv_model.best_params_)
xgb_tuned = XGBRegressor(colsample_bytree = 0.4, learning_rate = 0.1,
max_depth = 2, n_estimators = 1000 ).fit(X_train, y_train)
y_pred = xgb_tuned.predict(X_test)
print(np.sqrt(mean_squared_error(y_test, y_pred))) # tuned test RMSE: 367.85
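# Hedged follow-up sketch (not part of the original script): GridSearchCV also
# exposes the mean cross-validated score of the best parameter set; note that
# best_score_ uses the regressor's default scorer (R^2), not RMSE.
print(xgb_cv_model.best_score_)
# The tuned model's per-feature importances can be ranked the same way:
print(pd.Series(xgb_tuned.feature_importances_, index = X_train.columns).sort_values(ascending = False).head())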
| gorkenvm/DataScienceBootcamp-Prepration | ML/Dogrusal Olmayan Regresyon Modelleri/9_XGBoost.py | 9_XGBoost.py | py | 2,575 | python | tr | code | 2 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.con... |
25938234352 | from sanic.request import File
from pydantic.class_validators import root_validator
from typing import Optional, Union, IO
import io
import openpyxl
from pydantic import Field
from infrastructure.configs.translation_task import FILE_TRANSLATION_TASKS, PLAIN_TEXT_TRANSLATION_TASKS, AllowedFileTranslationExtensionEnum
from core.base_classes.aggregate_root import AggregateRoot
from modules.task.domain.entities.task import TaskEntity, TaskProps
from typing import get_args
from core.utils.file import get_doc_file_meta, get_presentation_file_meta, get_worksheet_file_meta, get_txt_file_meta
from core.utils.text import count_chars
from infrastructure.configs.task import (
TranslationTaskStepEnum,
TranslationTaskNameEnum,
TRANSLATION_PRIVATE_TASKS,
)
class TranslationRequestProps(TaskProps):
current_step: TranslationTaskStepEnum = Field(...)
task_name: TranslationTaskNameEnum = Field(...)
num_chars: int = 0
receiver_email: Optional[str]
total_email_sent: Optional[int]
@root_validator(pre=True)
def validate(cls, values):
if values['task_name'] in TRANSLATION_PRIVATE_TASKS and not values['creator_id'].value:
raise ValueError('Creator cannot be None')
return values
class TranslationRequestEntity(TaskEntity, AggregateRoot[TranslationRequestProps]):
    @property
    def props_klass(self):
        # Recover TranslationRequestProps from the parameterized generic base
        # AggregateRoot[TranslationRequestProps] via typing.get_args
        return get_args(self.__orig_bases__[1])[0]
async def update_num_chars(self, doc):
if self.props.task_name in PLAIN_TEXT_TRANSLATION_TASKS:
self.props.num_chars = count_chars(doc)
return
if self.props.task_name in FILE_TRANSLATION_TASKS:
if self.props.file_type == AllowedFileTranslationExtensionEnum.docx.value:
binary_doc, total_doc_paragraphs, character_count = get_doc_file_meta(doc)
self.props.num_chars = character_count
if self.props.file_type == AllowedFileTranslationExtensionEnum.pptx.value:
binary_presentation, total_presentation_paragraphs, total_slides, character_count = get_presentation_file_meta(doc)
self.props.num_chars = character_count
if self.props.file_type == AllowedFileTranslationExtensionEnum.xlsx.value:
binary_worksheet, total_sheets, total_cells, character_count = get_worksheet_file_meta(doc)
self.props.num_chars = character_count
if self.props.file_type == AllowedFileTranslationExtensionEnum.txt.value:
self.props.num_chars = get_txt_file_meta(doc)
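# Hedged illustration (not part of the original file) of the introspection used
# in props_klass above: typing.get_args recovers the type argument from the
# parameterized generic base recorded in __orig_bases__.
#
#   from typing import Generic, TypeVar, get_args
#   T = TypeVar('T')
#   class Base(Generic[T]): ...
#   class Impl(SomeMixin, Base[int]): ...        # SomeMixin is hypothetical
#   get_args(Impl.__orig_bases__[1])[0]          # -> <class 'int'>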
| KCDichDaNgu/KC4.0_DichDaNgu_BackEnd | src/modules/translation_request/domain/entities/translation_request.py | translation_request.py | py | 2,827 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "modules.task.domain.entities.task.TaskProps",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "infrastructure.configs.task.TranslationTaskStepEnum",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "pydantic.Field",
"line_number": 26,
"usag... |
15318192424 | from django.shortcuts import get_object_or_404
from rest_framework import viewsets, mixins, permissions, status
from rest_framework.decorators import action
from django.db.models import Q
from rest_framework.response import Response
from users.models import User, Profile, FriendRequest
from users.serializers import UserSerializer, ProfileSerializer, FriendRequestSerializer, FriendResponseSerializer
# User APIEndpoint
class CreateUserView(viewsets.ModelViewSet, mixins.CreateModelMixin, mixins.ListModelMixin):
model = User
serializer_class = UserSerializer
permission_classes = (permissions.AllowAny, )
def perform_create(self, serializer):
return serializer.save()
def get_queryset(self):
return User.objects.all()
# Profile APIEndpoint
class CreateProfileView(viewsets.GenericViewSet, mixins.ListModelMixin):
serializer_class = ProfileSerializer
def get_queryset(self):
return Profile.objects.all()
class CreateFriendRequestView(viewsets.ModelViewSet, mixins.CreateModelMixin, mixins.ListModelMixin):
model = FriendRequest
serializer_class = FriendRequestSerializer
permission_classes = (permissions.IsAuthenticated,)
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
user = self.request.user
user_profile = Profile.objects.get(user=user)
serializer.is_valid(raise_exception=True)
receiver_profile = Profile.objects.get(user=serializer.validated_data['to_user'])
friend_requests_from_user = FriendRequest.objects.filter(from_user=user, to_user=serializer.validated_data['to_user']).count()
if receiver_profile in user_profile.friends_list.all():
return Response({
'status': 'Wrong invite',
'data': 'already in friends list'
})
elif friend_requests_from_user == 1:
return Response({
'status': 'Wrong invite',
'data': 'invitation already exist'
})
else:
serializer.save(from_user=user)
return Response({"status": 'ok', 'data': serializer.data}, status=status.HTTP_201_CREATED)
def get_queryset(self):
user = self.request.user
return FriendRequest.objects.filter(Q(from_user=user) | Q(to_user=user))
class CreateFriendResponseView(viewsets.ModelViewSet, mixins.ListModelMixin):
serializer_class = FriendResponseSerializer
permission_classes = (permissions.IsAuthenticated, )
queryset = FriendRequest.objects.all()
def get_queryset(self):
user = self.request.user
response = FriendRequest.objects.filter(to_user=user)
return response
def create(self, request, *args, **kwargs):
# TODO: FIX CREATE TO DISABLE ACCEPTED AND POST FIELD
invitations_ids_list = FriendRequest.objects.filter(to_user=self.request.user).values_list('id', flat=True)
return Response({
'status': 'ok',
'detail': 'to response invite select pk',
'invitations ids': f'{list(invitations_ids_list)}'
})
@action(
methods=['POST', 'GET'],
detail=True,
permission_classes=[permissions.IsAuthenticated],
url_name='response',
url_path='response'
)
def response_friend_request(self, request, pk=None):
friend_request = get_object_or_404(FriendRequest, pk=pk)
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
        if serializer.validated_data['accepted'] is not None:
            if serializer.validated_data['accepted'] == 'accepted':
                friend_request.status = 'accepted'
                # the inviter sent the request; the responding user is the receiver
                inviter = Profile.objects.get(user=friend_request.from_user)
                receiver = Profile.objects.get(user=self.request.user)
inviter.friends_list.add(receiver)
receiver.friends_list.add(inviter)
friend_request.save()
data = FriendRequestSerializer(friend_request).data
if serializer.validated_data['accepted'] == 'declined':
friend_request.status = 'declined'
friend_request.save()
data = FriendRequestSerializer(friend_request).data
else:
data = {f'already responded as {friend_request.status}'}
return Response({'status': 'ok', 'data': data})
| Mrklata/Twitter-Fb-alternative | users/api.py | api.py | py | 4,554 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "rest_framework.mixins.CreateModelMixin",
"line_number": 12,
"usage_type... |
26744629167 | import torch
from torch import nn
import torch.nn.functional as F
"""
Components
"""
class ASPP_module(nn.Module):
def __init__(self, inplanes, planes, rate): # inplanes: input channel; planes: output channel
super(ASPP_module, self).__init__()
if rate == 1:
kernel_size = 1
padding = 0
else:
kernel_size = 3
padding = rate
self.atrous_convolution = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=1, padding=padding, dilation=rate, bias=False)
self.bn = nn.BatchNorm2d(planes)
self.relu = nn.ReLU()
def forward(self, x):
x = self.atrous_convolution(x)
x = self.bn(x)
return self.relu(x)
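# Illustrative note (not in the original file): for the 3x3 atrous branches,
# padding == dilation rate keeps the spatial size unchanged, e.g.:
#   branch = ASPP_module(256, 256, rate=6)
#   y = branch(torch.randn(1, 256, 32, 32))   # -> torch.Size([1, 256, 32, 32])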
class aspp(nn.Module):
def __init__(self, output_stride=16, depth=256):
super(aspp, self).__init__()
if output_stride not in [8,16]:
raise ValueError('output_stride must be either 8 or 16.')
self.atrous_rate = [6,12,18]
if output_stride == 8:
self.atrous_rate = [2 * rate for rate in self.atrous_rate]
self.conv_1x1 = ASPP_module(depth, depth,rate=1)
self.conv_3x3_1 = ASPP_module(depth, depth, rate=self.atrous_rate[0])
self.conv_3x3_2 = ASPP_module(depth, depth, rate=self.atrous_rate[1])
self.conv_3x3_3 = ASPP_module(depth, depth, rate=self.atrous_rate[2])
self.relu = nn.ReLU()
self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(depth, depth, 1, stride=1, bias=False),
nn.BatchNorm2d(depth),
nn.ReLU())
self.conv1 = nn.Conv2d(depth * 5, depth, 1, bias=False)
self.bn1 = nn.BatchNorm2d(depth)
def forward(self, x):
x1 = self.conv_1x1(x)
x2 = self.conv_3x3_1(x)
x3 = self.conv_3x3_2(x)
x4 = self.conv_3x3_3(x)
x5 = self.global_avg_pool(x)
x5 = F.interpolate(x5, size=x.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
        return x
| yuanlinping/deep_colormap_extraction | netArchitecture/ASPP.py | ASPP.py | py | 2,239 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
13050899024 | from sqlalchemy import (
testing, null, exists, text, union, literal, literal_column, func, between,
Unicode, desc, and_, bindparam, select, distinct, or_, collate, insert,
Integer, String, Boolean, exc as sa_exc, util, cast)
from sqlalchemy.sql import operators, expression
from sqlalchemy import column, table
from sqlalchemy.engine import default
from sqlalchemy.orm import (
attributes, mapper, relationship, create_session, synonym, Session,
aliased, column_property, joinedload_all, joinedload, Query, Bundle,
subqueryload, backref, lazyload, defer)
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.schema import Table, Column
import sqlalchemy as sa
from sqlalchemy.testing.assertions import (
eq_, assert_raises, assert_raises_message, expect_warnings)
from sqlalchemy.testing import fixtures, AssertsCompiledSQL, assert_warnings
from test.orm import _fixtures
from sqlalchemy.orm.util import join, with_parent
import contextlib
from sqlalchemy.testing import mock, is_, is_not_
from sqlalchemy import inspect
class QueryTest(_fixtures.FixtureTest):
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
class MiscTest(QueryTest):
run_create_tables = None
run_inserts = None
def test_with_session(self):
User = self.classes.User
s1 = Session()
s2 = Session()
q1 = s1.query(User)
q2 = q1.with_session(s2)
assert q2.session is s2
assert q1.session is s1
class RowTupleTest(QueryTest):
run_setup_mappers = None
def test_custom_names(self):
User, users = self.classes.User, self.tables.users
mapper(User, users, properties={'uname': users.c.name})
row = create_session().query(User.id, User.uname).\
filter(User.id == 7).first()
assert row.id == 7
assert row.uname == 'jack'
def test_column_metadata(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users)
mapper(Address, addresses)
sess = create_session()
user_alias = aliased(User)
user_alias_id_label = user_alias.id.label('foo')
address_alias = aliased(Address, name='aalias')
fn = func.count(User.id)
name_label = User.name.label('uname')
bundle = Bundle('b1', User.id, User.name)
cte = sess.query(User.id).cte()
for q, asserted in [
(
sess.query(User),
[
{
'name': 'User', 'type': User, 'aliased': False,
'expr': User, 'entity': User}]
),
(
sess.query(User.id, User),
[
{
'name': 'id', 'type': users.c.id.type,
'aliased': False, 'expr': User.id, 'entity': User},
{
'name': 'User', 'type': User, 'aliased': False,
'expr': User, 'entity': User}
]
),
(
sess.query(User.id, user_alias),
[
{
'name': 'id', 'type': users.c.id.type,
'aliased': False, 'expr': User.id, 'entity': User},
{
'name': None, 'type': User, 'aliased': True,
'expr': user_alias, 'entity': user_alias}
]
),
(
sess.query(user_alias.id),
[
{
'name': 'id', 'type': users.c.id.type,
'aliased': True, 'expr': user_alias.id,
'entity': user_alias},
]
),
(
sess.query(user_alias_id_label),
[
{
'name': 'foo', 'type': users.c.id.type,
'aliased': True, 'expr': user_alias_id_label,
'entity': user_alias},
]
),
(
sess.query(address_alias),
[
{
'name': 'aalias', 'type': Address, 'aliased': True,
'expr': address_alias, 'entity': address_alias}
]
),
(
sess.query(name_label, fn),
[
{
'name': 'uname', 'type': users.c.name.type,
'aliased': False, 'expr': name_label, 'entity': User},
{
'name': None, 'type': fn.type, 'aliased': False,
'expr': fn, 'entity': User},
]
),
(
sess.query(cte),
[
{
'aliased': False,
'expr': cte.c.id, 'type': cte.c.id.type,
'name': 'id', 'entity': None
}]
),
(
sess.query(users),
[
{'aliased': False,
'expr': users.c.id, 'type': users.c.id.type,
'name': 'id', 'entity': None},
{'aliased': False,
'expr': users.c.name, 'type': users.c.name.type,
'name': 'name', 'entity': None}
]
),
(
sess.query(users.c.name),
[{
"name": "name", "type": users.c.name.type,
"aliased": False, "expr": users.c.name, "entity": None
}]
),
(
sess.query(bundle),
[
{
'aliased': False,
'expr': bundle,
'type': Bundle,
'name': 'b1', 'entity': User
}
]
)
]:
eq_(
q.column_descriptions,
asserted
)
def test_unhashable_type(self):
from sqlalchemy.types import TypeDecorator, Integer
from sqlalchemy.sql import type_coerce
class MyType(TypeDecorator):
impl = Integer
hashable = False
def process_result_value(self, value, dialect):
return [value]
User, users = self.classes.User, self.tables.users
mapper(User, users)
s = Session()
q = s.query(User, type_coerce(users.c.id, MyType).label('foo')).\
filter(User.id == 7)
row = q.first()
eq_(
row, (User(id=7), [7])
)
class RawSelectTest(QueryTest, AssertsCompiledSQL):
__dialect__ = 'default'
def test_select_from_entity(self):
User = self.classes.User
self.assert_compile(
select(['*']).select_from(User),
"SELECT * FROM users"
)
def test_where_relationship(self):
User = self.classes.User
self.assert_compile(
select([User]).where(User.addresses),
"SELECT users.id, users.name FROM users, addresses "
"WHERE users.id = addresses.user_id"
)
def test_where_m2m_relationship(self):
Item = self.classes.Item
self.assert_compile(
select([Item]).where(Item.keywords),
"SELECT items.id, items.description FROM items, "
"item_keywords AS item_keywords_1, keywords "
"WHERE items.id = item_keywords_1.item_id "
"AND keywords.id = item_keywords_1.keyword_id"
)
def test_inline_select_from_entity(self):
User = self.classes.User
self.assert_compile(
select(['*'], from_obj=User),
"SELECT * FROM users"
)
def test_select_from_aliased_entity(self):
User = self.classes.User
ua = aliased(User, name="ua")
self.assert_compile(
select(['*']).select_from(ua),
"SELECT * FROM users AS ua"
)
def test_correlate_entity(self):
User = self.classes.User
Address = self.classes.Address
self.assert_compile(
select(
[
User.name, Address.id,
select([func.count(Address.id)]).
where(User.id == Address.user_id).
correlate(User).as_scalar()]),
"SELECT users.name, addresses.id, "
"(SELECT count(addresses.id) AS count_1 "
"FROM addresses WHERE users.id = addresses.user_id) AS anon_1 "
"FROM users, addresses"
)
def test_correlate_aliased_entity(self):
User = self.classes.User
Address = self.classes.Address
uu = aliased(User, name="uu")
self.assert_compile(
select(
[
uu.name, Address.id,
select([func.count(Address.id)]).
where(uu.id == Address.user_id).
correlate(uu).as_scalar()]),
# for a long time, "uu.id = address.user_id" was reversed;
# this was resolved as of #2872 and had to do with
# InstrumentedAttribute.__eq__() taking precedence over
# QueryableAttribute.__eq__()
"SELECT uu.name, addresses.id, "
"(SELECT count(addresses.id) AS count_1 "
"FROM addresses WHERE uu.id = addresses.user_id) AS anon_1 "
"FROM users AS uu, addresses"
)
def test_columns_clause_entity(self):
User = self.classes.User
self.assert_compile(
select([User]),
"SELECT users.id, users.name FROM users"
)
def test_columns_clause_columns(self):
User = self.classes.User
self.assert_compile(
select([User.id, User.name]),
"SELECT users.id, users.name FROM users"
)
def test_columns_clause_aliased_columns(self):
User = self.classes.User
ua = aliased(User, name='ua')
self.assert_compile(
select([ua.id, ua.name]),
"SELECT ua.id, ua.name FROM users AS ua"
)
def test_columns_clause_aliased_entity(self):
User = self.classes.User
ua = aliased(User, name='ua')
self.assert_compile(
select([ua]),
"SELECT ua.id, ua.name FROM users AS ua"
)
def test_core_join(self):
User = self.classes.User
Address = self.classes.Address
from sqlalchemy.sql import join
self.assert_compile(
select([User]).select_from(join(User, Address)),
"SELECT users.id, users.name FROM users "
"JOIN addresses ON users.id = addresses.user_id"
)
def test_insert_from_query(self):
User = self.classes.User
Address = self.classes.Address
s = Session()
q = s.query(User.id, User.name).filter_by(name='ed')
self.assert_compile(
insert(Address).from_select(('id', 'email_address'), q),
"INSERT INTO addresses (id, email_address) "
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.name = :name_1"
)
def test_insert_from_query_col_attr(self):
User = self.classes.User
Address = self.classes.Address
s = Session()
q = s.query(User.id, User.name).filter_by(name='ed')
self.assert_compile(
insert(Address).from_select(
(Address.id, Address.email_address), q),
"INSERT INTO addresses (id, email_address) "
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.name = :name_1"
)
def test_update_from_entity(self):
from sqlalchemy.sql import update
User = self.classes.User
self.assert_compile(
update(User),
"UPDATE users SET id=:id, name=:name"
)
self.assert_compile(
update(User).values(name='ed').where(User.id == 5),
"UPDATE users SET name=:name WHERE users.id = :id_1",
checkparams={"id_1": 5, "name": "ed"}
)
def test_delete_from_entity(self):
from sqlalchemy.sql import delete
User = self.classes.User
self.assert_compile(
delete(User),
"DELETE FROM users"
)
self.assert_compile(
delete(User).where(User.id == 5),
"DELETE FROM users WHERE users.id = :id_1",
checkparams={"id_1": 5}
)
def test_insert_from_entity(self):
from sqlalchemy.sql import insert
User = self.classes.User
self.assert_compile(
insert(User),
"INSERT INTO users (id, name) VALUES (:id, :name)"
)
self.assert_compile(
insert(User).values(name="ed"),
"INSERT INTO users (name) VALUES (:name)",
checkparams={"name": "ed"}
)
def test_col_prop_builtin_function(self):
class Foo(object):
pass
mapper(
Foo, self.tables.users, properties={
'foob': column_property(
func.coalesce(self.tables.users.c.name))
})
self.assert_compile(
select([Foo]).where(Foo.foob == 'somename').order_by(Foo.foob),
"SELECT users.id, users.name FROM users "
"WHERE coalesce(users.name) = :coalesce_1 "
"ORDER BY coalesce(users.name)"
)
class GetTest(QueryTest):
def test_get(self):
User = self.classes.User
s = create_session()
assert s.query(User).get(19) is None
u = s.query(User).get(7)
u2 = s.query(User).get(7)
assert u is u2
s.expunge_all()
u2 = s.query(User).get(7)
assert u is not u2
def test_get_composite_pk_no_result(self):
CompositePk = self.classes.CompositePk
s = Session()
assert s.query(CompositePk).get((100, 100)) is None
def test_get_composite_pk_result(self):
CompositePk = self.classes.CompositePk
s = Session()
one_two = s.query(CompositePk).get((1, 2))
assert one_two.i == 1
assert one_two.j == 2
assert one_two.k == 3
def test_get_too_few_params(self):
CompositePk = self.classes.CompositePk
s = Session()
q = s.query(CompositePk)
assert_raises(sa_exc.InvalidRequestError, q.get, 7)
def test_get_too_few_params_tuple(self):
CompositePk = self.classes.CompositePk
s = Session()
q = s.query(CompositePk)
assert_raises(sa_exc.InvalidRequestError, q.get, (7,))
def test_get_too_many_params(self):
CompositePk = self.classes.CompositePk
s = Session()
q = s.query(CompositePk)
assert_raises(sa_exc.InvalidRequestError, q.get, (7, 10, 100))
def test_get_against_col(self):
User = self.classes.User
s = Session()
q = s.query(User.id)
assert_raises(sa_exc.InvalidRequestError, q.get, (5, ))
def test_get_null_pk(self):
"""test that a mapping which can have None in a
PK (i.e. map to an outerjoin) works with get()."""
users, addresses = self.tables.users, self.tables.addresses
s = users.outerjoin(addresses)
class UserThing(fixtures.ComparableEntity):
pass
mapper(
UserThing, s, properties={
'id': (users.c.id, addresses.c.user_id),
'address_id': addresses.c.id,
})
sess = create_session()
u10 = sess.query(UserThing).get((10, None))
eq_(u10, UserThing(id=10))
def test_no_criterion(self):
"""test that get()/load() does not use preexisting filter/etc.
criterion"""
User, Address = self.classes.User, self.classes.Address
s = create_session()
q = s.query(User).join('addresses').filter(Address.user_id == 8)
assert_raises(sa_exc.InvalidRequestError, q.get, 7)
assert_raises(
sa_exc.InvalidRequestError,
s.query(User).filter(User.id == 7).get, 19)
# order_by()/get() doesn't raise
s.query(User).order_by(User.id).get(8)
def test_no_criterion_when_already_loaded(self):
"""test that get()/load() does not use preexisting filter/etc.
criterion, even when we're only using the identity map."""
User, Address = self.classes.User, self.classes.Address
s = create_session()
s.query(User).get(7)
q = s.query(User).join('addresses').filter(Address.user_id == 8)
assert_raises(sa_exc.InvalidRequestError, q.get, 7)
def test_unique_param_names(self):
users = self.tables.users
class SomeUser(object):
pass
s = users.select(users.c.id != 12).alias('users')
m = mapper(SomeUser, s)
assert s.primary_key == m.primary_key
sess = create_session()
assert sess.query(SomeUser).get(7).name == 'jack'
def test_load(self):
User, Address = self.classes.User, self.classes.Address
s = create_session()
assert s.query(User).populate_existing().get(19) is None
u = s.query(User).populate_existing().get(7)
u2 = s.query(User).populate_existing().get(7)
assert u is u2
s.expunge_all()
u2 = s.query(User).populate_existing().get(7)
assert u is not u2
u2.name = 'some name'
a = Address(email_address='some other name')
u2.addresses.append(a)
assert u2 in s.dirty
assert a in u2.addresses
s.query(User).populate_existing().get(7)
assert u2 not in s.dirty
assert u2.name == 'jack'
assert a not in u2.addresses
@testing.provide_metadata
@testing.requires.unicode_connections
def test_unicode(self):
"""test that Query.get properly sets up the type for the bind
parameter. using unicode would normally fail on postgresql, mysql and
oracle unless it is converted to an encoded string"""
metadata = self.metadata
table = Table(
'unicode_data', metadata,
Column(
'id', Unicode(40), primary_key=True,
test_needs_autoincrement=True),
Column('data', Unicode(40)))
metadata.create_all()
ustring = util.b('petit voix m\xe2\x80\x99a').decode('utf-8')
table.insert().execute(id=ustring, data=ustring)
class LocalFoo(self.classes.Base):
pass
mapper(LocalFoo, table)
eq_(
create_session().query(LocalFoo).get(ustring),
LocalFoo(id=ustring, data=ustring))
def test_populate_existing(self):
User, Address = self.classes.User, self.classes.Address
s = create_session()
userlist = s.query(User).all()
u = userlist[0]
u.name = 'foo'
a = Address(name='ed')
u.addresses.append(a)
self.assert_(a in u.addresses)
s.query(User).populate_existing().all()
self.assert_(u not in s.dirty)
self.assert_(u.name == 'jack')
self.assert_(a not in u.addresses)
u.addresses[0].email_address = 'lala'
u.orders[1].items[2].description = 'item 12'
# test that lazy load doesn't change child items
s.query(User).populate_existing().all()
assert u.addresses[0].email_address == 'lala'
assert u.orders[1].items[2].description == 'item 12'
# eager load does
s.query(User). \
options(joinedload('addresses'), joinedload_all('orders.items')). \
populate_existing().all()
assert u.addresses[0].email_address == 'jack@bean.com'
assert u.orders[1].items[2].description == 'item 5'
class InvalidGenerationsTest(QueryTest, AssertsCompiledSQL):
def test_no_limit_offset(self):
User = self.classes.User
s = create_session()
for q in (
s.query(User).limit(2),
s.query(User).offset(2),
s.query(User).limit(2).offset(2)
):
assert_raises(sa_exc.InvalidRequestError, q.join, "addresses")
assert_raises(
sa_exc.InvalidRequestError, q.filter, User.name == 'ed')
assert_raises(sa_exc.InvalidRequestError, q.filter_by, name='ed')
assert_raises(sa_exc.InvalidRequestError, q.order_by, 'foo')
assert_raises(sa_exc.InvalidRequestError, q.group_by, 'foo')
assert_raises(sa_exc.InvalidRequestError, q.having, 'foo')
q.enable_assertions(False).join("addresses")
q.enable_assertions(False).filter(User.name == 'ed')
q.enable_assertions(False).order_by('foo')
q.enable_assertions(False).group_by('foo')
def test_no_from(self):
users, User = self.tables.users, self.classes.User
s = create_session()
q = s.query(User).select_from(users)
assert_raises(sa_exc.InvalidRequestError, q.select_from, users)
q = s.query(User).join('addresses')
assert_raises(sa_exc.InvalidRequestError, q.select_from, users)
q = s.query(User).order_by(User.id)
assert_raises(sa_exc.InvalidRequestError, q.select_from, users)
assert_raises(sa_exc.InvalidRequestError, q.select_from, users)
q.enable_assertions(False).select_from(users)
# this is fine, however
q.from_self()
def test_invalid_select_from(self):
User = self.classes.User
s = create_session()
q = s.query(User)
assert_raises(sa_exc.ArgumentError, q.select_from, User.id == 5)
assert_raises(sa_exc.ArgumentError, q.select_from, User.id)
def test_invalid_from_statement(self):
User, addresses, users = (self.classes.User,
self.tables.addresses,
self.tables.users)
s = create_session()
q = s.query(User)
assert_raises(sa_exc.ArgumentError, q.from_statement, User.id == 5)
assert_raises(
sa_exc.ArgumentError, q.from_statement, users.join(addresses))
def test_invalid_column(self):
User = self.classes.User
s = create_session()
q = s.query(User)
assert_raises(sa_exc.InvalidRequestError, q.add_column, object())
def test_invalid_column_tuple(self):
User = self.classes.User
s = create_session()
q = s.query(User)
assert_raises(sa_exc.InvalidRequestError, q.add_column, (1, 1))
def test_distinct(self):
"""test that a distinct() call is not valid before 'clauseelement'
conditions."""
User = self.classes.User
s = create_session()
q = s.query(User).distinct()
assert_raises(sa_exc.InvalidRequestError, q.select_from, User)
assert_raises(
sa_exc.InvalidRequestError, q.from_statement,
text("select * from table"))
assert_raises(sa_exc.InvalidRequestError, q.with_polymorphic, User)
def test_order_by(self):
"""test that an order_by() call is not valid before 'clauseelement'
conditions."""
User = self.classes.User
s = create_session()
q = s.query(User).order_by(User.id)
assert_raises(sa_exc.InvalidRequestError, q.select_from, User)
assert_raises(
sa_exc.InvalidRequestError, q.from_statement,
text("select * from table"))
assert_raises(sa_exc.InvalidRequestError, q.with_polymorphic, User)
def test_mapper_zero(self):
User, Address = self.classes.User, self.classes.Address
s = create_session()
q = s.query(User, Address)
assert_raises(sa_exc.InvalidRequestError, q.get, 5)
def test_from_statement(self):
User = self.classes.User
s = create_session()
for meth, arg, kw in [
(Query.filter, (User.id == 5,), {}),
(Query.filter_by, (), {'id': 5}),
(Query.limit, (5, ), {}),
(Query.group_by, (User.name,), {}),
(Query.order_by, (User.name,), {})
]:
q = s.query(User)
q = meth(q, *arg, **kw)
assert_raises(
sa_exc.InvalidRequestError,
q.from_statement, text("x")
)
q = s.query(User)
q = q.from_statement(text("x"))
assert_raises(
sa_exc.InvalidRequestError,
meth, q, *arg, **kw
)
class OperatorTest(QueryTest, AssertsCompiledSQL):
"""test sql.Comparator implementation for MapperProperties"""
__dialect__ = 'default'
def _test(self, clause, expected, entity=None, checkparams=None):
dialect = default.DefaultDialect()
if entity is not None:
# specify a lead entity, so that when we are testing
# correlation, the correlation actually happens
sess = Session()
lead = sess.query(entity)
context = lead._compile_context()
context.statement.use_labels = True
lead = context.statement.compile(dialect=dialect)
expected = (str(lead) + " WHERE " + expected).replace("\n", "")
clause = sess.query(entity).filter(clause)
self.assert_compile(clause, expected, checkparams=checkparams)
def _test_filter_aliases(
self,
clause, expected, from_, onclause, checkparams=None):
dialect = default.DefaultDialect()
sess = Session()
lead = sess.query(from_).join(onclause, aliased=True)
full = lead.filter(clause)
context = lead._compile_context()
context.statement.use_labels = True
lead = context.statement.compile(dialect=dialect)
expected = (str(lead) + " WHERE " + expected).replace("\n", "")
self.assert_compile(full, expected, checkparams=checkparams)
def test_arithmetic(self):
User = self.classes.User
create_session().query(User)
for (py_op, sql_op) in ((operators.add, '+'), (operators.mul, '*'),
(operators.sub, '-'),
(operators.truediv, '/'),
(operators.div, '/'),
):
for (lhs, rhs, res) in (
(5, User.id, ':id_1 %s users.id'),
(5, literal(6), ':param_1 %s :param_2'),
(User.id, 5, 'users.id %s :id_1'),
(User.id, literal('b'), 'users.id %s :param_1'),
(User.id, User.id, 'users.id %s users.id'),
(literal(5), 'b', ':param_1 %s :param_2'),
(literal(5), User.id, ':param_1 %s users.id'),
(literal(5), literal(6), ':param_1 %s :param_2'),
):
self._test(py_op(lhs, rhs), res % sql_op)
def test_comparison(self):
User = self.classes.User
create_session().query(User)
ualias = aliased(User)
for (py_op, fwd_op, rev_op) in ((operators.lt, '<', '>'),
(operators.gt, '>', '<'),
(operators.eq, '=', '='),
(operators.ne, '!=', '!='),
(operators.le, '<=', '>='),
(operators.ge, '>=', '<=')):
for (lhs, rhs, l_sql, r_sql) in (
('a', User.id, ':id_1', 'users.id'),
('a', literal('b'), ':param_2', ':param_1'), # note swap!
(User.id, 'b', 'users.id', ':id_1'),
(User.id, literal('b'), 'users.id', ':param_1'),
(User.id, User.id, 'users.id', 'users.id'),
(literal('a'), 'b', ':param_1', ':param_2'),
(literal('a'), User.id, ':param_1', 'users.id'),
(literal('a'), literal('b'), ':param_1', ':param_2'),
(ualias.id, literal('b'), 'users_1.id', ':param_1'),
(User.id, ualias.name, 'users.id', 'users_1.name'),
(User.name, ualias.name, 'users.name', 'users_1.name'),
(ualias.name, User.name, 'users_1.name', 'users.name'),
):
# the compiled clause should match either (e.g.):
# 'a' < 'b' -or- 'b' > 'a'.
compiled = str(py_op(lhs, rhs).compile(
dialect=default.DefaultDialect()))
fwd_sql = "%s %s %s" % (l_sql, fwd_op, r_sql)
rev_sql = "%s %s %s" % (r_sql, rev_op, l_sql)
self.assert_(compiled == fwd_sql or compiled == rev_sql,
"\n'" + compiled + "'\n does not match\n'" +
fwd_sql + "'\n or\n'" + rev_sql + "'")
def test_o2m_compare_to_null(self):
User = self.classes.User
self._test(User.id == None, "users.id IS NULL")
self._test(User.id != None, "users.id IS NOT NULL")
self._test(~(User.id == None), "users.id IS NOT NULL")
self._test(~(User.id != None), "users.id IS NULL")
self._test(None == User.id, "users.id IS NULL")
self._test(~(None == User.id), "users.id IS NOT NULL")
def test_m2o_compare_to_null(self):
Address = self.classes.Address
self._test(Address.user == None, "addresses.user_id IS NULL")
self._test(~(Address.user == None), "addresses.user_id IS NOT NULL")
self._test(~(Address.user != None), "addresses.user_id IS NULL")
self._test(None == Address.user, "addresses.user_id IS NULL")
self._test(~(None == Address.user), "addresses.user_id IS NOT NULL")
def test_o2m_compare_to_null_orm_adapt(self):
User, Address = self.classes.User, self.classes.Address
self._test_filter_aliases(
User.id == None,
"users_1.id IS NULL", Address, Address.user),
self._test_filter_aliases(
User.id != None,
"users_1.id IS NOT NULL", Address, Address.user),
self._test_filter_aliases(
~(User.id == None),
"users_1.id IS NOT NULL", Address, Address.user),
self._test_filter_aliases(
~(User.id != None),
"users_1.id IS NULL", Address, Address.user),
def test_m2o_compare_to_null_orm_adapt(self):
User, Address = self.classes.User, self.classes.Address
self._test_filter_aliases(
Address.user == None,
"addresses_1.user_id IS NULL", User, User.addresses),
self._test_filter_aliases(
Address.user != None,
"addresses_1.user_id IS NOT NULL", User, User.addresses),
self._test_filter_aliases(
~(Address.user == None),
"addresses_1.user_id IS NOT NULL", User, User.addresses),
self._test_filter_aliases(
~(Address.user != None),
"addresses_1.user_id IS NULL", User, User.addresses),
def test_o2m_compare_to_null_aliased(self):
User = self.classes.User
u1 = aliased(User)
self._test(u1.id == None, "users_1.id IS NULL")
self._test(u1.id != None, "users_1.id IS NOT NULL")
self._test(~(u1.id == None), "users_1.id IS NOT NULL")
self._test(~(u1.id != None), "users_1.id IS NULL")
def test_m2o_compare_to_null_aliased(self):
Address = self.classes.Address
a1 = aliased(Address)
self._test(a1.user == None, "addresses_1.user_id IS NULL")
self._test(~(a1.user == None), "addresses_1.user_id IS NOT NULL")
self._test(a1.user != None, "addresses_1.user_id IS NOT NULL")
self._test(~(a1.user != None), "addresses_1.user_id IS NULL")
def test_relationship_unimplemented(self):
User = self.classes.User
for op in [
User.addresses.like,
User.addresses.ilike,
User.addresses.__le__,
User.addresses.__gt__,
]:
assert_raises(NotImplementedError, op, "x")
def test_o2m_any(self):
User, Address = self.classes.User, self.classes.Address
self._test(
User.addresses.any(Address.id == 17),
"EXISTS (SELECT 1 FROM addresses "
"WHERE users.id = addresses.user_id AND addresses.id = :id_1)",
entity=User
)
def test_o2m_any_aliased(self):
User, Address = self.classes.User, self.classes.Address
u1 = aliased(User)
a1 = aliased(Address)
self._test(
u1.addresses.of_type(a1).any(a1.id == 17),
"EXISTS (SELECT 1 FROM addresses AS addresses_1 "
"WHERE users_1.id = addresses_1.user_id AND "
"addresses_1.id = :id_1)",
entity=u1
)
def test_o2m_any_orm_adapt(self):
User, Address = self.classes.User, self.classes.Address
self._test_filter_aliases(
User.addresses.any(Address.id == 17),
"EXISTS (SELECT 1 FROM addresses "
"WHERE users_1.id = addresses.user_id AND addresses.id = :id_1)",
Address, Address.user
)
def test_m2o_compare_instance(self):
User, Address = self.classes.User, self.classes.Address
u7 = User(id=5)
attributes.instance_state(u7)._commit_all(attributes.instance_dict(u7))
u7.id = 7
self._test(Address.user == u7, ":param_1 = addresses.user_id")
def test_m2o_compare_instance_negated(self):
User, Address = self.classes.User, self.classes.Address
u7 = User(id=5)
attributes.instance_state(u7)._commit_all(attributes.instance_dict(u7))
u7.id = 7
self._test(
Address.user != u7,
"addresses.user_id != :user_id_1 OR addresses.user_id IS NULL",
checkparams={'user_id_1': 7})
def test_m2o_compare_instance_orm_adapt(self):
User, Address = self.classes.User, self.classes.Address
u7 = User(id=5)
attributes.instance_state(u7)._commit_all(attributes.instance_dict(u7))
u7.id = 7
self._test_filter_aliases(
Address.user == u7,
":param_1 = addresses_1.user_id", User, User.addresses,
checkparams={'param_1': 7}
)
def test_m2o_compare_instance_negated_warn_on_none(self):
User, Address = self.classes.User, self.classes.Address
u7_transient = User(id=None)
with expect_warnings("Got None for value of column users.id; "):
self._test_filter_aliases(
Address.user != u7_transient,
"addresses_1.user_id != :user_id_1 "
"OR addresses_1.user_id IS NULL",
User, User.addresses,
checkparams={'user_id_1': None}
)
def test_m2o_compare_instance_negated_orm_adapt(self):
User, Address = self.classes.User, self.classes.Address
u7 = User(id=5)
attributes.instance_state(u7)._commit_all(attributes.instance_dict(u7))
u7.id = 7
u7_transient = User(id=7)
self._test_filter_aliases(
Address.user != u7,
"addresses_1.user_id != :user_id_1 OR addresses_1.user_id IS NULL",
User, User.addresses,
checkparams={'user_id_1': 7}
)
self._test_filter_aliases(
~(Address.user == u7), ":param_1 != addresses_1.user_id",
User, User.addresses,
checkparams={'param_1': 7}
)
self._test_filter_aliases(
~(Address.user != u7),
"NOT (addresses_1.user_id != :user_id_1 "
"OR addresses_1.user_id IS NULL)", User, User.addresses,
checkparams={'user_id_1': 7}
)
self._test_filter_aliases(
Address.user != u7_transient,
"addresses_1.user_id != :user_id_1 OR addresses_1.user_id IS NULL",
User, User.addresses,
checkparams={'user_id_1': 7}
)
self._test_filter_aliases(
~(Address.user == u7_transient), ":param_1 != addresses_1.user_id",
User, User.addresses,
checkparams={'param_1': 7}
)
self._test_filter_aliases(
~(Address.user != u7_transient),
"NOT (addresses_1.user_id != :user_id_1 "
"OR addresses_1.user_id IS NULL)", User, User.addresses,
checkparams={'user_id_1': 7}
)
def test_m2o_compare_instance_aliased(self):
User, Address = self.classes.User, self.classes.Address
u7 = User(id=5)
attributes.instance_state(u7)._commit_all(attributes.instance_dict(u7))
u7.id = 7
u7_transient = User(id=7)
a1 = aliased(Address)
self._test(
a1.user == u7,
":param_1 = addresses_1.user_id",
checkparams={'param_1': 7})
self._test(
a1.user != u7,
"addresses_1.user_id != :user_id_1 OR addresses_1.user_id IS NULL",
checkparams={'user_id_1': 7})
a1 = aliased(Address)
self._test(
a1.user == u7_transient,
":param_1 = addresses_1.user_id",
checkparams={'param_1': 7})
self._test(
a1.user != u7_transient,
"addresses_1.user_id != :user_id_1 OR addresses_1.user_id IS NULL",
checkparams={'user_id_1': 7})
def test_selfref_relationship(self):
Node = self.classes.Node
nalias = aliased(Node)
# auto self-referential aliasing
self._test(
Node.children.any(Node.data == 'n1'),
"EXISTS (SELECT 1 FROM nodes AS nodes_1 WHERE "
"nodes.id = nodes_1.parent_id AND nodes_1.data = :data_1)",
entity=Node,
checkparams={'data_1': 'n1'}
)
# needs autoaliasing
self._test(
Node.children == None,
"NOT (EXISTS (SELECT 1 FROM nodes AS nodes_1 "
"WHERE nodes.id = nodes_1.parent_id))",
entity=Node,
checkparams={}
)
self._test(
Node.parent == None,
"nodes.parent_id IS NULL",
checkparams={}
)
self._test(
nalias.parent == None,
"nodes_1.parent_id IS NULL",
checkparams={}
)
self._test(
nalias.parent != None,
"nodes_1.parent_id IS NOT NULL",
checkparams={}
)
self._test(
nalias.children == None,
"NOT (EXISTS ("
"SELECT 1 FROM nodes WHERE nodes_1.id = nodes.parent_id))",
entity=nalias,
checkparams={}
)
self._test(
nalias.children.any(Node.data == 'some data'),
"EXISTS (SELECT 1 FROM nodes WHERE "
"nodes_1.id = nodes.parent_id AND nodes.data = :data_1)",
entity=nalias,
checkparams={'data_1': 'some data'}
)
# this fails because self-referential any() is auto-aliasing;
# the fact that we use "nalias" here means we get two aliases.
#self._test(
# Node.children.any(nalias.data == 'some data'),
# "EXISTS (SELECT 1 FROM nodes AS nodes_1 WHERE "
# "nodes.id = nodes_1.parent_id AND nodes_1.data = :data_1)",
# entity=Node
# )
self._test(
nalias.parent.has(Node.data == 'some data'),
"EXISTS (SELECT 1 FROM nodes WHERE nodes.id = nodes_1.parent_id "
"AND nodes.data = :data_1)",
entity=nalias,
checkparams={'data_1': 'some data'}
)
self._test(
Node.parent.has(Node.data == 'some data'),
"EXISTS (SELECT 1 FROM nodes AS nodes_1 WHERE "
"nodes_1.id = nodes.parent_id AND nodes_1.data = :data_1)",
entity=Node,
checkparams={'data_1': 'some data'}
)
self._test(
Node.parent == Node(id=7),
":param_1 = nodes.parent_id",
checkparams={"param_1": 7}
)
self._test(
nalias.parent == Node(id=7),
":param_1 = nodes_1.parent_id",
checkparams={"param_1": 7}
)
self._test(
nalias.parent != Node(id=7),
'nodes_1.parent_id != :parent_id_1 '
'OR nodes_1.parent_id IS NULL',
checkparams={"parent_id_1": 7}
)
self._test(
nalias.parent != Node(id=7),
'nodes_1.parent_id != :parent_id_1 '
'OR nodes_1.parent_id IS NULL',
checkparams={"parent_id_1": 7}
)
self._test(
nalias.children.contains(Node(id=7, parent_id=12)),
"nodes_1.id = :param_1",
checkparams={"param_1": 12}
)
def test_multilevel_any(self):
User, Address, Dingaling = \
self.classes.User, self.classes.Address, self.classes.Dingaling
sess = Session()
q = sess.query(User).filter(
User.addresses.any(
and_(Address.id == Dingaling.address_id,
Dingaling.data == 'x')))
# new since #2746 - correlate_except() now takes context into account
# so its usage in any() is not as disrupting.
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users "
"WHERE EXISTS (SELECT 1 "
"FROM addresses, dingalings "
"WHERE users.id = addresses.user_id AND "
"addresses.id = dingalings.address_id AND "
"dingalings.data = :data_1)"
)
def test_op(self):
User = self.classes.User
self._test(User.name.op('ilike')('17'), "users.name ilike :name_1")
def test_in(self):
User = self.classes.User
self._test(User.id.in_(['a', 'b']), "users.id IN (:id_1, :id_2)")
def test_in_on_relationship_not_supported(self):
User, Address = self.classes.User, self.classes.Address
assert_raises(NotImplementedError, Address.user.in_, [User(id=5)])
def test_neg(self):
User = self.classes.User
self._test(-User.id, "-users.id")
self._test(User.id + -User.id, "users.id + -users.id")
def test_between(self):
User = self.classes.User
self._test(
User.id.between('a', 'b'), "users.id BETWEEN :id_1 AND :id_2")
def test_collate(self):
User = self.classes.User
self._test(collate(User.id, 'binary'), "users.id COLLATE binary")
self._test(User.id.collate('binary'), "users.id COLLATE binary")
def test_selfref_between(self):
User = self.classes.User
ualias = aliased(User)
self._test(
User.id.between(ualias.id, ualias.id),
"users.id BETWEEN users_1.id AND users_1.id")
self._test(
ualias.id.between(User.id, User.id),
"users_1.id BETWEEN users.id AND users.id")
def test_clauses(self):
User, Address = self.classes.User, self.classes.Address
for (expr, compare) in (
(func.max(User.id), "max(users.id)"),
(User.id.desc(), "users.id DESC"),
(between(5, User.id, Address.id),
":param_1 BETWEEN users.id AND addresses.id"),
# this one would require adding compile() to
# InstrumentedScalarAttribute. do we want this ?
# (User.id, "users.id")
):
c = expr.compile(dialect=default.DefaultDialect())
assert str(c) == compare, "%s != %s" % (str(c), compare)
class ExpressionTest(QueryTest, AssertsCompiledSQL):
__dialect__ = 'default'
def test_deferred_instances(self):
User, addresses, Address = (self.classes.User,
self.tables.addresses,
self.classes.Address)
session = create_session()
s = session.query(User).filter(
and_(addresses.c.email_address == bindparam('emailad'),
Address.user_id == User.id)).statement
l = list(
session.query(User).instances(s.execute(emailad='jack@bean.com')))
eq_([User(id=7)], l)
def test_aliased_sql_construct(self):
User, Address = self.classes.User, self.classes.Address
j = join(User, Address)
a1 = aliased(j)
self.assert_compile(
a1.select(),
"SELECT anon_1.users_id, anon_1.users_name, anon_1.addresses_id, "
"anon_1.addresses_user_id, anon_1.addresses_email_address "
"FROM (SELECT users.id AS users_id, users.name AS users_name, "
"addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM users JOIN addresses "
"ON users.id = addresses.user_id) AS anon_1"
)
def test_aliased_sql_construct_raises_adapt_on_names(self):
User, Address = self.classes.User, self.classes.Address
j = join(User, Address)
assert_raises_message(
sa_exc.ArgumentError,
"adapt_on_names only applies to ORM elements",
aliased, j, adapt_on_names=True
)
def test_scalar_subquery_compile_whereclause(self):
User = self.classes.User
Address = self.classes.Address
session = create_session()
q = session.query(User.id).filter(User.id == 7)
q = session.query(Address).filter(Address.user_id == q)
assert isinstance(q._criterion.right, expression.ColumnElement)
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, addresses.user_id "
"AS addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE "
"addresses.user_id = (SELECT users.id AS users_id "
"FROM users WHERE users.id = :id_1)"
)
def test_named_subquery(self):
User = self.classes.User
session = create_session()
a1 = session.query(User.id).filter(User.id == 7).subquery('foo1')
a2 = session.query(User.id).filter(User.id == 7).subquery(name='foo2')
a3 = session.query(User.id).filter(User.id == 7).subquery()
eq_(a1.name, 'foo1')
eq_(a2.name, 'foo2')
eq_(a3.name, '%%(%d anon)s' % id(a3))
def test_labeled_subquery(self):
User = self.classes.User
session = create_session()
a1 = session.query(User.id).filter(User.id == 7). \
subquery(with_labels=True)
assert a1.c.users_id is not None
def test_reduced_subquery(self):
User = self.classes.User
ua = aliased(User)
session = create_session()
a1 = session.query(User.id, ua.id, ua.name).\
filter(User.id == ua.id).subquery(reduce_columns=True)
self.assert_compile(a1,
"SELECT users.id, users_1.name FROM "
"users, users AS users_1 WHERE users.id = users_1.id")
def test_label(self):
User = self.classes.User
session = create_session()
q = session.query(User.id).filter(User.id == 7).label('foo')
self.assert_compile(
session.query(q),
"SELECT (SELECT users.id FROM users WHERE users.id = :id_1) AS foo"
)
def test_as_scalar(self):
User = self.classes.User
session = create_session()
q = session.query(User.id).filter(User.id == 7).as_scalar()
self.assert_compile(session.query(User).filter(User.id.in_(q)),
'SELECT users.id AS users_id, users.name '
'AS users_name FROM users WHERE users.id '
'IN (SELECT users.id FROM users WHERE '
'users.id = :id_1)')
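        # note: as_scalar() coerces the query into a scalar subquery usable
        # inside expressions such as in_(); later SQLAlchemy versions
        # deprecate this spelling in favor of Query.scalar_subquery().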
def test_param_transfer(self):
User = self.classes.User
session = create_session()
q = session.query(User.id).filter(User.id == bindparam('foo')).\
params(foo=7).subquery()
q = session.query(User).filter(User.id.in_(q))
eq_(User(id=7), q.one())
def test_in(self):
User, Address = self.classes.User, self.classes.Address
session = create_session()
s = session.query(User.id).join(User.addresses).group_by(User.id).\
having(func.count(Address.id) > 2)
eq_(session.query(User).filter(User.id.in_(s)).all(), [User(id=8)])
def test_union(self):
User = self.classes.User
s = create_session()
q1 = s.query(User).filter(User.name == 'ed').with_labels()
q2 = s.query(User).filter(User.name == 'fred').with_labels()
eq_(
s.query(User).from_statement(union(q1, q2).
order_by('users_name')).all(), [User(name='ed'), User(name='fred')]
)
def test_select(self):
User = self.classes.User
s = create_session()
# this is actually not legal on most DBs since the subquery has no
# alias
q1 = s.query(User).filter(User.name == 'ed')
self.assert_compile(
select([q1]),
"SELECT users_id, users_name FROM (SELECT users.id AS users_id, "
"users.name AS users_name FROM users WHERE users.name = :name_1)"
)
def test_join(self):
User, Address = self.classes.User, self.classes.Address
s = create_session()
# TODO: do we want aliased() to detect a query and convert to
# subquery() automatically ?
q1 = s.query(Address).filter(Address.email_address == 'jack@bean.com')
adalias = aliased(Address, q1.subquery())
eq_(
s.query(User, adalias).join(adalias, User.id == adalias.user_id).
all(),
[
(
User(id=7, name='jack'),
Address(email_address='jack@bean.com', user_id=7, id=1))])
class ColumnPropertyTest(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = 'default'
run_setup_mappers = 'each'
def _fixture(self, label=True, polymorphic=False):
User, Address = self.classes("User", "Address")
users, addresses = self.tables("users", "addresses")
stmt = select([func.max(addresses.c.email_address)]).\
where(addresses.c.user_id == users.c.id).\
correlate(users)
if label:
stmt = stmt.label("email_ad")
mapper(User, users, properties={
"ead": column_property(stmt)
}, with_polymorphic="*" if polymorphic else None)
mapper(Address, addresses)
def test_order_by_column_prop_string(self):
User, Address = self.classes("User", "Address")
self._fixture(label=True)
s = Session()
q = s.query(User).order_by("email_ad")
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses "
"WHERE addresses.user_id = users.id) AS email_ad, "
"users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY email_ad"
)
def test_order_by_column_prop_aliased_string(self):
User, Address = self.classes("User", "Address")
self._fixture(label=True)
s = Session()
ua = aliased(User)
q = s.query(ua).order_by("email_ad")
def go():
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users_1.id) "
"AS anon_1, users_1.id AS users_1_id, "
"users_1.name AS users_1_name FROM users AS users_1 "
"ORDER BY email_ad"
)
assert_warnings(
go,
["Can't resolve label reference 'email_ad'"], regex=True)
def test_order_by_column_labeled_prop_attr_aliased_one(self):
User = self.classes.User
self._fixture(label=True)
ua = aliased(User)
s = Session()
q = s.query(ua).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users_1.id) AS anon_1, "
"users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1 ORDER BY anon_1"
)
def test_order_by_column_labeled_prop_attr_aliased_two(self):
User = self.classes.User
self._fixture(label=True)
ua = aliased(User)
s = Session()
q = s.query(ua.ead).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, "
"users AS users_1 WHERE addresses.user_id = users_1.id) "
"AS anon_1 ORDER BY anon_1"
)
# we're also testing that the state of "ua" is OK after the
# previous call, so the batching into one test is intentional
q = s.query(ua).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users_1.id) AS anon_1, "
"users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1 ORDER BY anon_1"
)
def test_order_by_column_labeled_prop_attr_aliased_three(self):
User = self.classes.User
self._fixture(label=True)
ua = aliased(User)
s = Session()
q = s.query(User.ead, ua.ead).order_by(User.ead, ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, users WHERE addresses.user_id = users.id) "
"AS email_ad, (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, users AS users_1 WHERE addresses.user_id = "
"users_1.id) AS anon_1 ORDER BY email_ad, anon_1"
)
q = s.query(User, ua).order_by(User.ead, ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users.id) AS "
"email_ad, users.id AS users_id, users.name AS users_name, "
"(SELECT max(addresses.email_address) AS max_1 FROM addresses "
"WHERE addresses.user_id = users_1.id) AS anon_1, users_1.id "
"AS users_1_id, users_1.name AS users_1_name FROM users, "
"users AS users_1 ORDER BY email_ad, anon_1"
)
def test_order_by_column_labeled_prop_attr_aliased_four(self):
User = self.classes.User
self._fixture(label=True, polymorphic=True)
ua = aliased(User)
s = Session()
q = s.query(ua, User.id).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 FROM "
"addresses WHERE addresses.user_id = users_1.id) AS anon_1, "
"users_1.id AS users_1_id, users_1.name AS users_1_name, "
"users.id AS users_id FROM users AS users_1, users ORDER BY anon_1"
)
def test_order_by_column_unlabeled_prop_attr_aliased_one(self):
User = self.classes.User
self._fixture(label=False)
ua = aliased(User)
s = Session()
q = s.query(ua).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users_1.id) AS anon_1, "
"users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1 ORDER BY anon_1"
)
def test_order_by_column_unlabeled_prop_attr_aliased_two(self):
User = self.classes.User
self._fixture(label=False)
ua = aliased(User)
s = Session()
q = s.query(ua.ead).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, "
"users AS users_1 WHERE addresses.user_id = users_1.id) "
"AS anon_1 ORDER BY anon_1"
)
# we're also testing that the state of "ua" is OK after the
# previous call, so the batching into one test is intentional
q = s.query(ua).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users_1.id) AS anon_1, "
"users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1 ORDER BY anon_1"
)
def test_order_by_column_unlabeled_prop_attr_aliased_three(self):
User = self.classes.User
self._fixture(label=False)
ua = aliased(User)
s = Session()
q = s.query(User.ead, ua.ead).order_by(User.ead, ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, users WHERE addresses.user_id = users.id) "
"AS anon_1, (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, users AS users_1 "
"WHERE addresses.user_id = users_1.id) AS anon_2 "
"ORDER BY anon_1, anon_2"
)
q = s.query(User, ua).order_by(User.ead, ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users.id) AS "
"anon_1, users.id AS users_id, users.name AS users_name, "
"(SELECT max(addresses.email_address) AS max_1 FROM addresses "
"WHERE addresses.user_id = users_1.id) AS anon_2, users_1.id "
"AS users_1_id, users_1.name AS users_1_name FROM users, "
"users AS users_1 ORDER BY anon_1, anon_2"
)
def test_order_by_column_prop_attr(self):
User, Address = self.classes("User", "Address")
self._fixture(label=True)
s = Session()
q = s.query(User).order_by(User.ead)
# this one is a bit of a surprise; this is compiler
# label-order-by logic kicking in, but won't work in more
# complex cases.
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses "
"WHERE addresses.user_id = users.id) AS email_ad, "
"users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY email_ad"
)
def test_order_by_column_prop_attr_non_present(self):
User, Address = self.classes("User", "Address")
self._fixture(label=True)
s = Session()
q = s.query(User).options(defer(User.ead)).order_by(User.ead)
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses "
"WHERE addresses.user_id = users.id)"
)
class ComparatorTest(QueryTest):
def test_clause_element_query_resolve(self):
from sqlalchemy.orm.properties import ColumnProperty
User = self.classes.User
class Comparator(ColumnProperty.Comparator):
def __init__(self, expr):
self.expr = expr
def __clause_element__(self):
return self.expr
sess = Session()
eq_(
sess.query(Comparator(User.id)).order_by(Comparator(User.id)).all(),
[(7, ), (8, ), (9, ), (10, )]
)
# more slice tests are available in test/orm/generative.py
class SliceTest(QueryTest):
def test_first(self):
User = self.classes.User
assert User(id=7) == create_session().query(User).first()
assert create_session().query(User).filter(User.id == 27). \
first() is None
def test_limit_offset_applies(self):
"""Test that the expected LIMIT/OFFSET is applied for slices.
The LIMIT/OFFSET syntax differs slightly on all databases, and
query[x:y] executes immediately, so we are asserting against
SQL strings using sqlite's syntax.
"""
User = self.classes.User
sess = create_session()
q = sess.query(User)
self.assert_sql(
testing.db, lambda: q[10:20], [
(
"SELECT users.id AS users_id, users.name "
"AS users_name FROM users LIMIT :param_1 OFFSET :param_2",
{'param_1': 10, 'param_2': 10})])
self.assert_sql(
testing.db, lambda: q[:20], [
(
"SELECT users.id AS users_id, users.name "
"AS users_name FROM users LIMIT :param_1",
{'param_1': 20})])
self.assert_sql(
testing.db, lambda: q[5:], [
(
"SELECT users.id AS users_id, users.name "
"AS users_name FROM users LIMIT -1 OFFSET :param_1",
{'param_1': 5})])
self.assert_sql(testing.db, lambda: q[2:2], [])
self.assert_sql(testing.db, lambda: q[-2:-5], [])
self.assert_sql(
testing.db, lambda: q[-5:-2], [
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users", {})])
self.assert_sql(
testing.db, lambda: q[-5:], [
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users", {})])
self.assert_sql(
testing.db, lambda: q[:], [
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users", {})])
class FilterTest(QueryTest, AssertsCompiledSQL):
__dialect__ = 'default'
def test_basic(self):
User = self.classes.User
users = create_session().query(User).all()
eq_([User(id=7), User(id=8), User(id=9), User(id=10)], users)
@testing.requires.offset
def test_limit_offset(self):
User = self.classes.User
sess = create_session()
assert [User(id=8), User(id=9)] == \
sess.query(User).order_by(User.id).limit(2).offset(1).all()
assert [User(id=8), User(id=9)] == \
list(sess.query(User).order_by(User.id)[1:3])
assert User(id=8) == sess.query(User).order_by(User.id)[1]
assert [] == sess.query(User).order_by(User.id)[3:3]
assert [] == sess.query(User).order_by(User.id)[0:0]
@testing.requires.bound_limit_offset
def test_select_with_bindparam_offset_limit(self):
"""Does a query allow bindparam for the limit?"""
User = self.classes.User
sess = create_session()
q1 = sess.query(self.classes.User).\
order_by(self.classes.User.id).limit(bindparam('n'))
for n in range(1, 4):
result = q1.params(n=n).all()
eq_(len(result), n)
eq_(
sess.query(User).order_by(User.id).limit(bindparam('limit')).
offset(bindparam('offset')).params(limit=2, offset=1).all(),
[User(id=8), User(id=9)]
)
@testing.fails_on("mysql", "doesn't like CAST in the limit clause")
@testing.requires.bound_limit_offset
def test_select_with_bindparam_offset_limit_w_cast(self):
User = self.classes.User
sess = create_session()
q1 = sess.query(self.classes.User).\
order_by(self.classes.User.id).limit(bindparam('n'))
eq_(
list(
sess.query(User).params(a=1, b=3).order_by(User.id)
[cast(bindparam('a'), Integer):cast(bindparam('b'), Integer)]),
[User(id=8), User(id=9)]
)
@testing.requires.boolean_col_expressions
def test_exists(self):
User = self.classes.User
sess = create_session(testing.db)
assert sess.query(exists().where(User.id == 9)).scalar()
assert not sess.query(exists().where(User.id == 29)).scalar()
def test_one_filter(self):
User = self.classes.User
assert [User(id=8), User(id=9)] == \
create_session().query(User).filter(User.name.endswith('ed')).all()
def test_contains(self):
"""test comparing a collection to an object instance."""
User, Address = self.classes.User, self.classes.Address
sess = create_session()
address = sess.query(Address).get(3)
assert [User(id=8)] == \
sess.query(User).filter(User.addresses.contains(address)).all()
try:
sess.query(User).filter(User.addresses == address)
assert False
except sa_exc.InvalidRequestError:
assert True
assert [User(id=10)] == \
sess.query(User).filter(User.addresses == None).all()
try:
assert [User(id=7), User(id=9), User(id=10)] == \
sess.query(User).filter(User.addresses != address).all()
assert False
except sa_exc.InvalidRequestError:
assert True
# assert [User(id=7), User(id=9), User(id=10)] ==
# sess.query(User).filter(User.addresses!=address).all()
def test_clause_element_ok(self):
User = self.classes.User
s = Session()
self.assert_compile(
s.query(User).filter(User.addresses),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users, addresses WHERE users.id = addresses.user_id"
)
def test_unique_binds_join_cond(self):
"""test that binds used when the lazyclause is used in criterion are
unique"""
User, Address = self.classes.User, self.classes.Address
sess = Session()
a1, a2 = sess.query(Address).order_by(Address.id)[0:2]
self.assert_compile(
sess.query(User).filter(User.addresses.contains(a1)).union(
sess.query(User).filter(User.addresses.contains(a2))
),
"SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS "
"anon_1_users_name FROM (SELECT users.id AS users_id, "
"users.name AS users_name FROM users WHERE users.id = :param_1 "
"UNION SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_2) AS anon_1",
checkparams={'param_1': 7, 'param_2': 8}
)
def test_any(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
assert [User(id=8), User(id=9)] == \
sess.query(User). \
filter(
User.addresses.any(Address.email_address.like('%ed%'))).all()
assert [User(id=8)] == \
sess.query(User). \
filter(
User.addresses.any(
Address.email_address.like('%ed%'), id=4)).all()
assert [User(id=8)] == \
sess.query(User). \
filter(User.addresses.any(Address.email_address.like('%ed%'))).\
filter(User.addresses.any(id=4)).all()
assert [User(id=9)] == \
sess.query(User). \
filter(User.addresses.any(email_address='fred@fred.com')).all()
# test that any() doesn't overcorrelate
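        # i.e. the EXISTS produced by any() keeps correlating to the base
        # "users" table even though the enclosing query joins to addresses.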
assert [User(id=7), User(id=8)] == \
sess.query(User).join("addresses"). \
filter(
~User.addresses.any(
Address.email_address == 'fred@fred.com')).all()
# test that the contents are not adapted by the aliased join
assert [User(id=7), User(id=8)] == \
sess.query(User).join("addresses", aliased=True). \
filter(
~User.addresses.any(
Address.email_address == 'fred@fred.com')).all()
assert [User(id=10)] == \
sess.query(User).outerjoin("addresses", aliased=True). \
filter(~User.addresses.any()).all()
def test_has(self):
Dingaling, User, Address = (
self.classes.Dingaling, self.classes.User, self.classes.Address)
sess = create_session()
assert [Address(id=5)] == \
sess.query(Address).filter(Address.user.has(name='fred')).all()
assert [Address(id=2), Address(id=3), Address(id=4), Address(id=5)] \
== sess.query(Address). \
filter(Address.user.has(User.name.like('%ed%'))). \
order_by(Address.id).all()
assert [Address(id=2), Address(id=3), Address(id=4)] == \
sess.query(Address). \
filter(Address.user.has(User.name.like('%ed%'), id=8)). \
order_by(Address.id).all()
# test has() doesn't overcorrelate
assert [Address(id=2), Address(id=3), Address(id=4)] == \
sess.query(Address).join("user"). \
filter(Address.user.has(User.name.like('%ed%'), id=8)). \
order_by(Address.id).all()
# test has() doesn't get subquery contents adapted by aliased join
assert [Address(id=2), Address(id=3), Address(id=4)] == \
sess.query(Address).join("user", aliased=True). \
filter(Address.user.has(User.name.like('%ed%'), id=8)). \
order_by(Address.id).all()
dingaling = sess.query(Dingaling).get(2)
assert [User(id=9)] == \
sess.query(User). \
filter(User.addresses.any(Address.dingaling == dingaling)).all()
def test_contains_m2m(self):
Item, Order = self.classes.Item, self.classes.Order
sess = create_session()
item = sess.query(Item).get(3)
eq_(
sess.query(Order).filter(Order.items.contains(item)).
order_by(Order.id).all(),
[Order(id=1), Order(id=2), Order(id=3)]
)
eq_(
sess.query(Order).filter(~Order.items.contains(item)).
order_by(Order.id).all(),
[Order(id=4), Order(id=5)]
)
item2 = sess.query(Item).get(5)
eq_(
sess.query(Order).filter(Order.items.contains(item)).
filter(Order.items.contains(item2)).all(),
[Order(id=3)]
)
def test_comparison(self):
"""test scalar comparison to an object instance"""
Item, Order, Dingaling, User, Address = (
self.classes.Item, self.classes.Order, self.classes.Dingaling,
self.classes.User, self.classes.Address)
sess = create_session()
user = sess.query(User).get(8)
assert [Address(id=2), Address(id=3), Address(id=4)] == \
sess.query(Address).filter(Address.user == user).all()
assert [Address(id=1), Address(id=5)] == \
sess.query(Address).filter(Address.user != user).all()
# generates an IS NULL
assert [] == sess.query(Address).filter(Address.user == None).all()
assert [] == sess.query(Address).filter(Address.user == null()).all()
assert [Order(id=5)] == \
sess.query(Order).filter(Order.address == None).all()
# o2o
dingaling = sess.query(Dingaling).get(2)
assert [Address(id=5)] == \
sess.query(Address).filter(Address.dingaling == dingaling).all()
# m2m
eq_(
sess.query(Item).filter(Item.keywords == None).
order_by(Item.id).all(), [Item(id=4), Item(id=5)])
eq_(
sess.query(Item).filter(Item.keywords != None).
order_by(Item.id).all(), [Item(id=1), Item(id=2), Item(id=3)])
def test_filter_by(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
user = sess.query(User).get(8)
assert [Address(id=2), Address(id=3), Address(id=4)] == \
sess.query(Address).filter_by(user=user).all()
# many to one generates IS NULL
assert [] == sess.query(Address).filter_by(user=None).all()
assert [] == sess.query(Address).filter_by(user=null()).all()
# one to many generates WHERE NOT EXISTS
assert [User(name='chuck')] == \
sess.query(User).filter_by(addresses=None).all()
assert [User(name='chuck')] == \
sess.query(User).filter_by(addresses=null()).all()
def test_filter_by_tables(self):
users = self.tables.users
addresses = self.tables.addresses
sess = create_session()
self.assert_compile(
sess.query(users).filter_by(name='ed').
join(addresses, users.c.id == addresses.c.user_id).
filter_by(email_address='ed@ed.com'),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN addresses ON users.id = addresses.user_id "
"WHERE users.name = :name_1 AND "
"addresses.email_address = :email_address_1",
checkparams={'email_address_1': 'ed@ed.com', 'name_1': 'ed'}
)
def test_filter_by_no_property(self):
addresses = self.tables.addresses
sess = create_session()
assert_raises_message(
sa.exc.InvalidRequestError,
"Entity 'addresses' has no property 'name'",
sess.query(addresses).filter_by, name='ed'
)
def test_none_comparison(self):
Order, User, Address = (
self.classes.Order, self.classes.User, self.classes.Address)
sess = create_session()
# scalar
eq_(
[Order(description="order 5")],
sess.query(Order).filter(Order.address_id == None).all()
)
eq_(
[Order(description="order 5")],
sess.query(Order).filter(Order.address_id == null()).all()
)
# o2o
eq_(
[Address(id=1), Address(id=3), Address(id=4)],
sess.query(Address).filter(Address.dingaling == None).
order_by(Address.id).all())
eq_(
[Address(id=1), Address(id=3), Address(id=4)],
sess.query(Address).filter(Address.dingaling == null()).
order_by(Address.id).all())
eq_(
[Address(id=2), Address(id=5)],
sess.query(Address).filter(Address.dingaling != None).
order_by(Address.id).all())
eq_(
[Address(id=2), Address(id=5)],
sess.query(Address).filter(Address.dingaling != null()).
order_by(Address.id).all())
# m2o
eq_(
[Order(id=5)],
sess.query(Order).filter(Order.address == None).all())
eq_(
[Order(id=1), Order(id=2), Order(id=3), Order(id=4)],
sess.query(Order).order_by(Order.id).
filter(Order.address != None).all())
# o2m
eq_(
[User(id=10)],
sess.query(User).filter(User.addresses == None).all())
eq_(
[User(id=7), User(id=8), User(id=9)],
sess.query(User).filter(User.addresses != None).
order_by(User.id).all())
def test_blank_filter_by(self):
User = self.classes.User
eq_(
[(7,), (8,), (9,), (10,)],
create_session().query(User.id).filter_by().order_by(User.id).all()
)
eq_(
[(7,), (8,), (9,), (10,)],
create_session().query(User.id).filter_by(**{}).
order_by(User.id).all()
)
def test_text_coerce(self):
User = self.classes.User
s = create_session()
self.assert_compile(
s.query(User).filter(text("name='ed'")),
"SELECT users.id AS users_id, users.name "
"AS users_name FROM users WHERE name='ed'"
)
class SetOpsTest(QueryTest, AssertsCompiledSQL):
__dialect__ = 'default'
def test_union(self):
User = self.classes.User
s = create_session()
fred = s.query(User).filter(User.name == 'fred')
ed = s.query(User).filter(User.name == 'ed')
jack = s.query(User).filter(User.name == 'jack')
eq_(
fred.union(ed).order_by(User.name).all(),
[User(name='ed'), User(name='fred')]
)
eq_(
fred.union(ed, jack).order_by(User.name).all(),
[User(name='ed'), User(name='fred'), User(name='jack')]
)
def test_statement_labels(self):
"""test that label conflicts don't occur with joins etc."""
User, Address = self.classes.User, self.classes.Address
s = create_session()
q1 = s.query(User, Address).join(User.addresses).\
filter(Address.email_address == "ed@wood.com")
q2 = s.query(User, Address).join(User.addresses).\
filter(Address.email_address == "jack@bean.com")
q3 = q1.union(q2).order_by(User.name)
eq_(
q3.all(),
[
(User(name='ed'), Address(email_address="ed@wood.com")),
(User(name='jack'), Address(email_address="jack@bean.com")),
]
)
def test_union_literal_expressions_compile(self):
"""test that column expressions translate during
the _from_statement() portion of union(), others"""
User = self.classes.User
s = Session()
q1 = s.query(User, literal("x"))
q2 = s.query(User, literal_column("'y'"))
q3 = q1.union(q2)
self.assert_compile(
q3,
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"anon_1.param_1 AS anon_1_param_1 "
"FROM (SELECT users.id AS users_id, users.name AS "
"users_name, :param_1 AS param_1 "
"FROM users UNION SELECT users.id AS users_id, "
"users.name AS users_name, 'y' FROM users) AS anon_1"
)
def test_union_literal_expressions_results(self):
User = self.classes.User
s = Session()
q1 = s.query(User, literal("x"))
q2 = s.query(User, literal_column("'y'"))
q3 = q1.union(q2)
q4 = s.query(User, literal_column("'x'").label('foo'))
q5 = s.query(User, literal("y"))
q6 = q4.union(q5)
eq_(
[x['name'] for x in q6.column_descriptions],
['User', 'foo']
)
for q in (
q3.order_by(User.id, text("anon_1_param_1")),
q6.order_by(User.id, "foo")):
eq_(
q.all(),
[
(User(id=7, name='jack'), 'x'),
(User(id=7, name='jack'), 'y'),
(User(id=8, name='ed'), 'x'),
(User(id=8, name='ed'), 'y'),
(User(id=9, name='fred'), 'x'),
(User(id=9, name='fred'), 'y'),
(User(id=10, name='chuck'), 'x'),
(User(id=10, name='chuck'), 'y')
]
)
def test_union_labeled_anonymous_columns(self):
User = self.classes.User
s = Session()
c1, c2 = column('c1'), column('c2')
q1 = s.query(User, c1.label('foo'), c1.label('bar'))
q2 = s.query(User, c1.label('foo'), c2.label('bar'))
q3 = q1.union(q2)
eq_(
[x['name'] for x in q3.column_descriptions],
['User', 'foo', 'bar']
)
self.assert_compile(
q3,
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"anon_1.foo AS anon_1_foo, anon_1.bar AS anon_1_bar "
"FROM (SELECT users.id AS users_id, users.name AS users_name, "
"c1 AS foo, c1 AS bar FROM users UNION SELECT users.id AS "
"users_id, users.name AS users_name, c1 AS foo, c2 AS bar "
"FROM users) AS anon_1"
)
def test_order_by_anonymous_col(self):
User = self.classes.User
s = Session()
c1, c2 = column('c1'), column('c2')
f = c1.label('foo')
q1 = s.query(User, f, c2.label('bar'))
q2 = s.query(User, c1.label('foo'), c2.label('bar'))
q3 = q1.union(q2)
self.assert_compile(
q3.order_by(c1),
"SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS "
"anon_1_users_name, anon_1.foo AS anon_1_foo, anon_1.bar AS "
"anon_1_bar FROM (SELECT users.id AS users_id, users.name AS "
"users_name, c1 AS foo, c2 AS bar "
"FROM users UNION SELECT users.id "
"AS users_id, users.name AS users_name, c1 AS foo, c2 AS bar "
"FROM users) AS anon_1 ORDER BY anon_1.foo"
)
self.assert_compile(
q3.order_by(f),
"SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS "
"anon_1_users_name, anon_1.foo AS anon_1_foo, anon_1.bar AS "
"anon_1_bar FROM (SELECT users.id AS users_id, users.name AS "
"users_name, c1 AS foo, c2 AS bar "
"FROM users UNION SELECT users.id "
"AS users_id, users.name AS users_name, c1 AS foo, c2 AS bar "
"FROM users) AS anon_1 ORDER BY anon_1.foo"
)
def test_union_mapped_colnames_preserved_across_subquery(self):
User = self.classes.User
s = Session()
q1 = s.query(User.name)
q2 = s.query(User.name)
# the label names in the subquery are the typical anonymized ones
self.assert_compile(
q1.union(q2),
"SELECT anon_1.users_name AS anon_1_users_name "
"FROM (SELECT users.name AS users_name FROM users "
"UNION SELECT users.name AS users_name FROM users) AS anon_1"
)
# but in the returned named tuples,
# due to [ticket:1942], this should be 'name', not 'users_name'
eq_(
[x['name'] for x in q1.union(q2).column_descriptions],
['name']
)
@testing.requires.intersect
def test_intersect(self):
User = self.classes.User
s = create_session()
fred = s.query(User).filter(User.name == 'fred')
ed = s.query(User).filter(User.name == 'ed')
jack = s.query(User).filter(User.name == 'jack')
eq_(fred.intersect(ed, jack).all(), [])
eq_(fred.union(ed).intersect(ed.union(jack)).all(), [User(name='ed')])
def test_eager_load(self):
User, Address = self.classes.User, self.classes.Address
s = create_session()
fred = s.query(User).filter(User.name == 'fred')
ed = s.query(User).filter(User.name == 'ed')
def go():
eq_(
fred.union(ed).order_by(User.name).
options(joinedload(User.addresses)).all(), [
User(
name='ed', addresses=[Address(), Address(),
Address()]),
User(name='fred', addresses=[Address()])]
)
self.assert_sql_count(testing.db, go, 1)
class AggregateTest(QueryTest):
def test_sum(self):
Order = self.classes.Order
sess = create_session()
orders = sess.query(Order).filter(Order.id.in_([2, 3, 4]))
eq_(
next(orders.values(func.sum(Order.user_id * Order.address_id))),
(79,))
eq_(orders.value(func.sum(Order.user_id * Order.address_id)), 79)
def test_apply(self):
Order = self.classes.Order
sess = create_session()
assert sess.query(func.sum(Order.user_id * Order.address_id)). \
filter(Order.id.in_([2, 3, 4])).one() == (79,)
def test_having(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
assert [User(name='ed', id=8)] == \
sess.query(User).order_by(User.id).group_by(User). \
join('addresses').having(func.count(Address.id) > 2).all()
assert [User(name='jack', id=7), User(name='fred', id=9)] == \
sess.query(User).order_by(User.id).group_by(User). \
join('addresses').having(func.count(Address.id) < 2).all()
class ExistsTest(QueryTest, AssertsCompiledSQL):
__dialect__ = 'default'
def test_exists(self):
User = self.classes.User
sess = create_session()
q1 = sess.query(User)
self.assert_compile(
sess.query(q1.exists()),
'SELECT EXISTS ('
'SELECT 1 FROM users'
') AS anon_1'
)
q2 = sess.query(User).filter(User.name == 'fred')
self.assert_compile(
sess.query(q2.exists()),
'SELECT EXISTS ('
'SELECT 1 FROM users WHERE users.name = :name_1'
') AS anon_1'
)
def test_exists_col_warning(self):
User = self.classes.User
Address = self.classes.Address
sess = create_session()
q1 = sess.query(User, Address).filter(User.id == Address.user_id)
self.assert_compile(
sess.query(q1.exists()),
'SELECT EXISTS ('
'SELECT 1 FROM users, addresses '
'WHERE users.id = addresses.user_id'
') AS anon_1'
)
def test_exists_w_select_from(self):
User = self.classes.User
sess = create_session()
q1 = sess.query().select_from(User).exists()
self.assert_compile(
sess.query(q1),
'SELECT EXISTS (SELECT 1 FROM users) AS anon_1'
)
class CountTest(QueryTest):
def test_basic(self):
users, User = self.tables.users, self.classes.User
s = create_session()
eq_(s.query(User).count(), 4)
eq_(s.query(User).filter(users.c.name.endswith('ed')).count(), 2)
def test_count_char(self):
User = self.classes.User
s = create_session()
        # '*' is favored here as the most common character;
        # Informix is reported to dislike count(1), and rumors
        # that Oracle prefers count(1) don't appear to be
        # well founded.
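        # as the assertion below shows, Query.count() also wraps the full
        # SELECT in a subquery and counts that; the same nesting is what
        # keeps DISTINCT / LIMIT semantics intact in the tests that follow.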
self.assert_sql_execution(
testing.db, s.query(User).count, CompiledSQL(
"SELECT count(*) AS count_1 FROM "
"(SELECT users.id AS users_id, users.name "
"AS users_name FROM users) AS anon_1", {}
)
)
def test_multiple_entity(self):
User, Address = self.classes.User, self.classes.Address
s = create_session()
q = s.query(User, Address)
eq_(q.count(), 20) # cartesian product
q = s.query(User, Address).join(User.addresses)
eq_(q.count(), 5)
def test_nested(self):
User, Address = self.classes.User, self.classes.Address
s = create_session()
q = s.query(User, Address).limit(2)
eq_(q.count(), 2)
q = s.query(User, Address).limit(100)
eq_(q.count(), 20)
q = s.query(User, Address).join(User.addresses).limit(100)
eq_(q.count(), 5)
def test_cols(self):
"""test that column-based queries always nest."""
User, Address = self.classes.User, self.classes.Address
s = create_session()
q = s.query(func.count(distinct(User.name)))
eq_(q.count(), 1)
q = s.query(func.count(distinct(User.name))).distinct()
eq_(q.count(), 1)
q = s.query(User.name)
eq_(q.count(), 4)
q = s.query(User.name, Address)
eq_(q.count(), 20)
q = s.query(Address.user_id)
eq_(q.count(), 5)
eq_(q.distinct().count(), 3)
class DistinctTest(QueryTest):
def test_basic(self):
User = self.classes.User
eq_(
[User(id=7), User(id=8), User(id=9), User(id=10)],
create_session().query(User).order_by(User.id).distinct().all()
)
eq_(
[User(id=7), User(id=9), User(id=8), User(id=10)],
create_session().query(User).distinct().
order_by(desc(User.name)).all()
)
def test_joined(self):
"""test that orderbys from a joined table get placed into the columns
clause when DISTINCT is used"""
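        # background: with SELECT DISTINCT, dialects generally require the
        # ORDER BY expressions to also appear in the columns clause, so
        # addresses.email_address is injected into the SELECT list here.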
User, Address = self.classes.User, self.classes.Address
sess = create_session()
q = sess.query(User).join('addresses').distinct(). \
order_by(desc(Address.email_address))
assert [User(id=7), User(id=9), User(id=8)] == q.all()
sess.expunge_all()
# test that it works on embedded joinedload/LIMIT subquery
q = sess.query(User).join('addresses').distinct(). \
options(joinedload('addresses')).\
order_by(desc(Address.email_address)).limit(2)
def go():
assert [
User(id=7, addresses=[
Address(id=1)
]),
User(id=9, addresses=[
Address(id=5)
]),
] == q.all()
self.assert_sql_count(testing.db, go, 1)
class PrefixWithTest(QueryTest, AssertsCompiledSQL):
def test_one_prefix(self):
User = self.classes.User
sess = create_session()
query = sess.query(User.name)\
.prefix_with('PREFIX_1')
expected = "SELECT PREFIX_1 "\
"users.name AS users_name FROM users"
self.assert_compile(query, expected, dialect=default.DefaultDialect())
def test_many_prefixes(self):
User = self.classes.User
sess = create_session()
query = sess.query(User.name).prefix_with('PREFIX_1', 'PREFIX_2')
expected = "SELECT PREFIX_1 PREFIX_2 "\
"users.name AS users_name FROM users"
self.assert_compile(query, expected, dialect=default.DefaultDialect())
def test_chained_prefixes(self):
User = self.classes.User
sess = create_session()
query = sess.query(User.name)\
.prefix_with('PREFIX_1')\
.prefix_with('PREFIX_2', 'PREFIX_3')
expected = "SELECT PREFIX_1 PREFIX_2 PREFIX_3 "\
"users.name AS users_name FROM users"
self.assert_compile(query, expected, dialect=default.DefaultDialect())
class YieldTest(_fixtures.FixtureTest):
run_setup_mappers = 'each'
run_inserts = 'each'
def _eagerload_mappings(self, addresses_lazy=True, user_lazy=True):
User, Address = self.classes("User", "Address")
users, addresses = self.tables("users", "addresses")
mapper(User, users, properties={
"addresses": relationship(
Address, lazy=addresses_lazy,
backref=backref("user", lazy=user_lazy)
)
})
mapper(Address, addresses)
def test_basic(self):
self._eagerload_mappings()
User = self.classes.User
sess = create_session()
q = iter(
sess.query(User).yield_per(1).from_statement(
text("select * from users")))
ret = []
eq_(len(sess.identity_map), 0)
ret.append(next(q))
ret.append(next(q))
eq_(len(sess.identity_map), 2)
ret.append(next(q))
ret.append(next(q))
eq_(len(sess.identity_map), 4)
try:
next(q)
assert False
except StopIteration:
pass
def test_yield_per_and_execution_options(self):
self._eagerload_mappings()
User = self.classes.User
sess = create_session()
q = sess.query(User).yield_per(15)
q = q.execution_options(foo='bar')
assert q._yield_per
eq_(
q._execution_options,
{"stream_results": True, "foo": "bar", "max_row_buffer": 15})
def test_no_joinedload_opt(self):
self._eagerload_mappings()
User = self.classes.User
sess = create_session()
q = sess.query(User).options(joinedload("addresses")).yield_per(1)
assert_raises_message(
sa_exc.InvalidRequestError,
"The yield_per Query option is currently not compatible with "
"joined collection eager loading. Please specify ",
q.all
)
def test_no_subqueryload_opt(self):
self._eagerload_mappings()
User = self.classes.User
sess = create_session()
q = sess.query(User).options(subqueryload("addresses")).yield_per(1)
assert_raises_message(
sa_exc.InvalidRequestError,
"The yield_per Query option is currently not compatible with "
"subquery eager loading. Please specify ",
q.all
)
def test_no_subqueryload_mapping(self):
self._eagerload_mappings(addresses_lazy="subquery")
User = self.classes.User
sess = create_session()
q = sess.query(User).yield_per(1)
assert_raises_message(
sa_exc.InvalidRequestError,
"The yield_per Query option is currently not compatible with "
"subquery eager loading. Please specify ",
q.all
)
def test_joinedload_m2o_ok(self):
self._eagerload_mappings(user_lazy="joined")
Address = self.classes.Address
sess = create_session()
q = sess.query(Address).yield_per(1)
q.all()
def test_eagerload_opt_disable(self):
self._eagerload_mappings()
User = self.classes.User
sess = create_session()
q = sess.query(User).options(subqueryload("addresses")).\
enable_eagerloads(False).yield_per(1)
q.all()
q = sess.query(User).options(joinedload("addresses")).\
enable_eagerloads(False).yield_per(1)
q.all()
def test_m2o_joinedload_not_others(self):
self._eagerload_mappings(addresses_lazy="joined")
Address = self.classes.Address
sess = create_session()
q = sess.query(Address).options(
lazyload('*'), joinedload("user")).yield_per(1).filter_by(id=1)
def go():
result = q.all()
assert result[0].user
self.assert_sql_count(testing.db, go, 1)
class HintsTest(QueryTest, AssertsCompiledSQL):
__dialect__ = 'default'
def test_hints(self):
User = self.classes.User
from sqlalchemy.dialects import mysql
dialect = mysql.dialect()
sess = create_session()
self.assert_compile(
sess.query(User).with_hint(
User, 'USE INDEX (col1_index,col2_index)'),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users USE INDEX (col1_index,col2_index)",
dialect=dialect
)
self.assert_compile(
sess.query(User).with_hint(
User, 'WITH INDEX col1_index', 'sybase'),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users", dialect=dialect
)
ualias = aliased(User)
self.assert_compile(
sess.query(User, ualias).with_hint(
ualias, 'USE INDEX (col1_index,col2_index)').
join(ualias, ualias.id > User.id),
"SELECT users.id AS users_id, users.name AS users_name, "
"users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users INNER JOIN users AS users_1 "
"USE INDEX (col1_index,col2_index) "
"ON users_1.id > users.id", dialect=dialect
)
def test_statement_hints(self):
User = self.classes.User
sess = create_session()
stmt = sess.query(User).\
with_statement_hint("test hint one").\
with_statement_hint("test hint two").\
with_statement_hint("test hint three", "postgresql")
self.assert_compile(
stmt,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users test hint one test hint two",
)
self.assert_compile(
stmt,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users test hint one test hint two test hint three",
dialect='postgresql'
)
class TextTest(QueryTest, AssertsCompiledSQL):
__dialect__ = 'default'
def test_fulltext(self):
User = self.classes.User
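        # a plain string passed to from_statement() triggers a "Textual SQL"
        # warning; the preferred spelling wraps the statement in text(), as
        # the later assertions in this test do.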
with expect_warnings("Textual SQL"):
eq_(
create_session().query(User).
from_statement("select * from users order by id").all(),
[User(id=7), User(id=8), User(id=9), User(id=10)]
)
eq_(
create_session().query(User).from_statement(
text("select * from users order by id")).first(), User(id=7)
)
eq_(
create_session().query(User).from_statement(
text("select * from users where name='nonexistent'")).first(),
None)
def test_fragment(self):
User = self.classes.User
with expect_warnings("Textual SQL expression"):
eq_(
create_session().query(User).filter("id in (8, 9)").all(),
[User(id=8), User(id=9)]
)
eq_(
create_session().query(User).filter("name='fred'").
filter("id=9").all(), [User(id=9)]
)
eq_(
create_session().query(User).filter("name='fred'").
filter(User.id == 9).all(), [User(id=9)]
)
def test_binds_coerce(self):
User = self.classes.User
with expect_warnings("Textual SQL expression"):
eq_(
create_session().query(User).filter("id in (:id1, :id2)").
params(id1=8, id2=9).all(), [User(id=8), User(id=9)]
)
def test_as_column(self):
User = self.classes.User
s = create_session()
assert_raises(
sa_exc.InvalidRequestError, s.query,
User.id, text("users.name"))
eq_(
s.query(User.id, "name").order_by(User.id).all(),
[(7, 'jack'), (8, 'ed'), (9, 'fred'), (10, 'chuck')])
def test_via_select(self):
User = self.classes.User
s = create_session()
eq_(
s.query(User).from_statement(
select([column('id'), column('name')]).
select_from(table('users')).order_by('id'),
).all(),
[User(id=7), User(id=8), User(id=9), User(id=10)]
)
def test_via_textasfrom_from_statement(self):
User = self.classes.User
s = create_session()
eq_(
s.query(User).from_statement(
text("select * from users order by id").
columns(id=Integer, name=String)).all(),
[User(id=7), User(id=8), User(id=9), User(id=10)]
)
def test_via_textasfrom_use_mapped_columns(self):
User = self.classes.User
s = create_session()
eq_(
s.query(User).from_statement(
text("select * from users order by id").
columns(User.id, User.name)).all(),
[User(id=7), User(id=8), User(id=9), User(id=10)]
)
def test_via_textasfrom_select_from(self):
User = self.classes.User
s = create_session()
eq_(
s.query(User).select_from(
text("select * from users").columns(id=Integer, name=String)
).order_by(User.id).all(),
[User(id=7), User(id=8), User(id=9), User(id=10)]
)
def test_order_by_w_eager_one(self):
User = self.classes.User
s = create_session()
# from 1.0.0 thru 1.0.2, the "name" symbol here was considered
# to be part of the things we need to ORDER BY and it was being
# placed into the inner query's columns clause, as part of
# query._compound_eager_statement where we add unwrap_order_by()
# to the columns clause. However, as #3392 illustrates, unlocatable
# string expressions like "name desc" will only fail in this scenario,
# so in general the changing of the query structure with string labels
# is dangerous.
#
# the queries here are again "invalid" from a SQL perspective, as the
# "name" field isn't matched up to anything.
#
with expect_warnings("Can't resolve label reference 'name';"):
self.assert_compile(
s.query(User).options(joinedload("addresses")).
order_by(desc("name")).limit(1),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.name "
"DESC LIMIT :param_1) AS anon_1 "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id "
"ORDER BY name DESC, addresses_1.id"
)
def test_order_by_w_eager_two(self):
User = self.classes.User
s = create_session()
with expect_warnings("Can't resolve label reference 'name';"):
self.assert_compile(
s.query(User).options(joinedload("addresses")).
order_by("name").limit(1),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.name "
"LIMIT :param_1) AS anon_1 "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id "
"ORDER BY name, addresses_1.id"
)
def test_order_by_w_eager_three(self):
User = self.classes.User
s = create_session()
self.assert_compile(
s.query(User).options(joinedload("addresses")).
order_by("users_name").limit(1),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.name "
"LIMIT :param_1) AS anon_1 "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id "
"ORDER BY anon_1.users_name, addresses_1.id"
)
# however! this works (again?)
eq_(
s.query(User).options(joinedload("addresses")).
order_by("users_name").first(),
User(name='chuck', addresses=[])
)
def test_order_by_w_eager_four(self):
User = self.classes.User
Address = self.classes.Address
s = create_session()
self.assert_compile(
s.query(User).options(joinedload("addresses")).
order_by(desc("users_name")).limit(1),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.name DESC "
"LIMIT :param_1) AS anon_1 "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id "
"ORDER BY anon_1.users_name DESC, addresses_1.id"
)
# however! this works (again?)
eq_(
s.query(User).options(joinedload("addresses")).
order_by(desc("users_name")).first(),
User(name='jack', addresses=[Address()])
)
def test_order_by_w_eager_five(self):
"""essentially the same as test_eager_relations -> test_limit_3,
but test for textual label elements that are freeform.
this is again #3392."""
User = self.classes.User
Address = self.classes.Address
Order = self.classes.Order
sess = create_session()
q = sess.query(User, Address.email_address.label('email_address'))
        result = q.join('addresses').options(joinedload(User.orders)).\
            order_by(
                "email_address desc").limit(1).offset(0)
with expect_warnings(
"Can't resolve label reference 'email_address desc'"):
eq_(
[
(User(
id=7,
orders=[Order(id=1), Order(id=3), Order(id=5)],
addresses=[Address(id=1)]
), 'jack@bean.com')
],
                result.all())
class TextWarningTest(QueryTest, AssertsCompiledSQL):
def _test(self, fn, arg, offending_clause, expected):
assert_raises_message(
sa.exc.SAWarning,
r"Textual (?:SQL|column|SQL FROM) expression %(stmt)r should be "
r"explicitly declared (?:with|as) text\(%(stmt)r\)" % {
"stmt": util.ellipses_string(offending_clause),
},
fn, arg
)
with expect_warnings("Textual "):
stmt = fn(arg)
self.assert_compile(stmt, expected)
def test_filter(self):
User = self.classes.User
self._test(
Session().query(User.id).filter, "myid == 5", "myid == 5",
"SELECT users.id AS users_id FROM users WHERE myid == 5"
)
def test_having(self):
User = self.classes.User
self._test(
Session().query(User.id).having, "myid == 5", "myid == 5",
"SELECT users.id AS users_id FROM users HAVING myid == 5"
)
def test_from_statement(self):
User = self.classes.User
self._test(
Session().query(User.id).from_statement,
"select id from user",
"select id from user",
"select id from user",
)
class ParentTest(QueryTest, AssertsCompiledSQL):
__dialect__ = 'default'
def test_o2m(self):
User, orders, Order = (
self.classes.User, self.tables.orders, self.classes.Order)
sess = create_session()
q = sess.query(User)
u1 = q.filter_by(name='jack').one()
# test auto-lookup of property
o = sess.query(Order).with_parent(u1).all()
assert [Order(description="order 1"), Order(description="order 3"),
Order(description="order 5")] == o
# test with explicit property
o = sess.query(Order).with_parent(u1, property='orders').all()
assert [Order(description="order 1"), Order(description="order 3"),
Order(description="order 5")] == o
o = sess.query(Order).with_parent(u1, property=User.orders).all()
assert [Order(description="order 1"), Order(description="order 3"),
Order(description="order 5")] == o
o = sess.query(Order).filter(with_parent(u1, User.orders)).all()
assert [
Order(description="order 1"), Order(description="order 3"),
Order(description="order 5")] == o
# test generative criterion
o = sess.query(Order).with_parent(u1).filter(orders.c.id > 2).all()
assert [
Order(description="order 3"), Order(description="order 5")] == o
# test against None for parent? this can't be done with the current
# API since we don't know what mapper to use
# assert
# sess.query(Order).with_parent(None, property='addresses').all()
# == [Order(description="order 5")]
def test_noparent(self):
Item, User = self.classes.Item, self.classes.User
sess = create_session()
q = sess.query(User)
u1 = q.filter_by(name='jack').one()
try:
q = sess.query(Item).with_parent(u1)
assert False
except sa_exc.InvalidRequestError as e:
assert str(e) \
== "Could not locate a property which relates "\
"instances of class 'Item' to instances of class 'User'"
def test_m2m(self):
Item, Keyword = self.classes.Item, self.classes.Keyword
sess = create_session()
i1 = sess.query(Item).filter_by(id=2).one()
k = sess.query(Keyword).with_parent(i1).all()
assert [
Keyword(name='red'), Keyword(name='small'),
Keyword(name='square')] == k
def test_with_transient(self):
User, Order = self.classes.User, self.classes.Order
sess = Session()
q = sess.query(User)
u1 = q.filter_by(name='jack').one()
utrans = User(id=u1.id)
o = sess.query(Order).with_parent(utrans, 'orders')
eq_(
[
Order(description="order 1"), Order(description="order 3"),
Order(description="order 5")],
o.all()
)
o = sess.query(Order).filter(with_parent(utrans, 'orders'))
eq_(
[
Order(description="order 1"), Order(description="order 3"),
Order(description="order 5")],
o.all()
)
def test_with_pending_autoflush(self):
Order, User = self.classes.Order, self.classes.User
sess = Session()
o1 = sess.query(Order).first()
opending = Order(id=20, user_id=o1.user_id)
sess.add(opending)
eq_(
sess.query(User).with_parent(opending, 'user').one(),
User(id=o1.user_id)
)
eq_(
sess.query(User).filter(with_parent(opending, 'user')).one(),
User(id=o1.user_id)
)
def test_with_pending_no_autoflush(self):
Order, User = self.classes.Order, self.classes.User
sess = Session(autoflush=False)
o1 = sess.query(Order).first()
opending = Order(user_id=o1.user_id)
sess.add(opending)
eq_(
sess.query(User).with_parent(opending, 'user').one(),
User(id=o1.user_id)
)
def test_unique_binds_union(self):
"""bindparams used in the 'parent' query are unique"""
User, Address = self.classes.User, self.classes.Address
sess = Session()
u1, u2 = sess.query(User).order_by(User.id)[0:2]
q1 = sess.query(Address).with_parent(u1, 'addresses')
q2 = sess.query(Address).with_parent(u2, 'addresses')
self.assert_compile(
q1.union(q2),
"SELECT anon_1.addresses_id AS anon_1_addresses_id, "
"anon_1.addresses_user_id AS anon_1_addresses_user_id, "
"anon_1.addresses_email_address AS "
"anon_1_addresses_email_address FROM (SELECT addresses.id AS "
"addresses_id, addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address FROM "
"addresses WHERE :param_1 = addresses.user_id UNION SELECT "
"addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address "
"AS addresses_email_address "
"FROM addresses WHERE :param_2 = addresses.user_id) AS anon_1",
checkparams={'param_1': 7, 'param_2': 8},
)
def test_unique_binds_or(self):
User, Address = self.classes.User, self.classes.Address
sess = Session()
u1, u2 = sess.query(User).order_by(User.id)[0:2]
self.assert_compile(
sess.query(Address).filter(
or_(with_parent(u1, 'addresses'), with_parent(u2, 'addresses'))
),
"SELECT addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE "
":param_1 = addresses.user_id OR :param_2 = addresses.user_id",
checkparams={'param_1': 7, 'param_2': 8},
)
class WithTransientOnNone(_fixtures.FixtureTest, AssertsCompiledSQL):
run_inserts = None
__dialect__ = 'default'
def _fixture1(self):
User, Address = self.classes.User, self.classes.Address
users, addresses = self.tables.users, self.tables.addresses
mapper(User, users)
mapper(Address, addresses, properties={
'user': relationship(User),
'special_user': relationship(
User, primaryjoin=and_(
users.c.id == addresses.c.user_id,
users.c.name == addresses.c.email_address))
})
def test_filter_with_transient_assume_pk(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
sess = Session()
q = sess.query(Address).filter(Address.user == User())
with expect_warnings("Got None for value of column "):
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id",
checkparams={'param_1': None}
)
def test_filter_with_transient_warn_for_none_against_non_pk(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
s = Session()
q = s.query(Address).filter(Address.special_user == User())
with expect_warnings("Got None for value of column"):
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND :param_2 = addresses.email_address",
checkparams={"param_1": None, "param_2": None}
)
def test_with_parent_with_transient_assume_pk(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
sess = Session()
q = sess.query(User).with_parent(Address(), "user")
with expect_warnings("Got None for value of column"):
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1",
checkparams={'param_1': None}
)
def test_with_parent_with_transient_warn_for_none_against_non_pk(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
s = Session()
q = s.query(User).with_parent(Address(), "special_user")
with expect_warnings("Got None for value of column"):
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1 "
"AND users.name = :param_2",
checkparams={"param_1": None, "param_2": None}
)
def test_negated_contains_or_equals_plain_m2o(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
s = Session()
q = s.query(Address).filter(Address.user != User())
with expect_warnings("Got None for value of column"):
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses "
"WHERE addresses.user_id != :user_id_1 "
"OR addresses.user_id IS NULL",
checkparams={'user_id_1': None}
)
def test_negated_contains_or_equals_complex_rel(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
s = Session()
# this one does *not* warn because we do the criteria
# without deferral
q = s.query(Address).filter(Address.special_user != User())
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses "
"WHERE NOT (EXISTS (SELECT 1 "
"FROM users "
"WHERE users.id = addresses.user_id AND "
"users.name = addresses.email_address AND users.id IS NULL))",
checkparams={}
)
class SynonymTest(QueryTest, AssertsCompiledSQL):
__dialect__ = 'default'
@classmethod
def setup_mappers(cls):
users, Keyword, items, order_items, orders, Item, User, \
Address, keywords, Order, item_keywords, addresses = \
cls.tables.users, cls.classes.Keyword, cls.tables.items, \
cls.tables.order_items, cls.tables.orders, \
cls.classes.Item, cls.classes.User, cls.classes.Address, \
cls.tables.keywords, cls.classes.Order, \
cls.tables.item_keywords, cls.tables.addresses
mapper(User, users, properties={
'name_syn': synonym('name'),
'addresses': relationship(Address),
'orders': relationship(
Order, backref='user', order_by=orders.c.id), # o2m, m2o
'orders_syn': synonym('orders'),
'orders_syn_2': synonym('orders_syn')
})
mapper(Address, addresses)
mapper(Order, orders, properties={
'items': relationship(Item, secondary=order_items), # m2m
'address': relationship(Address), # m2o
'items_syn': synonym('items')
})
mapper(Item, items, properties={
'keywords': relationship(Keyword, secondary=item_keywords) # m2m
})
mapper(Keyword, keywords)
def test_options(self):
User, Order = self.classes.User, self.classes.Order
s = create_session()
def go():
result = s.query(User).filter_by(name='jack').\
options(joinedload(User.orders_syn)).all()
eq_(result, [
User(id=7, name='jack', orders=[
Order(description='order 1'),
Order(description='order 3'),
Order(description='order 5')
])
])
self.assert_sql_count(testing.db, go, 1)
def test_options_syn_of_syn(self):
User, Order = self.classes.User, self.classes.Order
s = create_session()
def go():
result = s.query(User).filter_by(name='jack').\
options(joinedload(User.orders_syn_2)).all()
eq_(result, [
User(id=7, name='jack', orders=[
Order(description='order 1'),
Order(description='order 3'),
Order(description='order 5')
])
])
self.assert_sql_count(testing.db, go, 1)
def test_options_syn_of_syn_string(self):
User, Order = self.classes.User, self.classes.Order
s = create_session()
def go():
result = s.query(User).filter_by(name='jack').\
options(joinedload('orders_syn_2')).all()
eq_(result, [
User(id=7, name='jack', orders=[
Order(description='order 1'),
Order(description='order 3'),
Order(description='order 5')
])
])
self.assert_sql_count(testing.db, go, 1)
def test_joins(self):
User, Order = self.classes.User, self.classes.Order
for j in (
['orders', 'items'],
['orders_syn', 'items'],
[User.orders_syn, Order.items],
['orders_syn_2', 'items'],
[User.orders_syn_2, 'items'],
['orders', 'items_syn'],
['orders_syn', 'items_syn'],
['orders_syn_2', 'items_syn'],
):
result = create_session().query(User).join(*j).filter_by(id=3). \
all()
assert [User(id=7, name='jack'), User(id=9, name='fred')] == result
def test_with_parent(self):
Order, User = self.classes.Order, self.classes.User
for nameprop, orderprop in (
('name', 'orders'),
('name_syn', 'orders'),
('name', 'orders_syn'),
('name', 'orders_syn_2'),
('name_syn', 'orders_syn'),
('name_syn', 'orders_syn_2'),
):
sess = create_session()
q = sess.query(User)
u1 = q.filter_by(**{nameprop: 'jack'}).one()
o = sess.query(Order).with_parent(u1, property=orderprop).all()
assert [
Order(description="order 1"), Order(description="order 3"),
Order(description="order 5")] == o
def test_froms_aliased_col(self):
Address, User = self.classes.Address, self.classes.User
sess = create_session()
ua = aliased(User)
q = sess.query(ua.name_syn).join(
Address, ua.id == Address.user_id)
self.assert_compile(
q,
"SELECT users_1.name AS users_1_name FROM "
"users AS users_1 JOIN addresses ON users_1.id = addresses.user_id"
)
class ImmediateTest(_fixtures.FixtureTest):
run_inserts = 'once'
run_deletes = None
@classmethod
def setup_mappers(cls):
Address, addresses, users, User = (cls.classes.Address,
cls.tables.addresses,
cls.tables.users,
cls.classes.User)
mapper(Address, addresses)
mapper(User, users, properties=dict(
addresses=relationship(Address)))
def test_one(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
assert_raises(
sa.orm.exc.NoResultFound,
sess.query(User).filter(User.id == 99).one)
eq_(sess.query(User).filter(User.id == 7).one().id, 7)
assert_raises(sa.orm.exc.MultipleResultsFound, sess.query(User).one)
assert_raises(
sa.orm.exc.NoResultFound,
sess.query(User.id, User.name).filter(User.id == 99).one)
eq_(sess.query(User.id, User.name).filter(User.id == 7).one(),
(7, 'jack'))
assert_raises(
sa.orm.exc.MultipleResultsFound,
sess.query(User.id, User.name).one)
assert_raises(
sa.orm.exc.NoResultFound,
(sess.query(User, Address).join(User.addresses).
filter(Address.id == 99)).one)
eq_((sess.query(User, Address).
join(User.addresses).
filter(Address.id == 4)).one(),
(User(id=8), Address(id=4)))
assert_raises(
sa.orm.exc.MultipleResultsFound,
sess.query(User, Address).join(User.addresses).one)
# this result returns multiple rows, the first
# two rows being the same. but uniquing is
# not applied for a column based result.
assert_raises(
sa.orm.exc.MultipleResultsFound,
sess.query(User.id).join(User.addresses).
filter(User.id.in_([8, 9])).order_by(User.id).one)
# test that a join which ultimately returns
# multiple identities across many rows still
# raises, even though the first two rows are of
# the same identity and unique filtering
# is applied ([ticket:1688])
assert_raises(
sa.orm.exc.MultipleResultsFound,
sess.query(User).join(User.addresses).filter(User.id.in_([8, 9])).
order_by(User.id).one)
@testing.future
def test_getslice(self):
assert False
def test_scalar(self):
User = self.classes.User
sess = create_session()
eq_(sess.query(User.id).filter_by(id=7).scalar(), 7)
eq_(sess.query(User.id, User.name).filter_by(id=7).scalar(), 7)
eq_(sess.query(User.id).filter_by(id=0).scalar(), None)
eq_(sess.query(User).filter_by(id=7).scalar(),
sess.query(User).filter_by(id=7).one())
assert_raises(sa.orm.exc.MultipleResultsFound, sess.query(User).scalar)
assert_raises(
sa.orm.exc.MultipleResultsFound,
sess.query(User.id, User.name).scalar)
def test_value(self):
User = self.classes.User
sess = create_session()
eq_(sess.query(User).filter_by(id=7).value(User.id), 7)
eq_(sess.query(User.id, User.name).filter_by(id=7).value(User.id), 7)
eq_(sess.query(User).filter_by(id=0).value(User.id), None)
sess.bind = testing.db
eq_(sess.query().value(sa.literal_column('1').label('x')), 1)
class ExecutionOptionsTest(QueryTest):
def test_option_building(self):
User = self.classes.User
sess = create_session(bind=testing.db, autocommit=False)
q1 = sess.query(User)
assert q1._execution_options == dict()
q2 = q1.execution_options(foo='bar', stream_results=True)
# q1's options should be unchanged.
assert q1._execution_options == dict()
# q2 should have them set.
assert q2._execution_options == dict(foo='bar', stream_results=True)
q3 = q2.execution_options(foo='not bar', answer=42)
assert q2._execution_options == dict(foo='bar', stream_results=True)
q3_options = dict(foo='not bar', stream_results=True, answer=42)
assert q3._execution_options == q3_options
def test_options_in_connection(self):
User = self.classes.User
execution_options = dict(foo='bar', stream_results=True)
class TQuery(Query):
def instances(self, result, ctx):
try:
eq_(
result.connection._execution_options,
execution_options)
finally:
result.close()
return iter([])
sess = create_session(
bind=testing.db, autocommit=False, query_cls=TQuery)
q1 = sess.query(User).execution_options(**execution_options)
q1.all()
class BooleanEvalTest(fixtures.TestBase, testing.AssertsCompiledSQL):
"""test standalone booleans being wrapped in an AsBoolean, as well
as true/false compilation."""
def _dialect(self, native_boolean):
d = default.DefaultDialect()
d.supports_native_boolean = native_boolean
return d
def test_one(self):
s = Session()
c = column('x', Boolean)
self.assert_compile(
s.query(c).filter(c),
"SELECT x WHERE x",
dialect=self._dialect(True)
)
def test_two(self):
s = Session()
c = column('x', Boolean)
self.assert_compile(
s.query(c).filter(c),
"SELECT x WHERE x = 1",
dialect=self._dialect(False)
)
def test_three(self):
s = Session()
c = column('x', Boolean)
self.assert_compile(
s.query(c).filter(~c),
"SELECT x WHERE x = 0",
dialect=self._dialect(False)
)
def test_four(self):
s = Session()
c = column('x', Boolean)
self.assert_compile(
s.query(c).filter(~c),
"SELECT x WHERE NOT x",
dialect=self._dialect(True)
)
def test_five(self):
s = Session()
c = column('x', Boolean)
self.assert_compile(
s.query(c).having(c),
"SELECT x HAVING x = 1",
dialect=self._dialect(False)
)
class SessionBindTest(QueryTest):
@contextlib.contextmanager
def _assert_bind_args(self, session):
get_bind = mock.Mock(side_effect=session.get_bind)
with mock.patch.object(session, "get_bind", get_bind):
yield
for call_ in get_bind.mock_calls:
is_(call_[1][0], inspect(self.classes.User))
is_not_(call_[2]['clause'], None)
def test_single_entity_q(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(User).all()
def test_sql_expr_entity_q(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(User.id).all()
def test_count(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(User).count()
def test_aggregate_fn(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(func.max(User.name)).all()
def test_bulk_update_no_sync(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(User).filter(User.id == 15).update(
{"name": "foob"}, synchronize_session=False)
def test_bulk_delete_no_sync(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(User).filter(User.id == 15).delete(
synchronize_session=False)
def test_bulk_update_fetch_sync(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(User).filter(User.id == 15).update(
{"name": "foob"}, synchronize_session='fetch')
def test_bulk_delete_fetch_sync(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(User).filter(User.id == 15).delete(
synchronize_session='fetch')
def test_column_property(self):
User = self.classes.User
mapper = inspect(User)
mapper.add_property(
"score",
column_property(func.coalesce(self.tables.users.c.name, None)))
session = Session()
with self._assert_bind_args(session):
session.query(func.max(User.score)).scalar()
def test_column_property_select(self):
User = self.classes.User
Address = self.classes.Address
mapper = inspect(User)
mapper.add_property(
"score",
column_property(
select([func.sum(Address.id)]).
where(Address.user_id == User.id).as_scalar()
)
)
session = Session()
with self._assert_bind_args(session):
session.query(func.max(User.score)).scalar()
| lameiro/cx_oracle_on_ctypes | test/integration/3rdparty/SQLAlchemy-1.0.8/test/orm/test_query.py | test_query.py | py | 132,871 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "test.orm._fixtures.FixtureTest",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "test.orm._fixtures",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 41,
"usage_type": "call"
},
{
"api... |
20240300556 | # Definition for singly-linked list.
from typing import Optional
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
# @see https://qiita.com/KueharX/items/7112e8bd9dbf69f5c083
class Solution:
def mergeTwoLists(
self, list1: Optional[ListNode], list2: Optional[ListNode]
) -> Optional[ListNode]:
temp = ans = ListNode(0)
while list1 and list2:
            # if list2's value is the larger one, attach list1's node to ans.next, since both lists are sorted ascending
if list1.val < list2.val:
ans.next = list1
                # list1's current node has been consumed, so advance list1
list1 = list1.next
else:
ans.next = list2
list2 = list2.next
            # this ans slot is settled, so move on and decide what goes into the next ans.next
ans = ans.next
        # ? why does ans.next end up as `list1 or list2`?
        # -> the `while list1 and list2` loop exits as soon as either list runs out
        # -> the leftover list is already sorted, so attaching it to ans.next finishes the merge
ans.next = list1 or list2
return temp.next
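# Hedged usage sketch (added; not part of the original solution): build the
# lists 1 -> 3 and 2 -> 4 from plain values, merge them, and print the result.
def _from_values(values):
    head = tail = ListNode(0)
    for v in values:
        tail.next = ListNode(v)
        tail = tail.next
    return head.next
if __name__ == "__main__":
    merged = Solution().mergeTwoLists(_from_values([1, 3]), _from_values([2, 4]))
    out = []
    while merged:
        out.append(merged.val)
        merged = merged.next
    print(out)  # expected: [1, 2, 3, 4]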
| sugitata/leetCode | linkedList/merge_two_sorted_list.py | merge_two_sorted_list.py | py | 1,286 | python | ja | code | 0 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 15,
"usage_type": "name"
}
] |
28616363443 | from enum import Enum
from InsertOrder import InsertOrder, LifeSpan, Side
import sys
import exchange_pb2 as proto
import State
from time import sleep
from typing import Callable
from threading import Thread
from websocket import WebSocketApp
class ExchangeClient:
'''
    A client class responsible for sending InsertOrder objects through a websocket.
'''
def __init__(self, hostname: str = '127.0.0.1', port: int = 15001):
self.uri = f'ws://{hostname}:{port}'
self.ws = WebSocketApp(self.uri, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close)
# hash event_enum: Event => list[handler: Callable]
self.handlers = {event_enum: [] for event_enum in event_to_class}
Thread(target=self.ws.run_forever).start()
# Waiting for websocket to connect.
sleep(1)
self.state = State.NotLoggedInState(self)
def on_message(self, ws, message) -> None:
        # the first 4 bytes encode the event type
event_type_raw = message[:4]
        # bail out before decoding if the server sent an empty frame
        if not len(event_type_raw):
            print("Server closed connection")
            ws.close()
            sys.exit()
        event_type = int.from_bytes(event_type_raw, byteorder="little")
        print(f"Message type: {Event(event_type)}")
event_enum = Event(event_type)
event_proto_obj = event_to_class[event_enum]()
event_proto_obj.ParseFromString(message[4:])
self.emit(event_enum, event_proto_obj)
def on_error(self, ws, error):
print(error)
def on_close(self, ws, close_status_code, close_msg):
print("on_close args:")
if close_status_code or close_msg:
print("close status code: " + str(close_status_code))
print("close message: " + str(close_msg))
def send_insert_request(self, insert_order: InsertOrder) -> None:
self.state.send_insert_request(insert_order)
def send_cancel_order_request(self, client_id) -> None:
self.state.send_cancel_order_request(client_id)
def add_handler(self, event_enum: 'Event', handler: Callable):
if event_enum not in event_to_class:
raise Exception(f'Event number {event_enum} does not exist')
self.handlers[event_enum].append(handler)
def emit(self, event_enum: 'Event', data) -> None:
if event_enum not in event_to_class:
raise Exception(f'Event number {event_enum} does not exist')
for handler in self.handlers[event_enum]:
try:
handler(data)
except Exception as e:
print(e)
return False
return True
def send_login_request(self, key: str) -> None:
self.state.send_login_request(key)
def change_state(self, state: State) -> None:
self.state = state
class Event(Enum):
Feed = FEED = proto.EXCHANGE_FEED
Fill = FILL = proto.ORDER_FILL
Update = UPDATE = proto.ORDER_UPDATE
LoginResponse = LOGINRESPONSE = proto.LOGIN_RESPONSE
# dict: event proto enum -> proto class signature
event_to_class = {
Event.FEED: proto.ExchangeFeed,
Event.FILL: proto.OrderFillMessage,
Event.UPDATE: proto.OrderUpdateMessage,
Event.LOGINRESPONSE: proto.LoginResponse
}
if __name__ == '__main__':
client = ExchangeClient()
client.add_handler(Event.UPDATE, print)
client.add_handler(Event.LOGINRESPONSE, print)
client.send_login_request("6f0cb665-c2ea-4460-8442-ebfbe01fbedf")
# res = client.send_login_request("95e95dae-b722-4b2e-9c11-db7088041b63")
sleep(4)
client.send_insert_request(InsertOrder(5, 100, LifeSpan.GFD, Side.BUY, 0, 0))
# client.send_insert_request(InsertOrder(5, 100, LifeSpan.GFD, Side.SELL, 0, 0))
| UOA-CS732-SE750-Students-2022/project-group-magenta-mice | apps/data-generator/libs/ExchangeClient.py | ExchangeClient.py | py | 3,811 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "websocket.WebSocketApp",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "State.NotLoggedInS... |
42912024062 | import cv2
from PIL import Image
import pytesseract
import re
import numpy as np
from url_camera import url
# In this function, the part that saves the image to disk could be removed.
def capture_food():
cap = cv2.VideoCapture(url)
while True:
ret, frame = cap.read()
if frame is not None:
blur_image = cv2.GaussianBlur(frame, (7,7), 0)
cv2.imshow('frame', blur_image)
cv2.imwrite(filename='saved_img.jpg', img=frame)
q = cv2.waitKey(1)
if q == ord("q"):
break
cv2.destroyAllWindows()
im = Image.open("saved_img.jpg")
text = pytesseract.image_to_string(im, lang='spa')
return text
def find_text(text):
result = re.findall(r'E-\d+', text)
result_2 = re.findall(r'E\d+', text)
return result, result_2
def get_homogenic_data(result, result_2):
values = []
for value in result:
values.append(value)
for value in result_2:
values.append(value)
    # normalize additive codes: unify "E-123" into "E123" and strip stray whitespace
    values = [item.replace('E-', 'E').strip() for item in values]
    return values
def get_allergies_dataframe(df, values):
allergies = df[df['id'].isin(values)]
return allergies
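# Hedged end-to-end sketch (added): chains the OCR, regex and lookup helpers
# above; `df` is a made-up additive table with an 'id' column such as 'E330',
# and running this requires the IP camera behind `url`.
if __name__ == "__main__":
    import pandas as pd
    df = pd.DataFrame({"id": ["E330", "E621"], "name": ["citric acid", "monosodium glutamate"]})
    text = capture_food()
    result, result_2 = find_text(text)
    values = get_homogenic_data(result, result_2)
    print(get_allergies_dataframe(df, values))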
| marinapm90/E-vitalos | Proyecto/scanning_ingredients.py | scanning_ingredients.py | py | 1,342 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "url_camera.url",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "cv2.GaussianBlur",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
... |
19260761331 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 29 13:45:16 2020
"""
from pytube import YouTube
import tkinter as tk
window=tk.Tk()
#########by teacher############
window.title("Youtube Downloader")
window.geometry("500x150")
window.resizable(False,False)
#######################
progress=0
def showProgress(stream,chunk,bytes_remaining):
size=stream.filesize
global progress
preprogress=progress
currentprogress=int((size-bytes_remaining)*100/size)
progress=currentprogress
if progress == 100:
print("下載完成")
return
if preprogress!=progress:
scale.set(progress)
window.update()
#print("目前進度:"+ (currentprogress)+"%") #by mike
print("目前進度:"+ str(currentprogress)+"%") #by teacher
def onClick():
global var
var.set(entry.get())
try:
yt = YouTube(var.get(),on_progress_callback=showProgress)
stream = yt.streams.first()
stream.download()
except:
print("下載失敗")
label=tk.Label(window,text="請不要輸入Youtube網址")
label.pack()
var = tk.StringVar()
entry=tk.Entry(window,width=50)
entry.pack()
#########by teacher###############
button=tk.Button(window,text="Download",command=onClick)
button.pack()
##########################
scale = tk.Scale(window,label="Progress",orient=tk.HORIZONTAL,length=200) #by mike
scale.pack()
window.mainloop()
| JeffreyChen-coding/student-homework | mike1024.py | mike1024.py | py | 1,513 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tkinter.Tk",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pytube.YouTube",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tkinter.StringVar",
"line... |
18230811445 | import nfc
from nfc.clf import RemoteTarget
from pybleno import *
def startup(targets):
print("waiting for new NFC tags...")
return targets
def connected(tag):
print("old message:")
if tag.TYPE == 'Type4Tag' and tag.ndef is not None:
if tag.ndef.records[0].uri == 'http://www.kddi.com/hr-nfc/':
print(tag.ndef.records[0].uri)
return True
def released(tag):
print("released:")
clf = nfc.ContactlessFrontend('usb')
print(clf)
if clf:
while clf.connect(rdwr={
'targets': ['106A'],
'on-startup': startup,
'on-connect': connected,
'on-release': released,
}):
pass
| shugonta/monitor_nfc | pynfc.py | pynfc.py | py | 660 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nfc.ContactlessFrontend",
"line_number": 24,
"usage_type": "call"
}
] |
10492846310 | import cv2
import time
import os
# open the video file
cap = cv2.VideoCapture('./frames/test2.mp4')
# check that the video file opened successfully
if not cap.isOpened():
print("Cannot open video file")
exit()
# get the frame rate (the next line then overrides it to sample about 3 frames per second)
fps = cap.get(cv2.CAP_PROP_FPS)
fps = 3
# set the output folder for the extracted frame images
output_folder = 'frames/images'
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# get the total number of frames in the video
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# record the extraction start time
start_time = time.time()
# frame index of the next image to extract
next_frame = 0
# loop that extracts and saves an image per sampled frame
while True:
    # get the current time
    current_time = time.time()
    # compute the time elapsed since extraction started
    elapsed_time = current_time - start_time
    # compute how many frames to advance based on the elapsed time
    frame_interval = int(fps * elapsed_time)
    # advance the index of the next frame to extract
    next_frame += frame_interval
    # stop once the index passes the last frame
if next_frame >= frame_count:
break
    # seek to and read the next frame from the video
cap.set(cv2.CAP_PROP_POS_FRAMES, next_frame)
ret, frame = cap.read()
    # check that the frame was read successfully
if not ret:
print("Cannot read frame ", next_frame)
continue
    # build the output file name
filename = os.path.join(output_folder, f"frame_{next_frame:04}.png")
    # save the frame as an image file
cv2.imwrite(filename, frame)
# close the video file
cap.release()
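# Hedged alternative sketch (added): the wall-clock stepping above ties the
# sampling rate to processing speed; a deterministic variant saves one frame
# every `step` frames instead (`video_path` and `out_dir` are placeholder names).
def extract_every_nth(video_path, out_dir, step=10):
    cap = cv2.VideoCapture(video_path)
    idx = saved = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        if idx % step == 0:
            cv2.imwrite(os.path.join(out_dir, f"frame_{idx:04}.png"), frame)
            saved += 1
        idx += 1
    cap.release()
    return saved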
| chansoopark98/3D-Scanning | video_extract.py | video_extract.py | py | 1,759 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FPS",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"l... |
12438520461 | import os
from cryptography.fernet import Fernet
file_list = []
for file in os.listdir():
if file == "ransom.py" or file == "dosyalari-sifrele.py" or file == "generatedkey.key" or file == "dosyalari-coz.py":
continue
if os.path.isfile(file):
file_list.append(file)
with open("generatedkey.key", "rb") as generatedkey:
secret_key = generatedkey.read()
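# Note (added, hedged): decryption only succeeds when generatedkey.key holds the
# exact Fernet key used for encryption, i.e. one created once via
#   Fernet.generate_key()
# and presumably written to generatedkey.key by the companion dosyalari-sifrele.py script.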
for file in file_list:
with open(file, "rb") as the_file:
contents = the_file.read()
contents_decrypted = Fernet(secret_key).decrypt(contents)
with open(file, "wb") as the_file:
the_file.write(contents_decrypted) | karakayaahmet/Ransomware | dosyalari-coz.py | dosyalari-coz.py | py | 657 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cryptography.fernet.Fernet",
... |
71352577065 | """
trainer of cycle architecture
works well on adt2gex, atac2gex, gex2atac subtasks
uses the cycle consistency loss to enhance the reconstruction quality
"""
import os
import logging
import numpy as np
import anndata as ad
from scipy.sparse import csc_matrix
from tensorboardX import SummaryWriter
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, random_split
from opts import DATASET
from utils.metric import rmse
from utils.dataloader import SeqDataset
from utils.loss import L1regularization
from modules.model_ae import AutoEncoder
class TrainProcess:
"""the training process for cycle arch"""
def __init__(self, args):
self.args = args
self.writer = SummaryWriter(log_dir=f"../runs/{args.exp_name}")
self.device = (
torch.device(f"cuda:{args.gpu_ids[0]}") if args.gpu_ids else torch.device("cpu")
)
mod1_idf = np.load(args.idf_path) if args.tfidf != 0 else None
self.trainset = SeqDataset(
DATASET[args.mode]["train_mod1"],
DATASET[args.mode]["train_mod2"],
mod1_idx_path=args.mod1_idx_path,
tfidf=args.tfidf,
mod1_idf=mod1_idf,
batch_list=args.train_batch,
norm=args.norm,
gene_activity=args.gene_activity,
)
logging.info(f"TRAIN_NUM: {len(self.trainset):5d}")
# do random split of dataset in phase 2
if args.mode not in ["gex2atac_p2", "gex2adt_p2", "adt2gex_p2", "atac2gex_p2"]:
self.testset = SeqDataset(
DATASET[args.mode]["test_mod1"],
DATASET[args.mode]["test_mod2"],
mod1_idx_path=args.mod1_idx_path,
tfidf=args.tfidf,
mod1_idf=mod1_idf,
batch_list=args.test_batch,
norm=args.norm,
gene_activity=args.gene_activity,
)
logging.info(f"TEST_NUM : {len(self.testset):5d}")
else:
train_size = int(0.9 * len(self.trainset))
test_size = len(self.trainset) - train_size
logging.info(f"(SPLITED) TRAIN_SIZE: {train_size:5d}, TEST_SIZE: {test_size:5d}")
self.trainset, self.testset = random_split(
self.trainset,
[train_size, test_size],
generator=torch.Generator().manual_seed(args.seed),
)
self.train_loader = DataLoader(self.trainset, batch_size=args.batch_size, shuffle=True)
self.test_loader = DataLoader(self.testset, batch_size=args.batch_size, shuffle=False)
self.model_AtoB = (
AutoEncoder(
input_dim=args.mod1_dim,
out_dim=args.mod2_dim,
feat_dim=args.emb_dim,
hidden_dim=args.hid_dim,
dropout=args.dropout,
)
.to(self.device)
.float()
)
self.model_BtoA = (
AutoEncoder(
input_dim=args.mod2_dim,
out_dim=args.mod1_dim,
feat_dim=args.emb_dim,
hidden_dim=args.hid_dim,
dropout=args.dropout,
)
.to(self.device)
.float()
)
logging.info(self.model_AtoB)
logging.info(self.model_BtoA)
self.mse_loss = nn.MSELoss()
self.adv_loss = nn.BCELoss()
self.l1reg_loss = L1regularization(weight_decay=0.1)
self.eval_best = float("inf")
self.optimizer = optim.SGD(
[{"params": self.model_AtoB.parameters()}, {"params": self.model_BtoA.parameters()}],
lr=args.lr,
momentum=args.momentum,
weight_decay=5e-4,
)
step_size = self.args.lr_decay_epoch
self.scheduler = optim.lr_scheduler.MultiStepLR(
self.optimizer,
milestones=[step_size, step_size * 2, step_size * 3],
gamma=0.5,
verbose=True,
)
def load_checkpoint(self):
"""load pre-trained model checkpoint"""
if self.args.checkpoint is not None:
if os.path.isfile(self.args.checkpoint):
logging.info(f"loading checkpoint: {self.args.checkpoint}")
checkpoint = torch.load(self.args.checkpoint)
self.model_AtoB.load_state_dict(checkpoint["AtoB_state_dict"])
self.model_BtoA.load_state_dict(checkpoint["BtoA_state_dict"])
else:
logging.info(f"no resume checkpoint found at {self.args.checkpoint}")
def train_epoch(self, epoch):
"""training process of each epoch"""
self.model_AtoB.train()
self.model_BtoA.train()
total_rec_loss = 0.0
total_cycle_loss = 0.0
total_rec_loss_B = 0.0
print(f"Epoch {epoch+1:2d} / {self.args.epoch}")
for batch_idx, (mod1_seq, mod2_seq) in enumerate(self.train_loader):
mod1_seq = mod1_seq.to(self.device).float()
mod2_seq = mod2_seq.to(self.device).float()
# (1) A to B to A:
# (1-1) Reconstruction phase
mod2_rec = self.model_AtoB(mod1_seq)
mod1_rec = self.model_BtoA(mod2_rec)
# (1-2) Rec loss
rec_loss_B = self.mse_loss(mod2_rec, mod2_seq)
# (1-3) Cycle loss
cycle_loss_A = self.mse_loss(mod1_rec, mod1_seq)
# l1 regularization
l1reg_loss = (
self.l1reg_loss(self.model_AtoB) + self.l1reg_loss(self.model_BtoA)
) * self.args.reg_loss_weight
# ABA total loss
ABA_loss = (rec_loss_B + l1reg_loss) * 10 + cycle_loss_A
ABA_loss.backward()
self.optimizer.step()
# (2) B to A to B:
# (2-1) Reconstruction phase
mod1_rec2 = self.model_BtoA(mod2_seq)
mod2_rec2 = self.model_AtoB(mod1_rec2)
# (1-2) Rec loss
rec_loss_A = self.mse_loss(mod1_rec2, mod1_seq)
# (1-3) Cycle loss
cycle_loss_B = self.mse_loss(mod2_rec2, mod2_seq)
# l1 regularization
l1reg_loss = (
self.l1reg_loss(self.model_AtoB) + self.l1reg_loss(self.model_BtoA)
) * self.args.reg_loss_weight
# BAB total loss
BAB_loss = rec_loss_A + l1reg_loss + cycle_loss_B
self.optimizer.zero_grad()
BAB_loss.backward()
self.optimizer.step()
rec_loss = rec_loss_A + rec_loss_B
cycle_loss = cycle_loss_A + cycle_loss_B
total_rec_loss += rec_loss.item()
total_cycle_loss += cycle_loss.item()
total_rec_loss_B += rec_loss_B.item()
print(
f"Epoch {epoch+1:2d} [{batch_idx+1:2d} /{len(self.train_loader):2d}] | "
+ f"Mod2: {rec_loss_B.item():.4f} | "
+ f"Rec: {total_rec_loss / (batch_idx + 1):.4f} | "
+ f"Cyc: {total_cycle_loss / (batch_idx + 1):.4f} | "
+ f"ABA: {ABA_loss.item() :.4f} | "
+ f"BAB: {BAB_loss.item() :.4f} | "
+ f"L1: {l1reg_loss.item():.4f}"
)
self.scheduler.step()
train_rmse = np.sqrt(total_rec_loss_B / len(self.train_loader))
test_rmse = self.eval_epoch()
(self.eval_best, save_best) = (
(test_rmse, True) if test_rmse < self.eval_best else (self.eval_best, False)
)
logging.info(
f"Epoch {epoch+1:3d} / {self.args.epoch} | Train RMSE: {train_rmse:.4f} "
+ f"| Eval RMSE: {test_rmse:.4f} | Eval best: {self.eval_best:.4f}"
)
self.writer.add_scalar("train_rmse", train_rmse, epoch)
self.writer.add_scalar("rec_loss", rec_loss_B.item(), epoch)
self.writer.add_scalar("test_rmse", test_rmse, epoch)
# save checkpoint
if not self.args.dryrun:
filename = f"../weights/model_{self.args.exp_name}.pt"
print(f"saving weight to {filename} ...")
torch.save(
{
"epoch": epoch,
"AtoB_state_dict": self.model_AtoB.state_dict(),
"BtoA_state_dict": self.model_BtoA.state_dict(),
},
filename,
)
# A to B
filenameAtoB = f"../weights/model_AtoB_{self.args.exp_name}.pt"
print(f"saving AtoB weight to {filenameAtoB} ...")
torch.save(self.model_AtoB.state_dict(), filenameAtoB)
if save_best and epoch > self.args.save_best_from:
filename = f"../weights/model_best_{self.args.exp_name}.pt"
print(f"saving best weight to {filename} ...")
torch.save(
{
"epoch": epoch,
"AtoB_state_dict": self.model_AtoB.state_dict(),
"BtoA_state_dict": self.model_BtoA.state_dict(),
},
filename,
)
# A to B
filenameAtoB = f"../weights/model_best_AtoB_{self.args.exp_name}.pt"
print(f"saving best AtoB weight to {filenameAtoB} ...")
torch.save(self.model_AtoB.state_dict(), filenameAtoB)
def eval_epoch(self):
"""eval process of each epoch"""
self.model_AtoB.eval()
total_rec_loss = 0.0
for _, (mod1_seq, mod2_seq) in enumerate(self.test_loader):
mod1_seq = mod1_seq.to(self.device).float()
mod2_seq = mod2_seq.to(self.device).float()
mod2_rec = self.model_AtoB(mod1_seq)
rec_loss = self.mse_loss(mod2_rec, mod2_seq)
total_rec_loss += rec_loss.item()
test_rmse = np.sqrt(total_rec_loss / len(self.test_loader))
return test_rmse
def run(self):
"""run the whole training process"""
self.load_checkpoint()
print("start training ...")
for epoch in range(self.args.epoch):
self.train_epoch(epoch)
def eval(self):
"""eval the trained model on train / test set"""
print("start eval...")
self.model_AtoB.eval()
self.model_BtoA.eval()
logging.info(f"Mode: {self.args.mode}")
# train set rmse
if self.args.mode in ["gex2atac", "gex2adt", "adt2gex"]:
use_numpy = False
mod2_pred = []
else:
use_numpy = True
mod2_pred = np.zeros((1, self.args.mod2_dim))
for _, (mod1_seq, _) in enumerate(self.train_loader):
mod1_seq = mod1_seq.to(self.device).float()
mod2_rec = self.model_AtoB(mod1_seq)
if use_numpy:
mod2_rec = mod2_rec.data.cpu().numpy()
mod2_pred = np.vstack((mod2_pred, mod2_rec))
else:
mod2_pred.append(mod2_rec)
if use_numpy:
            mod2_pred = mod2_pred[1:]
else:
mod2_pred = torch.cat(mod2_pred).detach().cpu().numpy()
mod2_pred = csc_matrix(mod2_pred)
mod2_data = ad.read_h5ad(DATASET[self.args.mode]["train_mod2"])
if len(self.args.train_batch) != 0:
batch_data = mod2_data[mod2_data.obs["batch"] == "s0"] # empty anndata
for batch in self.args.train_batch:
batch_data = ad.concat(
(batch_data, mod2_data[mod2_data.obs["batch"] == batch]),
axis=0,
join="outer",
index_unique="-",
)
mod2_data = batch_data
mod2_sol = mod2_data.X
rmse_pred = rmse(mod2_sol, mod2_pred)
logging.info(f"Train RMSE: {rmse_pred:5f}")
# test set rmse
mod2_pred = []
for _, (mod1_seq, _) in enumerate(self.test_loader):
mod1_seq = mod1_seq.to(self.device).float()
mod2_rec = self.model_AtoB(mod1_seq)
mod2_pred.append(mod2_rec)
mod2_pred = torch.cat(mod2_pred).detach().cpu().numpy()
mod2_pred = csc_matrix(mod2_pred)
mod2_sol = ad.read_h5ad(DATASET[self.args.mode]["test_mod2"]).X
rmse_pred = rmse(mod2_sol, mod2_pred)
logging.info(f"Eval RMSE: {rmse_pred:5f}")
def save_AtoB(self):
""" save only one way model (A to B) from the whole cycle model """
checkpoint_name = (
self.args.checkpoint.replace("../", "").replace("weights/", "").replace("model_", "")
)
filename = f"../weights/model_AtoB_{checkpoint_name}"
print(f"saving AtoB weight to {filename} ...")
torch.save(self.model_AtoB.state_dict(), filename)
| itscassie/scJoint-neurips2021-modality-prediction | model/trainer/trainer_cycle.py | trainer_cycle.py | py | 13,131 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorboardX.SummaryWriter",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "utils.dataloader.S... |
14300366370 | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 26 20:12:34 2021
@author: RISHBANS
"""
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dense
#Initialize CNN
model = Sequential()
#Step - 1 Convolution
model.add(Convolution2D(32, (3,3), input_shape = (64, 64, 3), activation = 'relu'))
#Step - 2 MaxPooling
model.add(MaxPooling2D(pool_size=(2,2)))
#Step - 3 Flattening
model.add(Flatten())
#Full Connection
model.add(Dense(units = 128, activation = 'relu'))
#Output
model.add(Dense(units = 3, activation = 'softmax'))
model.compile(optimizer='adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
#Image Augmentation
from keras.preprocessing.image import ImageDataGenerator
train_gen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2,
zoom_range=0.2, horizontal_flip=True)
test_gen = ImageDataGenerator(rescale = 1./255)
train_data = train_gen.flow_from_directory('dataset_3/train', target_size = (64,64),
batch_size = 64, class_mode = 'categorical')
test_data = test_gen.flow_from_directory('dataset_3/val', target_size = (64,64),
batch_size = 64, class_mode = 'categorical')
# Fit CNN to training set and test it on test set
model.fit_generator(train_data, steps_per_epoch = 200,
epochs = 30, validation_data = test_data)
| edyoda/ML-with-Rishi | cnn_3.py | cnn_3.py | py | 1,545 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "keras.models.Sequential",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "keras.layers.Convolution2D",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "keras.layers.MaxPooling2D",
"line_number": 25,
"usage_type": "call"
},
{
"api_... |
6803651192 | import cx_Freeze
executables = [cx_Freeze.Executable("sk invader.py")]
cx_Freeze.setup(
name="Space Invader",
options={"build_exe": {"packages":["pygame"],
"include_files":["background.png","background2.jpg","fighter.png"]}},
executables = executables
) | subashal/SK-SPACE-INVADER | setup.py | setup.py | py | 298 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cx_Freeze.Executable",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "cx_Freeze.setup",
"line_number": 5,
"usage_type": "call"
}
] |
72573554664 | """
This file will contain the metrics of the framework
"""
import matplotlib.pyplot as plt
import numpy as np
import sklearn.metrics as mt
import wandb
class IOUMetric:
"""
Class to calculate mean-iou using fast_hist method
"""
def __init__(self, num_classes):
self.num_classes = num_classes
self.hist = np.zeros((num_classes, num_classes))
def _fast_hist(self, label_pred, label_true):
mask = (label_true >= 0) & (label_true < self.num_classes)
hist = np.bincount(
self.num_classes * label_true[mask].astype(int) +
label_pred[mask], minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes)
return hist
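    # Illustration (added): with num_classes = 3, a (true, pred) pair such as
    # (1, 2) maps to flat bin 3 * 1 + 2 = 5; np.bincount over these flat indices
    # followed by reshape(3, 3) therefore yields a confusion matrix whose rows
    # are ground-truth classes and whose columns are predicted classes.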
def add_batch(self, predictions, gts):
for lp, lt in zip(predictions, gts):
self.hist += self._fast_hist(lp.flatten(), lt.flatten())
def evaluate(self):
acc = np.diag(self.hist).sum() / self.hist.sum()
acc_cls = np.diag(self.hist) / self.hist.sum(axis=1)
acc_cls = np.nanmean(acc_cls)
iu = np.diag(self.hist) / (self.hist.sum(axis=1) + self.hist.sum(axis=0) - np.diag(self.hist))
mean_iu = np.nanmean(iu)
freq = self.hist.sum(axis=1) / self.hist.sum()
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
return acc, acc_cls, iu, mean_iu, fwavacc
class AverageMeter:
"""
Class to be an average meter for any average metric like loss, accuracy, etc..
"""
def __init__(self):
self.value = 0
self.avg = 0
self.sum = 0
self.count = 0
self.reset()
def reset(self):
self.value = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.value = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
@property
def val(self):
return self.avg
class AverageMeterList:
"""
Class to be an average meter for any average metric List structure like mean_iou_per_class
"""
def __init__(self, num_cls):
self.cls = num_cls
self.value = [0] * self.cls
self.avg = [0] * self.cls
self.sum = [0] * self.cls
self.count = [0] * self.cls
self.reset()
def reset(self):
self.value = [0] * self.cls
self.avg = [0] * self.cls
self.sum = [0] * self.cls
self.count = [0] * self.cls
def update(self, val, n=1):
for i in range(self.cls):
self.value[i] = val[i]
self.sum[i] += val[i] * n
self.count[i] += n
self.avg[i] = self.sum[i] / self.count[i]
@property
def val(self):
return self.avg
def cls_accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k / batch_size)
return res
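# Hedged usage sketch (added): `logits` would be a (batch, num_classes) torch
# tensor and `labels` a (batch,) tensor of class indices, e.g.
#   top1, top5 = cls_accuracy(logits, labels, topk=(1, 5))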
def compute_metrics(output, target):
ap = mt.average_precision_score(target, output)
f1 = mt.f1_score(target, output, average='weighted')
pr = mt.precision_score(target, output, average='weighted')
rc = mt.recall_score(target, output, average='weighted')
fpr, tpr, thresholds = mt.roc_curve(target, output)
roc_auc = mt.auc(fpr, tpr)
plt.ioff()
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic weighted')
plt.legend(loc="lower right")
dic = {"epoch/Average Precision (weighted)" : ap,
"epoch/F1 score (weighted) " : f1,
"epoch/Precision (weighted)" : pr,
"epoch/Recall (weighted)" : rc,
"epoch/Roc (weighted)" : wandb.Image(plt)}
plt.close()
return dic
| ArthurZucker/PAMAI | utils/metrics.py | metrics.py | py | 4,262 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.bincount",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.diag",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.diag",
"line_number":... |
73130978024 | from django.conf import settings
from django.conf.urls.static import static
from django.urls import path, include
from drf_spectacular.views import (
SpectacularAPIView,
SpectacularRedocView,
SpectacularSwaggerView,
)
from api.urls import urlpatterns as api_urls
# Common urls
# =============================================================================
urlpatterns = [
path(f"{settings.API_VERSION}/", include(api_urls)),
path(
f"{settings.API_VERSION}/schema/",
SpectacularAPIView.as_view(api_version=settings.API_VERSION),
name="schema",
),
path(
f"{settings.API_VERSION}/schema/swagger/",
SpectacularSwaggerView.as_view(url_name="schema"),
name="swagger",
),
path(
f"{settings.API_VERSION}/schema/redoc/",
SpectacularRedocView.as_view(url_name="schema"),
name="redoc",
),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
*urlpatterns,
*static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),
*static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),
path("__debug__/", include(debug_toolbar.urls)),
path("api-auth/", include("rest_framework.urls")),
]
| by-Exist/django-skeleton | backend/config/urls.py | urls.py | py | 1,252 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.API_VERSION",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 15,
"usage_type": "name"
},
{
"api_n... |
17796731524 | from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
import xml.etree.ElementTree as ET
from builtins import object
from textwrap import dedent
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import open_zip, temporary_dir
from pants.util.dirutil import safe_open
SHAPELESS_CLSFILE = 'org/pantsbuild/testproject/unicode/shapeless/ShapelessExample.class'
SHAPELESS_TARGET = 'testprojects/src/scala/org/pantsbuild/testproject/unicode/shapeless'
class BaseZincCompileIntegrationTest(object):
def create_file(self, path, value):
with safe_open(path, 'w') as f:
f.write(value)
def run_run(self, target_spec, config, workdir):
args = ['run', target_spec]
pants_run = self.run_pants_with_workdir(args, workdir, config)
self.assert_success(pants_run)
def test_scala_compile_jar(self):
jar_suffix = 'z.jar'
with self.do_test_compile(SHAPELESS_TARGET,
expected_files=[jar_suffix]) as found:
with open_zip(self.get_only(found, jar_suffix), 'r') as jar:
self.assertTrue(jar.getinfo(SHAPELESS_CLSFILE),
'Expected a jar containing the expected class.')
# TODO: this could be converted into a unit test!
def test_consecutive_compiler_option_sets(self):
"""Test that the ordering of args in compiler option sets are respected.
Generating a scalac profile requires two consecutive arguments, '-Yprofile-destination' and its
following argument, the file to write the CSV profile to. We want to be able to allow users to
successfully run scalac with profiling from pants, so we test this case in particular. See the
discussion from https://github.com/pantsbuild/pants/pull/7683.
"""
with temporary_dir() as tmp_dir:
profile_destination = os.path.join(tmp_dir, 'scala_profile.csv')
self.do_command(
'compile',
SHAPELESS_TARGET,
# Flags to enable profiling and statistics on target
config={
'compile.zinc': {
'default_compiler_option_sets': ['profile'],
'compiler_option_sets_enabled_args': {
'profile': [
'-S-Ystatistics',
'-S-Yhot-statistics-enabled',
'-S-Yprofile-enabled',
'-S-Yprofile-destination',
'-S{}'.format(profile_destination),
'-S-Ycache-plugin-class-loader:last-modified',
],
},
},
})
self.assertTrue(os.path.isfile(profile_destination))
def test_scala_empty_compile(self):
with self.do_test_compile('testprojects/src/scala/org/pantsbuild/testproject/emptyscala',
expected_files=[]):
# no classes generated by this target
pass
def test_scala_shared_sources(self):
clsname = 'SharedSources.class'
with self.do_test_compile('testprojects/src/scala/org/pantsbuild/testproject/sharedsources::',
expected_files=[clsname]) as found:
classes = found[clsname]
self.assertEqual(2, len(classes))
for cls in classes:
self.assertTrue(cls.endswith(
'org/pantsbuild/testproject/sharedsources/SharedSources.class'))
def test_scala_failure(self):
"""With no initial analysis, a failed compilation shouldn't leave anything behind."""
analysis_file = 'testprojects.src.scala.' \
'org.pantsbuild.testproject.compilation_failure.compilation_failure.analysis'
with self.do_test_compile(
'testprojects/src/scala/org/pantsbuild/testprojects/compilation_failure',
expected_files=[analysis_file],
expect_failure=True) as found:
self.assertEqual(0, len(found[analysis_file]))
def test_scala_with_java_sources_compile(self):
with self.do_test_compile('testprojects/src/scala/org/pantsbuild/testproject/javasources',
expected_files=['ScalaWithJavaSources.class',
'JavaSource.class']) as found:
self.assertTrue(
self.get_only(found, 'ScalaWithJavaSources.class').endswith(
'org/pantsbuild/testproject/javasources/ScalaWithJavaSources.class'))
self.assertTrue(
self.get_only(found, 'JavaSource.class').endswith(
'org/pantsbuild/testproject/javasources/JavaSource.class'))
def test_scalac_plugin_compile(self):
with self.do_test_compile(
'examples/src/scala/org/pantsbuild/example/scalac/plugin:other_simple_scalac_plugin',
expected_files=['OtherSimpleScalacPlugin.class', 'scalac-plugin.xml']) as found:
self.assertTrue(
self.get_only(found, 'OtherSimpleScalacPlugin.class').endswith(
'org/pantsbuild/example/scalac/plugin/OtherSimpleScalacPlugin.class'))
# Ensure that the plugin registration file is written to the root of the classpath.
path = self.get_only(found, 'scalac-plugin.xml')
self.assertTrue(path.endswith('/classes/scalac-plugin.xml'),
'plugin registration file `{}` not located at the '
'root of the classpath'.format(path))
# And that it is well formed.
root = ET.parse(path).getroot()
self.assertEqual('plugin', root.tag)
self.assertEqual('other_simple_scalac_plugin', root.find('name').text)
self.assertEqual('org.pantsbuild.example.scalac.plugin.OtherSimpleScalacPlugin',
root.find('classname').text)
def test_scalac_debug_symbol(self):
with self.do_test_compile(
'examples/src/scala/org/pantsbuild/example/scalac/plugin:simple_scalac_plugin',
expected_files=['SimpleScalacPlugin.class', 'scalac-plugin.xml'],
extra_args=['--compile-zinc-debug-symbols']):
pass
def test_zinc_unsupported_option(self):
with self.temporary_workdir() as workdir:
with self.temporary_cachedir() as cachedir:
# compile with an unsupported flag
pants_run = self.run_test_compile(
workdir,
cachedir,
'testprojects/src/scala/org/pantsbuild/testproject/emptyscala',
extra_args=[
'--compile-zinc-args=-recompile-all-fraction',
'--compile-zinc-args=0.5',
])
self.assert_success(pants_run)
# Confirm that we were warned.
self.assertIn('is not supported, and is subject to change/removal', pants_run.stdout_data)
def test_zinc_compiler_options_sets(self):
def test_combination(target, expect_success, extra_args=[]):
with self.temporary_workdir() as workdir:
with self.temporary_cachedir() as cachedir:
pants_run = self.run_test_compile(
workdir,
cachedir,
'testprojects/src/scala/org/pantsbuild/testproject/compilation_warnings:{}'.format(
target),
extra_args=extra_args)
if expect_success:
self.assert_success(pants_run)
else:
self.assert_failure(pants_run)
test_combination('fatal', expect_success=False)
test_combination('nonfatal', expect_success=True)
test_combination('fatal', expect_success=True,
extra_args=['--compile-zinc-compiler-option-sets-enabled-args={"fatal_warnings": ["-C-Werror"]}'])
test_combination('fatal', expect_success=False,
extra_args=['--compile-zinc-compiler-option-sets-disabled-args={"fatal_warnings": ["-S-Xfatal-warnings"]}'])
@unittest.expectedFailure
def test_soft_excludes_at_compiletime(self):
with self.do_test_compile('testprojects/src/scala/org/pantsbuild/testproject/exclude_direct_dep',
extra_args=['--resolve-ivy-soft-excludes'],
expect_failure=True):
# TODO See #4874. Should have failed to compile because its only dependency is excluded.
pass
def test_pool_created_for_fresh_compile_but_not_for_valid_compile(self):
with self.temporary_cachedir() as cachedir, self.temporary_workdir() as workdir:
# Populate the workdir.
first_run = self.run_test_compile(workdir, cachedir,
'testprojects/src/scala/org/pantsbuild/testproject/javasources')
self.assertIn('isolation-zinc-pool-bootstrap', first_run.stdout_data)
# Run valid compile.
second_run = self.run_test_compile(workdir, cachedir,
'testprojects/src/scala/org/pantsbuild/testproject/javasources')
self.assertNotIn('isolation-zinc-pool-bootstrap', second_run.stdout_data)
def test_source_compat_binary_incompat_scala_change(self):
with temporary_dir() as cache_dir, \
self.temporary_workdir() as workdir, \
temporary_dir(root_dir=get_buildroot()) as src_dir:
config = {
'cache.compile.zinc': {'write_to': [cache_dir], 'read_from': [cache_dir]},
}
srcfile = os.path.join(src_dir, 'org', 'pantsbuild', 'cachetest', 'A.scala')
srcfile_b = os.path.join(src_dir, 'org', 'pantsbuild', 'cachetest', 'B.scala')
buildfile = os.path.join(src_dir, 'org', 'pantsbuild', 'cachetest', 'BUILD')
self.create_file(buildfile,
dedent("""
scala_library(name='a',
sources=['A.scala'])
scala_library(name='b',
sources=['B.scala'],
dependencies=[':a'])
jvm_binary(name='bin',
main='org.pantsbuild.cachetest.B',
dependencies=[':b']
)
"""))
self.create_file(srcfile,
dedent("""
package org.pantsbuild.cachetest
object A {
def x(y: Option[Int] = None) = {
println("x");
}
}
"""))
self.create_file(srcfile_b,
dedent("""
package org.pantsbuild.cachetest
object B extends App {
A.x();
System.exit(0);
}
"""))
cachetest_bin_spec = os.path.join(os.path.basename(src_dir), 'org', 'pantsbuild',
'cachetest:bin')
cachetest_spec = cachetest_bin_spec
# Caches values A.class, B.class
self.run_run(cachetest_spec, config, workdir)
self.create_file(srcfile,
dedent("""
package org.pantsbuild.cachetest;
object A {
def x(y: Option[Int] = None, z:Option[Int]=None) = {
println("x");
}
}
"""))
self.run_run(cachetest_bin_spec, config, workdir)
def test_source_compat_binary_incompat_java_change(self):
with temporary_dir() as cache_dir, \
self.temporary_workdir() as workdir, \
temporary_dir(root_dir=get_buildroot()) as src_dir:
config = {
'cache.compile.zinc': {'write_to': [cache_dir], 'read_from': [cache_dir]},
'compile.zinc': {'incremental_caching': True },
}
srcfile = os.path.join(src_dir, 'org', 'pantsbuild', 'cachetest', 'A.java')
srcfile_b = os.path.join(src_dir, 'org', 'pantsbuild', 'cachetest', 'B.java')
buildfile = os.path.join(src_dir, 'org', 'pantsbuild', 'cachetest', 'BUILD')
self.create_file(buildfile,
dedent("""
java_library(name='cachetest',
sources=['A.java'])
java_library(name='b',
sources=['B.java'],
dependencies=[':a']
)
jvm_binary(name='bin',
main='org.pantsbuild.cachetest.B',
dependencies=[':b']
)
"""))
self.create_file(srcfile,
dedent("""package org.pantsbuild.cachetest;
class A {
public static void x() {
System.out.println("x");
}
}
"""))
self.create_file(srcfile_b,
dedent("""package org.pantsbuild.cachetest;
class B {
public static void main(String[] args) {
A.x();
}
}
"""))
cachetest_spec = os.path.join(os.path.basename(src_dir), 'org', 'pantsbuild',
'cachetest:cachetest')
self.run_run(cachetest_spec, config, workdir)
self.create_file(srcfile,
dedent("""package org.pantsbuild.cachetest;
class A {
public static int x() {
System.out.println("x");
return 0;
}
}
"""))
self.run_run(cachetest_spec, config, workdir)
| fakeNetflix/twitter-repo-pants | tests/python/pants_test/backend/jvm/tasks/jvm_compile/zinc/zinc_compile_integration_base.py | zinc_compile_integration_base.py | py | 13,451 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "builtins.object",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pants.util.dirutil.safe_open",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pants.util.contextutil.open_zip",
"line_number": 33,
"usage_type": "call"
},
{
"api_... |
42520787765 | from fastapi import FastAPI,Request
from fastapi import APIRouter
import aiomysql
from configuration.configuration import configuracion
from pydantic import BaseModel
from fastapi.param_functions import Body
from models.estudianteClass import estudianteClass
paralelo_router = APIRouter()
async def getConexion():
conn = await aiomysql.connect(
host=configuracion['development'].MYSQL_HOST,
user=configuracion['development'].MYSQL_USER,
password=configuracion['development'].MYSQL_PASSWORD,
db=configuracion['development'].MYSQL_DB,
charset='utf8',
cursorclass=aiomysql.DictCursor)
return conn
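# Hedged usage note (added): this router is meant to be mounted on an application
# object defined elsewhere, e.g.
#   app = FastAPI()
#   app.include_router(paralelo_router)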
@paralelo_router.get("/getParalelos")
async def getParalelos():
conn = await getConexion()
try:
usuarios=[]
async with conn.cursor() as cur:
await cur.execute("SELECT * FROM Paralelo")
resultado = await cur.fetchall()
for result in resultado:
usuario = {
'id_paralelo': result['id_paralelo'],
'nombre_paralelo': result['nombre_paralelo']}
usuarios.append(usuario)
return {'data': usuarios, 'accion': True}
except Exception as e:
return {'data': '', 'accion': False}
finally:
conn.close() | juanjoo0410/CS_Proyecto_API | controllers/paraleloController.py | paraleloController.py | py | 1,310 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "aiomysql.connect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "configuration.configuration.configuracion",
"line_number": 13,
"usage_type": "name"
},
{
"api_n... |
3289924772 | import sys
import pprint as _pprint_
from pyomo.core.kernel.numvalue import \
NumericValue
from pyomo.core.kernel.component_interface import \
(ICategorizedObject,
_ActiveComponentContainerMixin)
def pprint(obj, indent=0, stream=sys.stdout):
"""pprint a kernel modeling object"""
# ugly hack for ctypes
import pyomo.core.base
if not isinstance(obj, ICategorizedObject):
if isinstance(obj, NumericValue):
prefix = ""
if indent > 0:
prefix = (" "*indent)+" - "
stream.write(prefix+str(obj)+"\n")
else:
assert indent == 0
_pprint_.pprint(obj, indent=indent+1, stream=stream)
return
if not obj._is_component:
# a container but not a block
assert obj._is_container
prefix = ""
if indent > 0:
prefix = (" "*indent)+" - "
if isinstance(obj, _ActiveComponentContainerMixin):
stream.write(prefix+"%s: container(size=%s, active=%s, ctype=%s)\n"
% (str(obj), len(obj), obj.active, obj.ctype.__name__))
else:
stream.write(prefix+"%s: container(size=%s, ctype=%s)\n"
% (str(obj), len(obj), obj.ctype.__name__))
for c in obj.children():
pprint(c, indent=indent+1, stream=stream)
elif not obj._is_container:
prefix = ""
if indent > 0:
prefix = (" "*indent)+" - "
# not a block
clsname = obj.__class__.__name__
if obj.ctype is pyomo.core.base.Var:
stream.write(prefix+"%s: %s(value=%s, bounds=(%s,%s), domain_type=%s, fixed=%s, stale=%s)\n"
% (str(obj),
clsname,
obj.value,
obj.lb,
obj.ub,
obj.domain_type.__name__,
obj.fixed,
obj.stale))
elif obj.ctype is pyomo.core.base.Constraint:
stream.write(prefix+"%s: %s(active=%s, expr=%s)\n"
% (str(obj),
clsname,
obj.active,
str(obj.expr)))
elif obj.ctype is pyomo.core.base.Objective:
stream.write(prefix+"%s: %s(active=%s, expr=%s)\n"
% (str(obj), clsname, obj.active, str(obj.expr)))
elif obj.ctype is pyomo.core.base.Expression:
stream.write(prefix+"%s: %s(expr=%s)\n"
% (str(obj), clsname, str(obj.expr)))
elif obj.ctype is pyomo.core.base.Param:
stream.write(prefix+"%s: %s(value=%s)\n"
% (str(obj), clsname, str(obj.value)))
elif obj.ctype is pyomo.core.base.SOSConstraint:
stream.write(prefix+"%s: %s(active=%s, level=%s, entries=%s)\n"
% (str(obj),
clsname,
obj.active,
obj.level,
str(["(%s,%s)" % (str(v), w)
for v,w in zip(obj.variables,
obj.weights)])))
else:
assert obj.ctype is pyomo.core.base.Suffix
stream.write(prefix+"%s: %s(size=%s)\n"
% (str(obj.name), clsname,str(len(obj))))
else:
# a block
for i, block in enumerate(obj.blocks()):
if i > 0:
stream.write("\n")
stream.write((" "*indent)+"block: %s\n" % (str(block)))
ctypes = block.collect_ctypes(descend_into=False)
for ctype in sorted(ctypes,
key=lambda x: str(x)):
stream.write((" "*indent)+'ctype='+ctype.__name__+" declarations:\n")
for c in block.children(ctype=ctype):
if ctype is pyomo.core.base.Block:
if c._is_component:
stream.write((" "*indent)+" - %s: block(children=%s)\n"
% (str(c), len(list(c.children()))))
else:
stream.write((" "*indent)+" - %s: block_container(size=%s)\n"
% (str(c), len(list(c))))
else:
pprint(c, indent=indent+1, stream=stream)
| igorsowa9/vpp | venv/lib/python3.6/site-packages/pyomo/core/kernel/util.py | util.py | py | 4,337 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "sys.stdout",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pyomo.core.kernel.component_interface.ICategorizedObject",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "pyomo.core.kernel.numvalue.NumericValue",
"line_number": 17,
... |
30758316388 | import pathlib
def create_directory(path: str) -> None:
directory = pathlib.Path(path)
if directory.exists():
        raise FileExistsError(f"{path} already exists")
directory.mkdir()
def create_file(path: str, content: str = None) -> None:
file = pathlib.Path(path)
if file.exists():
        raise FileExistsError(f"{path} already exists")
file.touch()
if content:
with file.open(mode="w") as f:
f.write(content)
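# Hedged usage sketch (added): scaffold a tiny package; the paths are illustrative only.
if __name__ == "__main__":
    create_directory("demo_pkg")
    create_file("demo_pkg/__init__.py")
    create_file("demo_pkg/app.py", "print('hello')\n")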
| jonarsli/flask-restapi | flask_restapi/tool/core.py | core.py | py | 456 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "call"
}
] |
73118842984 | from datetime import datetime, timedelta
def work_day(start, end):
work = 0
curr_date = start
while curr_date <= end:
if curr_date.weekday() < 5:
work += 1
curr_date += timedelta(days=1)
return work
start_date, end_date = datetime(2023, 1, 1), datetime(2023, 12, 31)
print(f"В этом промежутке времни надо было отработать минимум: {work_day(start_date, end_date)} дней")
| IlyaOrlov/PythonCourse2.0_September23 | Practice/achernov/module_10/task_2.py | task_2.py | py | 468 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "datetime.timedelta",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 14,
"usage_type": "call"
}
] |
4871120057 | import urllib.request
from time import sleep
import json
from beautifulscraper import BeautifulScraper
scraper = BeautifulScraper()
years =[x for x in range(2009,2018)]
weeks = [x for x in range(1,18)]
stype = "REG"
gameids =[]
f = open("nfldata.json", "w")
#f.write(json.dumps("REG"))
#loops through to get the game ids and write them to the json file
for year in years:
for week in weeks:
url="http://www.nfl.com/schedules/%d/%s%s"%(year,stype,str(week))
page = scraper.go(url)
divs = page.find_all('div',{"class":"schedules-list-content"})
for div in divs:
#print (div['data-gameid'])
#gameids=(div['data-gameid']), year, stype
gameids.append(div['data-gameid'])
#print (gameids)
#print (url)
#print (div['data-gameid'])
stype = "POST"
#f.write(json.dumps("POST"))
#loops through to get the game ids and write them to the json file
for year in years:
for week in weeks:
url="http://www.nfl.com/schedules/%d/%s%s"%(year,stype,str(week))
page = scraper.go(url)
divs = page.find_all('div',{"class":"schedules-list-content"})
for div in divs:
#print (div['data-gameid'])
#gameids=(div['data-gameid']), year, stype
gameids.append(div['data-gameid'])
#print (gameids)
#print (url)
# write the collected game ids once, after both seasons have been scraped
f.write(json.dumps(gameids))
f.close()
#gets data from the specified link and creates a json file with the data
for gameid in gameids:
#print (gameid)
urllib.request.urlretrieve("http://www.nfl.com/liveupdate/game-center/%s/%s_gtd.json"%(gameid,gameid),gameid+'.json')
sleep(.02)
#data = json.loads(url.read().decode())
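# A more defensive variant of the download loop above (sketch, not from the original
# script): skip games whose stats JSON is missing instead of crashing mid-run.
#
# import urllib.error
# for gameid in gameids:
#     try:
#         urllib.request.urlretrieve(
#             "http://www.nfl.com/liveupdate/game-center/%s/%s_gtd.json" % (gameid, gameid),
#             gameid + '.json')
#     except urllib.error.HTTPError:
#         continue
#     sleep(.02)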
| Jamada623/4883-SWTools-Joseph | Assignments/A03/scrape_game_ids_data_joseph.py | scrape_game_ids_data_joseph.py | py | 1,925 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "beautifulscraper.BeautifulScraper",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "urllib.request... |
17156637951 | import torch
from mapping.model_training.transformer_training_nsp import train_nsp
from mapping.model_training.training_data_utils import get_next_sentence_df
from mapping.mapping_models.data_fit_models.masking_lm.bert_masking_trained_mtl import BertMaskingTrainedMtlMapper
from utils.bert_utils import get_lm_embeddings
class BertNspTrainedMtlMapper(BertMaskingTrainedMtlMapper):
def get_embeds(self):
test_df = self.get_dataset(dataset_name=self.test_dataset, app_name=self.app_name)
# We get the embeddings based on the first token position output from the BERT model.
        # This is unlike other embedding methods, where we take an average.
# This is because the model is trained to predict next sentence based on the first token position output.
all_embeddings = get_lm_embeddings(self, test_df, f"{self.get_mapping_name()}", use_first_token_only = True)
return all_embeddings, test_df
def set_parameters(self):
self.model_name = 'bert-base-uncased'
self.max_length = 128
self.batch_size = 64
self.eval_batch_size = 256
self.lr = 5e-5
self.eps = 1e-6
self.wd = 0.01
self.epochs = 100
self.patience = 2
def train_model(self, model_path):
train_df = self.get_training_data()
# Get a dataset that contains two pieces of text in every observation. Half of pairs are matched, half are not.
train_df = get_next_sentence_df(train_df)
# Save this df for debugging purposes
self.save_preprocessed_df(train_df, f"{self.test_dataset}_{self.app_name}")
params = {
"lr": self.lr,
"eps": self.eps,
"wd": self.wd,
"epochs": self.epochs,
"patience": self.patience,
"model_name": self.model_name,
"max_length": self.max_length,
"batch_size": self.batch_size,
}
model = train_nsp(train_df, params, self.device)
torch.save(model.state_dict(), model_path)
def get_mapping_name(self):
return f"bert_nsp_trained_mtl"
| Peter-Devine/Feedback-Mapping | mapping/mapping_models/data_fit_models/nsp_lm/bert_nsp_trained_mtl.py | bert_nsp_trained_mtl.py | py | 2,123 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mapping.mapping_models.data_fit_models.masking_lm.bert_masking_trained_mtl.BertMaskingTrainedMtlMapper",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "utils.bert_utils.get_lm_embeddings",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "mapping... |
41234096273 | from django.utils import timezone
import os
from rest_framework_simplejwt.tokens import RefreshToken
# url for user's image.
def upload_img_url(instance, filename):
date = timezone.now()
path = os.path.join("user-pic", instance.username, str(date.year), str(date.month), str(date.day), filename)
return path
# Generate jwt tokens (refresh, access)
def get_token(user):
refresh = RefreshToken.for_user(user)
return {
"refresh": str(refresh),
"access": str(refresh.access_token),
}
def convert_to_seconds(timedelta):
    # timedelta.seconds only holds the sub-day component (0..86399); add in the
    # full days so mixed durations (days + seconds) convert correctly.
    return timedelta.days * 24 * 3600 + timedelta.seconds
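# Worked example only (the caller must pass a datetime.timedelta instance):
# convert_to_seconds(timedelta(days=1, seconds=30)) == 86430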
| shaikhAhmed232/Socialogram-Backend | accounts/utils.py | utils.py | py | 682 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.utils.timezone.now",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
25918861038 | import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
# Capacitor
capacitance = 3400 # [=] farads
esr = 0.00013 # [=] ohms
initial_voltage = 2.85 # [=] volts
# Railgun, projectile, leads, construction
w = 0.00635 * 2 # width of the rails [=] meters
h = 0.00635 * 2 # height of rail [=] meters
l = 1 # length of the rails [=] meters
d = 0.00635 # separation of the rails and width of the bar [=] meters
lp = 0.00635 # length of projectile [=] meters
hp = 0.00635 # height of projectile [=] meters
p_material = -1 # 0 for copper, -1 for aluminum, else specify 'mass' and 'projectile_resistance' values below
lc = 0.3048 / 3 # length of conductor (both leads added) [=] meters
dc = 0.018288 # diameter of connector wire [=] meters
angle = 0 # angle of launch (from ground) [=] degrees
initial_velocity = 1 # meters per second
# Physical constants (should not need to change)
mu0 = (4 * np.pi) * (10 ** -7) # magnetic constant [=] newtons per ampere squared
copper_resistivity = 1.68 * (10 ** -8) # [=] ohms * meters
aluminum_resistivity = 2.65 * (10 ** -8) # [=] ohms * meters
cross_c = np.pi * (dc / 2) ** 2 # [=] meters squared
connection_resistance = copper_resistivity * lc / cross_c # [=] ohms
copper_density = 8950  # [=] kg per cubic meter (kg, so that mass * 9.81 below is in newtons)
aluminum_density = 2710  # [=] kg per cubic meter
friction_coefficient = 1.4 # friction coefficient of copper [=] newtons / newtons (no units)
if p_material == 0:
    mass = lp * d * hp * copper_density  # [=] kg
    projectile_resistance = copper_resistivity * d / (hp * lp)  # resistance of copper projectile [=] ohms
elif p_material == -1:
    mass = lp * d * hp * aluminum_density  # [=] kg
    projectile_resistance = aluminum_resistivity * d / (hp * lp)  # resistance of aluminum projectile [=] ohms
else:
    mass = "specify here"  # for other material [=] kg
    projectile_resistance = "specify"  # for other material [=] ohms
inductance_gradient = 3 * (10 ** -7) # measured value [=] henris / meters
inductance_leads = 4.46 * (10 ** -7) # measured value [=] henris
static_resistance = esr + projectile_resistance + connection_resistance # [=] ohms
resistance_gradient = 2 * aluminum_resistivity / (w * h) # resistance coefficient of rails [=] ohms / position
weight = mass * 9.81 # [=] newtons
friction_force = friction_coefficient * weight * np.cos(np.radians(angle)) # [=] newtons
capacitor_energy = 1 / 2 * capacitance * initial_voltage ** 2 # [=] joules
initial_current_rate = initial_voltage / inductance_leads # derivative of current with respect to time
def closest_value(input_list, input_value):
arr = np.asarray(input_list)
i = (np.abs(arr - input_value)).argmin()
return i
def dydt(y, t):
position, velocity, current, current_rate, voltage = y
acceleration = (1 / 2 * inductance_gradient * current ** 2 - friction_force) / mass
d_voltage = 1 / capacitance * current
d_resistance = static_resistance * current_rate
d_resistance_gradient = resistance_gradient * (current * velocity + position * current_rate)
d_EMF = inductance_gradient * (current * acceleration + velocity * current_rate)
# d_inductance = inductance_leads * 'current_rate_rate'
# d_inductance_gradient = inductance_gradient * (position * 'current_rate_rate' + current_rate * velocity)
# 'd_inductance' and 'd_inductance_gradient' contain the term 'current_rate_rate' which must be solved for, and
# they are included in the final equation via the addition of 'inductance1' and multiplication of 'inductance2'
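    # In other words (a sketch of the rearrangement, as read from the code): differentiating
    # the circuit equation V = (L_leads + L'*x)*dI/dt + I*R(x) + EMF once in t and solving
    # for d2I/dt2 gives
    #   d2I/dt2 = -(dV/dt + dR_terms/dt + dEMF/dt + L'*v*dI/dt) / (L_leads + L'*x)
    # which is exactly inductance2 * (inductance1 + ...) computed below.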
inductance1 = inductance_gradient * current_rate * velocity
inductance2 = -1 / (inductance_leads + inductance_gradient * position)
if position < l and velocity > 0:
current_rate_rate = inductance2 * (inductance1 + d_voltage + d_resistance + d_resistance_gradient + d_EMF)
else:
acceleration = 0
current_rate_rate = 0
current_rate = 0
current = 0
return velocity, acceleration, current_rate, current_rate_rate, -current / capacitance
length = 3
time = np.linspace(0, length, 100000)
y0 = [0.0, initial_velocity, 0, initial_current_rate, initial_voltage]
y1 = odeint(dydt, y0, time)
interval = 1.2 * length * (closest_value(y1[:, 0], l) / len(y1[:, 0])) # used for graphing, to set correct x-axis window
time = np.linspace(0, interval, 1000)
y1 = odeint(dydt, y0, time)
final_velocity = (y1[:, 1][-1])
projectile_energy = 1 / 2 * mass * (final_velocity - initial_velocity) ** 2
capacitor_energy_used = capacitor_energy - (1 / 2 * capacitance * y1[:, 4][-1] ** 2) # charge lost by capacitor
energy_efficiency = projectile_energy / capacitor_energy_used * 100 # percentage of energy transferred to projectile
total_energy_efficiency = projectile_energy / capacitor_energy
if final_velocity <= 0:
energy_efficiency = 0
projectile_energy = 0
total_energy_efficiency = 0
print('final_velocity =', round(final_velocity, 4), 'm/s')
print('energy_efficiency =', round(energy_efficiency, 4), "%")
print('total_energy_efficiency =', round(total_energy_efficiency, 4), '%')
print("projectile_energy =", projectile_energy, "joules")
# plt.plot(time, y1[:, 0], 'b', label='position')
plt.plot(time, y1[:, 1], 'r', label='velocity')
# plt.plot(time, y1[:, 2], 'g', label='current')
# plt.plot(time, y1[:, 3], 'o', label='current_rate')
plt.plot(time, y1[:, 4], 'k', label='voltage')
plt.xlim(0, interval)
plt.legend(loc='best')
plt.xlabel('t')
plt.grid()
plt.show()
| WhiteRabbit2006/Railgun | Calculator.py | Calculator.py | py | 5,512 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.pi",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_numbe... |
9996315350 | import re
from datetime import datetime
from async_lru import alru_cache
from discord import Permissions, Embed
from discord.utils import escape_markdown
import utils.discord
import utils.misc
import utils.tableBuilder
import wrappers.api.minecraft
import wrappers.api.wynncraft.v3.player
from handlers.commands import command
from niatypes.dataTypes import CommandEvent, MinecraftPlayer
from wrappers import botConfig, minecraftPlayer
from wrappers.api.wynncraft.v3.types import PlayerStats
_USERNAME_RE = re.compile(r'[0-9A-Za-z_]+')
_UUID_RE = re.compile(r'[0-9a-f]+')
@alru_cache(ttl=60)
async def _create_player_embed(p: MinecraftPlayer) -> Embed | None:
stats: PlayerStats = await wrappers.api.wynncraft.v3.player.stats(utils.misc.format_uuid(p.uuid), full_result=True)
rank = stats.rank if stats.rank != "Player" \
else stats.supportRank.capitalize() if stats.supportRank is not None \
else "Player"
if stats.online:
seen = f"Online on {stats.server}"
else:
last_join = datetime.fromisoformat(stats.lastJoin)
seen = f"{utils.misc.get_relative_date_str(last_join, days=True, hours=True, minutes=True, seconds=True)} ago"
if stats.guild is None:
guild = "None"
else:
guild = f"{stats.guild.rank.capitalize()} in **[{stats.guild.name}](https://wynncraft.com/stats/guild/{stats.guild.name.replace(' ', '%20')})**"
description = f"## [{rank}] {escape_markdown(p.name)}\n" \
f"``{stats.uuid}``\n" \
f"⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯\n" \
f"**Seen:** {seen}\n" \
f"**Guild:** {guild}\n" \
f"**Joined:** {stats.firstJoin}\n" \
f"**Total Playtime:** {stats.playtime} Hours\n" \
f"**Total Level:** {stats.globalData.totalLevel}\n" \
f"**Total Wars:** {stats.globalData.wars}\n" \
f"**Total Mobs Killed:** {stats.globalData.killedMobs}\n" \
f"**Total Chests Opened:** {stats.globalData.chestsFound}\n" \
f"**Total Quests Completed:** {stats.globalData.completedQuests}\n" \
f"⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯"
raids_tb = utils.tableBuilder.TableBuilder.from_str("l r")
raids_tb.add_row("Total", str(stats.globalData.raids.total))
for raid_name, amount in stats.globalData.raids.list.items():
raids_tb.add_row(raid_name, str(amount))
raids = f"**Total Raid Completions**\n" \
f">>> ```\n" \
f"{raids_tb.build()}\n" \
f"```"
dungeons_tb = utils.tableBuilder.TableBuilder.from_str("l r")
dungeons_tb.add_row("Total", str(stats.globalData.dungeons.total))
for dungeon_name, amount in stats.globalData.dungeons.list.items():
dungeons_tb.add_row(dungeon_name.replace("Corrupted", "Cor."), str(amount))
dungeons = f"**Total Dungeon Completions**\n" \
f">>> ```\n" \
f"{dungeons_tb.build()}\n" \
f"```"
embed = Embed(
title="",
description=description,
color=botConfig.DEFAULT_COLOR,
)
embed.set_thumbnail(url=f"https://visage.surgeplay.com/bust/350/{stats.uuid}?y=-40") \
.add_field(name="", value=raids, inline=False) \
.add_field(name="", value=dungeons, inline=False) \
.add_field(name="", value="⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯\n**Characters**", inline=False)
if stats.characters is not None:
char_count = 0
for char_id, char in stats.characters.items():
if char.nickname is None:
char_name = char.type.capitalize()
else:
char_name = f"{escape_markdown(char.nickname)} ({char.type.capitalize()})"
embed.add_field(
name="",
value=f"**[{char_name}](https://wynncraft.com/stats/player/{stats.uuid}?class={char_id})**\n" \
f"Combat: {char.level}\n" \
f"Total: {char.totalLevel}",
inline=True
)
char_count += 1
for i in range(char_count % 3):
embed.add_field(name="", value="", inline=True)
return embed
class PlayerCommand(command.Command):
def __init__(self):
super().__init__(
name="player",
aliases=("p", "stats"),
usage=f"player <username|uuid>",
description="See the wynncraft stats of the provided player.",
req_perms=Permissions().none(),
permission_lvl=command.PermissionLevel.ANYONE
)
async def _execute(self, event: CommandEvent):
async with event.channel.typing():
if len(event.args) < 2:
await utils.discord.send_error(event.channel, "Please specify a username or uuid!")
return
user_str = event.args[1]
if len(user_str) <= 16 and _USERNAME_RE.fullmatch(user_str):
p = await minecraftPlayer.get_player(username=user_str)
else:
user_str = user_str.replace("-", "").lower()
if len(user_str) == 32 and _UUID_RE.fullmatch(user_str):
p = await minecraftPlayer.get_player(uuid=user_str)
else:
await utils.discord.send_error(event.channel, f"Couldn't parse player ``{event.args[1]}``.")
return
if p is None:
await utils.discord.send_error(event.channel, f"Couldn't find player ``{event.args[1]}``.")
return
try:
embed = await _create_player_embed(p)
await event.channel.send(embed=embed)
except wrappers.api.wynncraft.v3.player.UnknownPlayerException:
await utils.discord.send_error(event.channel, f"{escape_markdown(p.name)} is not a wynncraft player!")
| Freeder1k/NiaBot | handlers/commands/prefixed/playerCommand.py | playerCommand.py | py | 6,131 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "niatypes.dataTypes.MinecraftPlayer",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "wrappers.api... |
21626078429 | import configparser
import os
# read the source under [browser] item
def get_browser(name):
global base_path, cf_path
base_path = os.path.dirname(os.getcwd())
cf_path = os.path.join(base_path, "config", "config.ini")
cf = configparser.ConfigParser()
cf.read(cf_path)
return cf.get('browser', name)
def get_url():
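    # Note: relies on the module-level cf_path set by a prior get_browser() call.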
cp = configparser.ConfigParser()
cp.read(cf_path)
return cp.get('browser', 'Url')
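
# Hypothetical usage (option names assumed, not verified against config.ini):
# browser = get_browser('browserName')
# url = get_url()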
| litongtongx/test | common/readconfig.py | readconfig.py | py | 435 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number":... |
from pathlib import Path
from typing import Iterator, Tuple

from torch import Tensor
from torch.utils.data import IterableDataset

# NOTE: random_labelled_image is an external helper assumed to exist elsewhere in this
# repo; the extra constructor arguments below are an assumption made so the previously
# undefined names (num_classes, C, W, H) are actually bound.


class SceneDataset(IterableDataset):
    """
    A dataset representing an infinite stream of noise images of specified dimensions.
    """
    def __init__(self, path: Path, num_classes: int, C: int, W: int, H: int):
        """
        :param path: Root directory of the scene data
        :param num_classes: Number of classes (labels)
        :param C: Number of channels per image
        :param W: Image width in pixels
        :param H: Image height in pixels
        """
        super().__init__()
        self.path = path
        self.num_classes = num_classes
        self.image_dim = (C, W, H)

    def __iter__(self) -> Iterator[Tuple[Tensor, int]]:
        """
        :return: An iterator providing an infinite stream of random labelled images.
        """
        while True:
            X, y = random_labelled_image(self.image_dim, self.num_classes)
            yield X, y
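
# Hypothetical usage (random_labelled_image must be importable for iteration to work):
# ds = SceneDataset(Path("scenes"), num_classes=10, C=3, W=64, H=64)
# X, y = next(iter(ds))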
| manorzvi/VoteNet | data/dataset.py | dataset.py | py | 746 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.utils.data.IterableDataset",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "name"
}
] |
5915682093 | import argparse
import pickle as pk
import re
import signal
import time
import atexit
import traceback
import warnings
import sys
import os
import json
from collections import defaultdict
from threading import Thread
import pandas as pd
import psycopg2 as pg
from psycopg2.extras import RealDictCursor
from selenium.webdriver import DesiredCapabilities
import config
warnings.filterwarnings("ignore")
import numpy as np
import pandas.io.sql as psql
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException, ElementClickInterceptedException
from selenium.webdriver.firefox.options import Options
from selenium.common.exceptions import StaleElementReferenceException
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
print(os.path.abspath(os.getcwd()))
from status_codes import *
# sys.path.append('Customer_data')
# from Customer_data.Customers import customers
merchant_id = config.merchant_id
# num_tabs = config.num_tabs
num_tabs = None
timeout_tab = config.timeout_tab
timeout_restart = config.timeout_end
price_step = config.price_step
kaspi_login = None
kaspi_password = None
my_link = None
my_id = None
driver = None
elem = []
curr_order_link = 'None'
city_inited = False
tab_status = None
# tab_status = pd.DataFrame({'idx': [0] * num_tabs,
# 'action': ['None'] * num_tabs,
# 'status': ['None'] * num_tabs,
# 'start_t': [time.time()] * num_tabs,
# 'strings': [''] * num_tabs})
tab_timeout_on = False
tab_timeout_dict = []
def init_vars():
global driver, elem, curr_order_link, city_inited, tab_status, tab_timeout_dict
driver = None
elem = []
curr_order_link = 'None'
city_inited = False
tab_status = pd.DataFrame({'idx': [0] * num_tabs,
'action': ['None'] * num_tabs,
'status': ['None'] * num_tabs,
'start_t': [time.time()] * num_tabs,
'strings': [''] * num_tabs})
for _ in range(num_tabs):
tab_timeout_dict.append({'prev': 0, 'last_change': time.time()})
def exit_handler():
global driver, db
try:
db.close()
print('closed db')
except:
print('db already closed')
try:
driver.quit()
print('closed driver')
except:
print('driver already closed')
os.kill(os.getpid(), signal.SIGKILL)
atexit.register(exit_handler)
def create_driver():
global driver
# fp = webdriver.FirefoxProfile()
# fp.set_preference("dom.popup_maximum", 0)
# fp.set_preference("browser.link.open_newwindow", 0)
# fp.set_preference("browser.link.open_newwindow.restriction", 0)
# fp.set_preference("browser.tabs.remote.autostart", False)
# fp.set_preference("browser.tabs.remote.autostart.1", False)
# fp.set_preference("browser.tabs.remote.autostart.2", False)
caps = DesiredCapabilities().FIREFOX
caps["pageLoadStrategy"] = 'none'
options = Options()
options.headless = config.headless
options.page_load_strategy = 'none'
driver = webdriver.Firefox(options=options,
# firefox_profile=fp,
capabilities=caps,
# executable_path='/main/drivers/geckodriver'
)
def open_new_tabs():
for i in range(num_tabs - 1):
driver.switch_to.new_window('TAB')
write_logs_out('DEBUG', OPEN_TABS_SUCCESS, f'Opened new {num_tabs} tabs')
def login():
global driver
try:
driver.get("https://kaspi.kz/mc/#/login")
WebDriverWait(driver, 15).until(EC.element_to_be_clickable((By.CLASS_NAME, 'button.is-primary')))
time.sleep(1)
el = driver.find_element(By.XPATH, "//span[text()='Email']")
el.click()
time.sleep(1)
el = driver.find_elements_by_name('username')[1]
el.send_keys(kaspi_login)
time.sleep(1)
el = WebDriverWait(driver, 15).until(EC.element_to_be_clickable((By.CLASS_NAME, 'button.is-primary')))
el.click()
time.sleep(1)
el = driver.find_elements_by_class_name('text-field')[2]
el.send_keys(kaspi_password)
el = WebDriverWait(driver, 15).until(EC.element_to_be_clickable((By.CLASS_NAME, 'button.is-primary')))
el.click()
WebDriverWait(driver, 1).until(EC.element_to_be_clickable((By.CLASS_NAME, 'menu__item--text')))
return True
except:
return False
# def login():
# global driver
# while True:
# driver.get("https://kaspi.kz/merchantcabinet/login#/offers")
# success1 = wait_till_load_by_text('Вход для магазинов')
# while not success1:
# driver.close()
# # driver = webdriver.Firefox(firefox_profile=fp)
# create_driver()
# driver.get("https://kaspi.kz/merchantcabinet/login#/offers")
# success1 = wait_till_load_by_text('Вход для магазинов')
#
# # fill_by_name('username', 'bitcom-90@mail.ru')
# # fill_by_name('username', 'Kuanishbekkyzy@mail.ru')
# # fill_by_name('password', 'Nurislam177@')
#
# fill_by_name('username', kaspi_login)
# fill_by_name('password', kaspi_password)
# press_enter()
# success2 = wait_till_load_by_text('Заказы')
# if success2:
# break
# else:
# driver.close()
# # driver = webdriver.Firefox(firefox_profile=fp)
# create_driver()
# # while not success:
# # driver.close()
# # driver = webdriver.Firefox(firefox_profile=fp)
# # driver.get("https://kaspi.kz/merchantcabinet/login#/offers")
# # success = wait_till_load_by_text('Заказы')
#
#
# # if list(select_by_attr('a', 'data-city-id', "750000000")):
# # click_mouse()
def init_city():
# driver.get(mini_orders_all[0].iloc[0].ORDER_LINK)
driver.get('https://kaspi.kz/shop/p/almacom-ach-18as-belyi-montazhnyi-komplekt-101215645/?c=750000000')
city_el = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, f"//a[@data-city-id=750000000]")))
if len(elem) > 0:
elem.pop()
elem.append(city_el)
click_mouse()
def wait_till_load_by_text(text, t=5.0):
# driver.find_elements_by_xpath(f"//*[contains(text(), '{text}')]")
trials = 1
for i in range(trials):
try:
myElem = WebDriverWait(driver, t).until(EC.text_to_be_present_in_element((By.CLASS_NAME, 'layout'), text))
return True
except TimeoutException:
driver.back()
# time.sleep(1)
driver.forward()
# driver.refresh()
# exit_handler()
return False
def fill_by_name(name, fill):
global elem
if len(elem) > 0:
elem.pop()
elem.append(driver.find_element_by_name(name))
elem[0].clear()
elem[0].send_keys(fill)
def select_by_attr(tag_name, attr_name, attr):
global elem
elems = driver.find_elements(By.XPATH, f"//{tag_name}[@{attr_name}='{attr}']")
for el in elems:
if len(elem) > 0:
elem.pop()
elem.append(el)
yield el
def select_by_tag(name):
global elem
elems = driver.find_elements_by_tag_name(name)
for el in elems:
if len(elem) > 0:
elem.pop()
elem.append(el)
yield el
def select_by_class(name):
global elem
elems = driver.find_elements_by_class_name(name)
for el in elems:
if len(elem) > 0:
elem.pop()
elem.append(el)
yield el
def press_enter():
elem[0].send_keys(Keys.RETURN)
def click_mouse():
elem[0].click()
def write_logs_out(lvl, status_code, text):
global curr_order_link
print('_______________________')
print(curr_order_link)
print(lvl)
print(text)
print()
if write_db:
cursor = db.cursor()
cursor.execute(f"INSERT INTO LOGS (MERCHANT_ID, ORDER_LINK, LOG_LEVEL, LOG_STATUS, LOG_TEXT) "
"VALUES (%s, %s, %s, %s, %s) ", (merchant_id, curr_order_link, lvl, status_code, text))
db.commit()
cursor.close()
def page_is_loaded():
try:
WebDriverWait(driver, 0.2).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'h2')))
# WebDriverWait(driver, 0.2).until(EC.element_to_be_clickable((By.CLASS_NAME, 'topbar__logo')))
return True
except:
return False
# def get_price_rows():
# prices = {}
# no_fail = False
# # while True:
# while not no_fail:
# try:
# WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.CLASS_NAME, 'sellers-table__buy-cell-button')))
# no_fail = True
# except TimeoutException:
# write_logs_out('DEBUG', SELLERS_TABLE_ERROR, f'Seller table load fail:\n {traceback.format_exc()}')
# return False, None
# # try:
# # WebDriverWait(driver, 2).until(
# # EC.text_to_be_present_in_element((By.CLASS_NAME, 'layout'), 'К сожалению, в настоящее время'))
# # write_logs_out('No seller')
# # print('refresh 1')
# # driver.refresh()
# # except TimeoutException:
# # write_logs_out(traceback.format_exc())
# # print('refresh 2')
# # driver.refresh()
# # raise Exception('MANUAL EXCEPTION 1')
# was_exception = False
# rows_scanned = False
# while not rows_scanned:
# if was_exception:
# write_detailed_logs = True
# else:
# write_detailed_logs = False
# was_exception = False
# for j, row in enumerate(select_by_tag('tr')):
# try:
# if write_detailed_logs:
# write_logs_out('DEBUG', DETAILED_LOGS_PAGENUM, f'Row no {j}')
# rr = row.get_attribute('innerText')
# if write_detailed_logs:
# write_logs_out('DEBUG', DETAILED_LOGS_PAGENUM, f'Row no {j}: innerText={rr}')
# inner_html = row.get_attribute('innerHTML')
# if write_detailed_logs:
# write_logs_out('DEBUG', DETAILED_LOGS_PAGENUM, f'Row no {j}: innerHTML={inner_html}')
# if rr is None or inner_html is None:
# raise StaleElementReferenceException('rr or inner_html is None')
# # print(rr)
# except StaleElementReferenceException:
# was_exception = True
# write_logs_out('DEBUG', PRICE_PAGE_STALE, traceback.format_exc())
# break
# if 'Выбрать' not in rr:
# if write_detailed_logs:
# write_logs_out('DEBUG', DETAILED_LOGS_PAGENUM, f'Row no {j}: Выбрать not in rr')
# continue
#
# name = rr.split('\n')[0]
# prc = [re.sub('[^0-9]', '', pp) for pp in rr.split('\n')]
# prc = max([int(pp) for pp in prc if pp != ''])
# # print(name)
# # print(prc)
# href = 'https://kaspi.kz' + inner_html[inner_html.find('href'):].split('"')[1]
# href = href.split('?')[0]
# # print(href)
# prices[href] = (name, prc)
# # print(name)
# # print('------------------------\n')
    # # if not was_exception:
# # success_parsing_page_prices = True
# # time.sleep(0.5)
# if not was_exception:
# if write_detailed_logs:
# write_logs_out('DEBUG', DETAILED_LOGS_PAGENUM, f'NO EXCEPTION FOR ALL PAGES')
# rows_scanned = True
# else:
# if write_detailed_logs:
# write_logs_out('DEBUG', DETAILED_LOGS_PAGENUM, 'EXCEPTION FOR PAGE')
# pagination_exists = len(list(select_by_class('pagination__el'))) > 0
# if pagination_exists:
# finished = False
# next_pressed = False
# while not finished and not next_pressed:
# try:
# for page_button in select_by_class('pagination__el'):
# if page_button.get_attribute('innerText') == 'Следующая':
# if page_button.get_attribute('class') == 'pagination__el':
# # press
# click_mouse()
# # print(prices)
# # print('CLICK...')
# next_pressed = True
# break
# elif page_button.get_attribute('class') == 'pagination__el _disabled':
# finished = True
# except Exception:
# write_logs_out('DEBUG', PRESS_NEXT_PAGE_ERROR, traceback.format_exc())
# # print('e2', e)
# pass
# if finished:
# return False, prices
# if next_pressed:
# return True, prices
# return False, prices
def get_price_rows():
try:
WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, '//tbody/tr[1]/td[7]/div[1]/div[1]')))
except TimeoutException:
write_logs_out('DEBUG', SELLERS_TABLE_ERROR, f'Seller page load fail:\n {traceback.format_exc()}')
return False, None
try:
html = WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, '//tbody'))).get_attribute('innerHTML')
rows = re.finditer('<tr>', html)
prices = {}
for r in rows:
r = html[r.span()[0]:]
href = 'https://kaspi.kz' + r[r.find('href'):r.find('?')][6:]
name = r.split('">')[2].split('<')[0]
price = int(r.split('sellers-table__price-cell-text">')[1].split(' ')[0].replace(' ', ''))
prices[href] = (name, price)
except:
write_logs_out('DEBUG', SELLERS_TABLE_ERROR, f'Prices parse fail:\n {traceback.format_exc()}')
return False, None
try:
next_button = WebDriverWait(driver, 1).until(
EC.visibility_of_element_located((By.XPATH, "//li[contains(text(),'Следующая')]")))
except TimeoutException:
return False, prices
if next_button.get_attribute('class') == 'pagination__el _disabled':
return False, prices
next_button.click()
return True, prices
def refresh_at_page(page_num):
driver.refresh()
for page_button in select_by_class('pagination__el'):
if page_button.get_attribute('innerText') == page_num:
click_mouse()
def fill_by_class(name, fill):
classes = select_by_class(name)
for c in classes:
if c.get_attribute('class') == 'form__col _12-12 _medium_6-12':
break
elem[0].clear()
elem[0].send_keys(fill)
def change_tab_status(i, idx=None, action=None, status=None, start_t=False, strings=None):
# write_logs_out('DEBUG', CHANGE_TAB_STATUS,
# f'change_tab_status \ni={i} \nidx={idx} \naction={action} \nstatus={status} '
# f'\nstart_t={start_t}', write_db=False)
if idx:
tab_status.loc[i, 'idx'] = idx
if action:
tab_status.loc[i, 'action'] = action
if status:
tab_status.loc[i, 'status'] = status
if start_t:
tab_status.loc[i, 'start_t'] = int(time.time())
if strings:
tab_status.loc[i, 'strings'] = strings
def write_prices(prices):
write_logs_out('DEBUG', BOT_WRITING_PRICES, f'wrote prices, len prices = {len(prices)}')
prices_df = pd.DataFrame({'seller_link': list(prices.keys()),
'seller_name': [n for n, _ in prices.values()],
'seller_price': [p for _, p in prices.values()]})
cursor = db.cursor()
cursor.execute(
f"INSERT INTO scan_event (merchant_id, order_link, sellers_links, sellers_names, sellers_prices) "
"VALUES (%s, %s, %s, %s, %s)", (merchant_id,
str(mini_orders.iloc[tab_status.loc[i, 'idx'] % mini_orders.shape[0]].order_link),
','.join(prices_df.seller_link.values),
','.join(prices_df.seller_name.values),
','.join(prices_df.seller_price.astype('str').values)))
db.commit()
cursor.close()
def prepare_orders():
db = pg.connect(user=config.db_user,
password=config.db_pass,
database=config.db,
host=config.host,
port=config.port)
orders = psql.read_sql(f'SELECT * from order_table where merchant_id = {merchant_id}', db)
orders_fact = psql.read_sql(f'select * from order_fact where merchant_id = {merchant_id}', db)
    write_logs_out('INFO', BOT_INFO, 'Loaded orders fact from db')
orders = orders_fact.merge(orders, left_on='order_link', right_on='order_link', how='left')[
['merchant_id_x', 'order_link', 'min_price', 'active', 'cls', 'main_includes']]
write_logs_out('INFO', BOT_INFO,
f'Orders fact count: {len(orders_fact)}\n'
f'Orders included in input: {orders[orders.main_includes==True].shape[0]}\n'
f'Orders active: {orders[orders.active==True].shape[0]}')
orders = orders[orders.active==True]
write_logs_out('INFO', BOT_INFO, f'Active orders shape {orders.shape[0]}')
last_scans = pd.read_sql(f'select * from scan_event where merchant_id = {merchant_id} order by created_at desc limit {orders.shape[0] * 2}', db)
last_scans['priority'] = list(range(1, len(last_scans) + 1))
last_scans.drop_duplicates('order_link', keep='first', inplace=True)
global num_tabs
num_tabs = 2 if len(orders) <= 50 else 10
idx = np.linspace(0, orders.shape[0], num_tabs + 1)
# tab_status = [[0, -1, 0, 0]] * num_tabs # order_n, curr_phase, next_phase, time_elps
mini_orders_all = []
for i in range(num_tabs):
mini_order = orders.iloc[int(idx[i]):int(idx[i + 1])]
mini_order = mini_order.merge(last_scans, on='order_link', how='left')
mini_order['priority'].fillna(0, inplace=True)
mini_order.sort_values(by='priority', ascending=False, inplace=True)
mini_order.reset_index(drop=True, inplace=True)
# pd.options.display.max_colwidth = 10000
# print(mini_order)
mini_orders_all.append(mini_order)
write_logs_out('DEBUG', MINI_ORDERS_SHAPES, f'mini_orders sizes {[mo.shape[0] for mo in mini_orders_all]}')
with open('temp_mini.pk', 'wb') as file:
pk.dump(mini_orders_all, file, protocol=pk.HIGHEST_PROTOCOL)
return mini_orders_all
def check_tab_timeout():
global tab_timeout_on
tab_timeout_on = False
time.sleep(5)
tab_timeout_on = True
write_logs_out('DEBUG', TIMEOUT_CHECKER_ON, 'Timeout checker is on')
def check_tab_timeout_helper():
cnt = 1
while tab_timeout_on:
for j in range(num_tabs):
idc = tab_status.iloc[j].idx
if idc != tab_timeout_dict[j]['prev']:
tab_timeout_dict[j]['last_change'] = time.time()
tab_timeout_dict[j]['prev'] = idc
if time.time() - tab_timeout_dict[j]['last_change'] > timeout_tab:
write_logs_out('DEBUG', TIMEOUT_AT_TAB, f"timeout at tab {j} \ntdiff: {int(time.time() - tab_timeout_dict[j]['last_change'])} secs")
change_tab_status(j, action='None', start_t=True, idx=tab_status.loc[j]['idx'] + 1)
if cnt % 60 == 0:
crs = db.cursor(cursor_factory=RealDictCursor)
crs.execute('select Extract(epoch from (now() - created_at)/60) as minutes_ago '
'from scan_event order by created_at desc limit 1')
rec = crs.fetchone()['minutes_ago']
if rec > 2:
stop_tab_timeout()
write_logs_out('DEBUG', BOT_KILL_TIMEOUT, f'{time.time() - start_time} seconds after start')
os.kill(os.getpid(), signal.SIGKILL)
# t_diff = time.time() - tab_status.start_t > timeout_tab
# if any(t_diff):
# write_logs_out('ERROR', TIMEOUT_AT_TAB, f'Timeout at tab {t_diff}')
# timeout_tabs = time.time() - tab_status.start_t > timeout_tab
# for j in list(timeout_tabs.index.values):
# timeout_t = timeout_tabs.loc[j].iloc[0]
# if timeout_t:
# write_logs_out('DEBUG', TIMEOUT_AT_TAB, f'timeout at tab {j}')
# change_tab_status(j, action='None', start_t=True, idx=tab_status.loc[j]['idx'] + 1)
# else:
# exit_handler()
# os._exit(os.EX_OK)
time.sleep(3)
cnt += 1
th = Thread(target=check_tab_timeout_helper)
th.start()
def stop_tab_timeout():
global tab_timeout_on
tab_timeout_on = False
time.sleep(5)
def init_kaspi_vars():
global kaspi_login, kaspi_password, my_link, my_id
cursor = db.cursor(cursor_factory=RealDictCursor)
cursor.execute(f'select * from merchants where merchant_id = {merchant_id}')
rec = cursor.fetchone()
kaspi_login = rec['kaspi_login']
kaspi_password = rec['kaspi_password']
my_link = rec['address_tab']
my_id = my_link.split('/')[-3]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('write_logs', nargs='?', default='False')
args = parser.parse_args()
write_db = args.write_logs == 'True'
db = pg.connect(user=config.db_user,
password=config.db_pass,
database=config.db,
host=config.host,
port=config.port)
init_kaspi_vars()
start_time = time.time()
mini_orders_all = prepare_orders()
init_vars()
create_driver()
login()
open_new_tabs()
# with open('temp_mini.pk', 'rb') as file:
# mini_orders_all = pk.load(file)
if not city_inited:
init_city()
city_inited = True
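    # Per-tab state machine driven by the loop below:
    #   'None' -> 'open_order' -> 'pricep_<n>' (one state per sellers-table page)
    #   -> 'process_order' (decide the new price) -> 'process_order3' (submit it),
    # with 'reload'/'reloadp_<n>' as recovery states when a page fails to load.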
while time.time() - start_time < timeout_restart:
print(tab_status[['idx', 'action', 'status']])
for i, h in enumerate(driver.window_handles):
try:
driver.switch_to.window(h)
mini_orders = mini_orders_all[i]
curr_order_link = mini_orders.iloc[tab_status.loc[i, 'idx'] % mini_orders.shape[0]].order_link
# print(curr_order_link, tab_status.loc[i, 'action'], tab_status.loc[i, 'status'])
# curr_order_active = mini_orders.iloc[tab_status.loc[i, 'idx'] % mini_orders.shape[0]].active
# if not curr_order_active:
# tab_status.loc[i, 'idx'] += 1
# continue
if tab_status.loc[i, 'action'] == 'None':
# start cycle
driver.get(curr_order_link)
change_tab_status(i, action='open_order', status='pending', start_t=True)
elif tab_status.loc[i, 'action'] == 'open_order':
if tab_status.loc[i, 'status'] == 'pending':
if page_is_loaded():
change_tab_status(i, status='success')
if tab_status.loc[i, 'status'] == 'success':
change_tab_status(i, action='pricep_1', status='pending')
next_pressed, prices = get_price_rows()
if next_pressed:
if prices:
action = tab_status.loc[i]['action']
action_no = int(action.split('_')[1])
change_tab_status(i, action=f'pricep_{action_no + 1}', status='pending',
strings=json.dumps(prices))
else:
raise Exception('Next pressed, but prices is None') # Never occurred
else:
if prices:
write_prices(prices)
# change_tab_status(i, idx=tab_status.loc[i]['idx'] + 1, action='None')
change_tab_status(i, action='process_order', status='pending',
strings=json.dumps(prices))
else:
driver.refresh()
change_tab_status(i, action='reload', status='pending')
# raise Exception('No any seller')
elif tab_status.loc[i, 'action'] == 'reload':
if page_is_loaded():
change_tab_status(i, action='open_order', status='pending')
elif 'pricep' in tab_status.loc[i, 'action']:
next_pressed, prices = get_price_rows()
# if len(prices) == 0:
# next_pressed, prices = get_price_rows()
if next_pressed:
if prices:
action = tab_status.loc[i]['action']
action_no = int(action.split('_')[1])
change_tab_status(i, action=f'pricep_{action_no + 1}', status='pending',
strings=tab_status.loc[i]['strings'] + '|+|' + json.dumps(prices))
else:
raise Exception('Next pressed, but prices is None') # Never occurred
else:
if prices:
strings = tab_status.loc[i]['strings']
strings = strings.split('|+|')
strings = [json.loads(s) for s in strings]
res = {}
for p in strings + [prices]:
# try:
res.update(p)
# except Exception as e:
# write_logs_out('SPECIAL', f'p: {p}\n'
# f'strings: {strings}\n'
# f'prices: {prices}')
# raise e
write_prices(res)
# change_tab_status(i, idx=tab_status.loc[i]['idx'] + 1, action='None')
change_tab_status(i, action=f'process_order', status='pending',
strings=json.dumps(res))
else:
action = tab_status.loc[i]['action']
action_no = action.split('_')[1]
# refresh_at_page(action_no)
driver.refresh()
change_tab_status(i, action=f'reloadp_{action_no}', status='pending')
elif 'reloadp' in tab_status.loc[i, 'action']:
action = tab_status.loc[i]['action']
action_no = action.split('_')[1]
if page_is_loaded():
for page_button in select_by_class('pagination__el'):
if page_button.get_attribute('innerText') == action_no:
click_mouse()
change_tab_status(i, action=f'pricep_{action_no}', status='pending')
elif tab_status.loc[i, 'action'] == 'process_order':
if tab_status.loc[i, 'status'] == 'pending':
prices = json.loads(tab_status.loc[i]['strings'])
# write_logs_out('DEBUG', json.dumps(prices), write_db=False)
im_seller = any([my_id in p for p in prices])
if im_seller:
my_rank = [i for i, k in enumerate(prices) if my_id in k][0]
my_curr_price = prices[my_link][1]
min_price = mini_orders.iloc[tab_status.loc[i, 'idx'] % mini_orders.shape[0]].min_price
is_alone = len(prices) == 1
cursor = db.cursor()
cursor.execute(f"""insert into order_status
(merchant_id, order_link, ranking, curr_price, min_price, is_alone, scanned_at) values
(%s, %s, %s, %s, %s, %s, now()) on conflict (merchant_id, order_link) do update set
(ranking, curr_price, min_price, is_alone, scanned_at) = (EXCLUDED.ranking, EXCLUDED.curr_price, EXCLUDED.min_price, EXCLUDED.is_alone, EXCLUDED.scanned_at)""",
(merchant_id, curr_order_link, my_rank + 1, my_curr_price,
int(min_price) if not pd.isna(min_price) else None, is_alone))
db.commit()
cursor.close()
top1_price = prices[list(prices)[0]][1]
if len(prices) > 1:
top2_price = prices[list(prices)[1]][1]
else:
top2_price = -top1_price
if my_rank != 0:
desired_price = top1_price - price_step
else:
desired_price = top2_price - price_step
if desired_price < 0:
write_logs_out('DEBUG', BOT_ALONE_SELLER, f'I AM ALONE SELLER')
desired_price = my_curr_price
desired_price = max(min_price, desired_price)
# write_logs_out('DEBUG',
# f'Curr rank {my_rank + 1}\n'
# f'Curr price {my_curr_price}\n'
# f'Min price {min_price}\n'
# f'Desired price {desired_price}\n'
# f'Top1 price {top1_price}\n'
# f'Top2 price {top2_price}')
if pd.isna(min_price):
write_logs_out('DEBUG', INDEXER_NO_MINPRICE_IN_ORDERTABLE, curr_order_link)
change_tab_status(i, idx=tab_status.loc[i]['idx'] + 1, action='None')
else:
if desired_price == min_price:
write_logs_out('DEBUG', DESIRED_EQ_MIN, 'Desired price = min price')
# change_tab_status(i, idx=tab_status.loc[i]['idx'] + 1, action='None')
if desired_price == my_curr_price:
write_logs_out('DEBUG', ALREADY_DES_PRICE, 'Already desired price')
change_tab_status(i, idx=tab_status.loc[i]['idx'] + 1, action='None')
else:
# link = f"https://kaspi.kz/merchantcabinet/#/offers/edit/{curr_order_link.split('-')[-1]}_{my_id}"
link = f"https://kaspi.kz/mc/#/products/{curr_order_link.split('-')[-1]}_{my_id}"
driver.get(link)
change_tab_status(i, action='process_order3', status='pending',
strings=link + '|+|' + (str(int(desired_price)) if
(my_curr_price - desired_price) <= 60000 else str(int(my_curr_price - 60000))))
# print('NEW_PRICE_CREATE', link + '|+|' + str(int(desired_price)) if
# (my_curr_price - desired_price) <= 60000 else str(int(my_curr_price - 60000)))
else:
# change_tab_status(i, idx=tab_status.loc[i]['idx'] + 1, action='None')
raise Exception('I am not seller')
# elif tab_status.iloc[i]['action'] == 'process_order2':
# if tab_status.iloc[i]['status'] == 'pending':
# # 'main-nav__el-link'
# try:
# success1 = wait_till_load_by_text('Заказы', t=0.5)
# if success1:
# change_tab_status(i, status='success')
# except TimeoutException:
# write_logs_out('ERROR', ZAKAZY_NOTLOADED,
# f'ZAKAZY NOT LOADED at process_order2 {traceback.format_exc()}')
# if tab_status.loc[i]['status'] == 'success':
# link = tab_status.loc[i]['strings'].split('|+|')[0]
# driver.get(link)
# change_tab_status(i, action='process_order3', status='pending')
elif tab_status.loc[i]['action'] == 'process_order3':
if tab_status.loc[i]['status'] == 'pending':
try:
WebDriverWait(driver, 1).until(EC.element_to_be_clickable(
(By.XPATH, "//div[@class='column is-main-content']//li[1]")))
change_tab_status(i, status='success')
write_logs_out('DEBUG', REDAKTIROVANIE_LOADED, 'Редактирование товара loaded')
# if got_page:
# change_tab_status(i, status='success')
# write_logs_out('DEBUG', REDAKTIROVANIE_LOADED, 'Редактирование товара loaded')
# else:
# write_logs_out('ERROR', UNKNOWN_ERROR_PROCESS_ORDER3,
# 'UNKNOWN ERROR at process_order3')
except TimeoutException:
write_logs_out('ERROR', REDAKTIROVANIE_NOTLOADED3,
'REDAKTIROVANIE TOVARA NOT LOADED at process_order3')
try:
WebDriverWait(driver, 15).until(EC.element_to_be_clickable(
(By.CLASS_NAME, 'button.is-primary')))
login()
except:
raise Exception('RELOGIN NOT GOOD')
if tab_status.loc[i]['status'] == 'success':
was_exception = False
# try:
new_price = tab_status.loc[i]['strings'].split('|+|')[1]
# try:
# new_price = tab_status.loc[i]['strings'].split('|+|')[1]
# except:
# print('NEW_PRICE_USE', tab_status.loc[i]['strings'].split('|+|'))
# el = WebDriverWait(driver, 10).until(EC.element_to_be_clickable(
# (By.XPATH, '//th[2]//div[1]//span[1]//div[1]//label[1]//input[1]')))
# el = driver.find_element(By.XPATH, '//th[2]//div[1]//span[1]//div[1]//label[1]//span[1]')
# el.click()
# radios = select_by_class('form__radio-title')
#
# for radio in radios:
# if radio.text == 'Одна для всех городов':
# break
# click_mouse()
el = WebDriverWait(driver, 1).until(EC.element_to_be_clickable(
(By.XPATH, "//div[@class='th-wrap']//input[@type='text']")))
# fill_by_class('form__col', new_price)
el.clear()
el.send_keys(new_price)
el = WebDriverWait(driver, 1).until(EC.element_to_be_clickable(
(By.XPATH, "//button[@class='button button is-primary mr-1']")))
# fill_by_class('form__col', new_price)
el.click()
# try:
# WebDriverWait(driver, 1).until(EC.visibility_of_element_located(
# (By.XPATH, "//div[@class='modal-card animation-content']")))
# # butt = WebDriverWait(driver, 0.2).until(EC.visibility_of_element_located(
# # (By.XPATH, "//button[contains(text(),'Да')]")))
# # butt.click()
# write_logs_out('DEBUG', BIGPRICEDIFF_WARNING, 'YESBIGP')
# except:
# write_logs_out('DEBUG', NO_BIGPRICEDIFF_WARNING, 'NOBIGP')
# buttons = select_by_tag('button')
# for button in buttons:
# if button.text == 'Сохранить':
# break
# press_enter()
cursor = db.cursor()
cursor.execute(f"""UPDATE ORDER_STATUS
SET NEXT_PRICE = %s, updated_at = now()
WHERE merchant_id = %s and ORDER_LINK = %s""",
(int(float(new_price)), merchant_id, curr_order_link))
write_logs_out('DEBUG', UPDATING_PRICE, f'Updating price to {new_price}')
db.commit()
cursor.close()
change_tab_status(i, idx=tab_status.loc[i]['idx'] + 1, action='None')
# except ElementClickInterceptedException:
# was_exception = True
# new_price = tab_status.loc[i]['strings'].split('|+|')[1]
# write_logs_out('ERROR', UPDATING_PRICE_ERROR,
# f'Fail while updating price to {new_price}:\n {traceback.format_exc()}')
if was_exception:
try:
curtain = WebDriverWait(driver, 1).until(EC.visibility_of_element_located(
(By.CLASS_NAME, 'ks-gwt-dialog _small g-ta-c')))
button = WebDriverWait(driver, 1).until(
EC.element_to_be_clickable((By.CLASS_NAME, 'gwt-Button button')))
button.click()
write_logs_out('DEBUG', CURTAIN_CLICKED, 'Curtain clicked')
except TimeoutException:
write_logs_out('ERROR', CURTAIN_CLICK_ERROR,
f'Fail to click curtain but page is stale:\n {traceback.format_exc()}')
raise TimeoutException()
else:
# print(tab_status.loc[i, 'action'])
raise NotImplementedError(f"Not implemented action {tab_status.loc[i, 'action']}")
except Exception as e:
change_tab_status(i, idx=tab_status.loc[i]['idx'] + 1, action='None')
write_logs_out('FATAL', BOT_ERROR, traceback.format_exc())
time.sleep(1)
# print(curr_order_link, tab_status.loc[i, 'action'], tab_status.loc[i, 'status'])
# print()
pass
if not tab_timeout_on:
check_tab_timeout()
stop_tab_timeout()
write_logs_out('DEBUG', BOT_END_SECONDS_AFTER, f'{time.time() - start_time} seconds after start')
os.kill(os.getpid(), signal.SIGKILL)
# selenium.common.exceptions.ElementClickInterceptedException: Message: Element <label class="form__radio-title"> is not clickable at point (511,611) because another element <div class="ks-gwt-dialog _small g-ta-c"> obscures it
# https://kaspi.kz/shop/p/artel-dolce-21-ex-belyi-2602172/?c=750000000
# https://kaspi.kz/shop/p/ardesto-kastrjulja-ar1922as-stal-2-2-l-105339039,https://kaspi.kz/shop/p/samsung-ar12txhqasinua-belyi-104750094,https://kaspi.kz/shop/p/ardesto-kastrjulja-gemini-salerno-ar1908cs-stal-0-8-l-105363172,https://kaspi.kz/shop/p/ardesto-nozh-gemini-como-ar1906ck-6-sht-stal--105314266
# bad guy https://kaspi.kz/shop/p/stabilizator-naprjazhenija-resanta-asn-600-1-i-5400793
# _______________________
# https://kaspi.kz/shop/p/kompressor-masljanyi-vihr-kmp-230-24-50600000
# FATAL
# Traceback (most recent call last):
# File "/Users/batyagg/kaspibot_dock/kaspibot/kaspibotV3.py", line 768, in <module>
# new_price = tab_status.loc[i]['strings'].split('|+|')[1]
# IndexError: list index out of range | BatyaGG/kaspibot | kaspibotV3.py | kaspibotV3.py | py | 41,975 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path.a... |
31076470598 | # This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import praw
import prawcore
from subber import util
logger = logging.getLogger(__name__)
class Reddit(object):
"""Reddit API session"""
def __init__(self, client_id, client_secret, password, username):
self._session = praw.Reddit(client_id=client_id,
client_secret=client_secret,
password=password,
user_agent='web app',
username=username)
# Verify connection
try:
self._session.user.me()
except prawcore.exceptions.OAuthException:
logger.critical('Unable to initialize Reddit API session. Verify '
'the credentials in the Subber config file are '
'correct.')
raise RuntimeError('Unable to initialize Reddit API session.')
def get_session(self):
return self._session
def get_user_recommendations(session, user):
"""Return a list of recommended subs for a user
Keyword arguments:
session -- instance of the Reddit api
user -- username to retrieve recommendations for
"""
# Get similar users
try:
similar_users = _get_similar_users(session, user)
except Exception as e:
logger.error('Unable to get recommendations for user {}. Error '
'retrieving similar users.'.format(user))
logger.exception(e)
return
# Create a list of sub recommendations
subs = []
for sim_user in similar_users:
# Get active subs for similar user
try:
active_subs = _get_active_subs(session, sim_user)
# Add active subs to recommendations
for sub in active_subs:
# Get sub metadata and append if not in subs
sub_info = get_sub_info(session, sub[2:])
if sub_info not in subs:
subs.append(sub_info)
except Exception as e:
logger.exception('Unable to get recommendations for user {}. '
'Error retrieving active subs for user '
'{}'.format(user, sim_user))
logger.exception(e)
if not subs:
logger.warning('No recommendations found for user {}'.format(user))
else:
logger.debug('Recommending subs {} to user {}.'.format(subs, user))
return subs
def _get_similar_users(session, user):
"""Return a list containing users that have commented on a user's post
and users whose posts have been commented on by a user.
Keyword arguments:
session -- instance of the Reddit api
user -- username to retrieve similar users for
"""
similar_users = []
# Retrieve commenters from parent comments
comments = _get_user_comments(session, user)
try:
for comment in comments:
parent = comment.parent().author.name
if parent not in similar_users and parent != user:
similar_users.append(parent)
except Exception:
# Comment is deleted
pass
# Retrieve commenters from user's top posts
submissions = _get_user_submissions(session, user)
try:
for s in submissions:
for c in s.comments:
if (c.author and c.author.name not in similar_users and
c.author.name != user):
similar_users.append(c.author.name)
break # limit to one submission comment
except Exception:
# Comments missing
logger.debug('Skipping submission comments for user '
'{}'.format(user))
pass
logger.debug('Considering similar users {} for user '
'{}'.format(similar_users, user))
return similar_users
def _get_user_comments(session, user):
"""Return a list of a user's five newest comments
Keyword arguments:
session -- instance of the Reddit api
user -- username to retrieve comments for
"""
try:
comments = session.redditor(user).comments.new(limit=5)
logger.debug('PRAW comment request made for user {}'.format(user))
return comments
except Exception:
logger.error('Error retrieving comments for user {}'.format(user))
def _get_user_submissions(session, user):
"""Return a list of a user's top five submissions
Keyword arguments:
session -- instance of the Reddit api
user -- username to retrieve submissions for
"""
try:
submissions = session.redditor(user).submissions.top(limit=5)
logger.debug('PRAW submission request made for user {}'.format(user))
return submissions
except Exception:
logger.error('Error retrieving submissions for user {}'.format(user))
def _get_active_subs(session, user):
"""Return a list of subs a user is active in
Keyword arguments:
session -- instance of the Reddit api
user -- username to retrieve active subs for
"""
def process_posts(posts):
subs = []
try:
for p in posts:
sub = p.subreddit_name_prefixed
if sub not in subs:
subs.append(sub)
except Exception:
# Skip post if missing metadata
logger.error('Error processing content request results for user '
'{}'.format(user))
pass
return subs
# Retrieve user comments and submissions
comments = _get_user_comments(session, user)
submissions = _get_user_submissions(session, user)
# Process active subs
subs = []
if comments is not None:
logger.debug('Processing PRAW comment request results for user '
'{}'.format(user))
subs = process_posts(comments)
if submissions is not None:
        logger.debug('Processing PRAW submission request results for user '
'{}'.format(user))
subs = subs + process_posts(submissions)
logger.debug('{} active subs found for user {}'.format(len(subs), user))
if subs:
logger.debug('Active subs found for user {} as {}'.format(user, subs))
return subs
def get_sub_info(session, sub):
"""Return a dictionary containing metadata for a subreddit
Keyword arguments:
session -- instance of the Reddit api
sub -- subreddit to get metadata for
"""
try:
# Get subreddit metadata
subreddit = session.subreddit(sub)
# Convert seconds after UTC epoch to years since sub creation
sub_age = util.utc_epoch_sec_to_years(subreddit.created)
return {'name': subreddit.display_name_prefixed,
'title': subreddit.title,
'age': sub_age,
'subscribers': subreddit.subscribers,
'over18': subreddit.over18,
'desc': subreddit.public_description}
except Exception:
logger.debug('Unable to retrieve sub info for {}'.format(sub))
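
# Hypothetical usage (credentials are placeholders):
# session = Reddit(client_id, client_secret, password, username).get_session()
# recommendations = get_user_recommendations(session, 'some_redditor')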
| drewwalters96/subber | subber/reddit.py | reddit.py | py | 7,738 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "praw.Reddit",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "prawcore.exceptions",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "subber.util.ut... |
26455397608 | #! /usr/bin/python3
# Import findspark, then initialize it.
# It is a helper module for locating Spark quickly; in practice it makes little difference whether it is used or not.
import findspark
findspark.init()
from pyspark.sql import SparkSession
# How to create a SparkSession object (give it an arbitrary app name, here "pyspark-hdfs2", and call .getOrCreate()).
sparkSession = SparkSession.builder.appName("pyspark-hdfs2").getOrCreate()
# read.load() lets you set a variety of options
df2 = sparkSession.read.load('/home/jshoon/employee.csv',
        # inferSchema='true': infer each column's data type automatically (numbers stay numbers, text stays text)
        # sep=',' splits the fields on the delimiter, much like split()
format='csv', sep=',', inferSchema='true', header='false')
df2 = df2.toDF('id','name','salary','job')
df2.show()
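# Equivalent shorthand for the load above (same options, assuming the same file):
# df2 = sparkSession.read.csv('/home/jshoon/employee.csv', sep=',', inferSchema=True, header=False)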
| jshooon/BigData_Hadoop | chap06/local_file_load2.py | local_file_load2.py | py | 812 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "findspark.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 8,
"usage_type": "attribute"
},... |
74784258662 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def ludwig_lang_plot(cation_data_fn, anion_data_fn, cations, anions):
cat_reader = pd.read_csv(cation_data_fn)
an_reader = pd.read_csv(anion_data_fn)
cat_data = []
for column in cat_reader:
cat_data.append(cat_reader[column].values)
samp_names = cat_data[0]
cat_names = []
for i in cations:
cat_names.append(cat_reader.iloc[:,i].name)
xlabel = cat_names[0]
for i in range(1, len(cat_names)):
xlabel = xlabel + '+' + cat_names[i]
an_names = []
for i in anions:
an_names.append(an_reader.iloc[:,i].name)
ylabel = an_names[0]
for i in range(1, len(an_names)):
ylabel = ylabel + '+' + an_names[i]
an_data = []
for column in an_reader:
an_data.append(an_reader[column].values)
x = 50 * sum(cat_data[i] for i in cations)/sum(cat_data[1:])
y = 50 * sum(an_data[i] for i in anions)/sum(an_data[1:])
this_dict = {}
k = 0
temp_name = samp_names[0]
for i in range(len(samp_names)):
if type(samp_names[i]) == str:
this_dict[temp_name] = [k, i-1]
temp_name = samp_names[i]
k = i
this_dict[temp_name] = [k, len(samp_names)-1]
for key in this_dict:
i = this_dict[key][0]
j = this_dict[key][1]
plt.scatter(x[i:j+1], y[i:j+1], label = key)
plt.grid(True, which = "both")
plt.title("Ludwig-Langelier Plot")
plt.xlabel(xlabel + " (% of 50 millieq)" )
plt.ylabel(ylabel + " (% of 50 millieq)")
plt.legend()
plt.show()
return | yashvardhan747/Statistical-and-Aqual-chemical-plots | AquaChemPlots/ludwig_lang_plot.py | ludwig_lang_plot.py | py | 1,452 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.py... |
36339474495 | import scipy.misc
import matplotlib.pyplot as plt
# This script demonstrates fancy indexing by setting values
# on the diagonals to 0.
# Load the ascent array
ascent = scipy.misc.ascent()
xmax = ascent.shape[0]
ymax = ascent.shape[1]
# Fancy indexing
# Set values on diagonal to 0
# x 0-xmax
# y 0-ymax
ascent[range(xmax), range(ymax)] = 0
# Set values on other diagonal to 0
# x xmax-0
# y 0-ymax
ascent[range(xmax-1,-1,-1), range(ymax)] = 0
# Plot ascent with diagonal lines set to 0
plt.imshow(ascent)
plt.show()
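# For square arrays the same zeroing can be done with numpy helpers (the flipped
# view shares memory with ascent, so fill_diagonal on it edits the original):
# np.fill_diagonal(ascent, 0)               # main diagonal
# np.fill_diagonal(np.fliplr(ascent), 0)    # anti-diagonal
# (would require `import numpy as np`; shown here only as a comparison)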
| denotepython/pythonbook | python数据分析/3358OS_Code/3358OS_02_Code/3358OS_02_Code/code2/fancy.py | fancy.py | py | 521 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.misc.misc.ascent",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "scipy.misc.misc",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "scipy.misc",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot... |
18326177260 | from blackjack_simulator import BlackJack as Environment
from evaluate_policy import mc_first_visit, mc_every_visit, k_step_td
from control_policy import k_step_sarsa, q_learning, td_lambda
import sys
from tqdm.auto import tqdm
def eval_dealer_policy(eval_algo=0, num_expt=1, num_episodes=1000, k_step=1):
env = Environment()
policy = env.get_dealer_policy()
q_func_array = []
if eval_algo==0:
for _ in tqdm(range(num_expt)):
q_func_array.append(mc_first_visit(policy,num_episodes))
elif eval_algo==1:
for _ in tqdm(range(num_expt)):
q_func_array.append(mc_every_visit(policy,num_episodes))
else:
for _ in tqdm(range(num_expt)):
            q_func_array.append(k_step_td(policy=policy, number_of_episodes=num_episodes, k_step=k_step))  # k_step_td is the imported name; k_step_td_eff was undefined
q_func = q_func_array[0]
for key in q_func:
for q in q_func_array[1:]:
q_func[key]+=q[key]
q_func[key]/=len(q_func_array)
env.plot_policy(q_func, policy)
return q_func
def learn_policy(algo=0, num_episodes=10000, k_step=1, lr=0.1, epsilon=0.1,
lambd=0.5, decay_epsilon=True, plot_frequency=100, test_episodes=1000,):
if algo==0:
policy, scores = k_step_sarsa(lr=lr, epsilon=epsilon, decay_epsilon=decay_epsilon, plot_frequency=plot_frequency,
number_of_episodes=num_episodes, test_episodes=test_episodes, k_step=k_step)
elif algo==1:
policy,scores = q_learning(lr=lr, number_of_episodes=num_episodes,decay_epsilon=decay_epsilon, plot_frequency=plot_frequency,
test_episodes=test_episodes)
else:
policy, scores = td_lambda(lr=lr, epsilon=epsilon, lambd=lambd, decay_epsilon=decay_epsilon, number_of_episodes=num_episodes,
plot_frequency=plot_frequency, test_episodes=test_episodes)
q = mc_every_visit(policy,num_episodes)
env = Environment()
env.plot_policy(q,policy)
return policy,q, scores
| djin31/tabularRL | tabular_rl.py | tabular_rl.py | py | 2,069 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "blackjack_simulator.BlackJack",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tqdm.auto.tqdm",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "evaluate_policy.mc_first_visit",
"line_number": 13,
"usage_type": "call"
},
{
"api_na... |
9796518132 | import time
from urllib import quote, unquote
from webob import Request
from swift.common.utils import (get_logger, get_remote_client,
get_valid_utf8_str, TRUE_VALUES)
class InputProxy(object):
"""
File-like object that counts bytes read.
To be swapped in for wsgi.input for accounting purposes.
"""
def __init__(self, wsgi_input):
"""
:param wsgi_input: file-like object to wrap the functionality of
"""
self.wsgi_input = wsgi_input
self.bytes_received = 0
self.client_disconnect = False
def read(self, *args, **kwargs):
"""
Pass read request to the underlying file-like object and
add bytes read to total.
"""
try:
chunk = self.wsgi_input.read(*args, **kwargs)
except Exception:
self.client_disconnect = True
raise
self.bytes_received += len(chunk)
return chunk
def readline(self, *args, **kwargs):
"""
Pass readline request to the underlying file-like object and
add bytes read to total.
"""
try:
line = self.wsgi_input.readline(*args, **kwargs)
except Exception:
self.client_disconnect = True
raise
self.bytes_received += len(line)
return line
class ProxyLoggingMiddleware(object):
"""
Middleware that logs Swift proxy requests in the swift log format.
"""
def __init__(self, app, conf):
self.app = app
self.log_hdrs = conf.get('log_headers', 'no').lower() in TRUE_VALUES
# The leading access_* check is in case someone assumes that
# log_statsd_valid_http_methods behaves like the other log_statsd_*
# settings.
self.valid_methods = conf.get(
'access_log_statsd_valid_http_methods',
conf.get('log_statsd_valid_http_methods',
'GET,HEAD,POST,PUT,DELETE,COPY'))
self.valid_methods = [m.strip().upper() for m in
self.valid_methods.split(',') if m.strip()]
access_log_conf = {}
for key in ('log_facility', 'log_name', 'log_level', 'log_udp_host',
'log_udp_port', 'log_statsd_host', 'log_statsd_port',
'log_statsd_default_sample_rate',
'log_statsd_metric_prefix'):
value = conf.get('access_' + key, conf.get(key, None))
if value:
access_log_conf[key] = value
self.access_logger = get_logger(access_log_conf,
log_route='proxy-access')
self.access_logger.set_statsd_prefix('proxy-server')
def log_request(self, env, status_int, bytes_received, bytes_sent,
request_time, client_disconnect):
"""
Log a request.
:param env: WSGI environment
:param status_int: integer code for the response status
:param bytes_received: bytes successfully read from the request body
:param bytes_sent: bytes yielded to the WSGI server
:param request_time: time taken to satisfy the request, in seconds
"""
req = Request(env)
if client_disconnect: # log disconnected clients as '499' status code
status_int = 499
req_path = get_valid_utf8_str(req.path)
the_request = quote(unquote(req_path))
if req.query_string:
the_request = the_request + '?' + req.query_string
logged_headers = None
if self.log_hdrs:
logged_headers = '\n'.join('%s: %s' % (k, v)
for k, v in req.headers.items())
method = req.environ.get('swift.orig_req_method', req.method)
self.access_logger.info(' '.join(
quote(str(x) if x else '-')
for x in (
get_remote_client(req),
req.remote_addr,
time.strftime('%d/%b/%Y/%H/%M/%S', time.gmtime()),
method,
the_request,
req.environ.get('SERVER_PROTOCOL'),
status_int,
req.referer,
req.user_agent,
req.headers.get('x-auth-token'),
bytes_received,
bytes_sent,
req.headers.get('etag', None),
req.environ.get('swift.trans_id'),
logged_headers,
'%.4f' % request_time,
req.environ.get('swift.source'),
)))
        # Log timing and bytes-transferred data to StatsD
if req.path.startswith('/v1/'):
try:
stat_type = [None, 'account', 'container',
'object'][req.path.strip('/').count('/')]
except IndexError:
stat_type = 'object'
else:
stat_type = env.get('swift.source')
# Only log data for valid controllers (or SOS) to keep the metric count
# down (egregious errors will get logged by the proxy server itself).
if stat_type:
stat_method = method if method in self.valid_methods \
else 'BAD_METHOD'
metric_name = '.'.join((stat_type, stat_method, str(status_int)))
self.access_logger.timing(metric_name + '.timing',
request_time * 1000)
self.access_logger.update_stats(metric_name + '.xfer',
bytes_received + bytes_sent)
def __call__(self, env, start_response):
start_response_args = [None]
input_proxy = InputProxy(env['wsgi.input'])
env['wsgi.input'] = input_proxy
start_time = time.time()
def my_start_response(status, headers, exc_info=None):
start_response_args[0] = (status, list(headers), exc_info)
def iter_response(iterable):
iterator = iter(iterable)
try:
chunk = iterator.next()
while not chunk:
chunk = iterator.next()
except StopIteration:
chunk = ''
for h, v in start_response_args[0][1]:
if h.lower() in ('content-length', 'transfer-encoding'):
break
else:
if not chunk:
start_response_args[0][1].append(('content-length', '0'))
elif isinstance(iterable, list):
start_response_args[0][1].append(
('content-length', str(sum(len(i) for i in iterable))))
else:
raise Exception('WSGI [proxy-logging]: No content-length '
'or transfer-encoding header sent and '
'there is content! %r' % chunk)
start_response(*start_response_args[0])
bytes_sent = 0
client_disconnect = False
try:
while chunk:
bytes_sent += len(chunk)
yield chunk
chunk = iterator.next()
except GeneratorExit: # generator was closed before we finished
client_disconnect = True
raise
finally:
status_int = int(start_response_args[0][0].split(' ', 1)[0])
self.log_request(
env, status_int, input_proxy.bytes_received, bytes_sent,
time.time() - start_time,
client_disconnect or input_proxy.client_disconnect)
try:
iterable = self.app(env, my_start_response)
except Exception:
self.log_request(
env, 500, input_proxy.bytes_received, 0,
time.time() - start_time, input_proxy.client_disconnect)
raise
else:
return iter_response(iterable)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def proxy_logger(app):
return ProxyLoggingMiddleware(app, conf)
return proxy_logger
| DmitryMezhensky/Hadoop-and-Swift-integration | swift/swift/common/middleware/proxy_logging.py | proxy_logging.py | py | 8,172 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "swift.common.utils.TRUE_VALUES",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "swift.common.utils.get_logger",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "webob.Request",
"line_number": 91,
"usage_type": "call"
},
{
"api_na... |
38033130776 | from typing import List, Optional
from pydantic import BaseModel
class Address(BaseModel):
city: str
country: str
class User(BaseModel):
name: str
address: Address
friends: Optional[List['User']] = None
class Config:
json_encoders = {
Address: lambda a: f'{a.city} ({a.country})',
'User': lambda u: f'{u.name} in {u.address.city} '
f'({u.address.country[:2].upper()})',
}
User.update_forward_refs()
wolfgang = User(
name='Wolfgang',
address=Address(city='Berlin', country='Deutschland'),
friends=[
User(name='Pierre', address=Address(city='Paris', country='France')),
User(name='John', address=Address(city='London', country='UK')),
],
)
print(wolfgang.json(models_as_dict=False))
| merlinepedra25/PYDANTIC | docs/examples/exporting_models_json_forward_ref.py | exporting_models_json_forward_ref.py | py | 816 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.List",
... |
34400920616 | #!/usr/bin/python3
"""This module defines a class to manage file storage for airbnb clone"""
import json
class FileStorage:
"""Class manages storage of all instances for airbnb clone"""
__file_path = "file.json"
__objects = {}
def all(self):
"""Returns dictionary of all objects"""
return FileStorage.__objects
def new(self, obj):
"""Sets in __objects the obj with key <obj class name>.id"""
self.all().update({obj.to_dict()["__class__"] + "." + obj.id: obj})
def save(self):
"""Saves dict to file"""
with open(FileStorage.__file_path, "w") as f:
temp = {}
temp.update(FileStorage.__objects)
for key, val in temp.items():
temp[key] = val.to_dict()
json.dump(temp, f)
def reload(self):
"""Loads dict from file"""
from models.base_model import BaseModel
from models.user import User
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.place import Place
from models.review import Review
classes = {
"BaseModel": BaseModel,
"User": User,
"State": State,
"City": City,
"Amenity": Amenity,
"Place": Place,
"Review": Review,
}
try:
temp = {}
with open(FileStorage.__file_path, "r") as f:
temp = json.load(f)
for key, val in temp.items():
self.all()[key] = classes[val["__class__"]](**val)
except FileNotFoundError:
pass
| Sami64/AirBnB_clone | models/engine/file_storage.py | file_storage.py | py | 1,685 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.dump",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "models.base_model.BaseModel",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "models.user.User",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "models.state.S... |
7003120074 | from analysis import load_subset_data, define_meta_class, feature_choice
from analysis import keep_important_variables, create_pairs
from analysis import normalize_data, Bigger_Net, global_loop, save_error
from vizualisation import color_map
from torch.utils.data import TensorDataset, DataLoader
import torch
import torch.nn as nn
import numpy as np
import datetime
import os
liste = [
'EB*', 'Mira', 'SN candidate', 'QSO', 'BLLac', 'Blazar',
'Ambiguous', 'RRLyr', 'YSO', 'LPV*', 'AGN', 'Seyfert_1', 'AGN_Candidate',
'TTau*', 'Kilonova candidate'
]
df = load_subset_data(liste)
dic = {'variable_star_class': ['EB*', 'Mira', 'RRLyr', 'YSO', 'LPV*', "TTau*"],
'AGN_class': ['QSO', 'BLLac', 'Blazar',
'AGN', 'Seyfert_1', 'AGN_Candidate']
}
df['finkclass'] = df['finkclass'].apply(define_meta_class, dic=dic)
df = df.groupby('finkclass').head(200)
df = df.reset_index(drop=True)
print(df['finkclass'].value_counts())
colors = color_map(df['finkclass'].value_counts().index.tolist())
cols = [
'rf_kn_vs_nonkn',
'rf_snia_vs_nonia',
'snn_sn_vs_all',
'snn_snia_vs_nonia'
]
cols_in_candidate = [
'jdstarthist',
'magpsf',
'sigmapsf',
'fid',
'magnr',
'sigmagnr',
'isdiffpos',
'neargaia',
'sgscore1',
'classtar'
]
df_filtered = feature_choice(
df, cols + ['lc_features_g', 'lc_features_r',
['candidate', cols_in_candidate]])
df_filtered = normalize_data(df_filtered)
df_filt_selected = keep_important_variables(df_filtered)
label = df['finkclass']
# take a random sample of 100 elements in df_filt_selected and label
df_filt_selected_sample = df_filt_selected.sample(
n=len(df) - 200, random_state=42)
label_sample = label[df_filt_selected_sample.index]
# create an other sample with 100 different elements
# of df_filt_selected and label
df_filt_selected_sample2 = df_filt_selected.drop(df_filt_selected_sample.index)
df_filt_selected_sample2 = df_filt_selected_sample2.sample(
n=200, random_state=42)
label_sample2 = label[df_filt_selected_sample2.index]
X_train, y_train = create_pairs(df_filt_selected_sample, label_sample)
X_test, y_test = create_pairs(df_filt_selected_sample2, label_sample2)
# TensorDataset wraps the tensors; the original called the torch.utils.data.dataset
# *module*, which is not callable (this assumes create_pairs returns tensors)
trainloader = DataLoader(TensorDataset(X_train, y_train),
                         batch_size=64, shuffle=False)
testloader = DataLoader(TensorDataset(X_test, y_test), batch_size=64, shuffle=False)
net = Bigger_Net(len(df_filt_selected.columns.tolist()))
learning_rate = 0.001
epochs = 1000
loss_fn = nn.BCELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
errors = global_loop(trainloader, testloader, net,
loss_fn, optimizer, epochs=epochs)
folder = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
path = os.path.join('results/models/', folder)
os.mkdir(path)
torch.save(net.state_dict(), 'results/models/' + folder + '/model.pt')
np.savetxt('results/models/' + folder + '/errors.csv', errors, delimiter=',')
save_error(errors, 'results/models/' + folder)
print("Script ended")
| pierrecavalier/graph_fink | main.py | main.py | py | 3,056 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "analysis.load_subset_data",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "analysis.define_meta_class",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "vizualisation.color_map",
"line_number": 31,
"usage_type": "call"
},
{
"... |
36085301774 | import bs4
import requests
import pandas as pd
import re
import warnings
warnings.filterwarnings('ignore')
def get_page(url):
headers = {
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/106.0.0.0 Safari/537.36"}
    r = requests.get(url, headers=headers)  # headers must be passed by keyword; positionally they would be treated as query params
try:
r.raise_for_status()
page = bs4.BeautifulSoup(r.text, 'lxml')
return page
except Exception as e:
return False
def pontofrio(search, n_page):
'''df = pd.DataFrame(
[],
columns=[
'nm_item',
'nm_product',
],
)'''
df_web = pd.read_csv('web_scrapping_product.csv')
for page in range(n_page):
soup = get_page(f'https://search3.pontofrio.com.br/busca?q={search}&page={1 + page}')
new_search = re.sub('-', ' ', search)
for item in soup.select('.nm-product-name a'):
df_web = df_web.append({'nm_item': item.getText().upper(), 'nm_product': new_search}, ignore_index=True)
# print(item.getText().upper())
df_web.to_csv('web_scrapping_product.csv', index=False)
print(f'{search}: Ok')
df = pd.read_csv('product.csv')
nm_product = df['nm_product'].values.tolist()
for product in nm_product:
new_product = re.sub(' ', '-', product)
#print(new_product)
pontofrio(new_product, 5)
# Errors for: caderno (notebook), lapis (pencil), apontador (sharpener), regua (ruler), borracha (eraser), estojo (pencil case), tinta (ink), lapiseira (mechanical pencil), pasta (folder)
{
"api_name": "warnings.filterwarnings",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv... |
# I failed to solve this problem on my own.
# Model solution
from collections import deque
n, m = map(int, input().split())
graph = [list(map(int, input())) for _ in range(n)]
# Define the four movement directions
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
# BFS implementation
def bfs(x, y):
    # Set up the queue
    queue = deque()
    queue.append((x, y))
    # Repeat until the queue is empty
    while queue:
        x, y = queue.popleft()
        # Check the four directions from the current position
        for i in range(4):
            nx = x + dx[i]
            ny = y + dy[i]
            # Skip positions outside the grid
            if nx < 0 or ny < 0 or nx >= n or ny >= m:
                continue
            # Skip walls
            if graph[nx][ny] == 0:
                continue
            # Record the shortest distance only on the first visit to a node
            if graph[nx][ny] == 1:
                graph[nx][ny] = graph[x][y] + 1
                queue.append((nx, ny))
    # Return the shortest distance to the bottom-right cell
    return graph[n-1][m-1]
print(bfs(0, 0)) | veluminous/CodingTest | [이것이 코딩테스트다] 실전 문제/[BFS] 미로 탈출.py | [BFS] 미로 탈출.py | py | 1,067 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 17,
"usage_type": "call"
}
] |
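A quick self-check of bfs() under the same conventions, with the grid set by hand instead of read from stdin (n, m and graph are the globals the function closes over):

n, m = 3, 3
graph = [[1, 1, 0],
         [0, 1, 0],
         [0, 1, 1]]
# Shortest path visits 5 cells: (0,0) -> (0,1) -> (1,1) -> (2,1) -> (2,2)
assert bfs(0, 0) == 5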
34344785626 | import pika
connection = pika.BlockingConnection(
pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(
exchange='publish',
exchange_type='fanout'
)
result = channel.queue_declare(
queue='',
exclusive=True
)
queue_name = result.method.queue
channel.queue_bind(
exchange='publish',
queue=queue_name
)
def callback(ch, method, properties, body):
print(body)
channel.basic_consume(
queue=queue_name,
on_message_callback=callback,
auto_ack=True)
try:
channel.start_consuming()
except KeyboardInterrupt:
print('Interrupted') | drupadh-eunimart/my_projects | RabbitMQ/PublishFanout/consumer.py | consumer.py | py | 628 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pika.BlockingConnection",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pika.ConnectionParameters",
"line_number": 4,
"usage_type": "call"
}
] |
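For completeness, a minimal matching publisher sketch for the same fanout exchange; every consumer whose queue is bound to 'publish' receives a copy of each message:

import pika

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='publish', exchange_type='fanout')

# Fanout exchanges ignore the routing key
channel.basic_publish(exchange='publish', routing_key='', body='hello')
connection.close()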
15870414551 | from typing import List, Union, Tuple
from functools import reduce
from rdkit import Chem
import torch
import torch.nn as nn
from .args import ModelArgs
from .features import BatchMolGraph, get_atom_fdim, get_bond_fdim, mol2graph
from .nn_utils import index_select_ND, get_activation_function
class MPNEncoder(nn.Module):
"""An :class:`MPNEncoder` is a message passing neural network for encoding a molecule."""
def __init__(self, args: ModelArgs, atom_fdim: int, bond_fdim: int, hidden_size: int = None,
bias: bool = None, depth: int = None):
"""
:param args: A :class:`~chemprop.args.ModelArgs` object containing model arguments.
:param atom_fdim: Atom feature vector dimension.
:param bond_fdim: Bond feature vector dimension.
:param hidden_size: Hidden layers dimension
:param bias: Whether to add bias to linear layers
:param depth: Number of message passing steps
"""
super(MPNEncoder, self).__init__()
self.atom_fdim = atom_fdim
self.bond_fdim = bond_fdim
self.atom_messages = args.atom_messages
self.hidden_size = hidden_size or args.hidden_size
self.bias = bias or args.bias
self.depth = depth or args.depth
self.dropout = args.dropout
self.layers_per_message = 1
self.undirected = args.undirected
self.device = args.device
self.aggregation = args.aggregation
self.aggregation_norm = args.aggregation_norm
# Dropout
self.dropout_layer = nn.Dropout(p=self.dropout)
# Activation
self.act_func = get_activation_function(args.activation)
# Cached zeros
self.cached_zero_vector = nn.Parameter(torch.zeros(self.hidden_size), requires_grad=False)
# Input
input_dim = self.atom_fdim if self.atom_messages else self.bond_fdim
self.W_i = nn.Linear(input_dim, self.hidden_size, bias=self.bias)
if self.atom_messages:
w_h_input_size = self.hidden_size + self.bond_fdim
else:
w_h_input_size = self.hidden_size
# Shared weight matrix across depths (default)
self.W_h = nn.Linear(w_h_input_size, self.hidden_size, bias=self.bias)
self.W_o = nn.Linear(self.atom_fdim + self.hidden_size, self.hidden_size)
def forward(self, mol_graph: BatchMolGraph) -> torch.FloatTensor:
"""
Encodes a batch of molecular graphs.
:param mol_graph: A :class:`~chemprop.features.featurization.BatchMolGraph` representing
a batch of molecular graphs.
:return: A PyTorch tensor of shape :code:`(num_molecules, hidden_size)` containing the encoding of each molecule.
"""
f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope = mol_graph.get_components(atom_messages=self.atom_messages)
f_atoms, f_bonds, a2b, b2a, b2revb = f_atoms.to(self.device), f_bonds.to(self.device), a2b.to(self.device), b2a.to(self.device), b2revb.to(self.device)
if self.atom_messages:
a2a = mol_graph.get_a2a().to(self.device)
# Input
if self.atom_messages:
input = self.W_i(f_atoms) # num_atoms x hidden_size
else:
input = self.W_i(f_bonds) # num_bonds x hidden_size
message = self.act_func(input) # num_bonds x hidden_size
# Message passing
for depth in range(self.depth - 1):
if self.undirected:
message = (message + message[b2revb]) / 2
if self.atom_messages:
nei_a_message = index_select_ND(message, a2a) # num_atoms x max_num_bonds x hidden
nei_f_bonds = index_select_ND(f_bonds, a2b) # num_atoms x max_num_bonds x bond_fdim
nei_message = torch.cat((nei_a_message, nei_f_bonds), dim=2) # num_atoms x max_num_bonds x hidden + bond_fdim
message = nei_message.sum(dim=1) # num_atoms x hidden + bond_fdim
else:
# m(a1 -> a2) = [sum_{a0 \in nei(a1)} m(a0 -> a1)] - m(a2 -> a1)
# message a_message = sum(nei_a_message) rev_message
nei_a_message = index_select_ND(message, a2b) # num_atoms x max_num_bonds x hidden
a_message = nei_a_message.sum(dim=1) # num_atoms x hidden
rev_message = message[b2revb] # num_bonds x hidden
message = a_message[b2a] - rev_message # num_bonds x hidden
message = self.W_h(message)
message = self.act_func(input + message) # num_bonds x hidden_size
message = self.dropout_layer(message) # num_bonds x hidden
a2x = a2a if self.atom_messages else a2b
nei_a_message = index_select_ND(message, a2x) # num_atoms x max_num_bonds x hidden
a_message = nei_a_message.sum(dim=1) # num_atoms x hidden
a_input = torch.cat([f_atoms, a_message], dim=1) # num_atoms x (atom_fdim + hidden)
atom_hiddens = self.act_func(self.W_o(a_input)) # num_atoms x hidden
atom_hiddens = self.dropout_layer(atom_hiddens) # num_atoms x hidden
# Readout
mol_vecs = []
for i, (a_start, a_size) in enumerate(a_scope):
if a_size == 0:
mol_vecs.append(self.cached_zero_vector)
else:
cur_hiddens = atom_hiddens.narrow(0, a_start, a_size)
mol_vec = cur_hiddens # (num_atoms, hidden_size)
if self.aggregation == 'mean':
mol_vec = mol_vec.sum(dim=0) / a_size
elif self.aggregation == 'sum':
mol_vec = mol_vec.sum(dim=0)
elif self.aggregation == 'norm':
mol_vec = mol_vec.sum(dim=0) / self.aggregation_norm
mol_vecs.append(mol_vec)
mol_vecs = torch.stack(mol_vecs, dim=0) # (num_molecules, hidden_size)
return mol_vecs # num_molecules x hidden
class MPN(nn.Module):
"""An :class:`MPN` is a wrapper around :class:`MPNEncoder` which featurizes input as needed."""
def __init__(self,
args: ModelArgs,
atom_fdim: int = None,
bond_fdim: int = None):
"""
:param args: A :class:`~chemprop.args.ModelArgs` object containing model arguments.
:param atom_fdim: Atom feature vector dimension.
:param bond_fdim: Bond feature vector dimension.
"""
super(MPN, self).__init__()
self.atom_fdim = atom_fdim or get_atom_fdim()
self.bond_fdim = bond_fdim or get_bond_fdim(atom_messages=args.atom_messages)
self.device = args.device
self.encoder = nn.ModuleList([MPNEncoder(args, self.atom_fdim, self.bond_fdim)
for _ in range(args.number_of_molecules)])
def forward(self,
batch: Union[List[List[str]], List[List[Chem.Mol]], List[List[Tuple[Chem.Mol, Chem.Mol]]], List[BatchMolGraph]]) -> torch.FloatTensor:
"""
Encodes a batch of molecules.
:param batch: A list of list of SMILES, a list of list of RDKit molecules, or a
list of :class:`~chemprop.features.featurization.BatchMolGraph`.
The outer list or BatchMolGraph is of length :code:`num_molecules` (number of datapoints in batch),
the inner list is of length :code:`number_of_molecules` (number of molecules per datapoint).
:return: A PyTorch tensor of shape :code:`(num_molecules, hidden_size)` containing the encoding of each molecule.
"""
if not isinstance(batch[0], BatchMolGraph):
# Group first molecules, second molecules, etc for mol2graph
batch = [[mols[i] for mols in batch] for i in range(len(batch[0]))]
batch = [mol2graph(mols=b) for b in batch]
encodings = [enc(ba) for enc, ba in zip(self.encoder, batch)]
output = reduce(lambda x, y: torch.cat((x, y), dim=1), encodings)
return output
| gmum/graph-representations | graphrepr/chemprop/mpn.py | mpn.py | py | 8,069 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "args.ModelArgs",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "args.atom_messages",
... |
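A small illustration of what index_select_ND computes in the message-passing loop above; this mirrors the usual chemprop implementation (index_select over the flattened index, then a reshape), with toy shapes:

import torch

message = torch.arange(12.0).reshape(4, 3)   # num_bonds x hidden
a2b = torch.tensor([[1, 2], [0, 3]])         # num_atoms x max_num_bonds
# Gather the rows of `message` listed in a2b: num_atoms x max_num_bonds x hidden
nei_a_message = message.index_select(0, a2b.view(-1)).view(2, 2, 3)
a_message = nei_a_message.sum(dim=1)         # num_atoms x hidden
print(a_message)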
73578540903 | # coding: utf-8
_all_ = [ ]
import os
from pathlib import Path
import sys
parent_dir = os.path.abspath(__file__ + 2 * '/..')
sys.path.insert(0, parent_dir)
from dash import Dash, dcc, html, Input, Output, State, ctx
from dash.exceptions import PreventUpdate
import argparse
import numpy as np
import pandas as pd
import event_processing as processing
app = Dash(__name__)
app.title = '3D Visualization'
app.config['suppress_callback_exceptions'] = True
app.layout = html.Div([
html.Div([
html.Div([dcc.Dropdown(['3D view', 'Layer view'], '3D view', id='page')], style={'width':'15%'}),
], style={'display': 'flex', 'flex-direction': 'row'}),
html.Div(id='page-content')
])
@app.callback(Output('page-content', 'children'),
[Input('page', 'value')])
def render_content(page = '3D view'):
if page == '3D view':
return processing.tab_3d_layout
elif page == 'Layer view':
return processing.tab_layer_layout
@app.callback(Output('event-display', 'children'), Output('out_slider', 'children'), Output('dataframe', 'data'),
[Input('particle', 'value'), Input('tc-cl', 'value'), Input('event-val', 'n_clicks'),
Input('submit-val', 'n_clicks'), Input('mip', 'value'), State('event', 'value')])
def update_event(particle, cluster, n_clicks, submit, mip, event):
df, event = processing.get_data(event, particle)
slider = dcc.RangeSlider(df['layer'].min(),df['layer'].max(), value=[df['layer'].min(), df['layer'].max()], step=None,
marks={int(layer) : {"label": str(layer)} for each, layer in enumerate(sorted(df['layer'].unique()))},
id = 'slider-range')
return u'Event {} selected'.format(event), slider, df.reset_index().to_json(date_format='iso')
@app.callback(Output('graph', 'figure'), Output('slider-container', 'style'),
[Input('submit-layer', 'n_clicks'), Input('dataframe', 'data')],
[Input('layer_sel', 'value'), State('tc-cl', 'value'), State('mip', 'value'),
State('slider-range', 'value'), State('page', 'value')])
def make_graph(submit, data, layer, cluster, mip, slider_value, page):
df = pd.read_json(data, orient='records')
df_sel = df[df.mipPt >= mip]
if layer == 'layer selection':
df_sel = df_sel[(df_sel.layer >= slider_value[0]) & (df_sel.layer <= slider_value[1])]
if cluster == 'cluster':
df_no_cluster = df_sel[df_sel.tc_cluster_id == 0]
df_cluster = df_sel[df_sel.tc_cluster_id != 0]
fig = processing.set_3dfigure(df_cluster)
fig = processing.update_3dfigure(fig, df_no_cluster)
else:
fig = processing.set_3dfigure(df_sel)
if layer == 'display the entire event':
status_slider = {'display': 'none', 'width':'1'}
else:
status_slider = {'display': 'block', 'width':'1'}
return fig, status_slider
@app.callback(Output('graph2d', 'figure'),
[Input('dataframe', 'data'), Input('slider-range', 'value')],
[State('tc-cl', 'value'), State('mip', 'value'), State('page', 'value')])
def make_graph(data, slider_value, cluster, mip, page):
df = pd.read_json(data, orient='records')
df_sel = df.loc[df.mipPt >= mip]
df_sel = df_sel[df_sel.layer == slider_value[1]]
if cluster == 'cluster':
df_no_cluster = df_sel[df_sel.tc_cluster_id == 0]
df_cluster = df_sel[df_sel.tc_cluster_id != 0]
fig = processing.set_2dfigure(df_cluster)
fig = processing.update_2dfigure(fig, df_no_cluster)
else:
fig = processing.set_2dfigure(df_sel)
return fig
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-id','--username',type=str,default=os.getlogin())
parser.add_argument('--host',type=str,default='llruicms01.in2p3.fr')
parser.add_argument('--port',type=int,default=8004)
args = parser.parse_args()
app.run_server(debug=True,
host=args.host,
port=args.port)
| bfonta/bye_splits | bye_splits/plot/display_plotly/main.py | main.py | py | 4,073 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number... |
12536549602 | from datetime import datetime
from .models import Employee, Shift
import pulp
def save_shifts_to_database(prob, x, u, dienstplan, employee_id_to_idx):
num_hours_per_day = len(x)
num_days = len(x[0])
num_employees = len(x[0][0])
for h in range(num_hours_per_day):
for d in range(num_days):
for e in range(num_employees):
if pulp.value(x[h][d][e]) == 1:
                    # Find the matching employee ID
                    employee_id = [k for k, v in employee_id_to_idx.items() if v == e][0]
                    # Load the employee object
                    employee = Employee.objects.get(id=employee_id)
                    # Create a new shift object
                    shift = Shift(
                        dienstplan=dienstplan,
                        employee=employee,
                        start_time=datetime(2023, 1, 1, h).time(),
                        end_time=datetime(2023, 1, 1, h + 1).time(),  # note: assumes h + 1 <= 23
                        # Here we assume that a shift lasts one hour
                        day_of_month=d + 1  # +1 because d starts at 0
                    )
                    # Save the shift object to the database
                    shift.save()
| reneHoellmueller/Algo_schedule | app/schedule/save_shifts_to_database.py | save_shifts_to_database.py | py | 1,300 | python | de | code | 0 | github-code | 36 | [
{
"api_name": "pulp.value",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.Employee.objects.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "models.Employee.objects",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "m... |
11166510919 | #! /usr/bin/env python
# prototype / test - for parsing commands from CLI
# basics: ArgumentParser
# https://docs.python.org/3/howto/argparse.html
#
# ArgumentParser - docs
# https://docs.python.org/3/library/argparse.html#type
# taking had written help like this and generating help using ArgumentParser (Python stdLib)
example_help = '''
- - Help / Exmple use - -
$ cd path
$ .pe # alias .pe='. venv/bin/activate'
$ ./moviepicker/moviepicker.py # plug in all disks - will report each DB contents &
# DUPLICATES that appear across discs
# list movies in DB - ldb
$ ./moviepicker/moviepicker.py -ldb /Volumes/Osx4T/tor/__media_data2/medialib2.pickle
$ ./moviepicker/moviepicker.py -u -d # find info about new additions to movie directory
# - dummy run (NO WRITE)
$ ./moviepicker/moviepicker.py -u # find info about new additions to movie directory UPDATE DB
option
-ec print list of file extension found on default target
-ec /path/ print list of file extension found on path
-d run but don't save results to disk (dummy run)
-u udate entries on default target
-u /path/ udate entries on default target with path
-udev update from local repo movie directory
-ldb /path/medialib2.pickle list entries in a pickleDB
-ldr /path/media list potential entries in a target directory ??
'''
def main(args):
pass
if __name__ == "__main__":
from pprint import pprint
import sys
    # parse args
import argparse
parser = argparse.ArgumentParser(exit_on_error=False,
description='This is a lead in to the help describing the program.\n\n',
epilog='Bug reports to https://github.com/UnacceptableBehaviour.')
# exit_on_error=False allows exception to be caught on error instead of direct exit
#
# annoyingly description & epilog scrub \n so appear as continous text!? :/
#print(example_help)
# examples how to process different types of optiona & arguments
# NON - OPTIONAL argv[1], argv[2]
# COMMENT IN next two lines
parser.add_argument("argv_1", help="path to media files") # argv[1] - ORDER DEPENDANT
parser.add_argument("argv_2", type=int, help="max number of new media to add") # argv[2] - can use ANY name
# ./scripts/cli_parse.py ./movies 10
# Namespace(argv_1='./movies', argv_2='10', update=False, dummy=False)
# --option - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# -o
parser.add_argument("-u", "--update", help="scan for new media & update database ", action="store_true")
parser.add_argument("-d", "--dummy", help="dummy run - report only - NO database update", action="store_true")
# ./scripts/cli_parse.py -u Namespace(dummy=False, update=True)
# ./scripts/cli_parse.py -u -d Namespace(dummy=True, update=True)
# ./scripts/cli_parse.py -ud Namespace(dummy=True, update=True)
#
# ./scripts/cli_parse.py -h
# usage: cli_parse.py [-h] [-u] [-d]
#
# optional arguments:
# -h, --help show this help message and exit
# -u, --update scan for new media & update database << order dependent on how added above ^
# -d, --dummy dummy run - report only - NO database update
# --option value [coices for value] - - - - - - - - - - - - - - - - - - - - - - - - -
parser.add_argument("-v", "--verbosity", type=int, choices=[0, 1, 2], help="set output verbosity")
# list entries in a pickleDB - - - - - - - - - - - - - - - - - - - - - - - - -
# --list_entries_db NON optional_target
# -ldb /path/ print list of file extension found on path
parser.add_argument("-ldb", "--list_entries_db", type=open, help="list media in DBname.pickle at PATH")
# ./scripts/cli_parse.py -ldb < argument missing
# |
# usage: cli_parse.py [-h] [-u] [-d] [-v {0,1,2}] [-ldb LIST_ENTRIES_DB]
# cli_parse.py: error: argument -ldb/--list_entries_db: expected one argument
# specify DB to scan / update - - - - - - - - - - - - - - - - - - - - - - - - -
# --specify_db NON optional_target
# -db /path/ print list of file extension found on path
parser.add_argument("-db", "--specify_db", type=open, help="specify PATH of DB to scan / update")
# list entries in a pickleDB - - - - - - - - - - - - - - - - - - - - - - - - -
# --list_new_entries_in_dir NON optional_target
# -lnd /path/ print list of file extension found on path
parser.add_argument("-lnd", "--list_new_entries_in_dir", type=open, help="scan PATH for new media & in specified DB")
# --extension_scan optional_target - - - - - - - - - - - - - - - - - - -
# -ec print list of file extension found on default target
# -ec /path/ print list of file extension found on path
# --push comment - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# -p comnt
# -p handle missing val
parser.add_argument("-p", "--push", type=ascii, help="push commits to remote with COMMENT")
# list of values - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# file1.cpp file2.cpp file3.c
# --update --dummy - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# -u -d
# -ud
commit_comment = None
try:
args = parser.parse_args() # returns Namespace()
except Exception as e:
pprint(vars(e))
pprint(e)
pprint(e.argument_name)
pprint(sys.argv)
args = None
if '-p' in e.argument_name:
commit_comment = input("Commit comment:") # user forgot comment collect and continue
# need to re-parse
sys.argv.remove('-p') # remove offending arg
args = parser.parse_args() # retry & let argparse report other issues if any issue
#args.add('push', commit_comment) # handle some other way
print(vars(args))
if commit_comment:
print(f"Commit comment: {commit_comment}")
# missing a commit comment
# $ ./scripts/cli_parse.py /forty/five 90 -v 2 -db /Volumes/Osx4T/tor/__media_data2/medialib2.pickle -p
#
# {'argument_name': '-p/--push', 'message': 'expected one argument'}
# ArgumentError(_StoreAction(option_strings=['-p', '--push'], dest='push', nargs=None, const=None, default=None, type=<built-in function ascii>, choices=None, help='push commits to remote with COMMENT', metavar=None), 'expected one argument')
# '-p/--push'
# ['./scripts/cli_parse.py',
# '/forty/five',
# '90',
# '-v',
# '2',
# '-db',
# '/Volumes/Osx4T/tor/__media_data2/medialib2.pickle',
# '-p']
# Commit comment:ardvaark
# final args object + collected comment
# {'argv_1': '/forty/five', 'argv_2': 90, 'update': False, 'dummy': False, 'verbosity': 2, 'list_entries_db': None, 'specify_db': <_io.TextIOWrapper name='/Volumes/Osx4T/tor/__media_data2/medialib2.pickle' mode='r' encoding='UTF-8'>, 'list_new_entries_in_dir': None, 'push': None}
# Commit comment: ardvaark
| UnacceptableBehaviour/movie_picker | scripts/cli_parse.py | cli_parse.py | py | 7,173 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",... |
from flask import Flask, jsonify, request, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///students.sqlite"
db = SQLAlchemy(app)
class students(db.Model):
id = db.Column(db.Integer, unique=True, primary_key=True)
name = db.Column(db.String, unique=False, nullable=False)
grade = db.Column(db.String, unique=False, nullable=True)
@app.route('/', endpoint='main')
def index():
return render_template('grader.html')
@app.route('/grades', methods=['GET'], endpoint='students')
def getStudents():
studentsAll = students.query.with_entities(
students.name, students.grade).order_by(students.name)
studentsList = {}
for student in studentsAll:
studentsList[student.name] = student.grade
print(studentsList)
return jsonify(studentsList)
@app.route('/grades/search/name/<nameSearch>', methods=['GET'], endpoint='studentFindWithName')
def getStudentByName(nameSearch):
studentsAll = students.query.with_entities(
students.name, students.grade).filter_by(name=nameSearch)
studentFound = {}
for student in studentsAll:
studentFound[student.name] = student.grade
return jsonify(studentFound)
@app.route('/grades/search/grade/<gradeSearch>', methods=['GET'], endpoint='studentFindWithGrade')
def getStudentByGrade(gradeSearch):
studentsAll = students.query.with_entities(
students.name, students.grade).filter_by(grade=gradeSearch)
studentFound = {}
for student in studentsAll:
studentFound[student.name] = student.grade
return jsonify(studentFound)
@app.route('/grades', methods=['POST'], endpoint='studentAdd')
def addStudent():
NewStudent = {}
NewStudent.update(request.get_json())
for i in NewStudent:
key = i
value = NewStudent[i]
studentAdd = students(name=key, grade=value)
db.session.add(studentAdd)
db.session.commit()
return '', 204
# distinct path so this handler is reachable (the original reused '/grades',
# which Flask always dispatched to the first matching rule, i.e. addStudent)
@app.route('/grades/edit', methods=['POST'], endpoint='studentEdit')
def editStudent():
    NewStudent = {}
    NewStudent.update(request.get_json())
    for key, value in NewStudent.items():
        # query the mapped object itself (with_entities returns read-only rows)
        student = students.query.filter_by(name=key).first()
        if student is not None:
            student.grade = value
    db.session.commit()
    return '', 204
@app.route('/grades/clear', methods=['POST'], endpoint='studentDeleteGrade')
def deleteStudentGrade():
    NewStudent = {}
    NewStudent.update(request.get_json())
    for key in NewStudent:
        student = students.query.filter_by(name=key).first()
        if student is not None:
            student.grade = ' '
    db.session.commit()
    return '', 204
@app.route('/grades/delete', methods=['POST'], endpoint='studentDelete')
def deleteStudent():
    NewStudent = {}
    NewStudent.update(request.get_json())
    for key in NewStudent:
        # delete() requires a query over the full entity, not selected columns
        students.query.filter_by(name=key).delete()
    db.session.commit()
    return '', 204
if __name__ == '__main__':
app.run(host='localhost', port=5000, debug=True)
| Ernest0G/Grader | grader.py | grader.py | py | 3,221 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.jso... |
11565125878 |
import keras
import morse
import numpy as np
import cwmodel
checkpoint_fn = "weights_detect.h5"
try:
from google.colab import drive
drive.mount('/content/drive')
checkpoint_fn = '/content/drive/MyDrive/Colab Notebooks/' + checkpoint_fn
except:
print("Couldn't mount Google Colab Drive")
model = cwmodel.make_model()
model.summary()
callbacks = [
keras.callbacks.ModelCheckpoint(
checkpoint_fn, save_best_only=True, monitor="binary_accuracy"
),
keras.callbacks.ReduceLROnPlateau(
monitor="binary_accuracy", factor=0.5, patience=10, min_lr=0.0001
),
keras.callbacks.EarlyStopping(monitor="binary_accuracy", patience=50, verbose=1),
]
model.compile(
optimizer="adam",
loss="binary_crossentropy",
metrics=["binary_accuracy"],
)
try:
model.load_weights(checkpoint_fn)
except:
print("could not load weights", checkpoint_fn)
training_generator = cwmodel.DataGenerator()
validation_generator = cwmodel.DataGenerator()
epochs = 500
history = model.fit(
x=training_generator,
#validation_data=validation_generator,
#validation_steps=100,
epochs=epochs,
callbacks=callbacks,
verbose=1,
)
| sehugg/cwkeras | train_detect.py | train_detect.py | py | 1,183 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "google.colab.drive.mount",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "google.colab.drive",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "cwmodel.make_model",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "keras.... |
30922847459 | from pylatex import Document, Section, Figure, SubFigure, Command, Subsection, Package, NoEscape
class PdfGenerator:
def __init__(self, directory, name):
geometry_options = {"right": "1cm", "left": "1cm", "top": "1cm", "bottom": "1cm"}
self.doc = Document("{}/{}-Report".format(directory, name), geometry_options=geometry_options)
self.doc.documentclass = Command(
'documentclass',
options=['10pt', 'vertical'],
arguments=['article'],
)
self.doc.preamble.append(Command("title", name))
self.doc.real_data.append(Command("date", ""))
self.doc.append(NoEscape(r'\maketitle'))
self.doc.packages.append(Package("placeins"))
self.locMapDirectory = "LocationMaps/"
self.callMapDirectory = "SetCallMaps/"
self.ptaMapDirectory = "PTAMaps/"
self.IMPMAPDirectory = "ImportantTimesMaps/"
def createPdf(self, mapInfosList):
self.createLocSection(mapInfosList[0])
self.createCallSection(mapInfosList[1])
self.createPtaSection(mapInfosList[2])
self.createImpSection(mapInfosList[3])
self.createPosResetSection(mapInfosList[4])
self.createNegResetSection(mapInfosList[5])
self.createRunBreakSection(mapInfosList[6])
self.createAllRotationsSection(mapInfosList)
self.finish()
def generateSection(self, section, fileInfos):
for i in range(1, 7):
sub = Subsection("Rotation {}".format(i))
subInfos = fileInfos.getByRotation(i)
lenSub = len(subInfos)
if lenSub != 0:
self.makeImages(sub, subInfos)
section.append(sub)
section.append(Command("FloatBarrier"))
self.doc.append(section)
def createLocSection(self, locFileInfos):
section = Section("Pass Location Maps")
self.generateSection(section, locFileInfos)
def createCallSection(self, callFileInfos):
section = Section("Setter Call Maps")
self.generateSection(section, callFileInfos)
def createPtaSection(self, ptaFileInfos):
section = Section("Pass to Attack Maps")
self.generateSection(section, ptaFileInfos)
def createImpSection(self, impFileInfos):
section = Section("Important Times Maps")
self.generateSection(section, impFileInfos)
def createPosResetSection(self, posResetInfos):
section = Section("Positive Reset Maps")
self.generateSection(section, posResetInfos)
def createNegResetSection(self, negResetInfos):
section = Section("Negative Reset Maps")
self.generateSection(section, negResetInfos)
def createRunBreakSection(self, runBreakInfos):
section = Section("Run Breaking Decision Maps")
self.generateSection(section, runBreakInfos)
def createAllRotationsSection(self, allInfos):
section = Section("All Rotations")
locSub = Subsection("Pass Location")
self.generateAllRotationSubsection(allInfos[0], locSub, section)
callSub = Subsection("Setter Call")
self.generateAllRotationSubsection(allInfos[1], callSub, section)
ptaSub = Subsection("Pass To Attack")
self.generateAllRotationSubsection(allInfos[2], ptaSub, section)
impSub = Subsection("Important Times")
self.generateAllRotationSubsection(allInfos[3], impSub, section)
posSub = Subsection("Positive Reset")
self.generateAllRotationSubsection(allInfos[4], posSub, section)
negSub = Subsection("Negative Reset")
self.generateAllRotationSubsection(allInfos[5], negSub, section)
runBSub = Subsection("Run Break")
self.generateAllRotationSubsection(allInfos[6], runBSub, section)
self.doc.append(section)
def generateAllRotationSubsection(self, infos, subsection, section):
subInfos = infos.getByRotation("All Rotations")
if len(subInfos) != 0:
self.makeImages(subsection, subInfos)
section.append(subsection)
section.append(Command("FloatBarrier"))
def makeImages(self, section, fileInfos):
figure = Figure(position="h")
for index, infos in enumerate(fileInfos):
image = self.createImage(infos.filename, infos.caption)
figure.append(image)
if index % 2 == 1:
section.append(figure)
figure = Figure(position="h")
if index == len(fileInfos) - 1:
section.append(figure)
def createImage(self, filename, captionString):
fig = SubFigure(position="h")
file, typ = filename.split(".")
fig.add_image("\"{}\".{}".format(file, typ), width="240px")
fig.add_caption(captionString)
return fig
def finish(self):
self.doc.generate_pdf(clean_tex=True, compiler='pdflatex')
| bgreni/Set_Data_Report | PdfGenerator.py | PdfGenerator.py | py | 4,903 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pylatex.Document",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pylatex.Command",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pylatex.Command",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pylatex.Command",
... |
74248266663 | import argparse
import sys
import nested_diff
import nested_diff.cli
class App(nested_diff.cli.App):
"""Diff tool for nested data structures."""
supported_ofmts = ('auto', 'html', 'json', 'term', 'toml', 'text', 'yaml')
def diff(self, a, b):
"""
Return diff for passed objects.
:param a: First object to diff.
:param b: Second object to diff.
"""
return nested_diff.diff(
a, b,
multiline_diff_context=self.args.text_ctx,
A=self.args.A,
N=self.args.N,
O=self.args.O, # noqa: E741
R=self.args.R,
U=self.args.U,
)
def get_optional_args_parser(self):
parser = super().get_optional_args_parser()
parser.add_argument(
'--text-ctx',
default=3,
metavar='NUM',
type=int,
help='amount of context lines for multiline strings diffs; '
'negative value will disable multiline diffs, default is '
'"%(default)s"',
)
parser.add_argument(
'--out',
default=sys.stdout,
metavar='FILE',
type=argparse.FileType('w'),
help='output file; STDOUT is used if omitted',
)
parser.add_argument('-A', type=int, choices=(0, 1), default=1,
help='show added items; enabled by default')
parser.add_argument('-N', type=int, choices=(0, 1), default=1,
help="show item's new values; enabled by default")
parser.add_argument('-O', type=int, choices=(0, 1), default=1,
help="show item's old values; enabled by default")
parser.add_argument('-R', type=int, choices=(0, 1), default=1,
help='Show removed items; enabled by default')
parser.add_argument('-U', type=int, choices=(0, 1), default=0,
help='show unchanged items; disabled by default')
return parser
def get_positional_args_parser(self):
parser = super().get_positional_args_parser()
parser.add_argument('file1', type=argparse.FileType())
parser.add_argument('file2', type=argparse.FileType())
return parser
def get_dumper(self, fmt, **kwargs):
if fmt == 'auto':
if self.args.out.isatty():
fmt = 'term'
else:
fmt = 'text'
if fmt == 'term':
return TermDumper(**kwargs)
elif fmt == 'text':
return TextDumper(**kwargs)
elif fmt == 'html':
return HtmlDumper(**kwargs)
return super().get_dumper(fmt, **kwargs)
def run(self):
diff = self.diff(
self.load(self.args.file1),
self.load(self.args.file2),
)
exit_code = 0 if not diff or 'U' in diff else 1
self.dump(self.args.out, diff, self.args.ofmt)
return exit_code
class AbstractFmtDumper(nested_diff.cli.Dumper):
"""Base class for diff dumpers."""
def encode(self, data):
return self.encoder.format(data)
@staticmethod
def get_opts(opts):
opts.setdefault('sort_keys', True)
return opts
class HtmlDumper(AbstractFmtDumper):
"""Human friendly HTML dumper for nested diff."""
def __init__(self, **kwargs):
super().__init__()
from html import escape
from nested_diff import fmt
self.html_opts = {
'lang': 'en',
'title': 'Nested diff',
}
self.html_opts.update(kwargs.pop('html_opts', {}))
self.formatter = fmt.HtmlFormatter(**self.get_opts(kwargs))
if 'header' not in self.html_opts:
self.html_opts['header'] = (
'<!DOCTYPE html><html lang="' + self.html_opts['lang'] +
'"><head><title>' + escape(self.html_opts['title']) +
'</title><style>' + self.formatter.get_css() +
'</style></head><body>'
)
if 'footer' not in self.html_opts:
self.html_opts['footer'] = '</body></html>'
def encode(self, data):
return self.formatter.format(
data,
header=self.html_opts['header'],
footer=self.html_opts['footer'],
)
class TermDumper(AbstractFmtDumper):
"""Same as TextDumper but with ANSI term colors."""
def __init__(self, **kwargs):
super().__init__()
from nested_diff import fmt
self.encoder = fmt.TermFormatter(**self.get_opts(kwargs))
class TextDumper(AbstractFmtDumper):
"""Human friendly text dumper for nested diff."""
def __init__(self, **kwargs):
super().__init__()
from nested_diff import fmt
self.encoder = fmt.TextFormatter(**self.get_opts(kwargs))
def cli():
"""Cli tool entry point."""
return App().run()
| grafviz/hatvp-json | modif_nested_diff/diff_tool.py | diff_tool.py | py | 4,955 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nested_diff.cli",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "nested_diff.diff",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "argparse.FileTyp... |
11828562957 | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 21 10:41:14 2017
@author: Juan Antonio Barragán Noguera
@email: jabarragann@unal.edu.co
"""
import matplotlib.pyplot as plt
import numpy as np
FS=8000
TS=1/FS
f=1500
x=np.arange(0,200)
y=np.sin(2*np.pi*f*TS*x)
plt.stem(x, y)
plt.show()  # needed to display the plot outside interactive mode
| jabarragann/DiscretSignalsUnal | temp.py | temp.py | py | 283 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.arange",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.stem",
"li... |
10955703754 | from datetime import datetime, timedelta
from itertools import islice, tee, izip
from django.conf import settings
from django.db import models
# output log if difference in traffic is less than than TRAFFIC_DELTA_MIN
TRAFFIC_DELTA_MIN = 1000.0
SLIDING_WINDOW_LEN = 4
LINK_ALIVE_INTERVAL = getattr(settings, 'GMAP_LINK_ALIVE_INTERVAL', 5)
class BandwidthManager(models.Manager):
"""Manager that calculates rates (bandwidths) from traffic"""
def _window(self, iterator, n):
"""Returns a sliding window (of width n) over data from the iterable
s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ..."""
# from http://docs.python.org/release/2.3.5/lib/itertools-example.html
result = tuple(islice(iterator, n))
if len(result) == n:
yield result
for elem in iterator:
result = result[1:] + (elem,)
yield result
def _get_rate(self, attr, x0, x1):
"""Return rate (bandwidth) from traffic values"""
time_delta = float(x1.time - x0.time)
bits_delta = getattr(x1, attr) - getattr(x0, attr)
if time_delta == 0:
return 0
rate = bits_delta / time_delta
return rate
def rate(self, link):
"""Return a rate tuple (rx, tx) for a link in Bps.
Args:
link: link id
"""
try:
# Get the two most recent entries
t1,t0 = self.filter(link=link).order_by('-update_date')[:2]
except (IndexError, ValueError) :
return (0, 0)
else:
#alive_int = datetime.now() - t1.update_date
#if alive_int > timedelta(seconds=LINK_ALIVE_INTERVAL):
# # link is inactive
# return (0, 0)
rx, tx = self._get_rate('rx', t0, t1), self._get_rate('tx', t0, t1)
# Test for negative rate:
# Negative rates happen when the lastest report's (rx,tx)
# is less than the penultimate one: a counter rolls over.
if rx < 0: rx = 1
if tx < 0: tx = 1
return (rx, tx)
def rates(self, direction, link, window_len=SLIDING_WINDOW_LEN):
"""Return a list of average rates
Args:
direction: 'rx' or 'tx' traffic
link: link id
"""
qs = Bandwidth.objects.filter(link=link).order_by('update_date')
itr = qs.iterator()
rates = []
for swin in self._window(itr, window_len):
rates.append(self._get_rate(direction, swin[0], swin[-1]))
return rates
class Bandwidth(models.Model):
"""Bandwidth on an NDN network link"""
link = models.IntegerField()
time = models.FloatField()
rx = models.BigIntegerField()
tx = models.BigIntegerField()
update_date = models.DateTimeField(default=datetime.now,
editable=False, db_index=True)
class Meta:
ordering = ('-update_date', )
objects = BandwidthManager()
def save(self, *args, **kwargs):
if not self.rx and not self.tx:
return
else:
super(Bandwidth, self).save(*args, **kwargs)
def __unicode__(self):
return 'link {0}'.format(self.link)
| shakirjames/ndnmap | gmap/models.py | models.py | py | 3,287 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.conf.settings",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "django.db.models.Manager",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 12,
"usage_type": "name"
},
{
"api_name":... |
18363762101 | from Bio import SeqIO, Entrez
from Bio.SeqFeature import FeatureLocation
import json, os, doctest
Entrez.email = "fayssal.el.ansari@gmail.com"
# TODO: this function should be modified to accept
# a seqRecord holding several sequences
def find_cds(seqRecord):  # works
    '''Return the list of (start, end) position pairs of the CDS features contained in the sequence seqRecord
    Example:
    >>> genebank = SeqIO.read("../data/NM_007389.gb", "genbank")
    >>> find_cds(genebank)
    [(ExactPosition(51), ExactPosition(1425))]
    '''
liste_positions = []
for feature in seqRecord.features:
if feature.type == "CDS":
liste_positions.append((feature.location.start, feature.location.end))
return liste_positions
def geneID(NM):
"""
    returns the list of numeric geneID identifiers:
>>> geneID("NM_007389")
['11435']
"""
handle_elink = Entrez.elink(dbfrom="nucleotide",db="gene", id=NM)
record_elink = Entrez.read(handle_elink)
handle_elink.close()
l=[]
print(record_elink)
for link in record_elink[0]["LinkSetDb"][0]["Link"]:
l.append(link["Id"])
return l
def mrna_to_gene(gene_NM_):
'''
    returns the gene identifier (gene_ID) (of type <class 'Bio.Entrez.Parser.StringElement'>)
>>> mrna_to_gene("NM_007389")
['11435']
'''
try:
handle_elink = Entrez.elink(dbfrom="nucleotide",db="gene", id=gene_NM_)
record_elink = Entrez.read(handle_elink)
handle_elink.close()
assert record_elink != []
l=[]
assert record_elink[0]["LinkSetDb"] != [] #mrna_to_gene("19304878") il est correct mais ...
for link in record_elink[0]["LinkSetDb"][0]["Link"]:
l.append(link["Id"])
return l
except AssertionError as ve:
return ValueError(str(gene_NM_) + " : wrong id value passed")
def numero_accession(gene_ID):
"""
    returns the chromosome accession number (starting with NC_):
>>> numero_accession(mrna_to_gene("NM_007389"))
'NC_000068.8'
"""
try:
handle_esummary = Entrez.esummary(dbfrom="nucleotide", db="gene", id=gene_ID)
record_esummary = Entrez.read(handle_esummary)
handle_esummary.close()
return record_esummary["DocumentSummarySet"]["DocumentSummary"][0]["GenomicInfo"][0]["ChrAccVer"]
except AssertionError as ve:
return ValueError(str(gene_ID) + " : wrong id value passed")
def postion_chromosomique_gene_debut(gene_ID):
"""
    returns the chromosomal start position of the gene:
>>> postion_chromosomique_gene_debut(mrna_to_gene("NM_007389"))
'73410681'
"""
try:
handle_esummary = Entrez.esummary(dbfrom="nucleotide", db="gene", id=gene_ID)
record_esummary = Entrez.read(handle_esummary)
handle_esummary.close()
return record_esummary["DocumentSummarySet"]["DocumentSummary"][0]["GenomicInfo"][0]["ChrStart"]
except AssertionError as ve:
        raise ValueError(str(gene_ID) + " : wrong id value passed")
def postion_chromosomique_gene_fin(gene_ID):
"""
    Return the gene's chromosomal end position on the chromosome:
>>> postion_chromosomique_gene_fin(mrna_to_gene("NM_007389"))
'73393624'
"""
try:
handle_esummary = Entrez.esummary(dbfrom="nucleotide", db="gene", id=gene_ID)
record_esummary = Entrez.read(handle_esummary)
handle_esummary.close()
return record_esummary["DocumentSummarySet"]["DocumentSummary"][0]["GenomicInfo"][0]["ChrStop"]
except AssertionError as ve:
        raise ValueError(str(gene_ID) + " : wrong id value passed")
def compare_rec_seq(record1, record2):
'''
    this function takes 2 records as parameters and compares them;
    it prints the result of the comparison in the terminal
    and returns the result as a boolean
    '''
    resultat = str(record1.seq) == str(record2.seq)
    if (resultat): # True
        print(" The 2 sequences are identical")
    else:
        print(" The 2 sequences are NOT identical!")
return resultat
def upstream_gene_seq(pmid):
'''
    given a gene identifier and a length, returns a Biopython Bio.Seq object
    corresponding to the upstream DNA sequence of that gene, of the requested length
    /!\ (mind the strand the gene is on).
    '''
    """ returns the accession number"""
handle = Entrez.esummary(db="gene", id=mrna_to_gene(pmid))
record = Entrez.read(handle)
handle.close()
print(json.dumps(record, indent=2, separators=(", ", " : ")))
# print(record["DocumentSummarySet"]["DocumentSummary"][0]["GenomicInfo"][0]["ChrAccVer"])
return record["DocumentSummarySet"]["DocumentSummary"][0]["GenomicInfo"][0]["ChrAccVer"]
def download_promotors(l_mrna, taille_seq, dir="."):
'''
    this function takes as parameters:
    * l_mrna: a list of mRNA identifiers,
    * taille_seq: the length of gene sequence to retrieve
    * dir: the destination directory
    Given a list of mRNA identifiers, a promoter sequence length,
    and an output directory (. by default), downloads the promoter
    sequences of these mRNAs to separate files in FASTA format
'''
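    # NOTE: taille_seq and dir are not used yet; for now each record's full FASTA is simply fetched and printed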
for mrna in l_mrna:
nom_fichier = str(mrna) + "_" + str(taille_seq) + ".fasta"
chemin_fichier = os.path.join(os.getcwd(), "data", nom_fichier)
# id = Entrez.read(Entrez.esearch(db="genbank", term=mrna))
id = mrna_to_gene(mrna)
print(id)
fast_handle = Entrez.efetch(db="nucleotide", id=id, rettype="fasta", retmode="text")
fast_record = fast_handle.read()
print(fast_record)
# sequence = Entrez.read(gb_handle)
# print(sequence)
fast_handle.close()
if __name__ == "__main__":
list_mrna = ["NM_007389", "NM_079420", "NM_001267550", "NM_002470", "NM_003279", "NM_005159", "NM_003281", "NM_002469", "NM_004997", "NM_004320", "NM_001100", "NM_006757"]
# download_promotors(list_mrna, 1024, "../data")
# doctest.testmod()
| fayssalElAnsari/Bioinformatics-python-sequence-analyser | app/src/utilsTest.py | utilsTest.py | py | 6,111 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "Bio.Entrez.email",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "Bio.Entrez",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "Bio.Entrez.elink",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "Bio.Entrez",
"lin... |
3745949617 | # pylint: disable=C0413
# Standard Library
import logging
import os
import urllib
# Third Party
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# First Party
from resc_backend.db.model import Base
basedir = os.path.abspath(os.path.dirname(__file__))
logger = logging.getLogger(__name__)
DB_CONNECTION_STRING = os.environ.get("DB_CONNECTION_STRING")
try:
env_variables = {}
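    # URL-encode password-like environment variables so special characters are safe in the connection string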
for key, value in os.environ.items():
if "pass" in key.lower():
env_variables[key] = urllib.parse.quote(value)
else:
env_variables[key] = value
DB_CONNECTION_STRING = DB_CONNECTION_STRING.format(**env_variables)
except AttributeError:
logger.warning("Missing DB Connection environment variables, using SQLite database")
DB_CONNECTION_STRING = ""
SQL_ALCHEMY_POOL_SIZE_DEFAULT = 25
SQL_ALCHEMY_MAX_OVERFLOW_DEFAULT = 15
echo_queries = os.getenv('SQL_ALCHEMY_ECHO_QUERIES', 'False').lower() in ('true', '1', 'y')
pool_size = int(os.environ.get("SQL_ALCHEMY_POOL_SIZE", SQL_ALCHEMY_POOL_SIZE_DEFAULT))
max_overflow = int(os.environ.get("SQL_ALCHEMY_MAX_OVERFLOW", SQL_ALCHEMY_MAX_OVERFLOW_DEFAULT))
if DB_CONNECTION_STRING:
logger.info("Using provided environment variable to connect to the Database")
engine = create_engine(DB_CONNECTION_STRING, echo=echo_queries, pool_size=pool_size, max_overflow=max_overflow)
else:
DATABASE_URL = 'sqlite:///' + os.path.join(basedir, 'db.sqlite?check_same_thread=False')
logger.info(f"Database environment variables were not provided, defaulting to {DATABASE_URL}")
engine = create_engine(DATABASE_URL, echo=echo_queries)
Base.metadata.create_all(engine, checkfirst=True)
Session = sessionmaker()
| abnamro/repository-scanner | components/resc-backend/src/resc_backend/db/connection.py | connection.py | py | 1,725 | python | en | code | 137 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
... |
7110168618 | from django.shortcuts import render, get_object_or_404
from django.views import generic
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
from CodeConfab.models import Profile, Language,Post,FriendRequest,Poke, Prompt,Resources,Comment,Reply
from django.views.generic import DetailView
from django.contrib.auth.decorators import login_required
from CodeConfab.forms import PostForm,CommentForm,ReplyForm
from django.shortcuts import redirect
from django.urls import reverse
from django.utils import timezone
from django.http import HttpResponseRedirect
from django.contrib import messages
from django.core.paginator import Paginator
# Create your views here.
@login_required
def home(request):
if request.user.is_authenticated:
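        # only show posts whose language is one the user follows, newest first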
post_list = Post.objects.filter(language__id__in = request.user.language_set.values_list('id',flat = True)).order_by('-created')
paginator = Paginator(post_list, 10)
page = request.GET.get('page')
posts = paginator.get_page(page)
resources = Resources.objects.all()
return render (request, 'Confab/home.html', {'posts':posts, 'resources':resources})
else:
return render(request, 'Confab/index.html')
def index(request):
if request.user.is_authenticated:
post_list = Post.objects.filter(language__id__in = request.user.language_set.values_list('id',flat = True)).order_by('-created')
paginator = Paginator(post_list, 10)
page = request.GET.get('page')
posts = paginator.get_page(page)
resources = Resources.objects.all()
return render (request, 'Confab/home.html', {'posts':posts, 'resources':resources})
else:
return render(request, 'Confab/index.html')
@login_required
def post(request, user):
if request.method == 'POST':
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit= False)
post.user_id = request.user.id
post.save()
            messages.add_message(request, messages.SUCCESS, 'Successful, your post has been added, you can poke a friend or two if you want them to help.')
return redirect(reverse('confab:home'))
else:
header = 'New Post'
        form = PostForm()
return render(request, 'Confab/new2.html', {'form':form, 'header':header})
@login_required
def postdetail(request,user,pk, postslug):
if request.method == 'POST':
post = get_object_or_404(Post, pk=pk)
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit = False)
comment.user_id = request.user.id
comment.post_id = post.id
comment.save()
messages.add_message(request, messages.SUCCESS, 'Successful, Comment Added.')
return redirect(reverse('confab:post_detail', args=[user, pk, postslug] ))
else:
        form = CommentForm()
post = Post.objects.get(pk = pk)
return render(request ,'Confab/post_detail.html', { 'post':post, 'form':form})
@login_required
def commentdetail(request, user, slug, com):
if request.method == 'POST':
comment = Comment.objects.get(pk = com )
form = ReplyForm(request.POST)
if form.is_valid():
reply = form.save(commit = False)
reply.user_id = request.user.id
reply.comment_id = com
reply.save()
messages.add_message(request, messages.SUCCESS, 'Successful, Reply added.')
return redirect(reverse('confab:comment_detail', args=[user, slug, com] ))
else:
        form = ReplyForm()
comment = Comment.objects.get(pk = com)
return render(request ,'Confab/comment_detail.html', { 'comment':comment, 'form':form})
@receiver(post_save, sender = User)
def createuserprofile(sender, **kwargs):
if kwargs['created']:
user_profile =Profile.objects.create(user = kwargs['instance'], date_joined = timezone.now())
class LanguageView(DetailView):
model = Language
template_name = 'Confab/language.html'
@login_required
def ConnectionsView(request, user):
users = Profile.objects.exclude(user = request.user).exclude(user__is_staff = True).exclude(user__in =request.user.profile.friend.values_list('user',flat = True))
return render( request, 'Confab/connections.html', {'users':users})
@login_required
def Connect(request, user):
if request.method == 'POST':
user = get_object_or_404(User, username = user)
friend_request, created = FriendRequest.objects.get_or_create(
sender = request.user,
receiver = user
)
messages.add_message(request, messages.SUCCESS, 'Successful, a connection request has been sent to'+ ' ' + user.username)
return redirect(reverse('account:pub_profile', args = [user]))
@login_required
def DeleteRequest(request, reqid):
if request.method =='POST':
user = get_object_or_404(User, id = request.user.id)
con_request = FriendRequest.objects.get(id = reqid)
con_request.delete()
messages.add_message(request, messages.SUCCESS, 'Successful, friend request deleted.')
return redirect(reverse('confab:connections', args = [user]))
@login_required
def AcceptRequest(request, user, id):
if request.method == 'POST':
con_request = FriendRequest.objects.get(id = id)
users = User.objects.exclude(username = 'admin').exclude(username = request.user.username)
requests = FriendRequest.objects.filter(receiver_id = request.user.id)
sender_user = get_object_or_404(User, username = user)
sender = Profile.objects.get(user_id = sender_user.id)
receiver = Profile.objects.get(user_id = request.user.id)
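        # link the two profiles, then delete the now-fulfilled request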
receiver.friend.add(sender)
con_request.delete()
messages.add_message(request, messages.SUCCESS, 'Successful, you are now connected to'+ ' ' + sender.user.username)
        return redirect(reverse('confab:connections', args = [request.user]))
@login_required
def DeleteConnection(request, friend):
if request.method == 'POST':
friend = Profile.objects.get(id = friend)
receiver = Profile.objects.get(user_id = request.user.id)
receiver.friend.remove(friend)
user = friend.user
messages.add_message(request, messages.SUCCESS, 'Successful, you are no longer connected to'+ ' ' + user.username)
return redirect(reverse('account:pub_profile', args = [user]))
@login_required
def PokeList(request,slug, post):
post = get_object_or_404(Post, id = post)
return render(request, 'Confab/poke_list.html', {'post':post})
@login_required
def PromptList(request,slug, post):
post = get_object_or_404(Post, id = post)
return render(request, 'Confab/prompt_list.html', {'post':post})
@login_required
def PokeFriend(request, post):
if request.method == 'POST':
user_list = request.POST.getlist("connections")
users = User.objects.filter(username__in = user_list)
post = get_object_or_404(Post, id = post)
for pokee in users:
poke, created = Poke.objects.get_or_create(
poker = request.user,
poked = pokee,
post = post
)
messages.add_message(request, messages.SUCCESS, 'Poke Successful, your friends will be notified')
return redirect(reverse('confab:home'))
@login_required
def PromptFriend(request, post):
if request.method == 'POST':
user_list = request.POST.getlist("connections")
users = User.objects.filter(username__in = user_list)
post = get_object_or_404(Post, id = post)
for promptee in users:
prompt, created = Prompt.objects.get_or_create(
prompter = request.user,
prompted = promptee,
post = post
)
messages.add_message(request, messages.SUCCESS, 'Prompt Successful, your friends will be notified')
return redirect(reverse('confab:home'))
@login_required
def SeePoke(request, id):
if request.method == 'POST':
poke = get_object_or_404(Poke, id = id)
if poke.seen:
return redirect(reverse('confab:post_detail', args=[poke.post.user, poke.post.pk, poke.post.slug]))
else:
poke.seen = True
poke.save()
return redirect(reverse('confab:post_detail', args=[poke.post.user, poke.post.pk, poke.post.slug]))
@login_required
def SeePrompt(request, id):
if request.method == 'POST':
prompt = get_object_or_404(Prompt, id = id)
if prompt.seen:
return redirect(reverse('confab:post_detail', args=[prompt.post.user, prompt.post.pk, prompt.post.slug]))
else:
prompt.seen = True
prompt.save()
return redirect(reverse('confab:post_detail', args=[prompt.post.user, prompt.post.pk, prompt.post.slug]))
@login_required
def Notifications(request, user):
return render(request, 'Confab/notifications.html')
def Help(request):
return render(request, 'Confab/help.html')
def About(request):
return render(request, 'Confab/about.html')
def ResourcesView(request):
if request.method == 'POST':
title=request.POST.get('title')
if title != '':
resource_list = Resources.objects.filter(title__contains = title)
else:
resource_list =[]
else:
resource_list = Resources.objects.filter(language__id__in = request.user.language_set.values_list('id',flat = True))
paginator = Paginator(resource_list, 10)
page = request.GET.get('page')
resources = paginator.get_page(page)
return render(request, 'Confab/resources.html', {'resources':resources})
def SuggestFriends(request, user):
Suggestions = User.objects.filter(language__id__in = request.user.language_set.values_list('id',flat = True)).exclude(username = request.user).distinct().exclude(id__in = request.user.profile.friend.values_list('user_id',flat = True))
return render(request, 'Confab/Suggestions.html', {'Suggestions':Suggestions})
def FindFriends(request, user):
if request.method == 'POST':
username=request.POST.get('username')
if username != '':
Suggestions = User.objects.filter(username__contains = username).exclude(is_staff = True)
else:
Suggestions = None
return render(request, 'Confab/Friendresults.html', {'Suggestions':Suggestions})
def Contact(request):
return render(request, 'Confab/contact.html')
| Tekkieware/CodeConfab | CodeConfab/views.py | views.py | py | 10,814 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "CodeConfab.models.Post.objects.filter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "CodeConfab.models.Post.objects",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "CodeConfab.models.Post",
"line_number": 23,
"usage_type": "name... |
37005491530 | import pytest
import re
from itertools import chain
from pathlib import Path
from sphinx.application import Sphinx
from sphinx.util.docutils import docutils_namespace
from sphinx.testing.restructuredtext import parse as sphinx_parse
from .diffpdf import diff_pdf
from .pdf_linkchecker import check_pdf_links
from .util import in_directory
from rinoh.frontend.rst import ReStructuredTextReader, from_doctree
from rinoh.frontend.sphinx import nodes # load Sphinx docutils nodes
from rinoh.attribute import OverrideDefault, Var
from rinoh.template import (DocumentTemplate, TemplateConfiguration,
ContentsPartTemplate, PageTemplate,
TemplateConfigurationFile)
__all__ = ['render_doctree', 'render_rst_file']
TEST_DIR = Path(__file__).parent.parent.absolute()
OUTPUT_DIR = TEST_DIR / 'output'
class MinimalTemplate(DocumentTemplate):
stylesheet = OverrideDefault('sphinx_base14')
parts = OverrideDefault(['contents'])
contents = ContentsPartTemplate()
page = PageTemplate(page_size=Var('paper_size'),
chapter_title_flowables=None,
header_text=None,
footer_text=None)
contents_page = PageTemplate(base='page')
def _render_rst(rst_path, doctree, out_filename, reference_path, warnings=[]):
kwargs = {}
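    # pick up an .rts stylesheet or .rtt template configuration sitting next to the .rst file, if present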
stylesheet_path = rst_path.with_suffix('.rts')
if stylesheet_path.exists():
kwargs['stylesheet'] = str(stylesheet_path)
templconf_path = rst_path.with_suffix('.rtt')
if templconf_path.exists():
config = TemplateConfigurationFile(str(templconf_path))
else:
config = TemplateConfiguration('rst', template=MinimalTemplate, **kwargs)
config.variables['paper_size'] = 'a5'
render_doctree(doctree, out_filename, reference_path, config, warnings)
def render_rst_file(rst_path, out_filename, reference_path):
reader = ReStructuredTextReader()
doctree = reader.parse(rst_path)
return _render_rst(rst_path, doctree, out_filename, reference_path)
def render_sphinx_rst_file(rst_path, out_filename, reference_path,
test_output_dir):
with docutils_namespace():
out_dir = str(test_output_dir)
app = Sphinx(srcdir=str(rst_path.parent), confdir=None, outdir=out_dir,
doctreedir=out_dir, buildername='dummy', status=None)
with open(rst_path) as rst_file:
contents = rst_file.read()
sphinx_doctree = sphinx_parse(app, contents)
doctree = from_doctree(rst_path.name, sphinx_doctree)
docinfo = sphinx_doctree.settings.env.metadata['index']
return _render_rst(rst_path, doctree, out_filename, reference_path,
warnings=docinfo.get('warnings', '').splitlines())
def render_doctree(doctree, out_filename, reference_path,
template_configuration=None, warnings=[]):
if template_configuration:
document = template_configuration.document(doctree)
else:
document = MinimalTemplate(doctree)
output_dir = OUTPUT_DIR / out_filename
output_dir.mkdir(parents=True, exist_ok=True)
with pytest.warns(None) as recorded_warnings:
document.render(output_dir / out_filename)
if 'warnings' in document.metadata:
warnings_node = document.metadata['warnings'].source.node
warnings = chain(warnings_node.rawsource.splitlines(), warnings)
for warning in warnings:
if not any(re.search(warning, str(w.message)) for w in recorded_warnings):
pytest.fail('Expected warning matching "{}"'.format(warning))
verify_output(out_filename, output_dir, reference_path)
def render_sphinx_project(name, project_dir, template_cfg=None, stylesheet=None):
project_path = TEST_DIR / project_dir
out_path = OUTPUT_DIR / name
confoverrides = {}
if template_cfg:
confoverrides['rinoh_template'] = str(TEST_DIR / template_cfg)
if stylesheet:
confoverrides['rinoh_stylesheet'] = str(TEST_DIR / stylesheet)
with docutils_namespace():
sphinx = Sphinx(srcdir=str(project_path),
confdir=str(project_path),
outdir=str(out_path / 'rinoh'),
doctreedir=str(out_path / 'doctrees'),
buildername='rinoh',
confoverrides=confoverrides)
sphinx.build()
out_filename = '{}.pdf'.format(name)
with in_directory(out_path):
if not diff_pdf(TEST_DIR / 'reference' / out_filename,
out_path / 'rinoh' / out_filename):
pytest.fail('The generated PDF is different from the reference '
'PDF.\nGenerated files can be found in {}'
.format(out_path))
def verify_output(out_filename, output_dir, reference_path):
pdf_filename = '{}.pdf'.format(out_filename)
_, _, _, _, _, _, ref_outlines = \
check_pdf_links(reference_path / pdf_filename)
with in_directory(output_dir):
_, _, _, badlinks, _, _, outlines = check_pdf_links(pdf_filename)
pytest.assume(badlinks == [])
pytest.assume(ref_outlines == outlines)
if not diff_pdf(reference_path / pdf_filename, pdf_filename):
pytest.fail('The generated PDF is different from the reference '
'PDF.\nGenerated files can be found in {}'
.format(output_dir))
| Chris-Jr-Williams/rinohtype | tests_regression/helpers/regression.py | regression.py | py | 5,470 | python | en | code | null | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "rinoh.template.DocumentTemplate",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "rinoh.attribute.OverrideDefault",
"line_number": 30,
"usage_type": "call"
},
{
"api_... |
28431033139 | # -*- coding: utf-8 -*-
from __future__ import print_function
import time
import pygame
import OpenGL.GL as gl
import OpenGL.GLU as glu
import numpy as np
import itertools
import fractions
import copy
#local imports
from common import COLORS, DEBUG, VSYNC_PATCH_HEIGHT_DEFAULT, VSYNC_PATCH_WIDTH_DEFAULT, DEFAULT_FLASH_RATE
from common import UserEscape
from screen import Screen
from checkerboard import CheckerBoard
class TripleCheckerBoardFlasher(Screen):
def setup(self,
nrows,
nrows_center = 1,
check_width = None,
check_width_center = 0.5,
check_color1 = 'white',
check_color2 = 'black',
screen_background_color = 'neutral-gray',
show_fixation_dot = False,
flash_rate_left = DEFAULT_FLASH_RATE,
flash_rate_right = DEFAULT_FLASH_RATE,
flash_rate_center = DEFAULT_FLASH_RATE,
#rate_compensation = None,
vsync_patch = None,
):
Screen.setup(self,
background_color = screen_background_color,
vsync_patch = vsync_patch,
)
#run colors through filter to catch names and convert to RGB
check_color1 = COLORS.get(check_color1, check_color1)
check_color2 = COLORS.get(check_color2, check_color2)
# set checkerboard-related attributes
if check_width is None:
check_width = 2.0/nrows #fill whole screen
self.board_width = check_width*nrows
self.board_width_center = check_width_center * nrows_center
self.nrows = nrows
self.CB1 = CheckerBoard(nrows, check_width, color1 = check_color1, color2 = check_color2, show_fixation_dot = show_fixation_dot)
self.CB2 = CheckerBoard(nrows, check_width, color1 = check_color2, color2 = check_color1, show_fixation_dot = show_fixation_dot) #reversed pattern
self.CB1_center = CheckerBoard(nrows_center, check_width_center, color1 = check_color1, color2 = check_color2, show_fixation_dot = False)#show_fixation_dot)
self.CB2_center = CheckerBoard(nrows_center, check_width_center, color1 = check_color2, color2 = check_color1, show_fixation_dot = False)#show_fixation_dot)
self.CB_cycle_left = itertools.cycle((self.CB1,self.CB2))
self.CB_cycle_right = itertools.cycle((self.CB1,self.CB2))
self.CB_cycle_center = itertools.cycle((self.CB1_center,self.CB2_center))
# set time-related attributes
self._last_CB_change_time_left = None
self._last_CB_change_time_right = None
self._last_CB_change_time_center = None
self.flash_rate_left = flash_rate_left
self.flash_interval_left = 1.0/flash_rate_left
self.flash_rate_right = flash_rate_right
self.flash_interval_right = 1.0/flash_rate_right
self.flash_rate_center = flash_rate_center
self.flash_interval_center = 1.0/flash_rate_center
#self.rate_compensation = rate_compensation
# get useful coordinate values for checkerboard rendering locations
self.xC, self.yC = (-0.5*self.board_width,-0.5*self.board_width)
self.xL, self.yL = (self.xC - 0.7*self.screen_right, self.yC)
self.xR, self.yR = (self.xC + 0.7*self.screen_right, self.yC)
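        # the side boards are offset by 70% of the half-screen width to the left and right of center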
# some lists for checking things
self.vals = itertools.cycle((1,0))
self.t_list = []
self.val_list = []
self.vals_current = self.vals.next()
def start_time(self,t):
# get start time and set current CB objects (and their change times)
Screen.start_time(self,t)
self._last_CB_change_time_left = t
self._last_CB_change_time_right = t
self._last_CB_change_time_center = t
self._current_CB_left = self.CB_cycle_left.next()
self._current_CB_right = self.CB_cycle_right.next()
self._current_CB_center = self.CB_cycle_center.next()
# also used for checking things
self.t_begin = t
def render(self):
# do general OpenGL stuff as well as FixationCross and Vsync Patch if needed
Screen.render(self)
# translate to position of left board and render
gl.glLoadIdentity()
gl.glTranslatef(self.xL, self.yL, 0.0)
self._current_CB_left.render()
# translate to position of right board and render
gl.glLoadIdentity()
gl.glTranslatef(self.xR, self.yR, 0.0)
self._current_CB_right.render()
# render center board
gl.glLoadIdentity()
gl.glTranslatef(-self.board_width_center / 2.0, -self.board_width_center / 2.0, 0.0)
self._current_CB_center.render()
def update(self, t, dt):
self.ready_to_render = False
# only update a checkerboard if its flash_interval has elapsed
if (t - self._last_CB_change_time_left) >= self.flash_interval_left:
self._last_CB_change_time_left = t
self._current_CB_left = self.CB_cycle_left.next()
self.ready_to_render = True
# checking things
self.vals_current = self.vals.next()
self.val_list.append(self.vals_current)
self.t_list.append(t - self.t_begin)
if (t - self._last_CB_change_time_right) >= self.flash_interval_right:
self._last_CB_change_time_right = t
self._current_CB_right = self.CB_cycle_right.next()
self.ready_to_render = True
if (t - self._last_CB_change_time_center) >= self.flash_interval_center:
self._last_CB_change_time_center = t
self._current_CB_center = self.CB_cycle_center.next()
self.ready_to_render = True
def run(self, **kwargs):
# loop rate set too high so it should run effectively as fast as python is capable of looping
Screen.run(self, display_loop_rate = 10000, **kwargs)
################################################################################
# TEST CODE
################################################################################
if __name__ == "__main__":
flash_rate_left = 17
flash_rate_right = 23
flash_rate_center = 19
duration = 5
show_plot = True
DCBF = TripleCheckerBoardFlasher.with_pygame_display(#VBI_sync_osx = False,
)
#DCBF = TripleCheckerBoardFlasher.with_psychopy_window()
DCBF.setup(flash_rate_left = flash_rate_left,
flash_rate_right = flash_rate_right,
flash_rate_center = flash_rate_center,
check_width = 1.0 / 16.0,
check_width_center = 0.5,
screen_background_color = 'neutral-gray',
nrows = 8,
nrows_center = 1,
show_fixation_dot = True,
)
DCBF.run(duration = duration)
pygame.quit()
if show_plot:
t_diffs = np.diff(np.array(DCBF.t_list))
print('Mean sample interval: ', t_diffs.mean())
print('Mean sample frequency:', 1.0/t_diffs.mean())
print('Sample interval STD: ', t_diffs.std())
import matplotlib.pyplot as plt
import scipy.signal as scps
# plt.subplot(2,1,1)
plt.step(DCBF.t_list, DCBF.val_list, color = 'red', label = 'Displayed')
time_vals = np.linspace(0, duration, duration * 720)
val_vals = [scps.square(flash_rate_left * np.pi * t, duty = 0.5) / 2.0 + 0.5 for t in time_vals]
plt.plot(time_vals, val_vals, color = 'blue', label = 'Ideal')
plt.legend(loc = 'best')
# must set ready_to_render to true in every loop for fft to work to get even sample spacing
# note that this introduces its own error, as rendering is not as optimized
# plt.subplot(2,1,2)
# fft_data = abs(np.fft.rfft(DCBF.val_list))
# fft_freqs = np.fft.rfftfreq(len(DCBF.val_list), 1.0/60)
# plt.plot(fft_freqs, fft_data)
# plt.show()
| SridharLab/neurodot-present | neurodot_present/triple_checkerboard_flasher.py | triple_checkerboard_flasher.py | py | 8,015 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "screen.Screen",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "common.DEFAULT_FLASH_RATE",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "common.DEFAULT_FLASH_RATE",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "com... |
35553846638 | from datetime import datetime
from urllib.error import HTTPError
from td.client import TdAmeritradeClient
from td.rest.options_chain import OptionsChain
from Authenticator import TDAuthenticator
from time import sleep
class TDPosition():
def __init__(self, position_dict : dict):
self.__position_dict : dict = position_dict
self.average_price : float = self.__position_dict["averagePrice"]
self.long_quantity : float = self.__position_dict["longQuantity"]
self.asset_type : str = self.__position_dict["instrument"]["assetType"]
self.cusip : str = self.__position_dict["instrument"]["cusip"]
self.symbol : str = self.__position_dict["instrument"]["symbol"]
self.market_value : float = self.__position_dict["marketValue"]
self.time : datetime = datetime.now()
def __str__(self) -> str:
print_string = f"Symbol: {self.symbol}\n"
print_string += f"Asset: {self.asset_type}\n"
print_string += f"Quantity: {self.long_quantity}\n"
print_string += f"Market value: {self.market_value}\n"
print_string += f"Average Price: {self.average_price}\n"
print_string += f"Cusip: {self.cusip}\n"
print_string += f"Time: {self.time}\n"
return print_string
class TDAccount():
def __init__(self, account_dict : dict):
self.name : str = list(account_dict.keys())[0]
self.__account_dict : dict = account_dict[self.name]
self.type : str = self.__account_dict["type"]
self.id : str = self.__account_dict["accountId"]
self.day_trader : bool = self.__account_dict["isDayTrader"]
self.positions : list[TDPosition] = self.get_positions()
self.current_trading_balance : float = self.__account_dict["currentBalances"]["cashAvailableForTrading"]
self.current_liquidation_value : float = self.__account_dict["currentBalances"]["liquidationValue"]
self.initial_trading_balance : float = self.__account_dict["initialBalances"]["cashAvailableForTrading"]
self.initial_account_value : float = self.__account_dict["initialBalances"]["accountValue"]
self.time : datetime = datetime.now()
def get_positions(self) -> list[TDPosition]:
td_positions = [TDPosition(position) for position in self.__account_dict.get("positions", [])]
return td_positions
def __str__(self) -> str:
print_string = f"Name: {self.name}\n"
print_string += f"Type: {self.type}\n"
print_string += f"ID: {self.id}\n"
print_string += f"Day Trader: {self.day_trader}\n"
print_string += f"Current Trading Balance: {self.current_trading_balance}\n"
print_string += f"Current Liquidation Value: {self.current_liquidation_value}\n"
print_string += f"Initial trading Balance: {self.initial_trading_balance}\n"
print_string += f"Initial Account Value: {self.initial_account_value}\n"
print_string += f"Time: {self.time}\n"
print_string += f"Positions: \n\n"
for position in self.positions:
print_string += f"Positions: {position}\n"
return print_string
class TDOptionParams():
def __init__(
self,
option_param_dict=None,
symbol=None,
strike=None,
from_date : datetime=None,
to_date : datetime=None,
contract_type=None,
expiration_month=None,
option_type="ALL",
range="ALL",
days_to_exp=None
):
if not option_param_dict:
self.__option_chain_dict = {
'symbol': symbol,
'strike': strike,
'fromDate': from_date,
'toDate': to_date,
'contractType': contract_type,
'expirationMonth': expiration_month,
'optionType': option_type,
'range': range,
'daysToExpiration': days_to_exp
}
else:
self.__option_chain_dict = option_param_dict
self.time : datetime = datetime.now()
def get_option_chain_dict(self):
return self.__option_chain_dict
class TDOption():
def __init__(self, option_dict : dict):
self.__option_dict : dict = option_dict
self.contract_type : str = self.__option_dict["putCall"]
self.symbol : str = self.__option_dict["symbol"]
self.description : str = self.__option_dict["description"]
self.bid : float = self.__option_dict["bid"]
self.ask : float = self.__option_dict["ask"]
self.last : float = self.__option_dict["last"]
self.mark : float = self.__option_dict["mark"]
self.bid_size : int = self.__option_dict["bidSize"]
self.ask_size : int = self.__option_dict["askSize"]
self.high_price : float = self.__option_dict["highPrice"]
self.low_price : float = self.__option_dict["lowPrice"]
self.open_price : float = self.__option_dict["openPrice"]
self.close_price : float = self.__option_dict["closePrice"]
self.total_volume : int = self.__option_dict["totalVolume"]
self.net_change : float = self.__option_dict["netChange"]
self.volatility : float = self.__option_dict["volatility"]
self.delta : float = self.__option_dict["delta"]
self.gamma : float = self.__option_dict["gamma"]
self.theta : float = self.__option_dict["theta"]
self.vega : float = self.__option_dict["vega"]
self.rho : float = self.__option_dict["rho"]
self.theoretical_option_value : float = self.__option_dict["theoreticalOptionValue"]
self.theoretical_volatility : float = self.__option_dict["theoreticalVolatility"]
self.strike_price : float = self.__option_dict["strikePrice"]
self.expiration_date : int = self.__option_dict["expirationDate"]
self.days_to_expiration : int = self.__option_dict["daysToExpiration"]
self.multiplier : int = self.__option_dict["multiplier"]
self.percent_change : float = self.__option_dict["percentChange"]
self.mark_change : float = self.__option_dict["markChange"]
self.mark_percent_change : float = self.__option_dict["markPercentChange"]
self.intrinsic_value : float = self.__option_dict["intrinsicValue"]
self.time : datetime = datetime.now()
def __str__(self) -> str:
print_string = f"Symbol: {self.symbol}\n"
print_string += f"Strike Price: {self.strike_price}\n"
print_string += f"Days to Expiration: {self.days_to_expiration}\n"
print_string += f"Last Price: {self.last}\n"
print_string += f"Time: {self.time}\n"
return print_string
class TDOptionsChain():
def __init__(self, options_chain_dict : dict, option_search_query : TDOptionParams):
self.__options_chain_dict = options_chain_dict
self.status : str = self.__options_chain_dict["status"]
self.option_search_query = option_search_query
self.options = []
self.time : datetime = datetime.now()
if(self.status=="SUCCESS"):
self.symbol : str = self.__options_chain_dict["symbol"]
self.interest_rate : float = self.__options_chain_dict["interestRate"]
self.underlying_price : float = self.__options_chain_dict["underlyingPrice"]
self.volatility : float = self.__options_chain_dict["volatility"]
self.days_to_expiration : float = self.__options_chain_dict["daysToExpiration"]
self.contract_number : int = self.__options_chain_dict["numberOfContracts"]
self.options: list[TDOption] = self.get_options()
else:
print("FAILED: " + str(self.option_search_query))
def get_options(self):
td_options = []
for option in self.__options_chain_dict["putExpDateMap"]:
option_dict = self.__options_chain_dict["putExpDateMap"][option]
for strike in option_dict.keys():
strike_dict = option_dict[strike]
for contract_dict in strike_dict:
td_option = TDOption(contract_dict)
td_options.append(td_option)
for option in self.__options_chain_dict["callExpDateMap"]:
option_dict = self.__options_chain_dict["callExpDateMap"][option]
for strike in option_dict.keys():
strike_dict = option_dict[strike]
for contract_dict in strike_dict:
td_option = TDOption(contract_dict)
td_options.append(td_option)
return td_options
def __str__(self) -> str:
print_string = f"Symbol: {self.symbol}\n"
print_string += f"Status: {self.status}\n"
print_string += f"Interest Rate: {self.interest_rate}\n"
print_string += f"Underlying Price: {self.underlying_price}\n"
print_string += f"Volatility: {self.volatility}\n"
print_string += f"Days To Expiration: {self.days_to_expiration}\n"
print_string += f"Number of contracts: {self.contract_number}\n"
print_string += f"Time: {self.time}\n"
print_string += f"Options: \n\n"
for option in self.options:
print_string += f"{option}\n"
return print_string
class TDOrder():
def __init__(self):
self.order_dict = {}
def get_order_dict(self) -> dict:
        return self.order_dict
class TDOCOOrder(TDOrder):
def __init__(self, price, amount, symbol, stop_order_percent, limit_order_percent):
super().__init__()
self.price : float = price
self.amount : int = amount
self.symbol : str = symbol
self.stop_order_percent : float = stop_order_percent
self.limit_order_percent : float = limit_order_percent
self.stop_order_price : float = round(price*(1-stop_order_percent),2)
self.limit_order_price : float = round(price*(1-limit_order_percent),2)
self.time : datetime = datetime.now()
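        # a buy-to-open limit order whose fill triggers an OCO pair: a lower limit order and a stop order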
self.order_dict : dict={
"orderStrategyType": "TRIGGER",
"session": "NORMAL",
"duration": "DAY",
"orderType": "LIMIT",
"price": price,
"orderLegCollection": [
{
"instruction": "BUY_TO_OPEN",
"quantity": amount,
"instrument": {
"assetType": "OPTION",
"symbol": symbol
}
}
],
"childOrderStrategies": [
{
"orderStrategyType": "OCO",
"childOrderStrategies": [
{
"orderStrategyType": "SINGLE",
"session": "NORMAL",
"duration": "GOOD_TILL_CANCEL",
"orderType": "LIMIT",
"price": self.limit_order_price,
"orderLegCollection": [
{
"instruction": "SELL_TO_OPEN",
"quantity": amount,
"instrument": {
"assetType": "OPTION",
"symbol": symbol
}
}
]
},
{
"orderStrategyType": "SINGLE",
"session": "NORMAL",
"duration": "GOOD_TILL_CANCEL",
"orderType": "STOP",
"stopPrice": self.stop_order_price,
"orderLegCollection": [
{
"instruction": "SELL_TO_OPEN",
"quantity": amount,
"instrument": {
"assetType": "OPTION",
"symbol": symbol
}
}
]
}
]
}
]
}
def get_order_dict(self) -> dict:
return self.order_dict
class RecursiveTDOCOOrder(TDOrder):
def __init__(self, price, amount, symbol, stop_order_percent, limit_order_percent):
super().__init__()
self.price : float = price
self.amount : int = amount
self.symbol : str = symbol
self.stop_order_percent : float = stop_order_percent
self.limit_order_percent : float = limit_order_percent
self.stop_order_price : float = round(price*(1-stop_order_percent),2)
self.limit_order_price : float = round(price*(1-limit_order_percent),2)
self.time : datetime = datetime.now()
self.order_dict : dict={
"orderStrategyType": "TRIGGER",
"session": "NORMAL",
"duration": "DAY",
"orderType": "LIMIT",
"price": price,
"orderLegCollection": [
{
"instruction": "BUY_TO_OPEN",
"quantity": amount,
"instrument": {
"assetType": "OPTION",
"symbol": symbol
}
}
],
"childOrderStrategies": [
{
"orderStrategyType": "OCO",
"childOrderStrategies": [
{
"orderStrategyType": "TRIGGER",
"session": "NORMAL",
"duration": "GOOD_TILL_CANCEL",
"orderType": "LIMIT",
"price": self.limit_order_price,
"orderLegCollection": [
{
"instruction": "SELL_TO_OPEN",
"quantity": int(amount/2),
"instrument": {
"assetType": "OPTION",
"symbol": symbol
}
}
],
"childOrderStrategies": [
{
"orderStrategyType": "OCO",
"childOrderStrategies": [
{
"orderStrategyType": "SINGLE",
"session": "NORMAL",
"duration": "GOOD_TILL_CANCEL",
"orderType": "LIMIT",
"price": self.limit_order_price*2,
"orderLegCollection": [
{
"instruction": "SELL_TO_OPEN",
"quantity": int(int(amount/2)/2),
"instrument": {
"assetType": "OPTION",
"symbol": symbol
}
}
]
},
{
"orderStrategyType": "SINGLE",
"session": "NORMAL",
"duration": "GOOD_TILL_CANCEL",
"orderType": "STOP",
"stopPrice": self.stop_order_price*2,
"orderLegCollection": [
{
"instruction": "SELL_TO_OPEN",
"quantity": int(amount/2),
"instrument": {
"assetType": "OPTION",
"symbol": symbol
}
}
]
}
]
}
]
},
{
"orderStrategyType": "SINGLE",
"session": "NORMAL",
"duration": "GOOD_TILL_CANCEL",
"orderType": "STOP",
"stopPrice": self.stop_order_price,
"orderLegCollection": [
{
"instruction": "SELL_TO_OPEN",
"quantity": amount,
"instrument": {
"assetType": "OPTION",
"symbol": symbol
}
}
]
}
]
}
]
}
def get_order_dict(self) -> dict:
return self.order_dict
class TD():
def __init__(self, auth_file="config/td_credentials.json", paper_trade_balance=0):
self.__authenticator = TDAuthenticator(auth_file)
self.__client = TdAmeritradeClient(
credentials=self.__authenticator.get_credentials()
)
self.__options_service = self.__client.options_chain()
self.__account_service = self.__client.accounts()
self.__order_service = self.__client.orders()
self.__stream_client = self.__client.streaming_api_client()
self.__stream_services = self.__stream_client.services()
self.__stream_services.quality_of_service(qos_level='0')
self.__paper_trade_balance = paper_trade_balance
self.__paper_trading = False
if self.__paper_trade_balance:
self.__paper_trading = True
def get_accounts(self) -> list[TDAccount]:
while True:
try:
td_accounts = []
accounts = self.__account_service.get_accounts()
for account in accounts:
if self.__paper_trading:
account_dict = account[list(account.keys())[0]]
account_dict["currentBalances"]["cashAvailableForTrading"] = self.__paper_trade_balance
account_dict["currentBalances"]["liquidationValue"] = self.__paper_trade_balance
account_dict["initialBalances"]["cashAvailableForTrading"] = self.__paper_trade_balance
account_dict["initialBalances"]["accountValue"] = self.__paper_trade_balance
td_accounts.append(TDAccount(account))
return td_accounts
except HTTPError:
print("Failed to get accounts")
sleep(1)
def get_account_by_id(self, account_id : str) -> TDAccount:
while True:
try:
accounts = self.__account_service.get_accounts()
td_accounts = [TDAccount(account) for account in accounts]
for td_account in td_accounts:
if td_account.id == account_id:
return td_account
return None
except HTTPError:
print("Failed to get accounts")
sleep(1)
def get_options_chain(self, option_params : TDOptionParams) -> TDOptionsChain:
while True:
try:
options_chain = self.__options_service.get_option_chain(option_chain_dict=option_params.get_option_chain_dict())
td_options_chain = TDOptionsChain(options_chain, option_params)
return td_options_chain
except HTTPError:
print("Failed to get options chain")
sleep(1)
def fill_order(self, td_order : TDOrder, td_account : TDAccount):
while True:
try:
if self.__paper_trading:
pass
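                # NOTE: paper trading is not simulated here; the order is still sent to the live order service below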
order = self.__order_service.place_order(account_id=td_account.id,order_dict=td_order.get_order_dict())
return order
except HTTPError:
print("Failed to place order")
sleep(1)
return False | gatordevin/TradingBot | v3/TD.py | TD.py | py | 19,700 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "datetime.da... |
70190136745 | import re
from datetime import datetime
from bs4 import BeautifulSoup
from ..utils.http import wget
from ..utils.data import DataEngine
from dateutil import parser, tz
class Engine(object):
"""
Engine to process: https://www.letmeread.net
"""
__host__ = 'letmeread'
baseurl: str = "https://www.letmeread.net"
total_of_pages: int = 0
total_of_pages_classified: int = 0
orm: str = ''
    data_engine: DataEngine = None
def __init__(self, orm: str = '', **kwargs) -> None:
self.orm = orm
self.data_engine = DataEngine(orm=self.orm)
def item_save(self, book_data: list) -> bool:
try:
result = self.data_engine.save(book_data)
except Exception:
result = False
return result
def process_item(self, code: str) -> object:
item_url = self.baseurl + "/" + code + "/"
bs = BeautifulSoup(wget(item_url), 'html.parser')
try:
du = bs.find("meta", {'property': 'article:published_time'})['content']
date_posted = parser.parse(du).date()
except Exception:
date_posted = None
try:
thumb = bs.find("img", {'class': 'align-self-start img-fluid'})['src']
except Exception:
thumb = 'none'
try:
description = bs.find("div", {'class': 'col-md-8'}).find("div", {'class': 'card mb-4'}).find("div", {'class':'card-body'})
except Exception:
description = 'none'
data = {
'title': "none",
'date_published': None,
'date_posted': date_posted,
'pages': 0,
'language': "none",
'code': code,
'url': item_url,
'author': "none",
'publisher': "none",
'isbn10': "",
'isbn13': "none",
'thumbnail': thumb,
'engine': 'letmeread',
'format': 'text',
'size': 0,
'description': (description)
}
c = bs.find("ul", {'class': 'list-unstyled mb-0'}).findAll("li")
for i in c:
cc = i.get_text().strip()
item = re.findall("([a-zA-Z0-9\- ]+): (.*)", cc)
# print(item)
ititle = item[0][0].strip()
ivalue = item[0][1].strip()
if(ititle == "Title"):
data['title'] = ivalue
elif(ititle == "Author"):
data['author'] = ivalue
elif(ititle == "Length"):
num_of_pages = re.search("([0-9]+) pages", ivalue)[1]
data['pages'] = num_of_pages
elif(ititle == "Language"):
data['language'] = ivalue
elif(ititle == "Publisher"):
data['publisher'] = ivalue
elif(ititle == "Publication Date"):
try:
d = datetime.strptime(ivalue, '%Y').date()
data['date_published'] = d
except Exception:
try:
d = datetime.strptime(ivalue, '%Y-%m-%d').date()
data['date_published'] = d
except Exception:
try:
d = datetime.strptime(ivalue, '%Y-%m').date()
data['date_published'] = d
except Exception:
pass
elif(ititle == "ISBN-10"):
data['isbn10'] = ivalue
elif(ititle == "ISBN-13"):
data['isbn13'] = ivalue
return data
    def process_page(self, page_number: int = 1, progressbar: object = None) -> bool:
#print("Processing Page: " + str(page_number) + " of " + str(self.total_of_pages))
page_url = self.baseurl + "/page/" + str(page_number) + "/" if page_number > 1 else self.baseurl
bs = BeautifulSoup(wget(page_url), 'html.parser')
nameList = bs.findAll('div', {'class': 'card-body p-2'})
data = []
for _index, i in enumerate(nameList):
if progressbar is not None:
progressbar()
data = i.find('a')
data_text = data.get_text()
code = data['href'].replace("/", "")
#print(f"\t\t[page={page_number}]item: " + str(index + 1) + " of " + str(len(nameList)))
isset = self.data_engine.isset_code(code=code, engine=self.__host__)
if isset is False:
try:
book_data = self.process_item(code=code)
self.item_save(book_data=book_data)
pass
except Exception as e:
print(f"Error processing page: {page_url} , title: {data_text}, item: " + self.baseurl + "/" + code + "/")
print(e)
return True
def count_total_pages(self) -> int:
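        # read the total page count from the "of N" text in the pagination widget on the landing page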
bs = BeautifulSoup(wget(self.baseurl), 'html.parser')
content = bs.find(
"li", {'class': 'page-item disabled d-none d-lg-block'})
sp = re.search(
'of ([0-9]+)', content.get_text().strip(), flags=re.IGNORECASE)
total_pages = int(sp[1])
total_items = bs.findAll('div', {'class': 'card-body p-2'})
self.total_of_pages = total_pages
self.totat_items_per_page = len(total_items)
return total_pages, self.totat_items_per_page
    def num_of_pages_to_process(self, start_from_page: int = 1) -> tuple:
"""
Return all the sanitized pages
Keyword Arguments:
start_from_page {int} -- What page are going to start (default: {1})
Returns:
            tuple -- (pages to be processed, number of items per page)
"""
total_pages, num_items_per_page = self.count_total_pages()
entries = []
for i in range(total_pages):
current_page = i + 1
if current_page >= start_from_page:
entries.append(i + 1)
self.total_of_pages_classified = len(entries)
return entries, num_items_per_page
def run(self, start_from_page: int = 1) -> None:
pages, _ = self.num_of_pages_to_process(start_from_page=start_from_page)
for current_page in pages:
self.process_page(current_page)
def fix(self):
import pprint as pp
d = DataEngine()
session, table = d.get_engine()
r = session.query(table).filter(table.date == "0000-00-00").all()
for i in r:
processed = self.process_item(i.code)
print("------------------ begin ----------------------")
#table.__table__.update().where(table.id==i.id).values(date=processed['date'])
session.query(table).filter(table.uid == i.uid).update({table.date_published: processed['date_published']}, synchronize_session = False)
            pp.pprint((processed['url'], ": ", processed['date_published']))
print("------------------ end -----------------------")
session.commit()
| maborak/ebooks-dl | ebooksdl/engines/letmeread.py | letmeread.py | py | 6,996 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "utils.data.DataEngine",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "utils.http.wget",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "dateutil.pars... |
7438959251 | # -*- coding: utf-8 -*-
import random
# random library
# web scraping libraries
from os import remove
import os, ssl
import urllib.request
from bs4 import BeautifulSoup
# libraries to download and save images
# to do it without certificate verification
# download image
import requests
# move files between locations
import shutil
# libraries for sending email
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
# web scraping variables
# link to the page containing the article
url='https://www.queretaro.gob.mx/prensa/contenido.aspx?q=vUYGbsxLnljEIK42BbZlQ0+l8jDVq69PyrYjaUyZ2NTYQ4HUMPRgZA=='
# image download variables
carpertaImg="imagenes/"
# email variables
# Allows ignoring certificate checks; this makes it insecure, but it is not relevant since we take articles from a public domain
if (not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None)):
ssl._create_default_https_context = ssl._create_unverified_context
# extract the page's source code
datos = urllib.request.urlopen(url).read().decode('Latin-1')
soup = BeautifulSoup(datos, 'html.parser')
# find the requested components, title and body, by tag and class
#https://www.queretaro.gob.mx/prensa/
text = soup.find_all('div', class_="col-md-12")
#https://fiscaliageneralqro.gob.mx/
#text = soup.find_all('div', class_="entry-content clearfix")
lineas=list()
for i in text:
lineas.append(i.text)
extracto=str(lineas[0])
# start of formatting
# extract the title
Titulo=""
i=0
UltimoSaltoTitulo=0
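# scan characters up to the first line break past index 5; everything before it is taken as the title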
while True:
if not(extracto[i] == '\n'):
Titulo=Titulo+extracto[i]
#print(Titulo)
elif extracto[i] == '\n' and i>=5:
#print("salio aqui")
UltimoSaltoTitulo=i
break
i=i+1
#print(Titulo)
NumSalto=0
# extract the body
# count the '\n' characters
for i in range(0,len(extracto)):
if extracto[i] == '\n':
NumSalto=NumSalto+1
LugarSaltoMax=i
#print(NumSalto)
# position of the '\n' several breaks before the end
NumSaltoAnte=0
MenosUltimosSaltos=5 # how many trailing lines of text will not be taken into account
for i in range(0,len(extracto)):
if extracto[i] == '\n':
NumSaltoAnte=NumSaltoAnte+1
if NumSaltoAnte == (NumSalto-MenosUltimosSaltos):
LugarSaloAnteAnte=i
# take the body, except for the last lines / line breaks
cuerpo=extracto[UltimoSaltoTitulo:LugarSaloAnteAnte]
#print(cuerpo)
# message format
TituloEntreCuerpo=":ExcerptStart"+"\n"+Titulo+"\n"+":ExcerptEnd"
LineasFinaleMensaje="#img1 caption="+"'"+Titulo+"'"+"#"
# line after the title
# assemble the message
EnsambleMensaje=Titulo+"\n"+TituloEntreCuerpo+"\n"+cuerpo+"\n"+"\n"+LineasFinaleMensaje
#print(EnsambleMensaje)
######################################
#     Links and image downloads      #
######################################
# find the requested components, image links, by tag and class
links_imagenes = soup.find_all('img', class_="media-object")
extracto_imagenes=str(links_imagenes)
#print("extracto_imag= "+extracto_imagenes)
longitudlinks_imagenes=len(links_imagenes)
longitudLinks=len(extracto_imagenes)
#print("Tamaño total link: "+str(longitudLinks))
links=[]
posiC=[]
siS=False
siR=False
siC=False
siInclinado=False
siMayorQ=False
iniLink=0
finLink=0
Filtroextracto_imagen=""
soloImpares=0
#print(extracto_imagenes[2])
# find the start and end position of each link
#print(links_imagenes[2])
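# character-by-character state machine: the sequence s, r, c, = marks the start of a src link and /> marks its end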
for a in range(0,longitudLinks-1):
if extracto_imagenes[a] == "s":
siS=True
elif extracto_imagenes[a] == "r" and siS==True:
siR=True
else:
siS=False
if extracto_imagenes[a] == "c" and siR==True:
#print("vi una r y paso una s y ahora hay una c")
siC=True
elif extracto_imagenes[a] == "=" and siC==True:
iniLink=a+1
siS=False
siR=False
siC=False
else:
pass
if extracto_imagenes[a] == "/":
siInclinado=True
elif extracto_imagenes[a] == ">" and siInclinado==True:
siMayorQ=True
finLink=a-1
if iniLink > 0 and finLink>0:
#print(str(soloImpares%2))
        if (soloImpares%2)==1: # we only want the odd-numbered links
Filtroextracto_imagen=extracto_imagenes[iniLink:finLink]
Filtroextracto_imagen=Filtroextracto_imagen.replace("\"","")
links.append(Filtroextracto_imagen.replace("amp;",""))
else:
pass
soloImpares=soloImpares+1
#print("SOlo imperes: "+ str(soloImpares))
iniLink=0
finLink=0
# shuffle the list so images are downloaded in random order
random.shuffle(links)
#print("Numero de links: "+str(len(links)))
# keep at most 3 photo links
if len(links)>3:
    del links[3:]
"""for i in links:
print("\n cada link: "+str(i))"""
# download
for i in range(0,len(links)):
    url_imagen = links[i] # the image link
    nombre_local_imagen = Titulo+" "+str(i)+".jpg" # the filename we want to save it as
imagen = requests.get(url_imagen, verify=False).content
with open(nombre_local_imagen, 'wb') as handler:
handler.write(imagen)
    # move the image to the correct location
    # Moves the file from its current location to the
    # "imagenes" folder.
shutil.move(nombre_local_imagen, "imagenes/")
######################################
#      Send email with images        #
######################################
# create an instance of the message object
msg = MIMEMultipart()
# the previously assembled message goes here
message = EnsambleMensaje
# setup the parameters of the message
password = "qwerTyui1"
msg['From'] = "notas.automaticas@gmail.com"
msg['To'] = "notas.automaticas@gmail.com"
msg['Subject'] = Titulo
# attach image to message body
for i in range(0,len(links)):
fp = open(carpertaImg+Titulo+" "+str(i)+".jpg", 'rb')
image = MIMEImage(fp.read())
fp.close()
msg.attach(image)
##### Added section
# add in the message body
msg.attach(MIMEText(message, 'plain'))
# create server
server = smtplib.SMTP('smtp.gmail.com: 587')
server.starttls()
# Login Credentials for sending the mail
server.login(msg['From'], password)
# send the message via the server.
server.sendmail(msg['From'], msg['To'], msg.as_string())
server.quit()
print("El correo fue enviadó exitosamente a %s:" % (msg['To']))
#Una vez enviado el correo se eliminan las fotos
for i in range(0,len(links)):
remove(carpertaImg+Titulo+" "+str(i)+".jpg") | eduardo-trejo-es/RecoleccionNotas_CronicaReg | old_App/Notas automatico a correo terminal final.py | Notas automatico a correo terminal final.py | py | 6,587 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "ssl._create_default_https_context",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": ... |
1606737974 | from http.server import BaseHTTPRequestHandler
import json
import os
import shutil
import logging
import pytest
import requests
from pytest_httpserver import HTTPServer
from sunfish.lib.core import Core
from sunfish.lib.exceptions import *
from tests import test_utils, tests_template
class TestSunfishcoreLibrary():
@classmethod
def setup_class(cls):
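        # reset the Subscriptions store to a known baseline before the tests run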
shutil.rmtree(os.path.join(os.getcwd(),'tests', 'Resources', 'EventService', 'Subscriptions'))
path = os.path.join(os.getcwd(), 'tests', 'Resources', 'EventService', 'Subscriptions')
os.mkdir(path)
with open(os.path.join(path,'index.json'), 'w') as f:
json.dump(tests_template.setup_subscriptions, f)
f.close()
cls.conf = {
"storage_backend": "FS",
"redfish_root": "/redfish/v1/",
"backend_conf" : {
"fs_root": "tests/Resources",
"subscribers_root": "EventService/Subscriptions"
}
}
cls.core = Core(cls.conf)
# TEST REST
# Delete
@pytest.mark.order("last")
def test_delete(self):
id = test_utils.get_id(self.conf["backend_conf"]["fs_root"], 'Systems')
system_url = os.path.join(self.conf["redfish_root"], 'Systems', id)
logging.info('Deleting ', system_url)
self.core.delete_object(system_url)
assert test_utils.check_delete(system_url) == True
        # raise exception if the element doesn't exist
with pytest.raises(ResourceNotFound):
self.core.delete_object(system_url)
# Post
def test_post_object(self):
json_file = tests_template.test_post_system
path = os.path.join(self.conf["redfish_root"], "Systems")
assert self.core.create_object(path, json_file)
        # Collection exception
path = os.path.join(self.conf["redfish_root"], "Systems")
with pytest.raises(CollectionNotSupported):
self.core.create_object(path, tests_template.test_collection)
# Get
def test_get(self):
id = test_utils.get_id(self.conf["backend_conf"]["fs_root"], 'Systems')
system_url = os.path.join(self.conf["redfish_root"], 'Systems', id)
assert self.core.get_object(system_url)
    # Exception: get an element that doesn't exist
def test_get_exception(self):
system_url = os.path.join(self.conf["redfish_root"], 'Systems', '-1')
with pytest.raises(ResourceNotFound):
self.core.get_object(system_url)
# Put
def test_put(self):
# pytest.set_trace()
id = test_utils.get_id(self.conf["backend_conf"]["fs_root"], 'Systems')
payload = tests_template.test_put
id_properties = {
"@odata.id": os.path.join(self.conf["redfish_root"], 'Systems', id),
"Id": id
}
payload.update(id_properties)
self.core.replace_object(payload)
assert self.core.replace_object(payload) == payload
    # Exception: put an element that doesn't exist
def test_put_exception(self):
payload = tests_template.test_update_exception
with pytest.raises(ResourceNotFound):
self.core.replace_object(payload)
# Patch
def test_patch(self):
id = test_utils.get_id(self.conf["backend_conf"]["fs_root"], 'Systems')
payload = tests_template.test_patch
id_properties = {
"@odata.id": os.path.join(self.conf["redfish_root"], 'Systems', id),
"Id": id
}
payload.update(id_properties)
assert self.core.patch_object(payload) == self.core.get_object(payload['@odata.id'])
    # Exception: patch an element that doesn't exist
def test_patch_exception(self):
payload = tests_template.test_update_exception
with pytest.raises(ResourceNotFound):
self.core.patch_object(payload)
# EVENTING and SUBSCRIPTIONS
def test_subscription(self):
path = os.path.join(self.conf['redfish_root'], self.conf["backend_conf"]["subscribers_root"])
assert self.core.create_object(path, tests_template.sub1)
assert self.core.create_object(path, tests_template.sub2)
assert self.core.create_object(path, tests_template.sub3)
@pytest.fixture(scope="session")
def httpserver_listen_address(self):
return ("localhost", 8080)
def test_event_forwarding(self, httpserver: HTTPServer):
httpserver.expect_request("/").respond_with_data("OK")
resp = self.core.handle_event(tests_template.event)
assert len(resp) == 1
def test_event_forwarding_exception(self, httpserver: HTTPServer):
path = os.path.join(self.conf['redfish_root'], self.conf["backend_conf"]["subscribers_root"])
assert self.core.create_object(path, tests_template.wrong_sub)
resp = self.core.handle_event(tests_template.event)
assert len(resp) == 1
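# Hedged usage sketch (added for illustration, not part of the original suite):
# build a Core outside pytest with the same FS backend configuration used in
# setup_class above, then issue a single GET against the Systems collection.
if __name__ == '__main__':
    demo_core = Core({
        "storage_backend": "FS",
        "redfish_root": "/redfish/v1/",
        "backend_conf": {
            "fs_root": "tests/Resources",
            "subscribers_root": "EventService/Subscriptions"
        }
    })
    print(demo_core.get_object("/redfish/v1/Systems"))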
| OpenFabrics/sunfish_library_reference | tests/test_sunfishcore_library.py | test_sunfishcore_library.py | py | 4,902 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "shutil.rmtree",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number"... |
29013033469 | from fastapi import WebSocket
from app.settings import SETTINGS
from app.websocket.exceptions import OnlineLimitException
from app.websocket.classes import OnlineUser
class OnlineUsersConnectionManager:
def __init__(self):
self.online_users: list[OnlineUser] = []
def add(self, online_user: OnlineUser):
if len(self.online_users) >= SETTINGS.max_online_users:
raise OnlineLimitException
self.online_users.append(online_user)
def remove(self, online_user: OnlineUser):
self.online_users.remove(online_user)
def disconnect(self, websocket: WebSocket):
for online_user in self.online_users:
if online_user.websocket == websocket:
self.online_users.remove(online_user)
break
async def send_online_users_list(self, online_user: OnlineUser):
await online_user.websocket.send_json(self._get_users_list())
def _get_users_list(self):
return [
{
"email": online_user.data["user_email"],
"user_id": online_user.data["user_id"],
}
for online_user in self.online_users
]
async def users_count_changed_broadcast(self):
for online_user in self.online_users:
await self.send_online_users_list(online_user)
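# Hedged usage sketch (added): wiring the manager into a FastAPI websocket
# endpoint. OnlineUser's constructor is assumed to take the websocket and a data
# dict; the original class only reads .websocket and .data. app and manager are
# hypothetical names.
#
# manager = OnlineUsersConnectionManager()
#
# @app.websocket("/ws")
# async def ws_endpoint(websocket: WebSocket):
#     await websocket.accept()
#     user = OnlineUser(websocket=websocket,
#                       data={"user_email": "a@b.c", "user_id": 1})
#     try:
#         manager.add(user)
#     except OnlineLimitException:
#         await websocket.close()
#         return
#     await manager.users_count_changed_broadcast()
#     try:
#         while True:
#             await websocket.receive_text()
#     finally:
#         manager.disconnect(websocket)
#         await manager.users_count_changed_broadcast()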
| mrdudov/tic-tac-toe | backend/app/websocket/connect_manager.py | connect_manager.py | py | 1,335 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "app.websocket.classes.OnlineUser",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "app.websocket.classes.OnlineUser",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "app.settings.SETTINGS.max_online_users",
"line_number": 13,
"usage_type... |
39562384945 | # Author: Zavier
import json
import random
import matplotlib.pyplot as plt
import numpy as np
import requests
import pandas as pd
import calendar
import datetime
from geopy.distance import geodesic
import geopy as gp
from app import *
from sklearn.linear_model import LinearRegression,Ridge,RidgeCV
from sklearn.preprocessing import PolynomialFeatures
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.graphics.tsaplots import plot_acf,plot_pacf
def barchart():
track_response = db.session.query(earf.EARF_Number,earf.Response_Time).filter(earf.Road_Class=='highway',
earf.Road_Type=='track').all()
ls = []
intime_count = 0
for i in track_response:
if i.Response_Time<=60:
if i.Response_Time<=10:
intime_count+=1
ls.append(i.Response_Time)
# plt.hist(x=result_array[0],bins=len(track_response))
# plt.show()
print(intime_count)
plt.bar(range(len(ls)),ls)
plt.show()
def daily_count_distribution():
daily_count = db.session.query(daily_case_count.Date,daily_case_count.Count,daily_case_count.Overtime_Count).filter(and_(daily_case_count.Date>'2015-12-31',daily_case_count.Date<'2017-01-01')).all()
date = []
ls = []
over_ls = []
over_rate = []
for pair in daily_count:
date.append(pair.Date)
ls.append(pair.Count)
over_ls.append(pair.Overtime_Count)
over_rate.append((pair.Count - pair.Overtime_Count)/pair.Count)
# plt.bar(date,ls)
# plt.bar(date, over_ls)
# plt.scatter(ls,over_rate)
plt.bar(date,over_rate)
plt.show()
def over_rate_regression():
daily_count = db.session.query(daily_case_count.Date, daily_case_count.Count,
daily_case_count.Overtime_Count).filter(
and_(daily_case_count.Date > '2015-12-31', daily_case_count.Date < '2017-01-01')).all()
test_daily_count = db.session.query(daily_case_count.Date, daily_case_count.Count,
daily_case_count.Overtime_Count).filter(
and_(daily_case_count.Date > '2017-01-01', daily_case_count.Date < '2017-05-31')).all()
date = []
ls = []
over_ls = []
over_rate = []
test_ls = []
test_over_rate = []
for pair in daily_count:
date.append([pair.Date])
ls.append([pair.Count])
over_ls.append([pair.Overtime_Count])
over_rate.append([pair.Overtime_Count / pair.Count])
for pair in test_daily_count:
test_ls.append([pair.Count])
test_over_rate.append([pair.Overtime_Count / pair.Count])
# ls = [ls]
# over_rate = [over_rate]
# ls = [[1],[2],[3],[4],[5]]
# over_rate = [[1],[4],[9],[16],[25]]
poly = PolynomialFeatures(degree=4)
X_poly = poly.fit_transform(ls)
poly.fit(X_poly, over_rate)
lin2 = LinearRegression()
lin2.fit(X_poly, over_rate)
# # Ridge
# model = RidgeCV(alphas=[0.1, 1.0, 10.0])
# model.fit(ls,over_rate)
# # Ridge predict
# overtime_predicted = model.predict(test_ls)
# plt.scatter(ls, over_rate, marker='o', color='green', label='Training data')
# plt.scatter(test_ls, overtime_predicted, marker='*', color='blue', label='Test data')
# plt.scatter(test_ls, test_over_rate, color = "orange", label = "2017")
# plt.plot(test_ls, overtime_predicted, c='r')
print(lin2.predict(poly.fit_transform([[78]])))
plt.scatter(ls, over_rate, color='blue')
plt.plot(ls, lin2.predict(poly.fit_transform(ls)), color='red')
plt.title('Polynomial Regression')
plt.show()
def arima():
daily_count = db.session.query(daily_case_count.Date, daily_case_count.Count,
daily_case_count.Overtime_Count).filter(
and_(daily_case_count.Date > '2015-12-31', daily_case_count.Date < '2017-01-01')).all()
date = []
ls = []
over_ls = []
over_rate = []
for pair in daily_count:
date.append(pair.Date)
ls.append(pair.Count)
over_ls.append(pair.Overtime_Count)
over_rate.append(pair.Overtime_Count / pair.Count)
df = pd.DataFrame(columns=['Date','Over_rate'])
for i in range(len(date)):
df = df.append(
pd.DataFrame({
'Date':[date[i]],
'Over_rate':[over_rate[i]]
}),ignore_index=True
)
df.to_csv('./data/arima-data.csv')
print(df)
over_rate = df.diff(1)
over_rate = over_rate.dropna()
print(over_rate)
plt.plot(date,df,color='red')
plt.plot(date,over_rate)
plt.show()
acf = plot_acf(over_rate,lags=20)
plt.title('acf')
plt.show()
q=1
pacf = plot_pacf(over_rate, lags=20)
plt.title('pacf')
plt.show()
p=7
model = ARIMA(over_rate,order=(7,1,1),freq=1)
result = model.fit()
# predict
pred = result.predict('2016-12-15','2017-02-01',dynamic=True)
print(pred)
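# Note (added): statsmodels.tsa.arima_model.ARIMA used above was deprecated and
# removed as of statsmodels 0.13; on current versions the equivalent fit is
#   from statsmodels.tsa.arima.model import ARIMA
#   result = ARIMA(over_rate, order=(7, 1, 1)).fit()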
# certain_area_filter
def get_ambulance_station():
station_list = []
id_set = set()
count = 0
Columns = ['Place_id','Name','Business_Status','Lat','Lon']
df = pd.DataFrame(columns=Columns)
# location centre
for i in range(-290,-90,3):
# print(i)
lat = float(i/10)
for j in range(1370,1540,3):
lon = float(j/10)
count += 1
try:
request_text = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=' + str(lat) + ',' + str(lon) + '&rankby=distance&keyword=ambulance+station&key=AIzaSyDpf-U3kydWGURsa81v2Bo7CGeLqOVguAI'
print(request_text)
resources = requests.get(request_text,timeout=60)
except Exception as e:
print('----exception----')
continue
resources = json.loads(resources.text)
print(len(resources['results']))
for item in resources['results']:
if item['place_id'] not in id_set:
station_list.append(item['name'])
df = df.append(
pd.DataFrame({
'Place_id':[item['place_id']],
'Name':[item['name']],
'Business_Status':[item['business_status']],
'Lat':[item["geometry"]['location']['lat']],
'Lon': [item["geometry"]['location']['lng']]
}),ignore_index=True
)
id_set.add(item['place_id'])
while 'next_page_token' in resources:
print(True)
try:
resources = requests.get('https://maps.googleapis.com/maps/api/place/nearbysearch/json?pagetoken=' + resources['next_page_token'] + '&key=AIzaSyDpf-U3kydWGURsa81v2Bo7CGeLqOVguAI', timeout=60)
except Exception as e:
print('----exception----')
continue
resources = json.loads(resources.text)
for item in resources['results']:
if item['place_id'] not in id_set:
station_list.append(item['name'])
df = df.append(
pd.DataFrame({
'Place_id': [item['place_id']],
'Name': [item['name']],
'Business_Status': [item['business_status']],
'Lat': [item["geometry"]['location']['lat']],
'Lon': [item["geometry"]['location']['lng']]
}), ignore_index=True
)
id_set.add(item['place_id'])
print(len(station_list))
df.to_csv('./data/ambulance_station.csv')
return station_list
def get_daily_cases(year):
date = '2015-01-01 00:00:00'
cal = calendar.Calendar()
day_filter=set()
alldaylist = []
for month in list(range(1, 13)):
# listday = []
        # calendar's itermonthdates() returns an iterator over the days of the given year and month
for day in cal.itermonthdates(year, month):
            # filter out the zero padding used at the start and end of the iterator
if day != 0:
day = datetime.datetime.strftime(day,"%Y-%m-%d %H:%M:%S")
if day.startswith(str(year)) and day not in day_filter:
# listday.append(day)
alldaylist.append(day)
day_filter.add(day)
return alldaylist
def deceased_plot():
D_deceased_case = db.session.query(earf.EARF_Number,earf.Response_Time).filter(and_(earf.Final_Assessment=='deceased',earf.Priority=='D')).all()
D_deceased_case = db.session.query(earf.EARF_Number, earf.Response_Time).filter(
earf.Priority == 'D').all()
earf_num_li = []
response_time_li=[]
for pair in D_deceased_case:
earf_num_li.append(pair.EARF_Number)
response_time_li.append(pair.Response_Time)
# print(response_time_li)
plt.axhline(y=7, c='red')
plt.axhline(y=10,c='red')
plt.axhline(y=20, c='red')
plt.axhline(y=30, c='red')
plt.axhline(y=60, c='red')
plt.bar(range(len(earf_num_li)),response_time_li)
plt.show()
def low_priority_filter(priority):
low_priority_cases = db.session.query(earf.EARF_Number, earf.Lat, earf.Lon, earf.Priority, earf.Time_Received,
earf.Response_Time).filter(
and_(earf.Priority == priority, or_(earf.Response_Time_Class == 1, earf.Response_Time_Class == 2))).all()
case_collection={}
for case in low_priority_cases:
case_oc_time = case.Time_Received
# case_oc_time_str = str(case_oc_time).split(" ")[0]
case_collection[case.EARF_Number]={}
case_collection[case.EARF_Number]['self']=case
case_collection[case.EARF_Number]['hp_cases']={}
# print(case.Lat)
# print(type(case_oc_time))
one_h_bf = case_oc_time + datetime.timedelta(hours=-1)
close_time_hp_cases = db.session.query(earf.EARF_Number,earf.Lat,earf.Lon,earf.Priority,earf.Time_Received,earf.Response_Time).filter(and_(and_(earf.Time_Received>one_h_bf,earf.Time_Received<case_oc_time),or_(earf.Priority=='B',earf.Priority=='D'))).all()
for hp_case in close_time_hp_cases:
case_collection[case.EARF_Number]['hp_cases'][hp_case.EARF_Number] = hp_case
# print(case_collection[case.EARF_Number])
nearby_hp_case_collection = {}
lp_case_count = 0
total_count = 0
misdispatch = 0
misdispatch_set = set()
for case in case_collection:
low_priority_case_coordinate = (case_collection[case]['self'].Lat, case_collection[case]['self'].Lon)
lp_case_count += 1
# print(low_priority_case_coordinate)
nearby_hp_case_collection[case]={}
if case_collection[case]['hp_cases']!={}:
for hp_case in case_collection[case]['hp_cases']:
hp_coordinate = (case_collection[case]['hp_cases'][hp_case].Lat,case_collection[case]['hp_cases'][hp_case].Lon)
distance = geo_distance_cal(low_priority_case_coordinate,hp_coordinate)
distance = float(str(distance).split(" ")[0])
nearby_hp_case_collection[case][hp_case] = distance
total_count += 1
if distance<=10.0:
misdispatch += 1
misdispatch_set.add(case)
return nearby_hp_case_collection, lp_case_count, total_count, misdispatch,len(misdispatch_set)
def geo_distance_cal(coordinate_1, coordinate_2):
return geodesic(coordinate_1,coordinate_2)
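# Hedged example (added): geopy's geodesic result carries unit attributes, so
# the string parsing done in low_priority_filter can also be written as:
#   d = geo_distance_cal((-27.4673, 153.158), (-27.4957, 153.06))
#   km = d.km  # float kilometres, no str().split() needed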
def main():
# r1 = requests.get('https://nominatim.openstreetmap.org/search?format=json&q=' + 'Brisbane Road, Booval, Ipswich, Queensland, 4304, Australia', timeout=60)
# print(r1)
# print(type(r1))
# print(r1.text)
# r2 = requests.get('https://maps.googleapis.com/maps/api/place/textsearch/json?query=queensland+ambulance+service&key=AIzaSyDpf-U3kydWGURsa81v2Bo7CGeLqOVguAI', timeout=60)
# print(type(r2))
# # print(r2.text)
# r2 = json.loads(r2.text)
# for i in r2['results']:
# print(i['name'])
# get_ambulance_station()
# station_ls = get_ambulance_station()
# print(station_ls)
# print(len(station_ls))
# print(len(get_daily_cases(2015)))
# daily_count_distribution()
# over_rate_regression()
arima()
# print(geo_distance_cal((-27.4673, 153.158),(-27.4957, 153.06)))
# deceased_plot()
# print(low_priority_filter('O'))
# print(low_priority_filter('A'))
if __name__ == '__main__':
main()
# AIzaSyDpf-U3kydWGURsa81v2Bo7CGeLqOVguAI
| zavier250/ambulance_data_visualization | analysis.py | analysis.py | py | 12,820 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplo... |
28891356701 | """Utilities for dealing with project configuration."""
import abc
import configparser
from typing import Iterable, Tuple, Type, TypeVar
from pytype.platform_utils import path_utils
import toml
_CONFIG_FILENAMES = ('pyproject.toml', 'setup.cfg')
_ConfigSectionT = TypeVar('_ConfigSectionT', bound='ConfigSection')
def find_config_file(path):
"""Finds the first instance of a config file in a prefix of path."""
# Make sure path is a directory
if not path_utils.isdir(path):
path = path_utils.dirname(path)
# Guard against symlink loops and /
seen = set()
while path and path not in seen:
seen.add(path)
for filename in _CONFIG_FILENAMES:
f = path_utils.join(path, filename)
if path_utils.exists(f) and path_utils.isfile(f):
return f
path = path_utils.dirname(path)
return None
class ConfigSection(abc.ABC):
"""A section of a config file."""
@classmethod
@abc.abstractmethod
def create_from_file(
cls: Type[_ConfigSectionT], filepath: str, section: str
) -> _ConfigSectionT:
"""Create a ConfigSection if the file at filepath has section."""
@abc.abstractmethod
def items(self) -> Iterable[Tuple[str, str]]:
...
class TomlConfigSection(ConfigSection):
"""A section of a TOML config file."""
def __init__(self, content):
self._content = content
@classmethod
def create_from_file(cls, filepath, section):
try:
content = toml.load(filepath)
except toml.TomlDecodeError:
return None
if 'tool' in content and section in content['tool']:
return cls(content['tool'][section])
return None
def items(self):
for k, v in self._content.items():
yield (k, ' '.join(str(e) for e in v) if isinstance(v, list) else str(v))
class IniConfigSection(ConfigSection):
"""A section of an INI config file."""
def __init__(self, parser, section):
self._parser = parser
self._section = section
@classmethod
def create_from_file(cls, filepath, section):
parser = configparser.ConfigParser()
try:
parser.read(filepath)
except configparser.MissingSectionHeaderError:
# We've read an improperly formatted config file.
return None
if parser.has_section(section):
return cls(parser, section)
return None
def items(self):
return self._parser.items(self._section)
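# Hedged usage sketch (added, not part of pytype): locate the nearest config
# file above start_path and read one section from it; the section name
# 'pytype' below is just a conventional default.
def _read_section(start_path, section='pytype'):
  config_file = find_config_file(start_path)
  if config_file is None:
    return None
  cls = TomlConfigSection if config_file.endswith('.toml') else IniConfigSection
  parsed = cls.create_from_file(config_file, section)
  return dict(parsed.items()) if parsed else None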
| google/pytype | pytype/tools/config.py | config.py | py | 2,353 | python | en | code | 4,405 | github-code | 36 | [
{
"api_name": "typing.TypeVar",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pytype.platform_utils.path_utils.isdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pytype.platform_utils.path_utils",
"line_number": 19,
"usage_type": "name"
},
{... |
31608719442 | import math
from functools import partial
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from . import torchvision_models
from .torchvision_models import load_pretrained, inflate_pretrained, modify_resnets
from network.non_local_gaussian import NONLocalBlock3D
class STABlock(nn.Module):
def __init__(self, in_channels, inter_channels=None):
super(STABlock, self).__init__()
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
# self.sa1 = nn.Conv3d(in_channels=self.in_channels, out_channels=128,
# kernel_size=(1, 2, 2), stride=(1, 2, 2), padding=0) # 8, 14, 14
# self.sa2 = nn.Conv3d(in_channels=128, out_channels=128,
# kernel_size=(1, 2, 2), stride=(1, 2, 2), padding=0) # 8, 7, 7
# self.sa3 = nn.Conv3d(in_channels=128, out_channels=128,
# kernel_size=(1, 7, 7), stride=(1, 1, 1), padding=0) # 128, 8, 1, 1
# self.sa4 = nn.Conv3d(in_channels=128, out_channels=1,
# kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=0) # 1, 8, 1, 1
self.Qs = nn.Conv3d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.Ks = nn.Conv3d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.Vs = nn.Conv3d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.Ws = nn.Conv3d(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
self.Q = nn.Conv3d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.K = nn.Conv3d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.V = nn.Conv3d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.W = nn.Conv3d(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.Q.weight, 0)
nn.init.constant_(self.Q.bias, 0)
nn.init.constant_(self.K.weight, 0)
nn.init.constant_(self.K.bias, 0)
nn.init.constant_(self.Qs.weight, 0)
nn.init.constant_(self.Qs.bias, 0)
nn.init.constant_(self.Ks.weight, 0)
nn.init.constant_(self.Ks.bias, 0)
def forward(self, x):
'''
:param x: (b, c, t, h, w) (b, 64, t=16/2, 28, 28)
:return:
'''
batch_size = x.size(0)
t = x.size(2)
h = x.size(3)
w = x.size(4)
c = self.inter_channels
# Single Attention
# x_sa = self.sa1(x)
# x_sa = self.sa2(x_sa)
# x_sa = self.sa3(x_sa)
# x_sa = self.sa4(x_sa).view(batch_size, -1) #[batch, t]
# x_sa = F.sigmoid(x_sa)
# # print(x_sa)
# x_sa = x_sa.unsqueeze(2).unsqueeze(3).unsqueeze(4) #[batch, t, 1, 1, 1]
# x = x.permute(0, 2, 1, 3, 4)
# x = x.mul(x_sa)
# x = x.permute(0, 2, 1, 3, 4)
        # Spatial Attention
# Query
Q_x_s = self.Qs(x).view(batch_size, t, -1)
Q_x_s = Q_x_s.permute(0, 2, 1) # [chw x t]
# Key
K_x_s = self.Ks(x).view(batch_size, t, -1) # [t x chw]
# Value
V_x_s = self.Vs(x).view(batch_size, t, -1)
V_x_s = V_x_s.permute(0, 2, 1) # [chw x t]
corr_s = torch.matmul(Q_x_s, K_x_s) # [chw x chw]
corr_s_div_C = F.softmax(corr_s / math.sqrt(t), dim=-1)
ys = torch.matmul(corr_s_div_C, V_x_s)
ys = ys.permute(0, 2, 1).contiguous()
ys = ys.view(batch_size, self.inter_channels, *x.size()[2:])
# Temporal Attention
# Query
Q_x_t = self.Q(x).view(batch_size, c*h*w, -1)
Q_x_t = Q_x_t.permute(0, 2, 1) # [t x chw]
# Key
K_x_t = self.K(x).view(batch_size, c*h*w, -1) # [chw x t]
# Value
V_x_t = self.V(x).view(batch_size, c*h*w, -1)
V_x_t = V_x_t.permute(0, 2, 1) # [t x chw]
corr_t = torch.matmul(Q_x_t, K_x_t) # [t x t]
corr_t_div_C = F.softmax(corr_t / math.sqrt(t), dim=-1)
yt = torch.matmul(corr_t_div_C, V_x_t)
yt = yt.permute(0, 2, 1).contiguous()
yt = yt.view(batch_size, self.inter_channels, *x.size()[2:])
y_combine = ys + yt
W = self.W(y_combine)
z = W + x
return z
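# Hedged sanity check (added): the block is residual and shape-preserving, so a
# (b, c, t, h, w) input should come back with an identical shape. Sizes are kept
# small because the spatial branch builds a (c*h*w) x (c*h*w) affinity matrix.
def _sta_shape_check():
    block = STABlock(in_channels=8, inter_channels=4)
    x = torch.randn(2, 8, 4, 7, 7)
    assert block(x).shape == x.shape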
class TABlock(nn.Module):
def __init__(self, in_channels, inter_channels=None):
super(TABlock, self).__init__()
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
# self.sa1 = nn.Conv3d(in_channels=self.in_channels, out_channels=128,
# kernel_size=(1, 2, 2), stride=(1, 2, 2), padding=0) # 8, 14, 14
# self.sa2 = nn.Conv3d(in_channels=128, out_channels=128,
# kernel_size=(1, 2, 2), stride=(1, 2, 2), padding=0) # 8, 7, 7
# self.sa3 = nn.Conv3d(in_channels=128, out_channels=128,
# kernel_size=(1, 7, 7), stride=(1, 1, 1), padding=0) # 128, 8, 1, 1
# self.sa4 = nn.Conv3d(in_channels=128, out_channels=1,
# kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=0) # 1, 8, 1, 1
self.Q = nn.Conv3d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.K = nn.Conv3d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.V = nn.Conv3d(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.W = nn.Conv3d(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.Q.weight, 0)
nn.init.constant_(self.Q.bias, 0)
nn.init.constant_(self.K.weight, 0)
nn.init.constant_(self.K.bias, 0)
def forward(self, x):
'''
:param x: (b, c, t, h, w) (b, 64, t=16/2, 28, 28)
:return:
'''
batch_size = x.size(0)
t = x.size(2)
h = x.size(3)
w = x.size(4)
c = self.inter_channels
# Single Attention
# x_sa = self.sa1(x)
# x_sa = self.sa2(x_sa)
# x_sa = self.sa3(x_sa)
# x_sa = self.sa4(x_sa).view(batch_size, -1) #[batch, t]
# x_sa = F.sigmoid(x_sa)
# # print(x_sa)
# x_sa = x_sa.unsqueeze(2).unsqueeze(3).unsqueeze(4) #[batch, t, 1, 1, 1]
# x = x.permute(0, 2, 1, 3, 4)
# x = x.mul(x_sa)
# x = x.permute(0, 2, 1, 3, 4)
# Temporal Attention
# Query
Q_x_t = self.Q(x).view(batch_size, c*h*w, -1)
Q_x_t = Q_x_t.permute(0, 2, 1) # [t x chw]
# Key
K_x_t = self.K(x).view(batch_size, c*h*w, -1) # [chw x t]
# Value
V_x_t = self.V(x).view(batch_size, c*h*w, -1)
V_x_t = V_x_t.permute(0, 2, 1) # [t x chw]
corr_t = torch.matmul(Q_x_t, K_x_t) # [t x t]
corr_t_div_C = F.softmax(corr_t / math.sqrt(h*w*c), dim=-1)
yt = torch.matmul(corr_t_div_C, V_x_t)
yt = yt.permute(0, 2, 1).contiguous()
yt = yt.view(batch_size, self.inter_channels, *x.size()[2:])
W = self.W(yt)
z = W + x
return z
__all__ = [
'ResNet3D', 'resnet3d10', 'resnet3d18', 'resnet3d34',
'resnet3d50', 'resnet3d101', 'resnet3d152', 'resnet3d200',
]
model_urls = {
'kinetics-400': defaultdict(lambda: None, {
'resnet3d18': 'http://pretorched-x.csail.mit.edu/models/resnet3d18_kinetics-e9f44270.pth',
'resnet3d34': 'http://pretorched-x.csail.mit.edu/models/resnet3d34_kinetics-7fed38dd.pth',
'resnet3d50': 'http://pretorched-x.csail.mit.edu/models/resnet3d50_kinetics-aad059c9.pth',
'resnet3d101': 'http://pretorched-x.csail.mit.edu/models/resnet3d101_kinetics-8d4c9d63.pth',
'resnet3d152': 'http://pretorched-x.csail.mit.edu/models/resnet3d152_kinetics-575c47e2.pth',
}),
'moments': defaultdict(lambda: None, {
'resnet3d50': 'http://pretorched-x.csail.mit.edu/models/resnet3d50_16seg_moments-6eb53860.pth',
}),
}
num_classes = {'kinetics-400': 400, 'moments': 339}
pretrained_settings = defaultdict(dict)
input_sizes = {}
means = {}
stds = {}
for model_name in __all__:
input_sizes[model_name] = [3, 224, 224]
means[model_name] = [0.485, 0.456, 0.406]
stds[model_name] = [0.229, 0.224, 0.225]
for model_name in __all__:
if model_name in ['ResNet3D']:
continue
for dataset, urls in model_urls.items():
pretrained_settings[model_name][dataset] = {
'input_space': 'RGB',
'input_range': [0, 1],
'url': urls[model_name],
'std': stds[model_name],
'mean': means[model_name],
'num_classes': num_classes[dataset],
'input_size': input_sizes[model_name],
}
def conv3x3x3(in_planes, out_planes, stride=1):
"""3x3x3 convolution with padding."""
return nn.Conv3d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
)
def downsample_basic_block(x, planes, stride):
out = F.avg_pool3d(x, kernel_size=1, stride=stride)
zero_pads = torch.Tensor(
out.size(0), planes - out.size(1),
out.size(2), out.size(3), out.size(4)).zero_()
if isinstance(out.data, torch.cuda.FloatTensor):
zero_pads = zero_pads.cuda()
out = Variable(torch.cat([out.data, zero_pads], dim=1))
return out
class BasicBlock(nn.Module):
expansion = 1
Conv3d = staticmethod(conv3x3x3)
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = self.Conv3d(inplanes, planes, stride)
self.bn1 = nn.BatchNorm3d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = self.Conv3d(planes, planes)
self.bn2 = nn.BatchNorm3d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
Conv3d = nn.Conv3d
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = self.Conv3d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm3d(planes)
self.conv2 = self.Conv3d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm3d(planes)
self.conv3 = self.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm3d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet3D(nn.Module):
Conv3d = nn.Conv3d
def __init__(self, block, layers, shortcut_type='B', num_classes=400):
self.inplanes = 64
super(ResNet3D, self).__init__()
self.conv1 = self.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False)
self.bn1 = nn.BatchNorm3d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
# TAT
# self.tat_conv1 = self.Conv3d(64, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False) #28x28x16
# self.tat_conv2 = self.Conv3d(64, 21, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False) #14x14x16
# self.tat_conv3 = self.Conv3d(21, 1, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False) #7x7x16
# # self.tat_conv4 = self.Conv3d(256, 1, kernel_size=1, stride=(1, 1, 1), padding=(0, 0, 0), bias=False) #7x7x16x1
# self.tat_pooling = nn.MaxPool3d(kernel_size=(1, 7, 7))
# self.tat_norm = nn.Sigmoid()
#self.ta = TABlock(64, 64)
self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
self.layer2 = self._make_layer(block, 128, layers[1], shortcut_type, stride=2)
#self.nl_2 = NONLocalBlock3D(in_channels=128)
self.layer3 = self._make_layer(block, 256, layers[2], shortcut_type, stride=2)
#self.sta3 = STABlock(256,64)
self.inplanes_tmp = self.inplanes
self.layer4_v = self._make_layer(block, 512, layers[3], shortcut_type, stride=2)
#self.nl_4_v = NONLocalBlock3D(in_channels=512)
self.inplanes = self.inplanes_tmp
self.layer4_a = self._make_layer(block, 512, layers[3], shortcut_type, stride=2)
#self.nl_4_a = NONLocalBlock3D(in_channels=512)
self.avgpool = nn.AdaptiveAvgPool3d(1)
#self.fc_v = nn.Linear(512 * block.expansion, 1)
#self.fc_a = nn.Linear(512 * block.expansion, 1)
self.fc_v_cls = nn.Linear(512* block.expansion, 20)
self.fc_a_cls = nn.Linear(512* block.expansion, 20)
self.fc_expr = nn.Linear(512 * block.expansion * 2, 7)
self.tanh = nn.Tanh()
self.init_weights()
def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if shortcut_type == 'A':
downsample = partial(
downsample_basic_block,
planes=planes * block.expansion,
stride=stride,
)
else:
downsample = nn.Sequential(
self.Conv3d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm3d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def init_weights(self):
for m in self.modules():
if isinstance(m, self.Conv3d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
        # only a single shared layer3 is defined in __init__, so both branches split after it
        x = self.layer3(x)
        x_v = self.layer4_v(x)
        x_a = self.layer4_a(x)
        x_v = self.avgpool(x_v)
        x_a = self.avgpool(x_a)
        x_v = x_v.view(x_v.size(0), -1)
        x_a = x_a.view(x_a.size(0), -1)
        # fc_expr consumes the concatenated 512*expansion features of both branches
        x_expr = self.fc_expr(torch.cat((x_v, x_a), 1))
        # per-branch 20-way class logits from the heads defined in __init__
        x_v = self.fc_v_cls(x_v)
        x_a = self.fc_a_cls(x_a)
        #return self.tanh(x[:,0:2]), x[:,2:9]
        return torch.cat((x_v, x_a), 1), x_expr
def get_fine_tuning_parameters(model, ft_begin_index):
if ft_begin_index == 0:
return model.parameters()
ft_module_names = []
for i in range(ft_begin_index, 5):
ft_module_names.append('layer{}'.format(i))
ft_module_names.append('fc')
parameters = []
for k, v in model.named_parameters():
for ft_module in ft_module_names:
if ft_module in k:
parameters.append({'params': v})
break
else:
parameters.append({'params': v, 'lr': 0.0})
return parameters
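# Hedged usage sketch (added): parameters before ft_begin_index receive lr 0.0
# through the fallback branch above, so only the later layers are updated:
#   params = get_fine_tuning_parameters(model, ft_begin_index=3)
#   optimizer = torch.optim.SGD(params, lr=0.01, momentum=0.9)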
def resnet3d10(**kwargs):
"""Constructs a ResNet3D-10 model."""
model = ResNet3D(BasicBlock, [1, 1, 1, 1], **kwargs)
model = modify_resnets(model)
return model
def resnet3d18(num_classes=400, pretrained='kinetics-400', shortcut_type='A', **kwargs):
"""Constructs a ResNet3D-18 model."""
model = ResNet3D(BasicBlock, [2, 2, 2, 2], num_classes=num_classes,
shortcut_type=shortcut_type, **kwargs)
#pretrained = None
if pretrained is not None:
print("######## Pretrained #######")
settings = pretrained_settings['resnet3d18'][pretrained]
model = load_pretrained(model, num_classes, settings)
else:
print("######## NOT Pretrained #######")
model = modify_resnets(model)
return model
def resnet3d34(num_classes=400, pretrained='kinetics-400', shortcut_type='A', **kwargs):
"""Constructs a ResNet3D-34 model."""
model = ResNet3D(BasicBlock, [3, 4, 6, 3], num_classes=num_classes,
shortcut_type=shortcut_type, **kwargs)
if pretrained is not None:
settings = pretrained_settings['resnet3d34'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_resnets(model)
return model
def resnet3d50(num_classes=400, pretrained='kinetics-400', **kwargs):
"""Constructs a ResNet3D-50 model."""
model = ResNet3D(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, **kwargs)
if pretrained is not None:
settings = pretrained_settings['resnet3d50'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_resnets(model)
return model
def resnet3d101(num_classes=400, pretrained='kinetics-400', **kwargs):
"""Constructs a ResNet3D-101 model."""
model = ResNet3D(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, **kwargs)
if pretrained is not None:
settings = pretrained_settings['resnet3d101'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_resnets(model)
return model
def resnet3d152(num_classes=400, pretrained='kinetics-400', **kwargs):
"""Constructs a ResNet3D-152 model."""
model = ResNet3D(Bottleneck, [3, 8, 36, 3], num_classes=num_classes, **kwargs)
if pretrained is not None:
settings = pretrained_settings['resnet3d152'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_resnets(model)
return model
def resnet3d200(num_classes=400, pretrained='kinetics-400', **kwargs):
"""Constructs a ResNet3D-200 model."""
    model = ResNet3D(Bottleneck, [3, 24, 36, 3], num_classes=num_classes, **kwargs)
if pretrained is not None:
settings = pretrained_settings['resnet3d200'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_resnets(model)
return model
def resneti3d50(num_classes=400, pretrained='moments', **kwargs):
"""Constructs a ResNet3D-50 model."""
model = ResNet3D(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, **kwargs)
if pretrained is not None:
settings = torchvision_models.pretrained_settings['resnet50'][pretrained]
model = inflate_pretrained(model, num_classes, settings)
model = modify_resnets(model)
return model
if __name__ == '__main__':
batch_size = 1
num_frames = 48
num_classes = 339
img_feature_dim = 512
frame_size = 224
model = resnet3d50(num_classes=num_classes, pretrained='moments')
input_var = torch.randn(batch_size, 3, num_frames, 224, 224)
print(input_var.shape)
    va_out, expr_out = model(input_var)  # forward returns a (class logits, expression logits) tuple
    print(va_out.shape, expr_out.shape)
| Forrest0503/VAT-ABAW | network/resnet3D.py | resnet3D.py | py | 20,933 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv3d",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
20764888384 | from bs4 import BeautifulSoup
import csv
with open("carviewer2.html") as fp:
soup = BeautifulSoup(fp, 'html.parser')
employee_file = open('week02data.csv', mode='w', newline='')  # newline='' avoids blank rows on Windows
employee_writer = csv.writer(employee_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
rows = soup.findAll("tr")
#print(rows)
for row in rows:
    cols = row.findAll("td")[0:4]  # keep only the first four columns so the Update/Delete links are not written out
dataList = []
for col in cols:
dataList.append(col.text)
#print(dataList)
employee_writer.writerow(dataList)
employee_file.close()
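# Hedged variant (added): the same scrape with context managers so both the
# HTML handle and the CSV file are closed automatically:
# with open("carviewer2.html") as fp, open('week02data.csv', 'w', newline='') as out:
#     writer = csv.writer(out, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
#     for row in BeautifulSoup(fp, 'html.parser').findAll("tr"):
#         writer.writerow([col.text for col in row.findAll("td")[0:4]])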
| eamonnofarrell/dataRepresentation | week03-webScraping/PY05-readFileFinal.py | PY05-readFileFinal.py | py | 602 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_MINIMAL",
"line_number": 8,
"usage_type": "attribute"
}
] |
10035777489 | import numpy as np
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.metrics import roc_auc_score
from sklearn import preprocessing
def runTuneTest(learner, parameters, X, y):
"""
This method takes in a learning algorithm, the possible settings you would use for the algorithm and the full data set
It performs cross-validation to tune the algorithm; that is, it creates held-aside data to determine which settings are best
Returns the accuracy and model weights for each fold
"""
outer_cv = StratifiedKFold(n_splits=5, shuffle=True)
outer_cv = outer_cv.split(X,y)
results = []
fold = 0
features = []
#For each chunk (fold) created above, treat it as the held-aside test set and group the other four to train the model
for train, test in outer_cv:
fold += 1
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
clf = GridSearchCV(learner, parameters, cv=3)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
results.append(score)
#Use the code below instead if you want roc values
#preds = clf.predict_proba(X_test)
#results.append(roc_auc_score(y_test,preds[:,1]))
        try:  # linear models such as logistic regression expose coef_
            features.append(clf.best_estimator_.coef_[0,:])
        except AttributeError:  # tree ensembles such as random forests expose feature_importances_
            features.append(clf.best_estimator_.feature_importances_)
return results, features
def getGeneRanks(weights):
"""
Takes the weights learned from an algorithm and sorts them by the most significant weights for predicting cancer tissue. Returns an array of length numGenes, with the ith entry being the index of the ith most significant gene
"""
rank_weights = []
for w in weights:
rank_weights.append(np.argsort(np.argsort(w)))
feat_weights = np.average(rank_weights,axis=0)
indices = np.argsort(feat_weights)
return indices
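# Hedged usage sketch (added): tuning a logistic regression and ranking genes.
# The file names below are hypothetical placeholders.
#   from sklearn.linear_model import LogisticRegression
#   X = loadGeneExpression('expression.csv')
#   y = loadLabels('labels.txt')
#   genes = loadGeneNames('genes.txt')
#   accs, weights = runTuneTest(LogisticRegression(max_iter=1000),
#                               {'C': [0.01, 0.1, 1, 10]}, X, y)
#   print(genes[getGeneRanks(weights)][:10])  # genes with the smallest average weights first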
def loadGeneExpression(filename, delim=","):
"""
Parses a csv file containing gene expression data. The format of the file
should be one expression profile per line.
Returns: 2D numpy array of size (numProfiles, numGenes)
"""
rawdata = np.genfromtxt(filename,delimiter=delim)
scaled = preprocessing.normalize(rawdata, norm='l2', axis=0)
return scaled
def loadGeneNames(filename):
"""
Reads the names of the genes in the expression profile. There should be
the same number of gene names as columns from loadGeneExpression
Returns a 1D numpy array of length numGenes
"""
with open(filename,'r') as f:
genes = np.array([line.rstrip() for line in f])
return genes
def loadLabels(filename):
"""
Reads the labels (classifications) of each expression profile loaded from
loadGeneExpression. There should be on label per profile. Negative values
indicate cancer while positive indicate normal tissue
Returns a 1D numpy array of length numProfiles
"""
y = np.genfromtxt(filename)
y = np.where(y>0,1,-1) #positive is normal
return y
| ameetsoni/CompBioNSF-Module | soln/geneMLLib.py | geneMLLib.py | py | 3,118 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sklearn.model_selection.StratifiedKFold",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.GridSearchCV",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 45,
"usage_type": "call"
},... |
1224550679 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 23:04:53 2018
@author: Carla Pastor
Project: Heat Distribution.
"""
import numpy as np
import matplotlib.pyplot as lb
from matplotlib.colors import ListedColormap
if __name__ == "__main__":
inp=int(input("Enter the starting temperature: "))
oldTemp=np.zeros((inp+1, inp+1))
for index in range(0,inp+1):
oldTemp[index, 0]=inp
temp=np.copy(oldTemp) #np.copy returns an array copy of the given object.
#counter to count 3000 iterations and stop if program iterates more than 3000..
#In the interest of time, I will stop at 3000 iterations if we don’t see convergence by then.
c=0
# while (True):
while c!=3000:
        flag=False # assume not converged yet; check for an exact match below
for i in range(1, inp):
for j in range(1, inp):
temp[i,j]=0.25*(oldTemp[i-1,j] + oldTemp[i+1,j] + oldTemp[i,j-1] + oldTemp[i,j+1]) #included in the hw specifications
if np.array_equal(temp, oldTemp): # True if two arrays have the same shape and elements, False otherwise.
flag=True
break
if flag==True:
break
oldTemp=np.copy(temp)
if flag==True:
break
#increment counter
c=c+1
#ref : https://stackoverflow.com/questions/44443993/matplotlib-colors-listedcolormap-in-python
    cmap = ListedColormap(['darkblue','blue','aqua','lawngreen','yellow','orange','red','darkred']) # inverted order. The colors are already included :/
#['#7D2C1B', '#DB5033', '#F4A718', '#FCEE14','#CBFE0D', '#0DFEDD', '#08C6E4', '#0A4751'] # my bad
colors=[]
split=inp/8.0 #divide the temperatures equally
for i in range(8):
colors.append(split)
        split+=inp/8.0
#Get the current Axes instance on the current figure matching the given keyword args, or create one.
lb.gca().invert_yaxis()
lb.imshow(oldTemp, cmap=cmap, origin='lower') #default: None. https://matplotlib.org/api/_as_gen/matplotlib.pyplot.imshow.html
lb.show()
#end
# Other references:
# https://www.programcreek.com/python/example/102329/matplotlib.pyplot.pcolormesh
# https://stackoverflow.com/questions/2051744/reverse-y-axis-in-pyplot
| carlaoutput/matplotlib | heatDistribution.py | heatDistribution.py | py | 2,384 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.array_equal",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_numbe... |
16371835834 | from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.hooks.postgres_hook import PostgresHook
from warnings import warn
class CheckFutureYearsOperator(BaseOperator):
"""
Checks number of observations in Postgres table with a year in the future
:param postgres_conn_id: reference to a specific Postgres database
:type postgres_conn_id: str
:param postgres_table_name: name of Postgres table to check
:type postgres_table_name: str
"""
@apply_defaults
def __init__(self,
postgres_conn_id,
postgres_table_name,
*args, **kwargs):
super(CheckFutureYearsOperator, self).__init__(*args, **kwargs)
self.postgres_conn_id = postgres_conn_id
self.postgres_table_name = postgres_table_name
def execute(self, context):
postgres_hook = PostgresHook(self.postgres_conn_id)
n_future_years = postgres_hook.get_records(
f"""
select count(*)
from {self.postgres_table_name}
where year > date_part('year', current_date);
"""
)[0][0]
# Output warning if there are observations with future years
if n_future_years > 0:
warn(
f"""
There are {n_future_years} observations in {self.postgres_table_name} table with year in the future
"""
)
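# Hedged usage sketch (added): wiring the operator into a DAG; the connection
# id, table name, and dag object below are hypothetical.
#   check_future_years = CheckFutureYearsOperator(
#       task_id='check_future_years',
#       postgres_conn_id='postgres_default',
#       postgres_table_name='contributions',
#       dag=dag,
#   )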
| davidrubinger/political-contributions-canada | plugins/operators/check_future_years_operator.py | check_future_years_operator.py | py | 1,496 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "airflow.models.BaseOperator",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "airflow.utils.decorators.apply_defaults",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "airflow.hooks.postgres_hook.PostgresHook",
"line_number": 29,
"usage_t... |
34955184359 |
import os
import gspread
from gplus import ClientPlus
from datetime import datetime
from oauth2client.service_account import ServiceAccountCredentials
from flask import Flask, render_template
application = Flask(__name__, static_url_path='/static')
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
credentials = ServiceAccountCredentials.from_json_keyfile_name( CURRENT_DIR + '/coastal.json',
scopes = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive' ]
)
@application.route('/')
def index():
"""
Landing page to be rendered. Get the latest spreadsheet from Google Drive
and display it
"""
client = gspread.authorize(credentials, ClientPlus)
calendar = client.open('Coastal Calendar')
speakers = calendar.worksheet("Speakers")
speaker_details = []
for r, row in enumerate( speakers.get_all_values()):
# Skip the header
if r == 0:
continue
#https://drive.google.com/uc?export=view&id=1gi-Bj-WbFE82px-k3ZZXPFdEMNIwLEce
photo_id = client.get_file_id( row[2] )
photo_url = 'https://drive.google.com/uc?export=view&id={}'.format(photo_id)
day = datetime.strptime( row[0],"%m/%d/%Y" )
speaker_details.append( { 'date' :day.strftime( "%B %d, %Y" ),
'name' :row[1],
'photo' :photo_url,
'url' :row[3],
'describe':row[4]
} )
events = calendar.worksheet('Events')
event_details = []
for r,row in enumerate( events.get_all_values()):
# Skip the header
if r == 0:
continue
day = datetime.strptime( row[0],"%m/%d/%Y" )
event_details.append( { 'date' :day.strftime( "%a %B %d, %Y" ),
'describe':row[1],
'place' :row[2],
'notes' :row[3]
} )
context = {'speakers':speaker_details, 'events':event_details}
return render_template( 'main.html', **context )
@application.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, Nothing at this URL {}'.format(e), 404
@application.errorhandler(500)
def application_error(e):
"""Return a custom 500 error."""
return 'Sorry, unexpected error: {}'.format(e), 500
if __name__ == '__main__':
application.run(debug = False, host='0.0.0.0', port=8000)
| CrabbyPete/coastalflyrodders | main.py | main.py | py | 2,877 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line... |
73574130983 | import torch
import torch.nn as nn
import torch.nn.functional as F
class discrete_policy_net(nn.Module):
def __init__(self, input_dim, output_dim, hidden_dim=128):
super(discrete_policy_net, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.hidden_dim = hidden_dim
self.fc1_layer = nn.Linear(self.input_dim, self.hidden_dim)
self.fc2_layer = nn.Linear(self.hidden_dim, self.hidden_dim)
self.fc3_layer = nn.Linear(self.hidden_dim, self.output_dim)
def forward(self, input, explore=True, mask=None, log=False, reg=False, entropy=False, all=False):
x = F.leaky_relu(self.fc1_layer(input))
x = F.leaky_relu(self.fc2_layer(x))
x = self.fc3_layer(x)
res = []
prob = F.softmax(x, dim=-1)
if mask is not None:
prob.masked_fill_(mask, value=0.0)
if explore:
action = torch.multinomial(prob, 1)
else:
action = prob.max(1, keepdim=True)[1]
res = [action]
if reg:
reg_policy = torch.mean(x ** 2)
res.append(reg_policy)
if log:
res.append(F.log_softmax(x, -1).gather(1, action))
if entropy:
res.append(-(F.log_softmax(x, dim=-1) * F.softmax(x, dim=-1)).sum(1).mean())
if all:
res.append(F.softmax(x, dim=-1))
if len(res) > 1:
return tuple(res)
else:
return res[0] | deligentfool/MAAC_pytorch | policy.py | policy.py | py | 1,529 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
27515709525 | from discord import Embed
from discord.ext import commands
import discord
class ModerationCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=['удалить'])
@commands.has_role('Админ')
async def clear(self, ctx, amount: int = None):
await ctx.message.delete()
if amount is None:
await ctx.send('Укажите кол-во сообщений которые надо удалить', delete_after=10)
else:
await ctx.channel.purge(limit=amount)
emb = discord.Embed(title='Удаление сообщений',
description=f'Админ {ctx.author.mention} почистил чат.')
await ctx.send(embed=emb, delete_after=10)
@commands.command(aliases=['добавить_роль'])
@commands.has_role('Админ')
async def add_role(self, ctx, member: discord.Member = None, role: discord.Role = None):
logs = self.bot.get_channel(669129314251309056)
await ctx.message.delete()
        if member is None:
await ctx.send('Укажите кому дать роль', delete_after=10)
        elif role is None:
await ctx.send('Укажите какую роль дать', delete_after=10)
else:
await member.add_roles(role)
emb = discord.Embed(title='Выдача роли',
description=f'Админ {ctx.author.mention} дал роль {role} для {member.name}.')
await logs.send(embed=emb)
@commands.command(aliases=['бан'])
@commands.has_role('Админ')
async def ban(self, ctx, member: discord.Member = None, reason=None):
logs = self.bot.get_channel(669129314251309056)
await ctx.message.delete()
if member is None:
await ctx.send('Укажите кого надо забанить', delete_after=10)
elif member is ctx.message.author:
await ctx.send('Ты шо дурной, зачем банить самого себя?', delete_after=10)
else:
if reason is None:
emb = discord.Embed(title='Бан', description=f'Админ {ctx.author.mention} забанил пользователя {member}.')
await logs.send(embed=emb)
try:
await member.send(f'Вас забанили на сервере {ctx.guild.name}')
except Exception:
print('Ошибочка: Кажется я не могу писать в личку')
finally:
await ctx.guild.ban(member)
elif reason is not None:
emb = discord.Embed(title='Бан', description=f'Админ {ctx.author.mention} забанил пользователя {member} по причине {reason}.')
await logs.send(embed=emb)
try:
await member.send(f'Вас забанили на сервере {ctx.guild.name} по причине {reason}.')
except Exception:
print('Ошибочка: Кажется я не могу писать в личку')
finally:
await ctx.guild.ban(member, reason=reason)
@commands.command(aliases=['мут'])
@commands.has_role('Админ')
async def mute(self, ctx, member: discord.Member = None):
logs = self.bot.get_channel(669129314251309056)
await ctx.message.delete()
        if member is None:
await ctx.send('Укажите кого надо замутить', delete_after=10)
elif member == ctx.message.author:
await ctx.send("Ты шо дурной, зачем мутить самого себя?", delete_after=10)
else:
emb = discord.Embed(title="Мут", description=f'Админ {ctx.author.mention} замутил пользователя {member}')
role = discord.utils.get(ctx.message.guild.roles, name='Mute')
await member.add_roles(role)
await logs.send(embed=emb)
@commands.command(aliases=['анмут'])
@commands.has_role('Админ')
async def unmute(self, ctx, member: discord.Member = None):
logs = self.bot.get_channel(669129314251309056)
await ctx.message.delete()
        if member is None:
            await ctx.send("Укажите кого надо размутить", delete_after=10)
else:
emb = discord.Embed(title="Анмут", description=f'Админ {ctx.author.mention} размутил пользователя {member}')
role = discord.utils.get(ctx.message.guild.roles, name="Mute")
await member.remove_roles(role)
await logs.send(embed=emb)
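    # Hedged sketch (added): a cog-level listener in the same style that turns the
    # MissingRole errors raised by @commands.has_role('Админ') into a chat reply.
    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        if isinstance(error, commands.MissingRole):
            await ctx.send('У вас нет роли для этой команды', delete_after=10)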
def setup(bot):
bot.add_cog(ModerationCog(bot))
| RuCybernetic/CyberTyanBot | cogs/commands/moderation.py | moderation.py | py | 4,910 | python | ru | code | 1 | github-code | 36 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "discor... |
35866755353 | import subprocess
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Запуск парсера офисных документов."
def handle(self, *args, **options):
run_parser = subprocess.getoutput(
f'poetry run parser -r -p {settings.RESOURSES_ROOT}'
)
with open(
f'{settings.RESOURSES_ROOT}/result.txt',
'w',
encoding='utf-8',
) as file:
print(run_parser)
file.write(run_parser)
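# Hedged usage note (added): with this file under <app>/management/commands/,
# the command name is the file's basename, so the parser is launched with
#   python manage.py fill-db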
| Studio-Yandex-Practicum/adaptive_hockey_federation | adaptive_hockey_federation/core/management/commands/fill-db.py | fill-db.py | py | 574 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "subprocess.getoutput",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.RESOURSES_ROOT",
"line_number": 12,
"usage_type": "attri... |
36723052286 | from django.template import loader, RequestContext
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound, Http404
from django.conf import settings
from django.utils.safestring import mark_safe
from django.contrib.auth.views import redirect_to_login
from django import forms
from django.views.generic.edit import FormView
from django.views.generic import ListView
import os
from .models import Page, Attachment
DEFAULT_TEMPLATE = 'pages/default.html'
def page(request, url='/'):
"""
Flat page view.
Models: `pages.page`
Templates: Uses the template defined by the ``template_name`` field,
or `pages/default.html` if template_name is not defined.
Context:
page
`pages.page` object
"""
if not url.endswith('/') and settings.APPEND_SLASH:
return HttpResponseRedirect("%s/" % request.path)
if not url.startswith('/'):
url = "/" + url
# here I've removed the requirement that the page be for this site
# - this won't work if we ever have more than one site here
# which isn't planned
# deal with the lack of a root page
try:
f = Page.objects.get(url__exact=url)
    except Page.DoesNotExist:
# no page, if we're after the root page then serve a default page
if url == '/':
f = Page(title='No Pages',
content='<p>No pages defined. Login to <a href="/admin">admin</a> to create some.</p>')
else:
raise Http404("Page does not exist")
# If registration is required for accessing this page check the group
if f.group_required:
if request.user.is_authenticated:
if not request.user.groups.filter(name=f.group_required.name).count():
f = Page(title='Permission denied',
content='<p>You do not have permission to access this page. This page is only available to users with the permission level of ' + f.group_required.name.lower() + '. If you believe you should have access to this page please send us <a href="/feedback/generalfeedback.html">feedback</a>.</p>')
else:
# Not logged in, redirect to the login page.
return redirect_to_login(request.path)
if f.template_name:
t = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
else:
t = loader.get_template(DEFAULT_TEMPLATE)
# To avoid having to always use the "|safe" filter in flatpage templates,
# mark the title and content as already safe (since they are raw HTML
# content in the first place).
f.title = mark_safe(f.title)
f.content = mark_safe(f.content)
response = HttpResponse(t.render({'page': f}, request))
return response
class UploadFileForm(forms.Form):
file = forms.FileField()
description = forms.CharField()
class AttachmentView(ListView):
template_name = "pages/attachments.html"
model = Attachment
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["form"] = UploadFileForm
return context
def handle_file(self, request, fileobj):
"""Store the uploaded file"""
fullpath = os.path.join(settings.UPLOAD_ROOT, "attachments", fileobj.name)
relname = os.path.join(settings.UPLOAD_URL, "attachments", fileobj.name)
if not os.path.exists(os.path.dirname(fullpath)):
os.makedirs(os.path.dirname(fullpath))
with open(fullpath, 'wb+') as destination:
for chunk in fileobj.chunks():
destination.write(chunk)
# create and save a new attachment object
a = Attachment(file=relname, description=request.POST['description'], uploader=request.user)
a.save()
return relname
def post(self, request):
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
destname = self.handle_file(request, request.FILES['file'])
else:
print("invalid form", form)
return HttpResponseRedirect('/pages/attachments')
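# Hedged wiring sketch (added): URL patterns these views assume; the paths are
# hypothetical but match the redirect to /pages/attachments above.
#   from django.urls import path, re_path
#   urlpatterns = [
#       path('pages/attachments', AttachmentView.as_view()),
#       re_path(r'^(?P<url>.*)$', page),
#   ]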
| stevecassidy/signbank-pages | pages/views.py | views.py | py | 4,101 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.settings.APPEND_SLASH",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 29,
"usage_type": "call"
... |