seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
73921921384 | import pandas as pd
import sys, os, MySQLdb
import pandas as pd
import numpy as np
from collections import defaultdict
import click
db = MySQLdb.connect(host="localhost", user=os.environ["DATAVIVA2_DB_USER"],
passwd=os.environ["DATAVIVA2_DB_PW"],
db=os.environ["DATAVIVA2_DB_NAME"])
db.autocommit(1)
cursor = db.cursor()
cursor.execute("select id_ibge, id from attrs_bra where id_ibge is not null and length(id) = 8;")
bra_lookup = {str(r[0])[:-1]:r[1] for r in cursor.fetchall()}
cursor.execute("select substr(id, 2), id from attrs_cnae where length(id) = 6;")
cnae_lookup = {str(r[0]):r[1] for r in cursor.fetchall()}
cursor.execute("select id, id from attrs_cbo where length(id) = 4;")
cbo_lookup = {r[0]:r[1] for r in cursor.fetchall()}
cbo_lookup["-1"] = "xxxx" # data uses -1 for missing occupation
missing = {
"bra_id": defaultdict(int),
"cnae_id": defaultdict(int),
"cbo_id": defaultdict(int)
}
def map_gender(x):
MALE, FEMALE = 0, 1
gender_dict = {MALE: 1, FEMALE: 2}
if x in gender_dict:
return str(gender_dict[x])
return str(3)
def map_color(color):
INDIAN, WHITE, BLACK, ASIAN, MULTI, UNKNOWN = 1,2,4,6,8,9
color_dict = {INDIAN:1, WHITE:2, BLACK:3, ASIAN:4, MULTI:5, 9:UNKNOWN, -1:UNKNOWN }
return str(color_dict[int(color)])
def map_age(age):
age_bucket = int(np.floor( int(age) / 10 ))
if age_bucket == 0:
age_bucket = 1
elif age_bucket > 6:
age_bucket = 6
return str(age_bucket)
def map_literacy(lit):
ILLITERATE, BASIC, HIGHSCHOOL, COLLEGE, UNKNOWN = 1, 2, 3, 4, 9
lit_dict = {1:ILLITERATE, 2:ILLITERATE, 3:BASIC, 4:BASIC, 5:BASIC, 6:BASIC, 7:HIGHSCHOOL,
8:HIGHSCHOOL, 9:COLLEGE, -1:UNKNOWN }
return str(lit_dict[int(lit)])
def floatvert(x):
x = x.replace(',', '.')
try:
return float(x)
except:
return np.nan
def bra_replace(raw):
try:
return bra_lookup[str(raw).strip()]
except:
missing["bra_id"][raw] += 1
return None
def cnae_replace(raw):
try:
return cnae_lookup[str(raw).strip()]
except:
missing["cnae_id"][raw] += 1
return None
def cbo_replace(raw):
try:
return cbo_lookup[str(raw).strip()[:4]]
except:
missing["cbo_id"][raw] += 1
return None
cols = ["cbo_id", "cnae_id", "literacy", "age", "est_id", "simple", "bra_id", "num_emp", "color", "wage_dec", "wage", "gender", "est_size", "year"]
coerce_cols = {"bra_id": bra_replace, "cnae_id":cnae_replace, "cbo_id":cbo_replace, \
"wage":floatvert, "emp_id":str, "est_id": str}
@click.command()
@click.argument('file_path', type=click.Path(exists=True))
def main(file_path):
output_file = file_path + ".h5"
df = pd.read_csv(file_path, header=0, sep=";", names=cols, converters=coerce_cols)
df["d_id"] = df.apply(lambda x:'%s%s%s%s' % (
map_gender(x['gender']), map_age(x['age']),
map_color(x['color']), map_literacy(x['literacy'])
), axis=1)
df.to_hdf(output_file, 'table')
if __name__ == '__main__':
main()
| vamsijkrishna/dataviva-scripts | scripts/rais_new/helpers/csv2hdf.py | csv2hdf.py | py | 3,228 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "MySQLdb.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_... |
3592670664 | from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from typing import List
from db.database import get_db
from security.auth import oauth2_scheme, get_current_user
from . import schemas, crud
router = APIRouter()
@router.post("/news/add")
async def add_new(title: str, desc: str, db: Session = Depends(get_db), token: str = Depends(oauth2_scheme)):
user = get_current_user(db, token)
if not user.is_admin:
raise HTTPException(status_code=400, detail="No permision")
return crud.add_new(db, title, desc)
@router.delete("/news/{new_id}/delete")
async def delete_new(new_id: int, db: Session = Depends(get_db), token: str = Depends(oauth2_scheme)):
user = get_current_user(db, token)
if not user.is_admin:
raise HTTPException(status_code=400, detail="No permision")
return crud.delete_new(db, new_id)
@router.get("/news", response_model=List[schemas.News])
async def read_news(db: Session = Depends(get_db)):
return crud.get_news(db)
| ostrekodowanie/Synapsis | backend/api/news/routes.py | routes.py | py | 1,024 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "db.database.... |
35919708000 | import difflib
import redis
from pymongo import MongoClient
client = MongoClient('mongodb+srv://Alex:goit123@utcluster.zrkwr.mongodb.net/myFirstDatabase?retryWrites=true&w=majority')
def add():
name = input('Enter name: ')
if db.ContactBook.find_one({'name': name}):
print(f"The record with name '{name}' is already exist. Try another name or update the one")
phone = input('Enter phone: ')
email = input('Enter email: ')
db.ContactBook.insert_one({'name': name, 'email': email, 'phone': phone})
print('New record successfully added')
def showall():
for rec in db.ContactBook.find():
print(f'name = {rec["name"]}, phone = {rec["phone"]}, email = {rec["email"]}')
def delete():
name = input('Enter name: ')
if db.ContactBook.find_one({'name': name}):
db.ContactBook.delete_one({'name': name})
print(f'Record with name "{name}" has been successfully deleted')
else:
print("There is no such record in DB")
def show():
name = input('Enter name: ')
result = db.ContactBook.find_one({'name': name})
if result:
print(f'name = {result["name"]}, phone = {result["phone"]}, email = {result["email"]}')
else:
print("There is no such record in DB")
def update():
name = input('Enter name: ')
if db.ContactBook.find_one({'name': name}):
print("The record exists in DB. Enter a new data:")
phone = input('Enter phone: ')
email = input('Enter email: ')
db.ContactBook.update_one({'name': name}, {'$set': {'name': name, 'email': email, 'phone': phone}})
print(f'Record "{name}" has been successfully updated')
else:
print("There is no such record in DB. Try another command")
def find():
data = input('Enter data: ')
query = {"$or": [{"phone": {"$regex": data}}, {"email": {"$regex": data}}]}
res = db.ContactBook.find(query, {'_id': 0})
if res != None:
for rec in res:
print(f" Name = {rec['name']}, phone = {rec['phone']}, email = {rec['email']}")
else:
print("There is no such record in DB. Try another command")
def command_assistant():
commands = ['add', 'show', 'delete', 'show_all', 'exit', 'update', 'find'] # list of commands
r = redis.StrictRedis(host='localhost', port=6379, db=0)
while True:
command = str(input('Enter command:\n>>> ')).lower().strip()
if not command in commands: # prediction logic
if r.get(command): # checking cache
print(f"(Cache)Perhaps you mean {(r.get(command)).decode('utf-8')}")
ans = str(input("Answer (Y/N): ")).lower()
if ans == "n":
print("Command input error, try again")
continue
elif ans == "y":
variant = r.get(command).decode('utf-8')
break
else:
variant = str(difflib.get_close_matches(command, commands, cutoff=0.1, n=1))[2:-2] # prediction realisation
print(f"Perhaps you mean {variant}")
answer = str(input("Answer (Y/N): ")).lower()
if answer == "n":
print("Command input error, try again")
continue
elif answer == "y":
r.set(command, variant)
break
else:
variant = command
break
return variant
if __name__ == '__main__':
with client:
db = client.myfirst_mongoDB
print(f'{" "*20}*** Welcome to Personal assistant Contact book DB edition!***')
print("Commands:\n - add;\n - show;\n - show_all;\n - delete;\n - update;\n - find;\n - exit\n")
while True:
try:
answer = command_assistant()
except (ConnectionRefusedError, redis.exceptions.ConnectionError, ConnectionError) as Error:
print("Error! Connection problems to Redis. App is working without command prediction")
answer = str(input('Enter command:\n>>> ')).lower().strip()
if answer == 'add':
add()
continue
elif answer == 'show_all':
showall()
continue
elif answer == 'delete':
delete()
continue
elif answer == 'show':
show()
continue
elif answer == 'update':
update()
continue
elif answer == 'find':
find()
continue
elif answer == 'exit':
break
else:
print("Command input error. Try correct command again")
continue
print("Good bye!")
| AlexUtchenko/goit-python | WEB10/PA_Mongo_Redis_Nodic.py | PA_Mongo_Redis_Nodic.py | py | 4,975 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "redis.StrictRedis",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "difflib.get_close_matches",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "redis.... |
37630155256 | #!/usr/bin/env python
from net import *
import csv
import cv2
from cv_bridge import CvBridge, CvBridgeError
import os
import numpy as np
from PIL import Image
import tensorflow as tf
from skimage import color
import time
import rospy
from cone_detection.msg import Label
#Init ros.
rospy.init_node('local_network_test')
#Net parameters.
image_width = rospy.get_param('/cone/width_pixel')
image_height = rospy.get_param('/cone/height_pixel')
path_to_candidate = rospy.get_param('/candidate_path')
path_to_model = rospy.get_param('/model_path')
datasets = rospy.get_param('/neural_net/datasets')
datasets_validation = rospy.get_param('/neural_net/datasets_validation')
#Init and saver variable.
keep_prob = tf.placeholder(tf.float32)
input_placeholder = tf.placeholder(tf.float32, [None, image_height, image_width, 3])
output_placeholder = tf.placeholder(tf.float32, [None, 2])
input_placeholder_flat = tf.contrib.layers.flatten(input_placeholder)
y_true = tf.argmax(output_placeholder, dimension=1)
output_layer = fully_connected(input_placeholder_flat, 0.01, keep_prob)
y_pred = tf.argmax(tf.nn.softmax(output_layer), dimension=1)
def deleteFolderContent(path):
for element in os.listdir(path):
os.remove(os.path.join(path, element))
class NeuralNet:
def __init__(self):
#Init tf session.
self.session = tf.Session()
self.session.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(self.session, path_to_model + getModelName(datasets) + " .cpkt")
#Init cone list.
image_list = []
# Start timer.
start_time = time.time()
# Labeling.
for i in range(0,1000):
path = path_to_candidate + datasets_validation + "/" + str(i) + ".jpg"
try:
img = Image.open(path)
arr = np.array(img.getdata(),np.uint8)
arr = arr.reshape(image_height, image_width, 3)
image_list.append(self.labeling(arr, i))
cv2.imwrite(path_to_candidate + "candidates/" + str(i) + ".jpg", arr)
except:
continue
# Stop timer.
end_time = time.time()
time_difference = end_time - start_time
print("Labeling time usage: " + str(time_difference) + " s")
# Getting labels.
labeled_list = []
reader = csv.reader(open(path_to_candidate + datasets_validation + "/" + "labeling.csv"))
for row in reader:
image = int(row[0])
label = int(row[1])
labeled_list.append([image, label])
# Accuracy by comparing lists.
correct = 0.0;
for element in image_list:
index = element[0]
for labeled_element in labeled_list:
if(index == labeled_element[0] and element[1] == labeled_element[1]):
correct += 1.0
break
accuracy = correct / (len(labeled_list) - 1)
print("Labeling accuracy: " + str(accuracy))
def labeling(self,msg,index):
#Get image.
image = np.zeros((1,image_height, image_width,3))
image[0][:][:][:] = color.rgb2lab(msg) / 255.0
# Labeling.
label = y_pred.eval(session=self.session,feed_dict={input_placeholder: image, keep_prob: 1.0})
if(label == [0]):
cv2.imwrite(path_to_candidate + "cones/" + str(index) + ".jpg", msg)
return [index, 1]
else:
return [index, 0]
#------------------------------------------------------------------------
if __name__ == '__main__':
#Delete files in candidates and cones order.
deleteFolderContent(path_to_candidate + "candidates/")
deleteFolderContent(path_to_candidate + "cones/")
#Init neural net.
neural_net = NeuralNet() | ProjectARCConeDetection/cone_detection | neural_net/local_network_test.py | local_network_test.py | py | 3,822 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "rospy.init_node",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
... |
39453776358 | from functools import reduce
class Solution:
def multiply(self, num1: str, num2: str) -> str:
sumList = []
i = 0
for a in num1[::-1]:
inta = int(a)
snum = 0
tempSingleList = []
for b in num2[::-1]:
intb = int(b)
tmuli = inta*intb+snum
tsnum = tmuli//10
tgnum = tmuli%10
tempSingleList.append(tgnum)
snum = tsnum
if snum!=0:
tempSingleList.append(snum)
sumList.append(tempSingleList[::-1]+[ 0 for i in range(0,i)])
i = i+1
result = "".join(map(lambda x:str(x),reduce(lambda x,y:self.strSum(x,y),sumList))).lstrip('0')
return result if len(result)>0 else '0'
def strSum(self,num1,num2):
num1 = num1[::-1]
num2 = num2[::-1]
numLength = max(len(num1),len(num2))
y = 0
numSum = []
for index in range(0,numLength):
a = num1[index] if index<len(num1) else 0
b = num2[index] if index<len(num2) else 0
tempSum = a+b+y
y = tempSum//10
s = tempSum%10
numSum.append(s)
if y !=0:
numSum.append(y)
return numSum[::-1]
if __name__ == "__main__":
s = Solution()
print(s.multiply('7','871'))
# print(s.strSum([8,2],[2,3])) | haodongxi/leetCode | 43.py | 43.py | py | 1,429 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "functools.reduce",
"line_number": 22,
"usage_type": "call"
}
] |
44217228363 | """
@author: Miguel Taibo Martínez
Date: Nov 2021
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import gpflow
import pandas as pd
import sobol_seq
from gpflow.utilities import print_summary
from frontutils import get_pareto_undominated_by
class frontGP(object):
def __init__(self, O:int, C:int, d:int, lowerBounds: float, upperBounds: float, X = None, Y = None, noise_variance=0.01):
self.O = O
self.C = C
self.d = d
self.lowerBounds = lowerBounds
self.upperBounds = upperBounds
self.X = X
self.Y = Y
self.noise_variance = noise_variance
self.multiGPR : MultiGPR = None
def addSample(self, x, y, save=False, filename=None):
if self.X is None or self.Y is None:
self.X = np.array([x])
self.Y = np.array([y])
return
self.X = np.append(self.X, [x], axis=0)
self.Y = np.append(self.Y, [y], axis=0)
if save and filename is not None:
self.writeSample(filename, x,y)
def updateGP(self):
self.multiGPR = MultiGPR(X = self.X, Y = self.Y, noise_variance = self.noise_variance)
def optimizeKernel(self):
self.multiGPR.optimizeKernel()
## Visualization methods
def plot(self):
fig, axs = plt.subplots(nrows = self.O, ncols=self.d, figsize=(10,5))
if self.d >1:
for j in range(self.d):
grid = np.ones((10_000,self.d))
for k in range(self.d):
grid[:,k]=grid[:,k]*(self.upperBounds[k]+self.lowerBounds[k])/2
xx = np.linspace(self.lowerBounds[j], self.upperBounds[j], 10_000).reshape(10_000, 1)
grid[:,j]=xx[:,0]
mean, var = self.multiGPR.predict_y(grid)
if self.O==1:
axs[j].plot(self.X[:,j], self.Y[:,0], 'kx', mew=2)
axs[j].plot(grid[:,j], mean[:,0], 'C0', lw=2)
axs[j].fill_between(grid[:,j],
mean[:,0] - 2*np.sqrt(var[:,0]),
mean[:,0] + 2*np.sqrt(var[:,0]),
color='C0', alpha=0.2)
else:
for i in range(self.O):
axs[i, j].plot(self.X[:,j], self.Y[:,i], 'kx', mew=2)
axs[i, j].plot(grid[:,j], mean[:,i], 'C0', lw=2)
axs[i, j].fill_between(grid[:,j],
mean[:,i] - 2*np.sqrt(var[:,i]),
mean[:,i] + 2*np.sqrt(var[:,i]),
color='C0', alpha=0.2)
else:
xx = np.linspace(self.lowerBounds[0], self.upperBounds[0], 10_000).reshape(10_000, 1)
mean, var = self.multiGPR.predict_y(xx)
if self.O==1:
axs.plot(self.X, self.Y[:,0], 'kx', mew=2)
axs.plot(xx[:,0], mean[:,0], 'C0', lw=2)
axs.fill_between(xx[:,0],
mean[:,0] - 2*np.sqrt(var[:,0]),
mean[:,0] + 2*np.sqrt(var[:,0]),
color='C0', alpha=0.2)
else:
for i in range(self.O):
axs[i].plot(self.X, self.Y[:,i], 'kx', mew=2)
axs[i].plot(xx[:,0], mean[:,i], 'C0', lw=2)
axs[i].fill_between(xx[:,0],
mean[:,i] - 2*np.sqrt(var[:,i]),
mean[:,i] + 2*np.sqrt(var[:,i]),
color='C0', alpha=0.2)
return fig, axs
def plotParetos(self, state):
pareto_front = np.array(state.pareto_front)
pareto_set = np.array(state.pareto_set)
for idx, mm in enumerate(state.objective_mms):
if mm:
pareto_front[:,idx]=-pareto_front[:,idx]
best_known_pareto_front = get_pareto_undominated_by(self.Y)
best_known_pareto_set = getSetfromFront(self.X, self.Y, best_known_pareto_front)
fig1, axs1 = plt.subplots(figsize=(8,8))
if self.d>1:
axs1.plot(pareto_set[:,state.input_names.index(state.setx)],pareto_set[:,state.input_names.index(state.sety)], 'bx', markersize=3, label=r"Estimated Pareto Set")
axs1.plot(best_known_pareto_set[:,state.input_names.index(state.setx)], best_known_pareto_set[:,state.input_names.index(state.sety)], 'gx', markersize=10, label=r"Best Known Pareto Set")
axs1.set_ylabel(state.sety, fontsize=14)
axs1.set_xlabel(state.setx, fontsize=14)
else:
axs1.plot(pareto_set[:,0], [0 for _ in pareto_set[:,0]],'bx', markersize=3, label=r"Estimated Pareto Set")
axs1.plot(best_known_pareto_set[:,0], [0 for _ in best_known_pareto_set[:,0]],'gx', markersize=10, label=r"Best Known Pareto Set")
axs1.set_xlabel(state.input_names[0], fontsize=14)
axs1.set_yticks(ticks = [])
axs1.legend(fontsize=14)
fig2, axs2 = plt.subplots(figsize=(8,8))
axs2.plot(pareto_front[:,state.objective_names.index(state.frontx)], pareto_front[:,state.objective_names.index(state.fronty)], 'xb', markersize=3, label=r"Estimated Pareto Front")
axs2.plot(best_known_pareto_front[:,state.objective_names.index(state.frontx)],best_known_pareto_front[:,state.objective_names.index(state.fronty)], 'xg', markersize=10, label=r"Best Known Pareto Front")
axs2.set_xlabel(state.frontx, fontsize=14)
axs2.set_ylabel(state.fronty, fontsize=14)
axs2.legend(fontsize=14)
return fig1,fig2
def plotMetrics(self, state):
fig, axs = plt.subplots(figsize=(8,8))
axs.plot(state.ns, state.agd, label=r"$AGD_1(\mathcal{Y}_E^*, \mathcal{Y}_{BK}^*)$")
axs.plot(state.ns, state.adh, label=r"$d_{ADH}(\mathcal{Y}_E^*, \mathcal{Y}_{BK}^*)$")
axs.set_ylabel("Log Metrics",fontsize=14)
axs.set_xlabel("Algorithm Iteration",fontsize=14)
axs.legend(fontsize=14)
return fig
def dlParetos(self, state):
pareto_front = np.array(state.pareto_front)
pareto_set = np.array(state.pareto_set)
df =pd.DataFrame(data = np.append(pareto_set,pareto_front,axis=1),columns=state.input_names+state.objective_names)
return df.to_csv()
def dlParetoBestKnown(self, state):
pareto_front = get_pareto_undominated_by(self.Y)
pareto_set = getSetfromFront(self.X, self.Y, pareto_front)
df =pd.DataFrame(data = np.append(pareto_set,pareto_front,axis=1),columns=state.input_names+state.objective_names)
return df.to_csv()
class MultiGPR(object):
def __init__(self, X = None, Y = None, noise_variance=0.01):
self.GPRs = [
gpflow.models.GPR(
[X, Y[:,i:i+1]],
kernel = gpflow.kernels.SquaredExponential(),
mean_function = gpflow.mean_functions.Constant(),
noise_variance = noise_variance
)
for i in range(Y.shape[-1])
]
self.opt = gpflow.optimizers.Scipy()
def optimizeKernel(self):
for GPR in self.GPRs:
self.opt.minimize(
GPR.training_loss,
variables=GPR.trainable_variables)
def predict_y(self, xx):
mean_vars = tf.concat([GPR.predict_y(xx) for GPR in self.GPRs], axis=-1)
mean = mean_vars[0]
var = mean_vars[1]
return mean, var
def predict_f_samples(self, xx, n_samples):
presamples = [GPR.predict_f_samples(xx, n_samples) for GPR in self.GPRs]
samples = tf.concat(presamples[:], axis=-1)
return samples
def printGPRs(self):
for GPR in self.GPRs:
print_summary(GPR)
def getSetfromFront(xvalues, yvalues, front):
res = None
for y in front:
x = xvalues[np.where(np.all(yvalues==y,axis=1))[0]]
if res is None:
res = np.array(x)
else:
res = np.append(res,x, axis=0)
return res
| MiguelTaibo/DashboardMOO | streamlit-front/frontGP.py | frontGP.py | py | 8,161 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number"... |
4224070148 | import os
import argparse
import gzip
import sys
import time
import numpy as np
from multiprocessing import Pool
from contextlib import closing
import csv
import tensorflow as tf
from six.moves import urllib
bin_freq = 23
spect_width = bin_freq # Don't add one pixel of zeros on either side of the image
window_size = 100
dim_Y = 11
MEAN_SPEC = 10.786225977
# For MNIST
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
# MNIST embeddings directory
dataMnistdir = '../mnistAct/'
MNIST_DIM = 512
WORK_DIRECTORY = 'data'
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
# mnist_data = {digit: np.loadtxt(dataMnistdir + str(digit) + ".txt") for digit in range(10)}
label_map = {'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,'o':0,'z':0}
def maybe_download(filename):
"""Download the data from Yann's website, unless it's already here."""
if not tf.gfile.Exists(WORK_DIRECTORY):
tf.gfile.MakeDirs(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not tf.gfile.Exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
def extract_data(filename, num_images):
"""Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
return data
def extract_labels(filename, num_images):
"""Extract the labels into a vector of int64 label IDs."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels
# Organizing MNIST data into mnist images and labels
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
# Extract it into numpy arrays.
mnist_values = extract_data(test_data_filename, 10000)
mnist_labels = extract_labels(test_labels_filename, 10000)
mnist_data = {i:[] for i in range(10)}
for i in range(len(mnist_labels)):
lab = mnist_labels[i]
val = mnist_values[i]
mnist_data[lab].append(val)
print(mnist_data[7][0].shape)
def load_from_file(f):
'''Given a file, returns a list of the string values in that value'''
data = []
for line in f:
vector = []
line = line.replace("[", "")
line = line.replace("]", "")
line_chars = line.split()
for char in line_chars:
# vector.append(float(char)-MEAN_SPEC)
vector.append(float(char))
try:
assert len(vector) == bin_freq
data.append(vector)
except AssertionError:
if len(vector) == 0:
pass
else:
# print len(vector)
raise AssertionError
# Now we have a list of length-23 vectors which we need to trim/pad to
# window_size
if len(data)>window_size:
#cut excess rows
cut = 1.*(len(data) - window_size)
data = data[int(np.floor(cut/2)):-int(np.ceil(cut/2))]
else:
# pad data with excess rows of zeros about center
cut = 1.*(window_size - len(data))
data = [[0]*bin_freq]*int(np.floor(cut/2)) + data + [[0]*bin_freq]*int(np.ceil(cut/2))
#Convert data to a numpy array and invert it
data = np.flipud(np.array(data,dtype=np.float32))
# #Pad one pixel of zeros on top and bottom of array
# zero = np.zeros((bin_freq,))
# data[0] = zero
# data[-1] = zero
return data.flatten().tolist()
def ld(rootdir,target):
with open(target, 'wb') as datafile:
writer = csv.writer(datafile)
for subdir, dirs, files in os.walk(rootdir):
for filename in files:
y = filename[3]
f = open(os.path.join(subdir, filename))
row = load_from_file(f)
f.close()
writer.writerow([y] + row)
def get_mnist_embedding(label):
digit = label[1]
data = mnist_data[digit]
i = np.random.randint(0,len(data))
return label[0],data[i]
def get_label_map(s):
return label_map[s]
def get_mismatch_mnist_embedding(label):
i = label[1]
out = None
for j in range(10):
if i != j:
_, data = get_mnist_embedding((label[0],j))
if out is None:
out = data
else:
out = np.vstack([out, data])
return label[0],out.flatten()
def generate_mnist_set(labels):
matches = []
mismatches = []
labels = [(i, label_map[labels[i].decode('utf-8')]) for i in range(len(labels))]
with closing(Pool()) as pool:
matches = pool.map(get_mnist_embedding, labels)
mismatches = pool.map(get_mismatch_mnist_embedding, labels)
matches = np.array([match[1] for match in sorted(matches)])
mismatches = np.array([mismatch[1] for mismatch in sorted(mismatches)])
return matches, mismatches.reshape((len(labels),9,-1))
| KaranKash/DigitSpeak | untrained/load_data.py | load_data.py | py | 5,542 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "tensorflow.gfile.Exists",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tensorflow.gfile",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.gfile.MakeDirs",
"line_number": 38,
"usage_type": "call"
},
{
"api_name"... |
3826160034 | """empty message
Revision ID: 14c462e99a28
Revises: 630e94f464d4
Create Date: 2021-09-16 16:48:21.728550
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '14c462e99a28'
down_revision = '630e94f464d4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('projects', sa.Column('link', sa.Text(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('projects', 'link')
# ### end Alembic commands ###
| knedgen/flaskwebsiteproject | migrations/versions/14c462e99a28_.py | 14c462e99a28_.py | py | 649 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Text",
... |
19102436100 | # _*_ coding: utf-8 _*_
from pycse import regress
import numpy as np
time=np.array([0.0,50.0,100.0,150.0,200.0,250.0,300.0])
Ca=np.array([50.0,38.0,30.6,25.6,22.2,19.5,17.4])*1e-3
T=np.column_stack([time**0,time,time**2,time**3,time**4])
alpha=0.05
p,pint,se=regress(T,Ca,alpha)
print(pint)
# new one
import numpy as np
from scipy.stats.distributions import t
time=np.array([0.0,50.0,100.0,150.0,200.0,250.0,300.0])
Ca=np.array([50.0,38.0,30.6,25.6,22.2,19.5,17.4])*1e-3
T=np.column_stack([time**0,time,time**2,time**3,time**4])
p,res,rank,s=np.linalg.lstsq(T,Ca)
# the parameter are now in p
# compute the confidence intervals
n=len(Ca)
k=len(p)
sigma2=np.sum((Ca-np.dot(T,p))**2)/(n-k) # RMSE
C=sigma2*np.linalg.inv(np.dot(T.T,T)) # covariance matrix
se=np.sqrt(np.diag(C)) # standard error
alhpa=0.05 #100*(1-alpha) confidence level
sT=t.ppf(1.0-alpha/2.0,n-k) # student T multiplier
CI=sT*se
for beta,c1 in zip(p,CI):
print('{2:1.2e} {0:1.4e} {1:1.4e}'.format(beta-c1,beta+c1,beta))
SS_tot=np.sum((Ca-np.mean(Ca))**2)
SS_err=np.sum((np.dot(T,p)-Ca)**2)
Rsq=1-SS_err/SS_tot
print('R^2 = {0}'.format(Rsq))
# plot fit
import matplotlib.pyplot as plt
plt.plot(time,Ca,'bo',label='Raw data')
plt.plot(time,np.dot(T,p),'r-',label='fit')
plt.xlabel('Time')
plt.ylabel('Ca (mol/L)')
plt.legend(loc='best')
plt.savefig('58.jpg',dpi=300)
plt.show()
print('plot done')
print('I will fight for myself') | ruanyangry/pycse-data_analysis-code | PYSCE-code/58.py | 58.py | py | 1,483 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.column_stack",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pycse.regress",
"line_num... |
37084706650 | import asyncio
import aiohttp
from aiohttp import ClientSession
from utils import page_status
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from fake_useragent import UserAgent
from selenium.webdriver.common.by import By
class PriseParser:
ATB_regular_divclass = '[class="product-price__top"]'
EKO_regular_divclass = '[class="jsx-2be52a4b5bdfcc8a Price__value_title"]'
VARUS_special_divclass = '[class="sf-price__special"]'
SILPO_regular_divclass = '[class="current-integer"]'
def __init__(self):
self.options = webdriver.ChromeOptions()
self.useragent = UserAgent()
self.options.add_argument(f'user-agent={self.useragent.random}')
self.options.add_argument('--disable-blink-features=AutomationControlled')
self.options.add_argument('--headless')
self.serv = Service('/home/andrey/Python Projects/OOP-and-other/selenium_python/chrome_driver/chromedriver')
self.driver=webdriver.Chrome(options=self.options,service=self.serv)
async def get_page(self,url):
try:
print('Открываем страницу...')
page=self.driver.get(url)
time.sleep(5)
print('Страница закрылась!')
# print('Getting item\'s price....')
# price=self.driver.find_element(By.CSS_SELECTOR,div_class)
# print(price.text)
except Exception as ex:
print(ex)
finally:
self.driver.close()
self.driver.quit()
async def all_at_the_same_time(self):
start=time.time()
urls = [
'https://www.atbmarket.com/product/kartopla-1-gat',
'https://eko.zakaz.ua/uk/products/ovochi-kartoplia--ekomarket00000000667970/',
'https://varus.ua/kartoplya-1-gatunok-vag',
'https://shop.silpo.ua/product/kartoplia-bila-531296'
]
task_atb=asyncio.create_task(self.get_page(urls[0]))
task_eko = asyncio.create_task(self.get_page(urls[1]))
task_varus = asyncio.create_task(self.get_page(urls[2]))
task_silpo = asyncio.create_task(self.get_page(urls[3]))
await task_atb
await task_eko
await task_varus
await task_silpo
end=time.time()
print(f'Выполнение кода заняло: {end-start:.4f} c')
res=PriseParser()
asyncio.run(res.all_at_the_same_time()) | Sautenko-Andrey/OOP-and-other | selenium_python/chrome_driver/simple_try.py | simple_try.py | py | 2,459 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "fake_useragent.UserAgent",
"line_number": 20,
"usage_type": "call"
},
{
"api_... |
33227873422 | import urllib.request
import sys
import time
from os import path
from os import popen
import argparse
def arguments():
"""Parse the arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('URL', help="URL of the file",
default=None, type=str)
parser.add_argument('des', help="The name of the file\
to be saved with.", default=None, nargs='?')
args = parser.parse_args()
return args
def get_terminal_length():
"""Return the length of the terminal."""
rows, cols = popen('stty size', 'r').read().split()
return int(cols)
def get_name(URL):
"""Try to get the name of the file from the URL."""
name = 'temp'
temp_url = URL
split_url = temp_url.split('/')
for name in split_url[::-1]:
if name != '':
break
return name
def format_size(size):
    """Scale *size* (bytes) to the largest fitting unit.

    Returns a (value, unit) tuple; sizes of 1024 or less stay in bytes.
    """
    thresholds = (
        (1024 * 1024 * 1024, "GB's"),
        (1024 * 1024, "MB's"),
        (1024, "kb's"),
    )
    for factor, unit in thresholds:
        if size > factor:
            return (size / factor, unit)
    return (size, 'bytes')
def download(url, des=None):
    """Download *url* to *des*, drawing a single-line progress bar.

    Args:
        url: URL of the file to fetch.
        des: Destination path. If it is an existing directory, the file name
            is derived from the URL; if None, the name is derived from the
            URL and saved in the current directory.

    Returns:
        True on success, False on any error.
    """
    try:
        # Resolve the destination file name.
        if des is not None:
            if path.isdir(des):
                des = path.join(des, get_name(url))
        else:
            # BUG FIX: was get_name(URL) — NameError whenever des was omitted.
            des = get_name(url)
        # Download files with a progressbar showing the percentage
        try:
            u = urllib.request.urlopen(url)
        except Exception as e:
            print("ERROR: {}".format(e))
            return False
        f = open(des, 'wb')
        meta = u.info()
        file_size = None
        try:
            file_size = int(meta["Content-Length"])
            formatted_file_size, dw_unit = format_size(file_size)
            print("Size: {} {}".format(round(formatted_file_size), dw_unit))
            print("Saving as: {}".format(des))
        except TypeError:
            # Content-Length header absent: total size (and ETA) unknown.
            pass
        file_size_dl = 0
        block_sz = 8192
        beg_time = time.time()
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break
            file_size_dl += len(buffer)
            f.write(buffer)
            # Values that need a known total size default to ''.
            speed = ''
            time_left = ''
            time_unit = ''
            percent = ''
            if file_size is not None:
                # Guard against a zero elapsed time on the very first chunk.
                elapsed = (time.time() - beg_time) or 1e-9
                speed = (file_size_dl / 1024) / elapsed
                # Calculate time left
                time_left = round(((file_size - file_size_dl) / 1024) / speed)
                time_unit = 's'
                # Convert to min or hours as req
                if time_left > 3600:
                    time_left = round(time_left / 3600)
                    time_unit = 'h'
                elif time_left > 60:
                    time_left = round(time_left / 60)
                    time_unit = 'm'
                percent = file_size_dl * 100 / file_size
            # Amount downloaded so far, scaled for display.
            file_size_to_disp, dw_unit = format_size(file_size_dl)
            basename = path.basename(des)
            # Pad (or truncate the name) so the status line fits the terminal.
            length = get_terminal_length()
            stuff_len = len(basename) + 13 + 17 + 7 + 26 + 3
            space = 0
            if stuff_len < length:
                space = length - stuff_len
            elif stuff_len > length:
                basename = basename[:(length - stuff_len) - 2] + '..'
            if file_size is not None:
                status = r"%s %s %0.2f %s |%d kbps| ETA: %s %s |%-20s| |%3.2f%%|" % (basename, space * " ", file_size_to_disp, dw_unit, speed, time_left, time_unit, "-" * int(percent / 5), percent)
            else:
                status = r"%s %s %0.2f %s" % (basename, space * " ", file_size_to_disp, dw_unit)
            sys.stdout.write('\r')
            sys.stdout.write(status)
            sys.stdout.flush()
        f.close()
        print()
        return True
    except Exception as e:
        print("ERROR: {}".format(e))
        return False
if __name__ == "__main__":
    # CLI entry point: parse URL (required) and destination (optional).
    args = arguments()
    download(args.URL, args.des)
| TrendingTechnology/QuickWall | QuickWall/download.py | download.py | py | 4,652 | python | en | code | null | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_nu... |
37000566143 | from itertools import product
from messenger import *
def _write_matrix(builder, matrix):
# Write as column major
for col, row in product(range(4), range(4)):
# Float here since ShuffleLog stores matrices as float
builder.add_float(matrix[row][col])
class ShuffleLogAPI:
    """Messenger-based bridge between this tag tracker and ShuffleLog."""
    # Wire message names: incoming environment query / outgoing reply.
    _MSG_QUERY_ENVIRONMENT = "TagTracker:QueryEnvironment"
    _MSG_ENVIRONMENT = "TagTracker:Environment"
    def __init__(self, conn_params, tag_infos, camera_infos):
        """Connect to the Messenger server and register the query handler.

        conn_params: dict with 'host', 'port', 'name' and 'mute_errors'.
        tag_infos: list of tag dicts read in _on_query_environment
            ('size', 'id', 'transform').
        camera_infos: list of camera dicts ('name', 'port', 'robot_pose').
        """
        host = conn_params['host']
        port = conn_params['port']
        name = conn_params['name']
        mute_errors = conn_params['mute_errors']
        self.msg = MessengerClient(host, port, name, mute_errors=mute_errors)
        # Reply with the full environment whenever ShuffleLog asks for it.
        self.msg.add_handler(ShuffleLogAPI._MSG_QUERY_ENVIRONMENT, lambda t, r: self._on_query_environment(t, r))
        self.tag_infos = tag_infos
        self.camera_infos = camera_infos
    def read(self):
        """Process any pending incoming messages."""
        self.msg.read_messages()
    def shutdown(self):
        """Disconnect from the Messenger server."""
        self.msg.disconnect()
    # This is temporary
    def publish_detection_data(self, detections):
        """Send one frame of detections: count, then per-detection pose matrix,
        camera robot-position matrix, and tag id."""
        builder = self.msg.prepare('TagTracker:TestData')
        builder.add_int(len(detections))
        for detect in detections:
            _write_matrix(builder, detect['pose'])
            _write_matrix(builder, detect['camera'].robot_position)
            builder.add_int(detect['tag_id'])
        builder.send()
    def publish_test_matrices(self, matrices):
        """Send a count-prefixed list of 4x4 matrices (debug payload)."""
        builder = self.msg.prepare('TagTracker:TestMtx')
        builder.add_int(len(matrices))
        for matrix in matrices:
            _write_matrix(builder, matrix)
        builder.send()
    def _on_query_environment(self, type, reader):
        """Handler: reply with every known tag and camera.

        Wire order matters: tag count, then (size, id, transform) per tag;
        camera count, then (name, port, robot_pose) per camera.
        """
        print('[debug] sending environment data to ShuffleLog')
        builder = self.msg.prepare(ShuffleLogAPI._MSG_ENVIRONMENT)
        builder.add_int(len(self.tag_infos))
        for tag in self.tag_infos:
            builder.add_double(tag['size'])
            builder.add_int(tag['id'])
            _write_matrix(builder, tag['transform'])
        builder.add_int(len(self.camera_infos))
        for camera in self.camera_infos:
            builder.add_string(camera['name'])
            builder.add_int(camera['port'])
            _write_matrix(builder, camera['robot_pose'])
        builder.send()
| recordrobotics/Jetson2023 | borrowed/TagTracker-master/src/shufflelog_api.py | shufflelog_api.py | py | 2,316 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.product",
"line_number": 6,
"usage_type": "call"
}
] |
26164662406 | import lpips
import numpy
import torch
import json
from skimage.metrics import structural_similarity as ssim
class ImageMetrics:
@staticmethod
def __l2__metric__tensor(first_image: torch.Tensor, second_image: torch.Tensor, range=255.):
return ImageMetrics.__l2_metric__numpy(first_image.numpy(), second_image.numpy(), range)
@staticmethod
def __l2_metric__numpy(first_image: numpy.ndarray, second_image: numpy.ndarray, range=255.):
return lpips.l2(first_image, second_image, range)
@staticmethod
def l2_metric(first_image, second_image, range=255.):
if type(first_image) != type(second_image):
raise Exception('Images are not of the same type')
if type(first_image) == numpy.ndarray:
return ImageMetrics.__l2_metric__numpy(first_image, second_image, range)
if type(first_image) == torch.Tensor:
return ImageMetrics.__l2__metric__tensor(first_image, second_image, range)
raise Exception('Unsupported image type')
@staticmethod
def __psnr__metric__tensor(first_image: torch.Tensor, second_image: torch.Tensor, peak=255.):
return ImageMetrics.__psnr_metric__numpy(first_image.numpy(), second_image.numpy(), peak)
@staticmethod
def __psnr_metric__numpy(first_image: numpy.ndarray, second_image: numpy.ndarray, peak=255.):
return lpips.psnr(first_image, second_image, peak)
@staticmethod
def psnr_metric(first_image, second_image, peak=1.):
if type(first_image) != type(second_image):
raise Exception('Images are not of the same type')
if type(first_image) == numpy.ndarray:
return ImageMetrics.__psnr_metric__numpy(first_image, second_image, peak)
if type(first_image) == torch.Tensor:
return ImageMetrics.__psnr__metric__tensor(first_image, second_image, peak)
raise Exception('Unsupported image type')
@staticmethod
def __ssim__metric__tensor(first_image: torch.Tensor, second_image: torch.Tensor, range=255.):
return ImageMetrics.__ssim_metric__numpy(first_image.numpy(), second_image.numpy(), range)
@staticmethod
def __ssim_metric__numpy(first_image: numpy.ndarray, second_image: numpy.ndarray, range=255.):
return ssim(first_image, second_image, channel_axis=2)
@staticmethod
def ssim_metric(first_image, second_image, range=255.):
if type(first_image) != type(second_image):
raise Exception('Images are not of the same type')
if type(first_image) == numpy.ndarray:
return ImageMetrics.__ssim_metric__numpy(first_image, second_image, range)
if type(first_image) == torch.Tensor:
return ImageMetrics.__ssim__metric__tensor(first_image, second_image, range)
raise Exception('Unsupported image type')
@staticmethod
def __lpips__metric__tensor(model: lpips.LPIPS, first_image: torch.Tensor, second_image: torch.Tensor):
return model(first_image, second_image).detach().numpy().flatten()[0]
@staticmethod
def lpips_metric(first_image, second_image, model):
if type(first_image) != type(second_image):
raise Exception('Images are not of the same type')
if type(first_image) == torch.Tensor:
return ImageMetrics.__lpips__metric__tensor(model, first_image, second_image)
raise Exception('Unsupported image type')
@staticmethod
def metric_export(first_image, second_image, interpolation, lpips_model):
dictionary = {
"name": interpolation,
"psnr": str(ImageMetrics.psnr_metric(first_image, second_image, peak=255.)),
"l2": str(ImageMetrics.l2_metric(first_image, second_image)),
"ssim": str(ImageMetrics.ssim_metric(first_image, second_image)),
"lpips": str(ImageMetrics.lpips_metric(torch.tensor(first_image).permute(2, 0, 1),
torch.tensor(second_image).permute(2, 0, 1), lpips_model))
}
return dictionary
@staticmethod
def metric_export_all(metrics, file_name):
json_object = json.dumps(metrics, indent=4)
with open(f'{file_name}_metrics.json', "w") as outfile:
outfile.write(json_object) | dinogrgic1/real-time-video-upscale-master-thesis | models/ImageMetrics.py | ImageMetrics.py | py | 4,275 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.Tensor",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "lpips.l2",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"lin... |
3801917693 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from collections import namedtuple
import inspect
ControllableAttributeConfig = namedtuple("ControllableAttributeConfig", "driven_attribute ignored_attributes facemodel_param_name facemodel_param_value facemodel_param_value_other")
class ControllabilityMetricConfigs:
    """Catalog of ControllableAttributeConfig constants used by the
    controllability metric. Use all_configs() to enumerate them."""
    @staticmethod
    def all_configs():
        """Return all (name, config) pairs defined on this class.

        getmembers with `not isroutine` keeps only data attributes; the
        filter then drops dunders, leaving the configs below.
        """
        all_attributes = inspect.getmembers(ControllabilityMetricConfigs, lambda a: not inspect.isroutine(a))
        configs = [x for x in all_attributes if not (x[0].startswith('__') and x[0].endswith('__'))]
        return configs
    # Hair-color configs: drive one color attribute, ignore the competing ones.
    black_hair_config = ControllableAttributeConfig(
        driven_attribute = "Black_Hair",
        ignored_attributes = ["Blond_Hair", "Brown_Hair", "Gray_Hair"],
        facemodel_param_name = "head_hair_color",
        facemodel_param_value = (0, 1, 0),
        facemodel_param_value_other = (0, 0.1, 0.1)
    )
    blond_hair_config = ControllableAttributeConfig(
        driven_attribute = "Blond_Hair",
        ignored_attributes = ["Black_Hair", "Brown_Hair", "Gray_Hair"],
        facemodel_param_name = "head_hair_color",
        facemodel_param_value = (0, 0.1, 0.1),
        facemodel_param_value_other = (0, 1, 0)
    )
    brown_hair_config = ControllableAttributeConfig(
        driven_attribute = "Brown_Hair",
        ignored_attributes = ["Blond_Hair", "Black_Hair", "Gray_Hair"],
        facemodel_param_name = "head_hair_color",
        facemodel_param_value = (0, 0.6, 0.5),
        facemodel_param_value_other = (0, 0.1, 0.1)
    )
    gray_hair_config = ControllableAttributeConfig(
        driven_attribute = "Gray_Hair",
        ignored_attributes = ["Blond_Hair", "Brown_Hair", "Black_Hair"],
        facemodel_param_name = "head_hair_color",
        facemodel_param_value = (0.7, 0.7, 0),
        facemodel_param_value_other = (0.0, 0.7, 0)
    )
    # Expression configs: driven via blendshape weight dictionaries.
    mouth_open_config = ControllableAttributeConfig(
        driven_attribute = "Mouth_Slightly_Open",
        ignored_attributes = ["Narrow_Eyes", "Smiling"],
        facemodel_param_name = "blendshape_values",
        facemodel_param_value = {"jaw_opening": 0.2},
        facemodel_param_value_other = {"jaw_opening": -0.05}
    )
    smile_config = ControllableAttributeConfig(
        driven_attribute = "Smiling",
        ignored_attributes = ["Narrow_Eyes", "Mouth_Slightly_Open"],
        facemodel_param_name = "blendshape_values",
        facemodel_param_value = {"mouthSmileLeft": 1.0, "mouthSmileRight": 1.0},
        facemodel_param_value_other = {"mouthFrownLeft": 1.0, "mouthFrownRight": 1.0}
    )
    squint_config = ControllableAttributeConfig(
        driven_attribute = "Narrow_Eyes",
        ignored_attributes = ["Smiling", "Mouth_Slightly_Open"],
        facemodel_param_name = "blendshape_values",
        facemodel_param_value = {"EyeBLinkLeft": 0.7, "EyeBLinkRight": 0.7},
        facemodel_param_value_other = {"EyeWideLeft": 1.0, "EyeWideRight": 1.0}
    )
    # Facial-hair config: driven via a 9-dim beard style embedding.
    mustache_config = ControllableAttributeConfig(
        driven_attribute = "Mustache",
        ignored_attributes = ["No_Beard", "Goatee", "Sideburns"],
        facemodel_param_name = "beard_style_embedding",
        # "beard_Wavy_f"
        facemodel_param_value = [
            0.8493434358437133,
            3.087059026013613,
            0.46986106722598997,
            -1.3821969829871341,
            -0.33103870587106415,
            -0.03649891754263812,
            0.049692808518749985,
            0.10727920600451613,
            -0.32365312847867017
        ],
        # "beard_none"
        facemodel_param_value_other = [
            -1.1549744366277825,
            -0.15234213575276162,
            -0.3302730721199086,
            -0.47053537289207514,
            -0.158377484760156,
            0.3357074575072504,
            -0.44934623275285585,
            0.013085621430078971,
            -0.0021044358910661896
        ]
    )
| microsoft/ConfigNet | confignet/metrics/controllability_metric_configs.py | controllability_metric_configs.py | py | 4,045 | python | en | code | 104 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "inspect.getmembers",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "inspect.isroutine",
"line_number": 11,
"usage_type": "call"
}
] |
27596382692 | import matplotlib.pyplot as plot
import gradientDescent as gd
import loadData as data
import numpy as np
def plotConvergence(J_history):
    """Plot the cost J(theta) per iteration as blue dots on a new figure.

    J_history -- sequence of cost values, one per gradient-descent iteration.
    """
    plot.figure()
    plot.plot(range(len(J_history)), J_history, 'bo')
    plot.title(r'Convergence of J($\theta$)')
    plot.xlabel('Number of iterations')
    plot.ylabel(r'J($\theta$)')
def main():
    """Run gradient descent on the example data and plot J's convergence."""
    initial_theta = np.zeros((2, 1))
    theta, J_history = gd.gradientDescent(data.X, data.y, initial_theta, 0.01, 1500)
    plotConvergence(J_history)
    plot.show()
# Run main() only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| mirfanmcs/Machine-Learning | Supervised Learning/Linear Regression/Linear Regression with One Variable/Python/plotConvergence.py | plotConvergence.py | py | 578 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplo... |
75143366504 | import cv2
from model import FacialKeypointModel
import numpy as np
import pandas as pd
from matplotlib.patches import Circle
# Haar cascade for face detection; the XML file must be resolvable from the
# working directory.
facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# Keypoint regressor: architecture from JSON, weights from HDF5.
model = FacialKeypointModel("KeyPointDetector.json", "weights.hdf5")
font = cv2.FONT_HERSHEY_SIMPLEX  # NOTE(review): defined but unused in this block
class VideoCamera(object):
    """Wraps an OpenCV capture source and overlays the predicted facial
    keypoints on every detected face of each frame."""

    def __init__(self, video_source='/Users/apple/Workspace/Deeplearning/FacialKeypoints/videos/presidential_debate.mp4'):
        """Open the capture source.

        video_source: path to a video file, or an integer camera index
            (e.g. 0 for the default webcam). Defaults to the previously
            hard-coded demo video path for backward compatibility.
        """
        self.video = cv2.VideoCapture(video_source)
        facialpoints_df = pd.read_csv('KeyFacialPoints.csv')
        # Keypoint column names: every column except the trailing image column.
        self.columns = facialpoints_df.columns[:-1]

    def __del__(self):
        self.video.release()

    def get_frame(self):
        """Return the next frame as JPEG bytes with boxes and keypoints drawn,
        or None if no frame could be read (end of stream / camera error)."""
        ret, fr = self.video.read()
        if not ret or fr is None:
            # Previously an unguarded crash in cvtColor on a failed read.
            return None
        gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        faces = facec.detectMultiScale(gray_fr, 1.3, 5)
        for (x, y, w, h) in faces:
            fc = gray_fr[y:y+h, x:x+w]
            # Model expects a 96x96 grayscale crop with batch/channel axes.
            roi = cv2.resize(fc, (96, 96))
            df_predict = pd.DataFrame(model.predict_keypoints(roi[np.newaxis, :, :, np.newaxis]), columns=self.columns)
            cv2.rectangle(fr, (x, y), (x+w, y+h), (255, 0, 0), 2)
            # Scale factors from 96x96 model space back to the face crop.
            xScale = fc.shape[0]/96
            yScale = fc.shape[1]/96
            # Keypoints arrive as (x, y) pairs: even index = x, odd = y.
            for j in range(1, 31, 2):
                fr = cv2.drawMarker(fr,
                              (int(x+df_predict.loc[0][j-1] * xScale), int(y+df_predict.loc[0][j] * yScale)),
                              (0, 0, 255),
                              markerType=cv2.MARKER_CROSS,
                              markerSize=10,
                              thickness=2,
                              line_type=cv2.LINE_AA)
        _, jpeg = cv2.imencode('.jpg', fr)
        return jpeg.tobytes()
| kartikprabhu20/FacialKeypoints | .ipynb_checkpoints/camera-checkpoint.py | camera-checkpoint.py | py | 1,990 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.CascadeClassifier",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "model.FacialKeypointModel",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_na... |
39114860473 | """
The basic framework of the Iterative Closest Points Matching is provided by Albert-Ludwigs-Universität Freiburg,
the course Introduction to Mobile Robotics (engl.) - Autonomous Mobile Systems
Lecturer: Prof. Dr. Wolfram Burgard, Dr. Michael Tangermann, Dr. Daniel Büscher, Lukas Luft
Co-organizers: Marina Kollmitz, Iman Nematollahi
"""
import numpy as np
import math
import matplotlib.pyplot as plt
import time
def plot_icp(X, P, P0, i, rmse):
    """Draw one ICP iteration: reference X (black), current P (red) and
    initial P0 (blue), plus segments joining corresponding columns of X and P.

    i -- iteration index shown in the title
    rmse -- current alignment error shown in the title
    """
    plt.cla()
    plt.scatter(X[0,:], X[1,:], c='k', marker='o', s=50, lw=0)
    plt.scatter(P[0,:], P[1,:], c='r', marker='o', s=50, lw=0)
    plt.scatter(P0[0,:], P0[1,:], c='b', marker='o', s=50, lw=0)
    plt.legend(('X', 'P', 'P0'), loc='lower left')
    # One line segment per point pair (column-wise correspondence).
    plt.plot(np.vstack((X[0,:], P[0,:])), np.vstack((X[1,:], P[1,:])) ,c='k')
    plt.title("Iteration: " + str(i) + " RMSE: " + str(rmse))
    plt.axis([-10, 15, -10, 15])
    plt.gca().set_aspect('equal', adjustable='box')
    plt.draw()
    plt.pause(0.5)  # brief pause so the window refreshes between iterations
    return
def generate_data():
    """Build the ICP demo data sets.

    Returns:
        X  -- the noise-free L-shaped reference set, shape (2, 16)
        P1 -- noisy, translated copy rotated by 10 degrees
        P2 -- noisy, translated copy rotated by 110 degrees
        P3 -- P1 with its columns shuffled (unknown correspondences)
        P4 -- P2 with its columns shuffled (unknown correspondences)
    """
    X = np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9, 9],
                  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,-1,-2,-3,-4,-5]])
    # Perturb with Gaussian noise, then translate by (1, 1).
    P = X + 0.05 * np.random.normal(0, 1, X.shape)
    P[0, :] += 1
    P[1, :] += 1

    def rotation(theta):
        # 2x2 counter-clockwise rotation matrix for angle theta (radians).
        c, s = math.cos(theta), math.sin(theta)
        return np.array([[c, -s],
                         [s, c]])

    theta1 = (10.0 / 360) * 2 * np.pi
    theta2 = (110.0 / 360) * 2 * np.pi
    # Sets with known correspondences.
    P1 = rotation(theta1) @ P
    P2 = rotation(theta2) @ P
    # Sets with unknown correspondences (columns permuted).
    P3 = np.random.permutation(P1.T).T
    P4 = np.random.permutation(P2.T).T
    return X, P1, P2, P3, P4
def closest_point_matching(X, P):
    """
    Performs closest point matching of two point sets.

    Greedy assignment: repeatedly pair the globally closest (X, P) columns,
    using each column of X and of P at most once.

    Arguments:
    X -- reference point set, shape (2, n)
    P -- point set to be matched with the reference, shape (2, n)

    Output:
    P_matched -- reordered P, so that the elements in P match the elements in X
    """
    num_pts = np.shape(X)[1]
    P_matched = np.empty(np.shape(P))
    # Pairwise Euclidean distances: dist_mat[i, j] = ||X[:, i] - P[:, j]||.
    dist_mat = np.empty((num_pts, num_pts))
    for i in range(num_pts):
        for j in range(num_pts):
            dist_mat[i, j] = np.linalg.norm(X[:, i] - P[:, j])
    # Take the global minimum and invalidate its entire row and column in one
    # step. The original masked one cell per pass and re-scanned, costing an
    # extra O(n^2) scan per rejected cell; the result is identical.
    for _ in range(num_pts):
        i, j = np.unravel_index(np.argmin(dist_mat, axis=None), dist_mat.shape)
        P_matched[:, i] = P[:, j]
        dist_mat[i, :] = np.inf
        dist_mat[:, j] = np.inf
    return P_matched
def icp(X, P, do_matching):
    """Iterative Closest Point: align point set P onto reference X.

    X -- reference point set, shape (2, n)
    P -- point set to align, shape (2, n)
    do_matching -- if True, re-associate points each iteration
        (needed when correspondences are unknown)

    Runs a fixed 10 iterations, printing and plotting the RMSE each time.
    """
    P0 = P
    for i in range(10):
        # calculate RMSE over corresponding columns
        rmse = 0
        for j in range(P.shape[1]):
            rmse += math.pow(P[0,j] - X[0,j], 2) + math.pow(P[1,j] - X[1,j], 2)
        rmse = math.sqrt(rmse / P.shape[1])
        # print and plot
        print("Iteration:", i, " RMSE:", rmse)
        plot_icp(X, P, P0, i, rmse)
        # data association
        if do_matching:
            P = closest_point_matching(X, P)
        # substract center of mass (column vectors of per-set means)
        mx = np.transpose([np.mean(X, 1)])
        mp = np.transpose([np.mean(P, 1)])
        X_prime = X - mx
        P_prime = P - mp
        # singular value decomposition of the cross-covariance matrix
        W = np.dot(X_prime, P_prime.T)
        U, _, V = np.linalg.svd(W)
        # rotation from the SVD factors and the matching translation
        # (least-squares alignment a la Arun/Kabsch)
        R = np.dot(U, V.T)
        t = mx - np.dot(R, mp)
        # apply transformation
        P = np.dot(R, P) + t
    return
def main():
    """Demo: run ICP with unknown correspondences on the 110-degree set P4."""
    X, P1, P2, P3, P4 = generate_data()
    # Other scenarios (known correspondences / smaller rotation):
    # icp(X, P1, False)
    # icp(X, P2, False)
    # icp(X, P3, True)
    icp(X, P4, True)
    plt.waitforbuttonpress()
# Run the demo only when executed as a script.
if __name__ == "__main__":
    main()
| SiweiGong/mobile_robot_framework | icp_matching.py | icp_matching.py | py | 4,026 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.cla",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "mat... |
12134276149 | import logging
from rest_framework import viewsets, mixins, serializers, generics
from apps.proceso.models.forms.campo import Campo
from .campo_validation_view import CampoValidationSerializer
from .validation_view import ValidationSerializer
log = logging.getLogger(__name__)
class CampoSerializer(serializers.ModelSerializer):
    """DRF serializer for Campo form fields, embedding their validations."""
    # Nested, read-only list of this field's validation records.
    campovalidation_set = CampoValidationSerializer(many=True, read_only=True)
    # validation = ValidationSerializer(many=True, read_only=True)
    class Meta:
        model = Campo
        fields = ('id',
                  'label', 'name', 'type', 'required',
                  'width', 'placeholder',
                  'model_name',
                  'model_pk',
                  'model_label',
                  'json', 'formulario', 'icon',
                  'prefix', 'hint_start', 'hint_end_count_text', 'disabled',
                  'multiselect', 'order', 'accept_fileinput', 'multiple_fileinput',
                  'campovalidation_set',
                  # 'validation',
                  'tipo_validador',
                  'roles_validadores',
                  'documento',
                  'campos_validados',
                  'fecha_creacion', 'fecha_actualizacion')
        # Timestamps and the PK are managed server-side.
        read_only_fields = ('id', 'fecha_creacion', 'fecha_actualizacion',)
class CampoViewSet(viewsets.ModelViewSet):
    """
    Full CRUD ViewSet over every Campo (list, retrieve, create,
    update, delete) using CampoSerializer.
    """
    queryset = Campo.objects.all()
    serializer_class = CampoSerializer
class CampoList(generics.ListCreateAPIView):
serializer_class = CampoSerializer
def get_queryset(self):
formulario_id = self.kwargs['formulario_id']
if formulario_id is not None:
return Campo.objects.filter(formulario__id=formulario_id).order_by('order')
else:
return Campo.objects.all().order_by('order') | vitmaraliaga/TesisDgiService | apis/proceso_api/viewsets/forms/campo_view.py | campo_view.py | py | 1,852 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 10,
"usage_type": "name"
}... |
21172342682 | from __future__ import absolute_import
import json
from keshihua.celery import app
from datetime import datetime
from demo.pachong import new_job
from demo.graph import avg_salary,lan_fre,bar_job,job_cate,jieba_count
# Data refresh task.
@app.task
def start_get_data():
    """Celery task: re-scrape job data and regenerate every chart dataset.

    Writes the refresh timestamp plus row/word counts to
    demo/data/UpdateTime.json, then purges the celery queue so stale
    duplicate refresh jobs are dropped.
    """
    print('正在获取并更新数据...')
    count=new_job()
    print('处理专业薪资图中...')
    avg_salary()
    print('处理语言使用情况图中...')
    lan_fre()
    print('处理技术块状图中...')
    bar_job()
    print('处理岗位饼图..')
    job_cate()
    jc = jieba_count()
    nowtime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # Record the refresh metadata (keys are intentionally non-ASCII;
    # ensure_ascii=False keeps them readable in the file).
    with open('demo/data/UpdateTime.json', 'w', encoding='utf-8') as f:
        f.write(json.dumps({
            'code': 200,
            '数据获取时间': nowtime,
            '数据量':count,
            '词云数据量':jc
        }, ensure_ascii=False, indent=4))
    print('获取完毕数据已更新!')
    print('更新时间:' + nowtime)
    with app.connection() as conn:
        conn.default_channel.queue_purge(queue='celery')
| junhqin/SH-Internship-KSH | CeleryTask/task.py | task.py | py | 1,085 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "demo.pachong.new_job",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "demo.graph.avg_salary",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "demo.graph.lan_fre",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "demo.gr... |
1021808135 | import cv2
import numpy as np
import os
import pyrealsense2 as rs
# Directory where captured image pairs will be stored
save_dir = "path/to/save/directory"

# Camera initialization: enable 640x480 color (BGR8) and depth (Z16) streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
profile = pipeline.start(config)

# Number of RGB/depth pairs saved so far (used for file names)
saved_pairs = 0

try:
    while True:
        # Grab one frame pair from the camera
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        depth_frame = frames.get_depth_frame()

        # Convert the frames to numpy images (depth stays 16-bit)
        color_image = np.asanyarray(color_frame.get_data())
        depth_image = np.asanyarray(depth_frame.get_data())

        # Show both streams
        cv2.imshow("Color Image", color_image)
        cv2.imshow("Depth Image", depth_image)

        # Poll the keyboard
        key = cv2.waitKey(1)

        # Save the current pair when Space is pressed.
        # NOTE(review): JPEG cannot store 16-bit depth losslessly; consider a
        # .png extension for the depth image.
        if key == ord(" "):
            filename_color = os.path.join(save_dir, f"{saved_pairs}_RGB.jpg")
            filename_depth = os.path.join(save_dir, f"{saved_pairs}_D.jpg")
            cv2.imwrite(filename_color, color_image)
            cv2.imwrite(filename_depth, depth_image)
            saved_pairs += 1

        # Exit on ESC (fixes the duplicated `key == key == 27` comparison)
        if key == 27:
            break
finally:
    # Stop the camera and close all windows
    pipeline.stop()
    cv2.destroyAllWindows()
| Roman212Koval/Dual-channel_CNN | make_dataset.py | make_dataset.py | py | 1,838 | python | uk | code | 1 | github-code | 36 | [
{
"api_name": "pyrealsense2.pipeline",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pyrealsense2.config",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pyrealsense2.stream",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "py... |
36121022223 | import os
from pathlib import Path
from forte.data.base_pack import PackType
from forte.evaluation.base import Evaluator
from forte.data.extractor.utils import bio_tagging
from ft.onto.base_ontology import Sentence, Token, EntityMention
def _post_edit(element):
if element[0] is None:
return "O"
return "%s-%s" % (element[1], element[0].ner_type)
def _get_tag(data, pack):
    """BIO-tag one sentence's Tokens against its EntityMentions.

    data -- one item from DataPack.get_data(); pack -- the pack holding it.
    Returns a list of string tags, one per token.
    """
    tokens = [pack.get_entry(tid) for tid in data["Token"]["tid"]]
    mentions = [pack.get_entry(tid) for tid in data["EntityMention"]["tid"]]
    return [_post_edit(pair) for pair in bio_tagging(tokens, mentions)]
def _write_tokens_to_file(
    pred_pack, pred_request, refer_pack, refer_request, output_filename
):
    """Dump aligned gold/predicted NER tags, one token per line, in the
    column format expected by the CoNLL-2003 eval script (sentences are
    separated by a blank line).

    pred_pack / refer_pack -- packs with predicted and reference annotations
    pred_request / refer_request -- get_data() request dicts for each pack
    output_filename -- path of the text file to (over)write
    """
    # `with` guarantees the handle is closed even if a pack raises mid-loop;
    # the original leaked the file object on any exception.
    with open(output_filename, "w+") as opened_file:
        for pred_data, refer_data in zip(
            pred_pack.get_data(**pred_request), refer_pack.get_data(**refer_request)
        ):
            pred_tag = _get_tag(pred_data, pred_pack)
            refer_tag = _get_tag(refer_data, refer_pack)
            words = refer_data["Token"]["text"]
            pos = refer_data["Token"]["pos"]
            chunk = refer_data["Token"]["chunk"]
            # 1-based token index, word, POS, chunk, gold tag, predicted tag.
            for i, (word, position, chun, tgt, pred) in enumerate(
                zip(words, pos, chunk, refer_tag, pred_tag), 1
            ):
                opened_file.write(
                    "%d %s %s %s %s %s\n" % (i, word, position, chun, tgt, pred)
                )
            opened_file.write("\n")
class CoNLLNEREvaluator(Evaluator):
    """Evaluator for Conll NER task.

    Dumps predictions to a temp file and scores them with the bundled
    conll03eval.v2 perl script, parsing accuracy/precision/recall/F1
    from its first result line.
    """
    def __init__(self):
        super().__init__()
        # self.test_component = CoNLLNERPredictor().name
        self.output_file = "tmp_eval.txt"    # token/tag dump fed to the perl script
        self.score_file = "tmp_eval.score"   # raw output of conll03eval.v2
        self.scores = {}                     # metrics from the last consume_next()
    def consume_next(self, pred_pack: PackType, ref_pack: PackType):
        """Score one (prediction, reference) pack pair and store the metrics.

        Results are kept in self.scores and returned by get_result().
        """
        # Prediction side: sentence context with token and mention fields.
        pred_getdata_args = {
            "context_type": Sentence,
            "request": {
                Token: {"fields": ["chunk", "pos"]},
                EntityMention: {
                    "fields": ["ner_type"],
                },
                Sentence: [],  # span by default
            },
        }
        # Reference side additionally requests the gold "ner" field.
        refer_getdata_args = {
            "context_type": Sentence,
            "request": {
                Token: {"fields": ["chunk", "pos", "ner"]},
                EntityMention: {
                    "fields": ["ner_type"],
                },
                Sentence: [],  # span by default
            },
        }
        _write_tokens_to_file(
            pred_pack=pred_pack,
            pred_request=pred_getdata_args,
            refer_pack=ref_pack,
            refer_request=refer_getdata_args,
            output_filename=self.output_file,
        )
        # Eval script lives two directories above this file in the repo.
        eval_script = (
            Path(os.path.abspath(__file__)).parents[2]
            / "forte/utils/eval_scripts/conll03eval.v2"
        )
        os.system(
            f"perl {eval_script} < {self.output_file} > " f"{self.score_file}"
        )
        # Parse the second line of the score file, e.g.:
        # "accuracy:  97.0%; precision:  88.0%; recall:  89.0%; FB1:  88.5"
        with open(self.score_file, "r") as fin:
            fin.readline()
            line = fin.readline()
            fields = line.split(";")
            acc = float(fields[0].split(":")[1].strip()[:-1])
            precision = float(fields[1].split(":")[1].strip()[:-1])
            recall = float(fields[2].split(":")[1].strip()[:-1])
            f_1 = float(fields[3].split(":")[1].strip())
        self.scores = {
            "accuracy": acc,
            "precision": precision,
            "recall": recall,
            "f1": f_1,
        }
    def get_result(self):
        """Return the metrics dict from the most recent consume_next()."""
        return self.scores
| asyml/forte | examples/tagging/evaluator.py | evaluator.py | py | 3,552 | python | en | code | 230 | github-code | 36 | [
{
"api_name": "forte.data.extractor.utils.bio_tagging",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "forte.evaluation.base.Evaluator",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "forte.data.base_pack.PackType",
"line_number": 56,
"usage_type": "... |
19139046842 | import sys
import pathlib
# Absolute path of the directory containing this file.
current_dir = pathlib.Path(__file__).resolve().parent
# Add the parent directory so sibling packages (mylib) can be imported.
sys.path.append(str(current_dir) + "/../")
import pandas as pd
import numpy as np
from mylib.address import Address
from mylib.mypandas import MyPandas as mp
import pandas as pd
import time
def main(input_csv, output_csv):
    """Fill in missing prefecture values and write the result as CSV.

    input_csv -- source CSV with at least 'prefecture' and 'place' columns
    output_csv -- destination path for the completed rows
    """
    print("### start main ###")
    df = pd.read_csv(input_csv)
    rows = df.to_dict(orient="records")
    for index, row in enumerate(rows):
        # BUG FIX: `row["prefecture"] is np.nan` is an identity test and
        # misses the plain float('nan') values to_dict() produces for
        # missing cells; pd.isna() detects them all.
        if pd.isna(row["prefecture"]):
            print(row)
            prefecture, city = Address.address_to_prefecture_and_city(row["place"])
            rows[index]["prefecture"] = prefecture
            print("## NEW ##", row)
    mp.to_csv(rows, output_csv)
    print("### end main ###")
if __name__ == "__main__":
    # Time the whole run for simple profiling.
    start = time.time()
    # Expect exactly two CLI args: input CSV and output CSV.
    if len(sys.argv) == 3:
        input_csv = sys.argv[1]
        output_csv = sys.argv[2]
        main(input_csv, output_csv)
    else:
        print("Not Filename")
        exit()
    end = time.time()
    print(end - start, "s")
| sunajpdev/estates_appsheet | tool/estate_csv_setting.py | estate_csv_setting.py | py | 1,145 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_n... |
5780964210 | from pygame import Surface, image, mixer
import os
from engine.common.validated import ValidatedDict
from engine.common.constants import LogConstants
from engine.common.logger import LogManager
class AssetManager:
'''
Asset loaders, renderers, transformers and more!
'''
asset_prefix = "./engine/assets"
def __init__(self, config: ValidatedDict, logger: LogManager) -> None:
self.logger = logger
self.config = config
def loadImage(self, asset_name: str) -> Surface:
'''
Load an image in Texture form.
Given:
- asset_name: name of the asset, including extension.
Returns: Asset as a texture.
'''
asset_path = f"{self.asset_prefix}/images/{asset_name}"
if os.path.exists(asset_path):
self.logger.writeLogEntry(f'Loading asset: {asset_name}', status=LogConstants.STATUS_OK_BLUE, tool="ASSET_MGR")
return image.load(asset_path)
else:
self.logger.writeLogEntry(f'Couldn\'t find {asset_name}!', status=LogConstants.STATUS_FAIL, tool="ASSET_MGR")
def playSfx(self, asset_name: str) -> Surface:
'''
Load a sound in sound form.
Given:
- asset_name: name of the asset, including extension.
Returns: Nothing.
'''
asset_path = f"{self.asset_prefix}/sfx/{asset_name}"
sound_settings = self.config.get_dict('sound')
if sound_settings == None:
raise Exception("Sound settings in JSON are missing!")
if os.path.exists(asset_path):
self.logger.writeLogEntry(f'Loading asset: {asset_name}', status=LogConstants.STATUS_OK_BLUE, tool="ASSET_MGR")
sound = mixer.Sound(asset_path)
sound.set_volume(sound_settings.get('sfx_volume', 1.0)-0.4)
sound.play()
else:
self.logger.writeLogEntry(f'Couldn\'t find {asset_name}!', status=LogConstants.STATUS_FAIL, tool="ASSET_MGR") | Based-Games/BasedEngine | engine/common/asset.py | asset.py | py | 2,000 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "engine.common.validated.ValidatedDict",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "engine.common.logger.LogManager",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 29,
"usage_type": "call"
},
{
... |
73037188583 | # example URL(postgres): postgresql://username:password@host:port/database_name (postgresql://postgres:postgres@localhost:5432/mydatabase)
import pandas as pd
import json
import sqlalchemy as sql
from sqlalchemy import create_engine
from sqlalchemy_utils import create_database, database_exists
class DatabaseHandler:
    def __init__(self):
        """
        Initializes a new instance of the DatabaseHandler class.

        This class is designed to handle database operations, such as
        creating databases, connecting to them, inserting data, and executing queries.
        """
        self.engine = None  # set by create_db() / connect_db()
        print("Initialized class")

    def create_db(self, url):
        """
        Creates a new database if it doesn't already exist at the provided URL.

        Args:
            url (str): The database URL.
        """
        self.engine = create_engine(url)
        if not database_exists(self.engine.url):
            create_database(self.engine.url)
            print("Database created")
        else:
            print("Database already exists")

    def connect_db(self, url):
        """
        Connects to an existing database at the provided URL.

        Args:
            url (str): The database URL.
        """
        self.engine = create_engine(url)
        if database_exists(self.engine.url):
            # BUG FIX: Engine.connect() takes no URL argument — the engine was
            # already built from `url` by create_engine() above.
            self.engine.connect()
            print("Database connected")
        else:
            print("Database doesn't exist")

    def insert_data(self, data, tablename):
        """
        Inserts data from a DataFrame, a JSON file or a CSV file into a table.

        Args:
            data: A pandas DataFrame, or the path to a .json/.csv file.
            tablename (str): The name of the table to insert the data into.

        Raises:
            ValueError: If the file format is not supported.
        """
        if isinstance(data, pd.DataFrame):
            df = data
        elif data.endswith(".json"):
            with open(data, 'r') as f:
                file = json.load(f)
            df = pd.DataFrame(file)
        elif data.endswith(".csv"):
            df = pd.read_csv(data)
        else:
            raise ValueError("Unsupported file format")
        # Any existing table of the same name is replaced.
        df.to_sql(tablename, con=self.engine, if_exists='replace', index=False)
        print("Data entered correctly")

    def executor(self, query):
        """
        Executes a SQL query on the connected database.

        Args:
            query (str): The SQL query to execute.

        Returns:
            list: A list of rows as query results (None if an error occurred).
        """
        # NOTE(review): Engine.execute() was removed in SQLAlchemy 2.0; this
        # relies on the 1.x API. Migrate to engine.connect()/conn.execute()
        # when upgrading.
        try:
            result = self.engine.execute(query)
            rows = [row for row in result]
            return rows
        except Exception as e:
            print(f"Error: '{e}'")

    def close_connection(self):
        """
        Closes the connection to the database.
        """
        self.engine.dispose()
        print("Database connection closed")
# usage example
if __name__ == '__main__':
    # Connect to a local PostgreSQL instance and bulk-load a CSV into a table.
    test=DatabaseHandler()
    test.connect_db("postgresql://postgres:postgres@localhost:5432/team_project2")
    df = pd.read_csv("clean_data/gpclean.csv")
    test.insert_data(df, "gpclean3")  # replaces table "gpclean3" if it already exists
    #print(test.executor("SELECT * FROM public.revclean LIMIT 3"))
| Christian125px/team_project2 | src/database_handler.py | database_handler.py | py | 3,270 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy_utils.database_exists",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sqlalchemy_utils.create_database",
"line_number": 29,
"usage_type": "call"
},... |
9571125723 | #!/usr/bin/env python
import os, subprocess, sys, pwd, grp, stat
import socket, requests, json, yaml, time, logging, re, argparse
import paramiko
from pprint import pprint
# Base endpoint of the inventory API used to discover host groups.
_api = 'https://foobar.ru/api/'

# User and group names of the account running this script; remote and local
# directories are chowned to match them.
user, group = pwd.getpwuid(os.getuid()).pw_name, grp.getgrgid(pwd.getpwuid(os.getuid()).pw_gid).gr_name
def get_hosts(hostname):
    """Return the peer hosts that share an inventory group with *hostname*.

    Queries the API for the host's groups, then collects every FQDN in those
    groups, deduplicated and with *hostname* itself removed. Returns an empty
    list when the group lookup fails (the error is logged).
    """
    all_hosts = []
    try:
        host_groups = [group['name'] for group in json.loads(requests.get(_api + \
            'foo/{}?format=json'.format(hostname)).text)]
    except Exception as e:
        host_groups = []
        logging.warning('{}: {}'.format(e.__class__.__name__, e))
    for group in host_groups:
        all_hosts.extend([host['fqdn'] for host in json.loads(requests.get(_api + \
            'bar/{}?format=json'.format(group)).text)])
    all_hosts = list(set(all_hosts))
    # FIX: list.remove() raises ValueError when the API does not report the
    # host inside its own groups — only remove it when present.
    if hostname in all_hosts:
        all_hosts.remove(hostname)
    return all_hosts
def get_config(*args):
    """Load the sync configuration, from GitHub or from a local cached copy.

    Args:
        args: optional single positional boolean; True forces use of the
            newest local ``config-<timestamp>.yml`` instead of fetching.

    Returns:
        dict: the parsed config, or {} when nothing could be loaded.
    """
    local = False
    if args and type(args) is tuple: local = args[0]
    config = {}
    # FIX: materialize the listing. In Python 3 a `filter` object is always
    # truthy and can be consumed only once, so the original truth tests and
    # the max()/os.remove() passes could not coexist.
    config_files = [x for x in os.listdir('./') if re.match('config-[0-9]+.yml', x)]

    def _load_latest_local():
        # Newest cached config wins (timestamps embedded in the filenames).
        timestamp = max(re.search('config-([0-9]+).yml', x).group(1) for x in config_files)
        with open('config-{}.yml'.format(timestamp), 'r') as config_file:
            try:
                # FIX: safe_load — yaml.load without a Loader is unsafe and
                # rejected by modern PyYAML.
                return yaml.safe_load(config_file)
            except Exception as e:
                logging.warning('{}: {}'.format(e.__class__.__name__, e))
                return {}

    if local:
        if config_files:
            config = _load_latest_local()
    else:
        try:
            config = yaml.safe_load((requests.get('https://raw.githubusercontent.com/' \
                'asmartishin/python_scripts/master/file_sync/config.yml').text))
            # Replace all stale cached copies with a freshly timestamped one.
            list(map(os.remove, config_files))
            with open('config-{}.yml'.format(int(time.time())), 'w') as config_file:
                config_file.write(yaml.dump(config ,default_flow_style=False))
        except Exception as e:
            logging.warning('{}: {}'.format(e.__class__.__name__, e))
            # Fetch failed: fall back to the newest local copy, if any.
            if config_files:
                config = _load_latest_local()
    return config
# Directory ownership is switched to the user running the script, on the
# assumption that it is started under an admin account.
def get_local_files(config):
    """Ensure each configured directory exists and is owned by us, then list its files."""
    found = []
    for directory in config['directories']:
        if not os.path.isdir(directory):
            subprocess.call('sudo mkdir -p {}'.format(directory), shell=True)
        stat_info = os.stat(directory)
        owner = pwd.getpwuid(stat_info.st_uid).pw_name
        owner_group = grp.getgrgid(stat_info.st_gid).gr_name
        if user != owner or group != owner_group:
            subprocess.call('sudo chown -R {}:{} {}'.format(user, group, directory), shell=True)
        for dirpath, dirnames, filenames in os.walk(directory):
            found.extend(os.path.join(dirpath, filename) for filename in filenames)
    return found
def get_host_files(hostname, config):
    """Collect the recursive file listing of every configured directory on *hostname*.

    Missing directories are created (via sudo) and ownership is aligned with
    the local user/group before listing.

    NOTE(review): on Python 3 the exec_command() streams yield bytes, so the
    str regex below and the later path comparisons would need decoding —
    this looks written for Python 2; verify before running under Python 3.
    """
    remote_files = []
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname, username=user)
    sftp = ssh.open_sftp()
    for directory in config['directories']:
        try:
            sftp.stat(directory)
        except IOError as e:
            # errno 2 == ENOENT: directory missing on the remote side
            if e.errno == 2:
                ssh.exec_command(('sudo mkdir -p {}').format(directory))
        # Parse owner and group out of `ls -ld` output.
        huser, hgroup = re.search('([A-Za-z]+)\ ([A-Za-z_ ]+)\ ' ,ssh.exec_command(('ls -ld {}').\
            format(directory))[1].read().rstrip()).group(1, 2)
        if user != huser or group != hgroup:
            ssh.exec_command('sudo chown -R {}:{} {}'.format(user, group, directory))
        # readlink -f canonicalizes paths so they compare cleanly with local ones.
        remote_files.extend(ssh.exec_command(('find {} -type f | xargs readlink -f').\
            format(directory))[1].read().splitlines())
    sftp.close()
    ssh.close()
    return remote_files
def push_files(local_files_tuple, remote_files):
    """Upload local files missing on each remote host into <dir>/<our hostname>/ there.

    Args:
        local_files_tuple: (local hostname, list of local file paths).
        remote_files: dict mapping remote hostname -> list of its file paths.
    """
    print('Push: ')
    hostname = local_files_tuple[0]
    local_files = local_files_tuple[1]
    for rhost in remote_files:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(rhost, username=user)
        sftp = ssh.open_sftp()
        for lfile in local_files:
            if lfile not in remote_files[rhost]:
                # Skip files that already sit in another host's staging folder.
                if lfile.split('/')[-2] not in remote_files.keys():
                    pdir, pfile = os.path.split(lfile)
                    rdir = '{}/{}'.format(pdir, hostname)
                    rpath = '{}/{}/{}'.format(pdir, hostname, pfile)
                    ssh.exec_command('mkdir -p {}'.format(rdir))
                    print('{} --> {}:{}'.format(lfile, rhost, rpath))
                    sftp.put(lfile, rpath)
        sftp.close()
        ssh.close()
    # FIX: a bare `print` is a no-op expression on Python 3 (Python 2 leftover);
    # call it to emit the separating blank line as intended.
    print()
def pull_files(local_files_tuple, remote_files):
    """Download remote files we don't have into <dir>/<remote hostname>/ locally.

    Args:
        local_files_tuple: (local hostname, list of local file paths).
        remote_files: dict mapping remote hostname -> list of its file paths.
    """
    print('Pull: ')
    hostname = local_files_tuple[0]
    local_files = local_files_tuple[1]
    # FIX: dict.keys() returns a view on Python 3 which has no append();
    # build a real list of all participating hosts.
    all_hosts = list(remote_files) + [hostname]
    for rhost in remote_files:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(rhost, username=user)
        sftp = ssh.open_sftp()
        for rfile in remote_files[rhost]:
            if rfile not in local_files:
                # Skip files that already sit in a host staging folder.
                if rfile.split('/')[-2] not in all_hosts:
                    pdir, pfile = os.path.split(rfile)
                    ldir = '{}/{}'.format(pdir, rhost)
                    lpath = '{}/{}/{}'.format(pdir, rhost, pfile)
                    subprocess.call('mkdir -p {}'.format(ldir), shell=True)
                    print('{} <-- {}:{}'.format(lpath, rhost, rfile))
                    sftp.get(rfile, lpath)
        sftp.close()
        ssh.close()
    # FIX: bare `print` was a Python 2 leftover; call it for the blank line.
    print()
def parse_arguments():
    """Build and evaluate the command-line interface.

    Returns:
        argparse.Namespace with a boolean ``local`` attribute.
    """
    arg_parser = argparse.ArgumentParser(description='Script for syncing files on servers')
    arg_parser.add_argument(
        '-l', '--local',
        action='store_true',
        default=False,
        help='Use local copy of config file',
    )
    return arg_parser.parse_args()
if __name__ == "__main__":
    remote_files = {}
    args = parse_arguments()
    hostname = socket.getfqdn()
    # Peers are every other host sharing an inventory group with us.
    remote_hosts = get_hosts(hostname)
    config = get_config(args.local)
    if not config:
        raise RuntimeError('Could not load config. Exiting.')
    local_files_tuple = (hostname, get_local_files(config))
    for host in remote_hosts:
        try:
            remote_files[host] = get_host_files(host, config)
        except Exception as e:
            # A single unreachable host should not abort the whole sync.
            logging.warning('{}: {}'.format(e.__class__.__name__, e))
    # Two-way sync: upload what peers are missing, then download what we lack.
    push_files(local_files_tuple, remote_files)
    pull_files(local_files_tuple, remote_files)
| ttymonkey/python | file_sync/sync.py | sync.py | py | 7,025 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pwd.getpwuid",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.getuid",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "grp.getgrgid",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 14,
... |
13617657820 | import unittest
from datetime import datetime
from pathlib import Path
from we1s_chomp import db, model
class TestModel(unittest.TestCase):
    """Round-trip (save -> reload -> compare) tests for chomp manifests."""

    def setUp(self):
        # Every manifest JSON file is written under this directory.
        self.dirpath = Path("test/data")

    def _check_roundtrip(self, manifest):
        """Persist *manifest*, reload it by name, and compare all attributes."""
        db.save_manifest_file(manifest, self.dirpath)
        self.assertTrue((self.dirpath / f"{manifest.name}.json").exists())
        reloaded = db.load_manifest_file(manifest.name, self.dirpath)
        self.assertDictEqual(vars(manifest), vars(reloaded))

    def test_source(self):
        source = model.Source(
            name="we1s",
            webpage="http://we1s.ucsb.edu",
            tags=["hello"],
            country="US",
            language="en-US",
            copyright="(C) 2017-2019 UCSB and the WE1S Project",
        )
        self.assertIsInstance(source, model.Source)
        self.assertEqual(source.name, "we1s")
        self._check_roundtrip(source)

    def test_query(self):
        query = model.Query(
            source_name="we1s",
            query_str="humanities",
            start_date=datetime(year=2000, month=1, day=1),
            end_date=datetime(year=2019, month=12, day=31),
        )
        self.assertIsInstance(query, model.Query)
        self.assertEqual(query.name, "we1s_humanities_2000-01-01_2019-12-31")
        self._check_roundtrip(query)

    def test_response(self):
        response = model.Response(
            name="chomp-response_we1s_humanities_2000-01-01_2019-12-31_0",
            url="http://we1s.ucsb.edu",
            content="12345 Hello!",
            api_data_provider="wordpress",
            source_name="we1s",
            query_name="we1s_humanities_2000-01-01_2019-12-31",
        )
        self.assertIsInstance(response, model.Response)
        self.assertEqual(
            response.name, "chomp-response_we1s_humanities_2000-01-01_2019-12-31_0"
        )
        self._check_roundtrip(response)

    def test_article(self):
        article = model.Article(
            name="chomp_we1s_humanities_2000-01-01_2019-12-31_0",
            url="http://we1s.ucsb.edu",
            title="WhatEvery1Says Article",
            pub="WhatEvery1Says",
            pub_date=datetime(year=2019, month=12, day=31),
            content_html="<h1>Hello!</h1>",
            copyright="(C) 2017-2019 UCSB and the WE1S Project",
            api_data_provider="wordpress",
        )
        self.assertIsInstance(article, model.Article)
        self.assertEqual(article.name, "chomp_we1s_humanities_2000-01-01_2019-12-31_0")
        self._check_roundtrip(article)
| seangilleran/we1s_chomp | test/test_model.py | test_model.py | py | 3,618 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "we1s_chomp.model.Source",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "we1s_chomp... |
37683812851 | from django.conf.urls import url, include
from .views import TopicDetailView, QuestionDetailView, TopicListView
urlpatterns = [
    # /                         -> list of all topics
    url(r'^$', TopicListView.as_view(), name='topic-list'),
    # routes nested under a single topic's primary key
    url(r'^(?P<pk>\d+)/', include([
        # /<pk>/                -> topic detail page
        url('^$', TopicDetailView.as_view(), name='topic-detail'),
        # /<pk>/question-<n>/   -> a single question within the topic
        url(r'^question-(?P<number>\d+)/$', QuestionDetailView.as_view(), name='question-detail')
    ]))
]
| unixander/TestsApp | apps/questions/urls.py | urls.py | py | 401 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "views.TopicListView.as_view",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "views.TopicListView",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "dja... |
42939386801 | import os.path as osp
import numpy as np
import torch
import torch.utils.data as Data
from PIL import Image
__all__ = ['CUB_200_2011']
class CUB_200_2011(Data.Dataset):
    """Caltech-UCSD Birds-200-2011 split for zero-shot learning.

    ``phase='train'`` uses the classes listed in ``train_classes.txt``,
    ``phase='val'`` those in ``val_classes.txt``. Class-level attribute
    vectors are scaled to [0, 1] and (optionally) mean-corrected.
    """

    def __init__(self, root_dir, phase='train', transform=None):
        """
        Args:
            root_dir: dataset root containing images/, images.txt,
                image_class_labels.txt, {train,val}_classes.txt and
                attributes/class_attribute_labels_continuous.txt.
            phase: 'train' or 'val'.
            transform: optional callable applied to each loaded PIL image.
        """
        super(CUB_200_2011, self).__init__()
        assert phase in ('train', 'val')
        self.root_dir = root_dir
        self.file = osp.join(root_dir, phase + '_' + 'classes.txt')
        with open(self.file) as file:
            self.interst_classes = [line.strip().split(' ')[0] for line in file.readlines()]
        # Mappings between dataset class ids and contiguous label indices.
        self.classIdx2Idx = {class_idx: idx for idx, class_idx in enumerate(self.interst_classes)}
        self.Idx2classIdx = {idx: class_idx for idx, class_idx in enumerate(self.interst_classes)}
        self.transform = transform

        self.images_path = osp.join(root_dir, 'images')
        id_class_file = osp.join(root_dir, 'image_class_labels.txt')
        id_images_file = osp.join(root_dir, 'images.txt')
        attributes_file = osp.join(root_dir, 'attributes', 'class_attribute_labels_continuous.txt')

        id_images = self._read_file(id_images_file)
        # (relative image path, contiguous label) for each image whose class
        # belongs to this phase.
        self.set = list()
        with open(id_class_file) as file:
            for line in file.readlines():
                infos = line.strip().split(' ')
                if infos[1] in self.interst_classes:
                    self.set.append((id_images[infos[0]], self.classIdx2Idx[infos[1]]))

        # Normalize attributes
        source_norm_attris, target_norm_attris = self._normalize_attris(attributes_file, True)
        if phase == 'train':
            self.attributes = torch.FloatTensor(source_norm_attris)
        else:
            self.attributes = torch.FloatTensor(target_norm_attris)

    def _read_file(self, read_file):
        """Parse a two-column whitespace file into a {col0: col1} dict."""
        dct = dict()
        with open(read_file) as file:
            for line in file.readlines():
                infos = line.strip().split(' ')
                dct[infos[0]] = infos[1]
        return dct

    def _normalize_attris(self, attributes_file, mean_correction=True):
        """Load per-class attribute vectors, scale to [0, 1], fill missing values.

        Returns:
            (source_codes, target_codes): numpy arrays of attribute vectors
            for the train and val class splits respectively.
        """
        source_file = osp.join(self.root_dir, 'train_classes.txt')
        target_file = osp.join(self.root_dir, 'val_classes.txt')
        # FIX: use context managers so the class-list files are closed
        # (the original left the open() handles dangling).
        with open(source_file) as f:
            source_idx = [(int(line.strip().split(' ')[0]) - 1) for line in f]
        with open(target_file) as f:
            target_idx = [(int(line.strip().split(' ')[0]) - 1) for line in f]

        codes = np.loadtxt(attributes_file).astype(float)
        if codes.max() > 1:
            codes /= 100.
        # Missing entries are marked negative; replace them with the source
        # split's per-attribute mean (or 0.5 when mean correction is off).
        code_mean = codes[source_idx, :].mean(axis=0)
        for s in range(codes.shape[1]):
            codes[codes[:, s] < 0, s] = code_mean[s] if mean_correction else 0.5
        # Mean correction: re-center every attribute around 0.5.
        if mean_correction:
            for s in range(codes.shape[1]):
                codes[:, s] = codes[:, s] - code_mean[s] + 0.5
        return codes[source_idx], codes[target_idx]

    @property
    def get_class_attributes(self):
        """Class-attribute matrix for this phase as a FloatTensor."""
        return self.attributes

    def __len__(self):
        return len(self.set)

    def __getitem__(self, index):
        """Return (image, label) for sample *index*; applies self.transform if set."""
        image_file = osp.join(self.images_path, self.set[index][0])
        image_label = int(self.set[index][1])
        image = Image.open(image_file).convert('RGB')
        if self.transform:
            image = self.transform(image)
        return image, image_label
| SmallHedgehog/ZeroShotLearning | dataset/CUB_200_2011.py | CUB_200_2011.py | py | 3,311 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",... |
12027951137 | from solid import (
part,
sphere,
cube,
translate,
hull,
)
from solid.utils import right
from utils import render_to_openscad
def main():
    """Build five hull() demonstrations, offset along X so they don't overlap.

    Returns the OpenSCAD union of all five example shapes.
    """
    # -> Example 1: hull of two vertically separated cubes -> a single slab
    ex1 = part()
    ex1.add(translate((0, 0, 0))(
        cube((5, 5, 5), center=False)
    ))
    ex1.add(translate((0, 10, 0))(
        cube((5, 5, 5), center=False)
    ))
    combo1 = hull()(ex1)

    # -> Example 2: hull of two diagonally offset cubes
    ex2 = part()
    ex2.add(translate((0, 0, 0))(
        cube((5, 5, 5), center=False)
    ))
    ex2.add(translate((10, 10, 0))(
        cube((5, 5, 5), center=False)
    ))
    combo2 = right(10)(
        hull()(ex2)
    )

    # -> Example 3 (Crystal-like): cubes offset along all three axes
    ex3 = part()
    ex3.add(translate((0, 0, 0))(
        cube((5, 5, 5), center=False)
    ))
    ex3.add(translate((10, 10, 10))(
        cube((5, 5, 5), center=False)
    ))
    combo3 = right(25)(
        hull()(ex3)
    )

    # -> Example 4 (Hot-air balloon): big sphere hulled with a small one below
    ex4 = part()
    ex4.add(sphere(d=20))
    ex4.add(translate((0, 0, -20))(
        sphere(d=3)
    ))
    combo4 = right(50)(
        hull()(ex4)
    )

    # -> Example 5 (Box with rounded corner)
    # Create 8 corner spheres; their hull is a cube with rounded edges.
    ex5 = part()
    for x in range(-10, 20, 20):
        for y in range(-10, 20, 20):
            for z in range(-10, 20, 20):
                ex5.add(translate((x, y, z))(
                    sphere(d=10)
                ))
    combo5 = right(80)(
        hull()(ex5)
    )

    return combo1 + combo2 + combo3 + combo4 + combo5


if __name__ == '__main__':
    # no_run=True only writes the .scad file instead of launching OpenSCAD.
    render_to_openscad(main(), no_run=True)
| cr8ivecodesmith/py3dp_book | solid_/hull_.py | hull_.py | py | 1,566 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "solid.part",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "solid.translate",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "solid.cube",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "solid.translate",
"line_num... |
24327828185 | import os, socket
from time import time
import numpy as np
import tensorflow as tf
tf_float_prec = tf.float64
from pdb import set_trace as st
#from keras import backend as K
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM as LSTM_LAYER
#from keras.models import Sequential, load_model
from keras import optimizers, regularizers, initializers, losses
from dovebirdia.deeplearning.networks.base import AbstractNetwork, FeedForwardNetwork
from dovebirdia.utilities.base import dictToAttributes, saveAttrDict, saveDict
from dovebirdia.datasets.domain_randomization import DomainRandomizationDataset
from dovebirdia.datasets.outliers import generate_outliers
# Pick the matplotlib backend by machine: keep the default interactive
# backend on the dev machine ('pengy'); force the non-interactive 'agg'
# backend everywhere else (e.g. headless cluster nodes).
machine = socket.gethostname()

if machine == 'pengy':
    import matplotlib.pyplot as plt
else:
    import matplotlib
    matplotlib.use('agg')
    import matplotlib.pyplot as plt
class LSTM(FeedForwardNetwork):

    """
    Keras/TF1 stacked-LSTM network trained on domain-randomized datasets.

    Hyper-parameters (epochs, hidden dims, sequence length, regularizers,
    dropout rates, etc.) arrive through the `params` dict; they are exposed
    as `self._*` attributes — presumably by the FeedForwardNetwork base
    class (TODO confirm; the base class is not visible here).
    """

    def __init__(self, params=None):

        assert isinstance(params,dict)
        super().__init__(params=params)

    ##################
    # Public Methods #
    ##################

    def evaluate(self,x=None,y=None,labels=None,
                 eval_ops=None,
                 attributes=None,
                 save_results=None):

        """Window x/y into LSTM sequences, then defer to the base evaluate.

        `labels` and `eval_ops` are accepted but not used here.
        """

        x, y = self._generateDataset(x,y)

        return super().evaluate(x=x,y=y,attributes=attributes,save_results=save_results)

    ###################
    # Private Methods #
    ###################

    def _fitDomainRandomization(self, dr_params=None, save_model=False):

        """Train on freshly generated domain-randomized data each epoch.

        A new training and validation dataset is drawn every epoch from the
        dataset class configured under dr_params['class']. Returns the
        history dict (per-epoch train/val loss and MSE, plus total runtime
        in minutes).
        """

        # create domainRandomizationDataset object
        self._dr_dataset = dr_params.pop('class')(**dr_params)

        # dictionaries to hold training and validation data
        train_feed_dict = dict()
        val_feed_dict = dict()

        start_time = time()

        with tf.Session() as sess:

            # initialize variables
            sess.run(tf.global_variables_initializer())

            for epoch in range(1, self._epochs+1):

                # set x_train, y_train, x_val and y_val in dataset_dict attribute of DomainRandomizationDataset
                train_data = self._dr_dataset.generateDataset()
                val_data = self._dr_dataset.generateDataset()

                # train and val loss lists
                train_loss_list = list()
                val_loss_list = list()

                train_mse_list = list()
                val_mse_list = list()

                # loop over trials
                for x_train, y_train, x_val, y_val in zip(train_data['x_test'],train_data['y_test'],
                                                          val_data['x_test'],val_data['y_test']):

                    # generate minibatches
                    x_train_mb, y_train_mb = self._generateMinibatches(x_train,y_train)

                    # Generate LSTM 3-rank tensors. When _train_ground is
                    # False the network is trained to reproduce its own
                    # (noisy) input instead of the ground truth.
                    x_train_mb, y_train_mb = self._generateDataset(x_train_mb, y_train_mb) if self._train_ground else self._generateDataset(x_train_mb, x_train_mb)
                    x_val, y_val = self._generateDataset(np.expand_dims(x_val,axis=0), np.expand_dims(y_val,axis=0)) if self._train_ground else \
                                   self._generateDataset(np.expand_dims(x_val,axis=0), np.expand_dims(x_val,axis=0))

                    for x_mb, y_mb in zip(x_train_mb,y_train_mb):

                        train_feed_dict.update({self._X:x_mb,self._y:y_mb})
                        sess.run(self._optimizer_op, feed_dict=train_feed_dict)

                        train_loss, train_mse = sess.run([self._loss_op,self._mse_op],feed_dict=train_feed_dict)
                        train_loss_list.append(train_loss)
                        train_mse_list.append(train_mse)

                    for x_v, y_v in zip(x_val,y_val):

                        val_feed_dict.update({self._X:x_v,self._y:y_v})

                        val_loss, val_mse = sess.run([self._loss_op,self._mse_op],feed_dict=val_feed_dict)
                        val_loss_list.append(val_loss)
                        val_mse_list.append(val_mse)

                # NOTE(review): only the scalar losses from the *last* batch
                # are averaged here (train_loss, not train_loss_list); the
                # per-batch lists collected above are never aggregated —
                # confirm whether that is intentional.
                self._history['train_loss'].append(np.asarray(train_loss).mean())
                self._history['val_loss'].append(np.asarray(val_loss).mean())
                self._history['train_mse'].append(np.asarray(train_mse).mean())
                self._history['val_mse'].append(np.asarray(val_mse).mean())

                print('Epoch {epoch}, Training Loss/MSE {train_loss:0.4}/{train_mse:0.4}, Val Loss/MSE {val_loss:0.4}/{val_mse:0.4}'.format(epoch=epoch,
                                                                                                                                            train_loss=self._history['train_loss'][-1],
                                                                                                                                            train_mse=self._history['train_mse'][-1],
                                                                                                                                            val_loss=self._history['val_loss'][-1],
                                                                                                                                            val_mse=self._history['val_mse'][-1]))

            # total runtime in minutes
            self._history['runtime'] = (time() - start_time) / 60.0

            if save_model:

                self._saveModel(sess,'trained_model.ckpt')

        return self._history

    def _buildNetwork(self):

        """Assemble the stacked-LSTM graph terminated by a Dense output layer."""

        self._setPlaceholders()

        # weight regularizer
        # NOTE(review): `weight_regularizer_scale` is not defined in this
        # scope, so the call below always raises and the except path quietly
        # disables weight regularization — confirm intended.
        try:
            self._weight_regularizer = self._weight_regularizer(weight_regularizer_scale)
        except:
            self._weight_regularizer = None

        self._y_hat = self._X

        for layer in range(len(self._hidden_dims)):

            # Only the first layer receives an explicit input shape; all but
            # the last layer return full sequences.
            input_timesteps = (self._seq_len) if layer == 0 else None
            input_dim = self._input_dim if layer == 0 else None
            return_seq = self._return_seq if layer < len(self._hidden_dims)-1 else False

            print('Input timesteps: {input_timesteps}'.format(input_timesteps = input_timesteps))
            print('Input Dim: {input_dim}'.format(input_dim = input_dim))
            print('Return Seq: {return_seq}'.format(return_seq=return_seq))
            print('units: {units}'.format(units = self._hidden_dims[layer]))

            if layer == 0 and self._stateful:

                # different inputs to first layer due to stateful parameter
                self._y_hat = LSTM_LAYER(
                    units = self._hidden_dims[layer],
                    activation = self._activation,
                    batch_input_shape = (self._seq_len, input_timesteps, input_dim),
                    bias_initializer = initializers.Constant(value=self._bias_initializer),
                    kernel_regularizer = self._weight_regularizer,
                    recurrent_regularizer = self._recurrent_regularizer,
                    kernel_constraint = self._weight_constraint,
                    return_sequences = return_seq,
                    stateful = self._stateful,
                    dropout=self._input_dropout_rate)(self._y_hat)

            else:

                # different inputs to first layer due to stateful parameter
                self._y_hat = LSTM_LAYER(
                    units = self._hidden_dims[layer],
                    activation = self._activation,
                    input_shape = (input_timesteps, input_dim),
                    bias_initializer = initializers.Constant(value=self._bias_initializer),
                    kernel_regularizer = self._weight_regularizer,
                    recurrent_regularizer = self._recurrent_regularizer,
                    kernel_constraint = self._weight_constraint,
                    return_sequences = return_seq,
                    stateful = self._stateful,
                    dropout=self._dropout_rate)(self._y_hat)

        self._y_hat = Dense(units=self._output_dim)(self._y_hat)

    def _setPlaceholders(self):

        """Create the (batch, seq_len, input_dim) input and (batch, input_dim) target placeholders."""

        # input and output placeholders
        self._X = tf.placeholder(dtype=tf.float32, shape=(None,self._seq_len,self._input_dim), name='X')
        self._y = tf.placeholder(dtype=tf.float32, shape=(None,self._input_dim), name='y')

    def _generateDataset( self, x, y):

        """Slide a seq_len window over each trial.

        For every trial, each window of `x` is paired with the `y` sample
        immediately following it (one-step-ahead target). Returns two lists
        of numpy arrays, one entry per trial.
        """

        x_wins = list()
        y_wins = list()

        for x_trial,y_trial in zip(x,y):

            x_wins_trial, y_wins_trial = list(), list()

            for sample_idx in range(x_trial.shape[0]-self._seq_len):

                x_wins_trial.append(x_trial[sample_idx:sample_idx+self._seq_len,:])
                y_wins_trial.append(y_trial[sample_idx+self._seq_len,:])

            x_wins.append(np.asarray(x_wins_trial))
            y_wins.append(np.asarray(y_wins_trial))

        return x_wins, y_wins

    def _setLoss(self):

        """Define the MSE op and the total loss op (MSE + regularization)."""

        self._mse_op = tf.cast(self._loss(self._y,self._y_hat),tf_float_prec)
        self._loss_op = self._mse_op + tf.cast(tf.losses.get_regularization_loss(), tf_float_prec)
| mattweiss/public | deeplearning/networks/lstm_tf.py | lstm_tf.py | py | 11,298 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.float64",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "socket.gethostname",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.use",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "dovebirdia.d... |
21142071947 | from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.typing import Adj, OptPairTensor, OptTensor, Size
from torch_geometric.utils.repeat import repeat
import torch
from torch_sparse import SparseTensor
from torch import Tensor, nn
from torch.nn import Parameter
from cutlass import cutlass
import math
import numpy as np
from typing import Any
from typing import List, Tuple, Union
def uniform(size: int, value: Any):
    """Recursively fill *value* with U(-1/sqrt(size), 1/sqrt(size)) noise.

    Tensors are filled in place; anything else is traversed through its
    parameters() and buffers() iterators (when present).
    """
    if isinstance(value, Tensor):
        limit = 1.0 / math.sqrt(size)
        value.data.uniform_(-limit, limit)
        return
    for child in (value.parameters() if hasattr(value, 'parameters') else []):
        uniform(size, child)
    for child in (value.buffers() if hasattr(value, 'buffers') else []):
        uniform(size, child)
def constant(value: Any, fill_value: float):
    """Recursively overwrite *value* (tensor, or a module's params/buffers) with *fill_value*."""
    if isinstance(value, Tensor):
        value.data.fill_(fill_value)
        return
    for child in (value.parameters() if hasattr(value, 'parameters') else []):
        constant(child, fill_value)
    for child in (value.buffers() if hasattr(value, 'buffers') else []):
        constant(child, fill_value)
def zeros(value: Any):
    """Zero-initialize *value* (tensor or module) via constant()."""
    constant(value, 0.)
# Python-2-style alias: accept both text and byte strings.
basestring = (str, bytes)


def is_list_of_strings(lst):
    """Return True when *lst* is a non-empty list whose elements are all str/bytes."""
    if not lst or not isinstance(lst, list):
        return False
    return all(isinstance(elem, basestring) for elem in lst)
from cutlass import *
import scipy.optimize

# Module-level cache for the Monte-Carlo integration operator used by
# optimizeWeights2D; holds ((shape, basis, nmc, periodicity, windowFn), M).
MCache = None
def optimizeWeights2D(weights, basis, periodicity, nmc = 32 * 1024, targetIntegral = 1, windowFn = None, verbose = False):
    """Rescale a 2-D RBF weight matrix so its basis expansion integrates to *targetIntegral* over the unit disk.

    The integral is estimated with nmc Monte-Carlo samples of the
    tensor-product basis (evalBasisFunction comes from `from cutlass import *`);
    the resulting linear operator M is cached in the module-global MCache and
    reused while the configuration (shape/basis/nmc/periodicity/window) is
    unchanged. A constrained least-squares problem is then solved with
    scipy's trust-constr method.

    Returns (optimized weights, constraint residuals, objective value,
    integral before optimization, integral after optimization).
    """
    global MCache
    M = None
    numWeights = weights.shape[0] * weights.shape[1]
    # Standardize the weights to zero mean / unit std before optimizing.
    normalizedWeights = (weights - torch.sum(weights) / weights.numel())/torch.std(weights)
    if not MCache is None:
        cfg, M = MCache
        w,b,n,p,wfn = cfg
        # Invalidate the cached operator when any part of the config changed.
        if not(w == weights.shape and np.all(b == basis) and n == nmc and np.all(p ==periodicity) and wfn == windowFn):
            M = None
    if M is None:
        # Uniform samples over the unit disk (sqrt of U for radial uniformity).
        r = torch.sqrt(torch.rand(size=(nmc,1)).to(weights.device).type(torch.float32))
        theta = torch.rand(size=(nmc,1)).to(weights.device).type(torch.float32) *2 * np.pi

        x = r * torch.cos(theta)
        y = r * torch.sin(theta)

        u = evalBasisFunction(weights.shape[0], x.T, which = basis[0], periodic = periodicity[0])[0,:].mT
        v = evalBasisFunction(weights.shape[1], y.T, which = basis[1], periodic = periodicity[1])[0,:].mT

        # Optional radial window applied to every sample.
        window = weights.new_ones(x.shape[0]) if windowFn is None else windowFn(torch.sqrt(x**2 + y**2))[:,0]

        # Per-sample outer product of the two basis evaluations.
        nuv = torch.einsum('nu, nv -> nuv', u, v)
        nuv = nuv * window[:,None, None]
        # Monte-Carlo estimate of the integral operator (disk area = pi).
        M = np.pi * torch.sum(nuv, dim = 0).flatten().detach().cpu().numpy() / nmc

        MCache = ((weights.shape, basis, nmc, periodicity, windowFn), M)

    w = normalizedWeights.flatten().detach().cpu().numpy()
    eps = 1e-2
    # chebyshev/fourier bases are optimized without the mean/std constraints.
    if 'chebyshev' in basis or 'fourier' in basis:
        res = scipy.optimize.minimize(fun = lambda x: (M.dot(x) - targetIntegral)**2, \
            jac = lambda x: 2 * M * (M.dot(x) - targetIntegral), \
            hess = lambda x: 2. * np.outer(M,M), x0 = w, \
            method ='trust-constr', constraints = None,\
            options={'disp': False, 'maxiter':100})
    else:
        # Keep the optimized weights near zero mean and unit std.
        sumConstraint = scipy.optimize.NonlinearConstraint(fun = np.sum, lb = -eps, ub = eps)
        stdConstraint = scipy.optimize.NonlinearConstraint(fun = np.std, lb = 1 - eps, ub = 1 + eps)
        res = scipy.optimize.minimize(fun = lambda x: (M.dot(x) - targetIntegral)**2, \
            jac = lambda x: 2 * M * (M.dot(x) - targetIntegral), \
            hess = lambda x: 2. * np.outer(M,M), x0 = w, \
            method ='trust-constr', constraints = [sumConstraint, stdConstraint],\
            options={'disp': False, 'maxiter':100})
    result = torch.from_numpy(res.x.reshape(weights.shape)).type(torch.float32).to(weights.device)

    if verbose:
        print('result: ', res)
        print('initial weights:', normalizedWeights)
        print('result weights:',result)
        print('initial:', M.dot(w))
        print('integral:', M.dot(res.x))
        print('sumConstraint:', np.sum(res.x))
        print('stdConstraint:', np.std(res.x))
    return result, res.constr, res.fun, M.dot(w), M.dot(res.x)
def mapToSpherical(positions):
    """Convert Cartesian (x, y, z) rows to spherical (r, theta, phi) rows."""
    px, py, pz = positions[:,0], positions[:,1], positions[:,2]
    radius = torch.sqrt(px**2 + py**2 + pz**2)
    azimuth = torch.atan2(py, px)
    # eps keeps acos well-defined for points at the origin
    inclination = torch.acos(pz / (radius + 1e-7))
    return torch.vstack((radius, azimuth, inclination)).mT
def ballToCylinder(positions):
    """Map points from the unit ball onto the unit cylinder.

    Rows at (numerically) the origin stay zero; equatorial points
    (5/4 z^2 <= x^2 + y^2) use the shell branch, the remainder the
    polar-cap branch.
    """
    eps = 1e-7
    px, py, pz = positions[:,0], positions[:,1], positions[:,2]
    radius = torch.linalg.norm(positions, dim = 1)
    planar = torch.linalg.norm(positions[:,:2], dim = 1)
    height = torch.abs(pz)

    # Equatorial branch: project radially outward, stretch z by 3/2.
    shell = torch.vstack((px * radius / (planar + eps),
                          py * radius / (planar + eps),
                          3 / 2 * pz)).mT
    # Polar branch: scale xy, send z to +/- r.
    scale = torch.sqrt(3 * radius / (radius + height + eps))
    cap = torch.vstack((px * scale,
                        py * scale,
                        torch.sign(pz) * radius)).mT

    nearOrigin = radius < eps
    equatorial = torch.logical_and(torch.logical_not(nearOrigin), 5/4 * pz**2 <= px**2 + py**2)
    polar = torch.logical_and(torch.logical_not(nearOrigin), torch.logical_not(equatorial))

    result = torch.zeros_like(positions)
    result[equatorial] = shell[equatorial]
    result[polar] = cap[polar]
    return result
# debugPrint(cylinderPositions)
def cylinderToCube(positions):
    """Map points from the unit cylinder onto the cube.

    z passes through unchanged; (x, y) is remapped depending on whether the
    point lies on the z-axis, in the |x|-dominant wedge, or the
    |y|-dominant wedge.
    """
    eps = 1e-7
    px, py, pz = positions[:,0], positions[:,1], positions[:,2]
    planar = torch.linalg.norm(positions[:,:2], dim = 1)

    axis = torch.vstack((torch.zeros_like(px), torch.zeros_like(py), pz)).mT
    wedgeX = torch.vstack((torch.sign(px) * planar,
                           4. / np.pi * torch.sign(px) * planar * torch.atan(py/(px+eps)),
                           pz)).mT
    wedgeY = torch.vstack((4. / np.pi * torch.sign(py) * planar * torch.atan(px / (py + eps)),
                           torch.sign(py) * planar,
                           pz)).mT

    onAxis = torch.logical_and(torch.abs(px) < eps, torch.abs(py) < eps)
    xDominant = torch.logical_and(torch.logical_not(onAxis), torch.abs(py) <= torch.abs(px))
    yDominant = torch.logical_and(torch.logical_not(onAxis), torch.logical_not(xDominant))

    result = torch.zeros_like(positions)
    result[onAxis] = axis[onAxis]
    result[xDominant] = wedgeX[xDominant]
    result[yDominant] = wedgeY[yDominant]
    return result
def mapToSpherePreserving(positions):
    """Volume-preserving mapping: ball -> cylinder -> cube, composed."""
    return cylinderToCube(ballToCylinder(positions))
class RbfConv(MessagePassing):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        dim: int = 2,
        size: Union[int, List[int]] = [4, 4],
        coordinateMapping : str = 'cartesian',
        rbf : Union[int, List[int]] = 'linear',
        aggr: str = 'sum',
        linearLayer: bool = False,
        feedThrough: bool = False,
        # biasOffset: bool = False,
        preActivation = None,
        postActivation = None,
        bias = True,
        # initializer = torch.nn.init.xavier_uniform_,
        initializer = torch.nn.init.uniform_,
        batch_size = [16,16],
        windowFn = None,
        normalizeWeights = False,
        normalizationFactor = None,
        **kwargs
    ):
        """Continuous RBF convolution layer.

        Args:
            in_channels: input feature count (or a (source, target) pair).
            out_channels: output feature count.
            dim: dimensionality of the basis grid (1, 2 or 3).
            size: basis resolution per dimension (int or list of ints).
            coordinateMapping: 'cartesian', 'polar' or 'preserving'.
            rbf: basis-function name(s), one per dimension.
            aggr: MessagePassing aggregation ('sum', ...).
            linearLayer: add a learned linear "root" term on target features.
            feedThrough: add a skip connection of the target features.
            preActivation / postActivation: names of torch.nn.functional
                activations applied before accumulation / after everything.
            bias: learn an additive per-channel bias.
            initializer: weight init function, called as init(w, -0.05, 0.05).
            batch_size: (edge, feature) batch sizes forwarded to cutlass.
            windowFn: optional radial window weighting per edge.
            normalizeWeights: numerically normalize 2D basis weights on init.
            normalizationFactor: unused here; kept for interface stability.
        """
        super().__init__(aggr=aggr, **kwargs)
        # self.aggr = aggr
        # assert self.aggr in ['add', 'mean', 'max', None]
        # self.flow = flow
        # assert self.flow in ['source_to_target', 'target_to_source']
        # self.node_dim = node_dim
        # self.inspector = Inspector(self)
        # self.inspector.inspect(self.message)
        # self.inspector.inspect(self.aggregate, pop_first=True)
        # self.inspector.inspect(self.message_and_aggregate, pop_first=True)
        # self.inspector.inspect(self.update, pop_first=True)
        # Mirror torch_geometric MessagePassing internals so the custom
        # __collect__/propagate2 below can reuse them.
        self.__user_args__ = self.inspector.keys(
            ['message', 'aggregate', 'update']).difference(self.special_args)
        self.__fused_user_args__ = self.inspector.keys(
            ['message_and_aggregate', 'update']).difference(self.special_args)
        # Support for "fused" message passing.
        self.fuse = self.inspector.implements('message_and_aggregate')
        # Support for GNNExplainer.
        self.__explain__ = False
        self.__edge_mask__ = None
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.dim = dim
        self.coordinateMapping = coordinateMapping
        # print('coordinate mapping', self.coordinateMapping)
        # NOTE(review): `repeat` is presumably torch_geometric's list-repeat
        # helper (returns a list), not itertools.repeat — verify the import.
        self.size = size if isinstance(size, list) else repeat(size, dim)
        self.rbfs = rbf if is_list_of_strings(rbf) else [rbf] * dim
        # Polar mapping makes the angular (second) axis periodic.
        self.periodic = [False, False] if coordinateMapping != 'polar' else [False,True]
        self.initializer = initializer
        self.batchSize = batch_size
        self.feedThrough = feedThrough
        self.preActivation = None if preActivation is None else getattr(nn.functional, preActivation)
        self.postActivation = None if postActivation is None else getattr(nn.functional, postActivation)
        self.windowFn = windowFn
        self.use_bias = bias
        # print('Creating layer %d -> %d features'%( in_channels, out_channels))
        # print('For dimensionality: %d'% dim)
        # print('Parameters:')
        # print('\tRBF: ', self.rbfs)
        # print('\tSize: ', self.size)
        # print('\tPeriodic: ', self.periodic)
        if isinstance(in_channels, int):
            in_channels = (in_channels, in_channels)
        if self.use_bias:
            self.bias = Parameter(torch.zeros(out_channels))
        else:
            self.register_parameter('bias', None)
        # Total number of basis coefficients per (in, out) channel pair.
        self.K = torch.tensor(self.size).prod().item()
        if dim == 1:
            self.weight = Parameter(torch.Tensor(self.size[0], in_channels[0], out_channels))
        if dim == 2:
            self.weight = Parameter(torch.Tensor(self.size[0],self.size[1], in_channels[0], out_channels))
        if dim == 3:
            self.weight = Parameter(torch.Tensor(self.size[0],self.size[1], self.size[2], in_channels[0], out_channels))
        initializer(self.weight, -0.05, 0.05)
        # Spectral bases benefit from exponentially decaying higher-order
        # coefficients at init.
        # NOTE(review): these loops use range(self.dim); it looks like they
        # should iterate over the basis size of that axis — confirm intent.
        with torch.no_grad():
            if self.rbfs[0] in ['chebyshev', 'fourier', 'gabor']:
                for i in range(self.dim):
                    if len(self.rbfs) == 1:
                        self.weight[i] *= np.exp(-i)
                    if len(self.rbfs) == 2:
                        self.weight[i,:] *= np.exp(-i)
                    if len(self.rbfs) == 3:
                        self.weight[i,:,:] *= np.exp(-i)
            if len(self.rbfs) > 1 and self.rbfs[1] in ['chebyshev', 'fourier', 'gabor']:
                for i in range(self.dim):
                    if len(self.rbfs) == 2:
                        self.weight[:,i] *= np.exp(-i)
                    if len(self.rbfs) == 3:
                        self.weight[:,i,:] *= np.exp(-i)
            if len(self.rbfs) > 2 and self.rbfs[2] in ['chebyshev', 'fourier', 'gabor']:
                for i in range(self.dim):
                    self.weight[:,:,i] = self.weight[:,:,i] * np.exp(-i)
        if normalizeWeights:
            if len(self.rbfs) == 2:
                print('Starting normalization')
                for i in range(in_channels[0]):
                    for j in range(out_channels):
                        newWeights, _, _, init, final = optimizeWeights2D(weights = self.weight[:,:,i,j].detach(),\
                                    basis = self.rbfs, periodicity = self.periodic, \
                                    nmc = 32*1024, targetIntegral = 1/in_channels[0], \
                                    windowFn = self.windowFn, verbose = False)
                        self.weight[:,:,i,j] = newWeights
                        print('Normalizing [%2d x %2d]: %1.4e => %1.4e (target: %1.4e)' %(i,j, init, final, 1/in_channels[0]))
                        # self.weight[:,:,i,j] /= in_channels[0]
                print('Done with normalization\n------------------------------------------')
        self.root_weight = linearLayer
        if linearLayer:
            self.lin = Linear(in_channels[1], out_channels, bias=self.use_bias,
                              weight_initializer= 'uniform')
        # if biasOffset:
        #     self.bias = Parameter(torch.Tensor(out_channels))
        # else:
        #     self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        """Reset the optional linear (root) layer and zero the bias."""
        # if not isinstance(self.weight, nn.UninitializedParameter):
        #     size = self.weight.size(0) * self.weight.size(1)
        #     self.initializer(self.weight)
        if self.root_weight:
            self.lin.reset_parameters()
        # NOTE(review): `zeros` is presumably torch_geometric.nn.inits.zeros,
        # imported elsewhere in this file; it is a no-op when self.bias is None.
        zeros(self.bias)
def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,
edge_attr: OptTensor = None, size: Size = None) -> Tensor:
# print('x', x[0].shape, x)
# print('edge_index', edge_index.shape, edge_index)
# print('edge_attr', edge_attr.shape, edge_attr)
# print('Size', Size)
# if args.cutlad:
# out = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=size)
# else:
x_i, x_j = x
edge_weights = None
if not(self.windowFn is None):
edge_weights = self.windowFn(torch.linalg.norm(edge_attr, axis = 1))
positions = torch.hstack((edge_attr, torch.zeros(edge_attr.shape[0],1, device = edge_attr.device, dtype = edge_attr.dtype)))
if self.coordinateMapping == 'polar':
spherical = mapToSpherical(positions)
mapped = torch.vstack((spherical[:,0] * 2. - 1.,spherical[:,1] / np.pi)).mT
if self.coordinateMapping == 'cartesian':
mapped = edge_attr
if self.coordinateMapping == 'preserving':
cubePositions = mapToSpherePreserving(positions)
mapped = torch.vstack((cubePositions[:,0],cubePositions[:,1] / np.pi)).mT
convolution = cutlass.apply
out = convolution(edge_index, x_i, x_j, mapped, edge_weights, self.weight,
x_i.shape[0], self.node_dim,
self.size , self.rbfs, self.periodic,
self.batchSize[0],self.batchSize[1])
# out = self.propagate2(edge_index, x=x, edge_attr=edge_attr, size=size)
# print('out: ', out.shape, out)
x_r = x[1]
if self.preActivation is not None:
out = self.preActivation(out)
if x_r is not None and self.root_weight:
out = out + self.lin(x_r) if self.preActivation is not None else self.preActivation(self.lin(x_r))
if self.bias is not None:
out = out + self.bias
if self.feedThrough:
out = out + x_r if self.preActivation is not None else self.preActivation(x_r)
if self.postActivation is not None:
out = self.postActivation(out)
return out
def message(self, x_j: Tensor, edge_attr: Tensor) -> Tensor:
if self.dim == 1:
u = evalBasisFunction(self.size[0], edge_attr[:,0], which=self.rbfs[0], periodic = self.periodic[0]).T
return torch.einsum('nu, uio,ni -> no',u,self.weight, x_j)
if self.dim == 2:
u = evalBasisFunction(self.size[0], edge_attr[:,0], which=self.rbfs[0], periodic = self.periodic[0]).T
v = evalBasisFunction(self.size[1], edge_attr[:,1], which=self.rbfs[1], periodic = self.periodic[1]).T
return torch.einsum('nu, nv, uvio,ni -> no',u,v,self.weight, x_j)
if self.dim == 3:
u = evalBasisFunction(self.size[0], edge_attr[:,0], which=self.rbfs[0], periodic = self.periodic[0]).T
v = evalBasisFunction(self.size[1], edge_attr[:,1], which=self.rbfs[1], periodic = self.periodic[1]).T
w = evalBasisFunction(self.size[2], edge_attr[:,1], which=self.rbfs[1], periodic = self.periodic[2]).T
return torch.einsum('nu, nv, uvio,ni -> no',u,v,w,self.weight, x_j)
def __repr__(self) -> str:
return (f'{self.__class__.__name__}({self.in_channels}, '
f'{self.out_channels}, dim={self.dim})')
    def __check_input__(self, edge_index, size):
        """Validate `edge_index` and derive the (source, target) node counts.

        Mirrors torch_geometric's MessagePassing input validation: accepts a
        [2, E] long tensor plus an optional (n_src, n_dst) pair, or a
        SparseTensor whose sparse sizes imply the node counts.
        """
        the_size: List[Optional[int]] = [None, None]
        if isinstance(edge_index, Tensor):
            assert edge_index.dtype == torch.long
            assert edge_index.dim() == 2
            assert edge_index.size(0) == 2
            if size is not None:
                the_size[0] = size[0]
                the_size[1] = size[1]
            return the_size
        elif isinstance(edge_index, SparseTensor):
            if self.flow == 'target_to_source':
                raise ValueError(
                    ('Flow direction "target_to_source" is invalid for '
                     'message propagation via `torch_sparse.SparseTensor`. If '
                     'you really want to make use of a reverse message '
                     'passing flow, pass in the transposed sparse tensor to '
                     'the message passing module, e.g., `adj_t.t()`.'))
            # adj_t is stored transposed: sparse_size(1) is the source count.
            the_size[0] = edge_index.sparse_size(1)
            the_size[1] = edge_index.sparse_size(0)
            return the_size
        raise ValueError(
            ('`MessagePassing.propagate` only supports `torch.LongTensor` of '
             'shape `[2, num_messages]` or `torch_sparse.SparseTensor` for '
             'argument `edge_index`.'))
    def __collect__(self, args, edge_index, size, kwargs):
        """Gather per-edge views of the user args for message computation.

        Mirrors torch_geometric's MessagePassing.__collect__: arguments named
        `*_i`/`*_j` are lifted to edge granularity via edge_index; everything
        else is passed through unchanged.
        """
        i, j = (1, 0) if self.flow == 'source_to_target' else (0, 1)
        out = {}
        for arg in args:
            if arg[-2:] not in ['_i', '_j']:
                # NOTE(review): `Parameter.empty` looks like it was meant to
                # be inspect.Parameter.empty (the sentinel used by PyG) —
                # confirm which `Parameter` is imported at file scope.
                out[arg] = kwargs.get(arg, Parameter.empty)
            else:
                # `_j` selects source-side data, `_i` target-side data.
                dim = 0 if arg[-2:] == '_j' else 1
                data = kwargs.get(arg[:-2], Parameter.empty)
                if isinstance(data, (tuple, list)):
                    assert len(data) == 2
                    if isinstance(data[1 - dim], Tensor):
                        self.__set_size__(size, 1 - dim, data[1 - dim])
                    data = data[dim]
                if isinstance(data, Tensor):
                    self.__set_size__(size, dim, data)
                    data = self.__lift__(data, edge_index,
                                         j if arg[-2:] == '_j' else i)
                out[arg] = data
        if isinstance(edge_index, Tensor):
            out['adj_t'] = None
            out['edge_index'] = edge_index
            out['edge_index_i'] = edge_index[i]
            out['edge_index_j'] = edge_index[j]
            out['ptr'] = None
        elif isinstance(edge_index, SparseTensor):
            out['adj_t'] = edge_index
            out['edge_index'] = None
            out['edge_index_i'] = edge_index.storage.row()
            out['edge_index_j'] = edge_index.storage.col()
            out['ptr'] = edge_index.storage.rowptr()
            out['edge_weight'] = edge_index.storage.value()
            out['edge_attr'] = edge_index.storage.value()
            out['edge_type'] = edge_index.storage.value()
        out['index'] = out['edge_index_i']
        out['size'] = size
        out['size_i'] = size[1] or size[0]
        out['size_j'] = size[0] or size[1]
        out['dim_size'] = out['size_i']
        return out
    def propagate2(self, edge_index: Adj, size: Size = None, **kwargs):
        """Custom propagate that routes message passing through cutlass.

        Largely a stripped copy of MessagePassing.propagate with the
        message/aggregate steps replaced by the fused cutlass convolution.
        NOTE(review): the decomposed-layers bookkeeping is set up but never
        consumed inside the loop, and `out` is simply the last iteration's
        result — confirm decomposed_layers > 1 is unsupported here.
        """
        decomposed_layers = 1 if self.explain else self.decomposed_layers
        for hook in self._propagate_forward_pre_hooks.values():
            res = hook(self, (edge_index, size, kwargs))
            if res is not None:
                edge_index, size, kwargs = res
        size = self.__check_input__(edge_index, size)
        if decomposed_layers > 1:
            user_args = self.__user_args__
            decomp_args = {a[:-2] for a in user_args if a[-2:] == '_j'}
            decomp_kwargs = {
                a: kwargs[a].chunk(decomposed_layers, -1)
                for a in decomp_args
            }
            decomp_out = []
        for i in range(decomposed_layers):
            # if decomposed_layers > 1:
            #     for arg in decomp_args:
            #         kwargs[arg] = decomp_kwargs[arg][i]
            # coll_dict = self.__collect__(self.__user_args__, edge_index,
            #                              size, kwargs)
            # msg_kwargs = self.inspector.distribute('message', coll_dict)
            # for hook in self._message_forward_pre_hooks.values():
            #     res = hook(self, (msg_kwargs, ))
            #     if res is not None:
            #         msg_kwargs = res[0] if isinstance(res, tuple) else res
            #
            # aggr_kwargs = self.inspector.distribute('aggregate', coll_dict)
            convolution = cutlass.apply
            # NOTE(review): inFeatures is assigned but the call below reads
            # kwargs['x'][0] directly.
            inFeatures = kwargs['x'][0]
            edge_weights = None
            if not(self.windowFn is None):
                edge_weights = self.windowFn(torch.linalg.norm(kwargs['edge_attr'], axis = 1))
            # print(torch.linalg.norm(kwargs['edge_attr'], axis = 1))
            # print(edge_weights.shape)
            # print(edge_weights)
            # print(inFeatures.shape)
            # inFeatures = inFeatures * window[:,None]
            # print(inFeatures.shape)
            # Lift 2D edge offsets to 3D for the sphere/cube mappings.
            positions = torch.hstack((kwargs['edge_attr'], torch.zeros(kwargs['edge_attr'].shape[0],1, device = kwargs['edge_attr'].device, dtype = kwargs['edge_attr'].dtype)))
            if self.coordinateMapping == 'polar':
                spherical = mapToSpherical(positions)
                mapped = torch.vstack((spherical[:,0] * 2. - 1.,spherical[:,1] / np.pi)).mT
            if self.coordinateMapping == 'cartesian':
                mapped = kwargs['edge_attr']
            if self.coordinateMapping == 'preserving':
                cubePositions = mapToSpherePreserving(positions)
                mapped = torch.vstack((cubePositions[:,0],cubePositions[:,1] / np.pi)).mT
            out = convolution(edge_index, kwargs['x'][0], kwargs['x'][1], mapped, edge_weights, self.weight,
                              size[0], self.node_dim,
                              self.size , self.rbfs, self.periodic,
                              self.batchSize[0],self.batchSize[1])
            # for hook in self._aggregate_forward_hooks.values():
            #     res = hook(self, (aggr_kwargs, ), out)
            #     if res is not None:
            #         out = res
            # update_kwargs = self.inspector.distribute('update', coll_dict)
            # out = self.update(out, **update_kwargs)
            # if decomposed_layers > 1:
            #     decomp_out.append(out)
        # if decomposed_layers > 1:
        #     out = torch.cat(decomp_out, dim=-1)
        # for hook in self._propagate_forward_hooks.values():
        #     res = hook(self, (edge_index, size, kwargs), out)
        #     if res is not None:
        #         out = res
return out | wi-re/spheric_density_demo | rbfConv.py | rbfConv.py | py | 24,177 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "typing.Any",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "math.sqrt",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number":... |
37215946531 | from django.core.management.base import BaseCommand
from academ.models import Apartment, Building, ImageGallery
import json
import os
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
def check_apartment(building):
    """Check whether *building* already has apartments in the database.

    @param building: the building to check
    @return: 1 if at least one apartment exists, otherwise 0
    """
    try:
        apartments = Apartment.objects.filter(building=building).first()
        if apartments:
            # Reuse the row we already fetched instead of issuing a second,
            # identical query just to print it.
            print(apartments)
            return 1
        print('No apartments found...')
    except Apartment.DoesNotExist:
        # Kept for safety, although .first() returns None rather than raising.
        print('No apartments found...')
    return 0
def last_floor_apartments_generator(apartment_data, building):
    """Create a 14th-floor apartment from *apartment_data* unless one with
    the same number already exists."""
    number = apartment_data['number']
    if Apartment.objects.filter(number=number):
        print(f"Apartment with number: '{number}' already exists!")
        return
    print(f"Generating apartment with number: '{number}'...")
    Apartment(
        building=building,
        floor=14,
        area=apartment_data['area'],
        number=number,
        section=apartment_data['section'],
        type=apartment_data['apartment_type'],
    ).save()
def apartment_generator(params, building):
    """
    Generate apartments from the parameter set and save them to the database.
    @param params: parameter set (number, delta, area, section, apartment_type)
    @param building: the building the apartments belong to
    """
    number = params['number']
    # Section 3 starts on a lower floor than the other sections.
    if params['section'] != 3:
        start = 4
    else:
        start = 2
    for i in range(start, building.floors):
        if i != start:
            # Apartment numbers advance by a fixed stride per floor.
            number += params['delta']
        apartment_instance = Apartment.objects.filter(number=number)
        if apartment_instance:
            print(f"Apartment with number: '{number}' already exists!")
            pass
        else:
            print(f"Generating apartment with number: '{number}'...")
            apartment = Apartment(building=building, floor=i, area=params['area'],
                                  number=number, section=params['section'], type=params['apartment_type'])
            apartment.save()
def gallery_generator(building):
    """Attach a placeholder gallery image to *building*, if none exists yet."""
    gallery_instance = ImageGallery.objects.filter(building=building).first()
    if gallery_instance:
        print(gallery_instance, ' already exist!')
        return
    filepath = f'{settings.MEDIA_ROOT}/pictures/building.jpg'
    # Bug fix: the bare open(...).read() leaked the file handle; read via a
    # context manager so it is closed deterministically.
    with open(filepath, 'rb') as image_file:
        content = image_file.read()
    uploaded_image = SimpleUploadedFile(content=content,
                                        content_type="image/jpg",
                                        name='image.jpg')
    instance = ImageGallery(building=building, image=uploaded_image)
    instance.save()
class Command(BaseCommand):
    """Management command that seeds demo Building/Apartment/Gallery rows."""

    def handle(self, *args, **options):
        address = 'Academ'
        # Get or lazily create the demo building.
        try:
            building = Building.objects.get(address=address)
        except Building.DoesNotExist:
            building = Building(address=address, floors=14, sections=3)
            building.save()
        filepath = os.path.abspath(os.path.dirname(__file__))
        # Bug fix: the JSON fixture files were opened and never closed; use
        # context managers so the handles are released.
        with open(f'{filepath}/apartment_data.json') as json_data:
            params_list = json.load(json_data)
        with open(f'{filepath}/last_floor_data.json') as json_data_last_floor:
            params_list_last_floor = json.load(json_data_last_floor)
        print('\nGenerating apartments up to 14th floor...\n')
        for element in params_list:
            apartment_generator(element, building)
        print('\nGenerating 14th floor apartments...\n')
        for element_last_floor in params_list_last_floor:
            last_floor_apartments_generator(element_last_floor, building)
        print('\nGenerating gallery instances...\n')
        gallery_generator(building)
| pepegaFace/freedom | freedom/academ/management/commands/content_generator.py | content_generator.py | py | 4,319 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "academ.models.Apartment.objects.filter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "academ.models.Apartment.objects",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "academ.models.Apartment",
"line_number": 17,
"usage_type": "n... |
21167613561 | from application import app
from flask import render_template
from application.models import *
from flask_restplus import Api, Resource, fields
from flask.ext.restplus.reqparse import RequestParser
from flask.ext.restplus.inputs import date
# Flask-RESTPlus API root and the single namespace all endpoints hang off.
api = Api(app, version='1.0', title='ElesVotam API')
ns = api.namespace('elesvotam', description='ElesVotam operations')
# Query-string parser and marshalling schema for a single Votacao (vote).
votacao_parser = RequestParser()
votacao_parser.add_argument('votacaoid', type=int)
votacao_fields = {'votacaoid': fields.Integer(),
                  'sessao_id': fields.Integer(),
                  'tipo': fields.String(),
                  'materia': fields.String(),
                  'ementa': fields.String(),
                  'resultado': fields.String(),
                  'presentes': fields.String(),
                  'sim': fields.Integer(),
                  'nao': fields.Integer(),
                  'abstencao': fields.Integer(),
                  'branco': fields.Integer(),
                  'notas_rodape': fields.String(),
                  }
votacao_model = api.model('Votacao', votacao_fields)
@ns.route('/votacao')
class ElesVotamVotacaosApi(Resource):
    """Fetch a single vote (votacao) by its `votacaoid`."""

    @api.doc(parser=votacao_parser)
    @api.marshal_with(votacao_model)
    def get(self):
        args = votacao_parser.parse_args()
        votacaoid = args['votacaoid']
        # .one() raises if the id is unknown; surfaces as a server error.
        votacao = db.session.query(Votacao).filter(Votacao.votacaoid == votacaoid).one()
        return votacao
# Parser and marshalling schema for sessions; a session embeds its votes.
sessao_parser = RequestParser()
sessao_parser.add_argument('sessaoid', type=int)
sessao_parser.add_argument('data', type=date)
sessao_fields = {'id': fields.Integer(),
                 'nome': fields.String(),
                 'data': fields.Date(),
                 'votacoes': fields.Nested(votacao_model)
                 }
sessao_model = api.model('sessao', sessao_fields)
@ns.route('/sessao')
class ElesVotamSessaosApi(Resource):
    """Fetch one session by id, or all sessions on a given date."""

    @api.doc(parser=sessao_parser)
    @api.marshal_with(sessao_model)
    def get(self):
        args = sessao_parser.parse_args()
        sessaoid = args['sessaoid']
        sessao_date = args['data']
        if not sessao_date:
            # Single session by id, with its votes attached.
            sessao = db.session.query(Sessao).filter(Sessao.id == sessaoid).one()
            votacoes = db.session.query(Votacao).filter(Votacao.sessao_id == sessao.id).all()
            sessao.votacoes = votacoes
        else:
            # All sessions on the date, each with its votes attached.
            sessao_date = sessao_date.strftime('%Y-%m-%d')
            sessao = db.session.query(Sessao).filter(Sessao.data == sessao_date).all()
            for i,s in enumerate(sessao):
                votacoes = db.session.query(Votacao).filter(Votacao.sessao_id == s.id).all()
                sessao[i].votacoes = votacoes
        return sessao
# Marshalling schema for political parties.
partido_fields = {'id': fields.Integer(),
                  'nome': fields.String(),
                  }
partido_model = api.model('partido', partido_fields)
@ns.route('/partidos')
class ElesVotamPartidosApi(Resource):
    """List every political party."""

    @api.marshal_with(partido_model)
    def get(self):
        partidos = db.session.query(Partido).all()
        return partidos
# Parser (party name) and marshalling schema for city councillors.
partido_parser = RequestParser()
partido_parser.add_argument('nome', type=str)
vereador_fields = {'id': fields.Integer(),
                   'nome': fields.String(),
                   'idparlamentar': fields.String()
                   }
vereador_model = api.model('vereador', vereador_fields)
@ns.route('/partidoVereadores')
class ElesVotamPartidoVereadoresApi(Resource):
    """List the councillors (vereadores) belonging to a named party."""

    @api.doc(parser=partido_parser)
    @api.marshal_with(vereador_model)
    def get(self):
        args = partido_parser.parse_args()
        partido_nome = args['nome']
        # .one() raises if the party name is unknown.
        partido = db.session.query(Partido).filter(Partido.nome == partido_nome).one()
        vereadores = db.session.query(Vereador).filter(Vereador.partido_id == partido.id).all()
        return vereadores
# Parser (vote id) and schema for individual councillor votes.
votacao_votos_parser = RequestParser()
votacao_votos_parser.add_argument('votacao_id', type=int)
voto_fields = {'id': fields.Integer(),
               'vereador': fields.Nested(vereador_model),
               'valor': fields.String()
               }
voto_model = api.model('voto', voto_fields)
@ns.route('/votacaoVotos')
class ElesVotamVotacaoVotosApi(Resource):
    """List the individual councillor votes cast in a given votacao."""

    @api.doc(parser=votacao_votos_parser)
    @api.marshal_with(voto_model)
    def get(self):
        args = votacao_votos_parser.parse_args()
        votacao_id = args['votacao_id']
        votacao = db.session.query(Votacao).filter(Votacao.votacaoid == votacao_id).one()
        votos = db.session.query(Voto).filter(Voto.votacao_id == votacao.id).all()
        return votos
@app.route('/')
@app.route('/index/')
def index():
    """Render the landing page."""
    return render_template('info/index.html', title='Flask-Bootstrap')
@app.route('/hello/<username>/')
def hello_username(username):
    """Render a greeting page personalised for *username*."""
    return render_template('info/hello.html', title="Flask-Bootstrap, Hi %s"
                           % (username), username=username)
| okfn-brasil/elesvotam | application/manager.py | manager.py | py | 4,853 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "flask_restplus.Api",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "application.app",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "flask.ext.restplus.reqparse.RequestParser",
"line_number": 11,
"usage_type": "call"
},
{
"ap... |
5743766982 | from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
class Vader:
    """Binary sentiment classifier backed by VADER compound scores."""

    def __init__(self):
        self.analyzer = SentimentIntensityAnalyzer()

    def AnalyzeSentence(self, sentence):
        """Return 1 for non-negative compound sentiment, otherwise 0.

        Bug fix: the original if/elif could fall through and return None;
        the two branches are now exhaustive.
        """
        vs = self.analyzer.polarity_scores(sentence)
        if vs['compound'] >= 0:
            return 1
        return 0

    def getVaderScore(self, testData):
        """Return the fraction of (sentence, label) pairs classified correctly.

        testData items are indexable as (sentence, expected_label).
        Raises ZeroDivisionError for empty testData (as before).
        """
        # The original kept a totalCount that was incremented but never used.
        successCount = sum(
            1 for item in testData if self.AnalyzeSentence(item[0]) == item[1]
        )
        return successCount / len(testData)
{
"api_name": "vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer",
"line_number": 5,
"usage_type": "call"
}
] |
40152772858 | """Display a rotomap."""
import enum
import functools
import sys
import cv2
import numpy
import mel.lib.common
import mel.lib.fullscreenui
import mel.lib.image
import mel.rotomap.detectmoles
import mel.rotomap.mask
import mel.rotomap.moles
import mel.rotomap.tricolour
# Default radius (pixels) of the mask-painting brush.
DEFAULT_MASKER_RADIUS = 200

# BGR colour tuples used by the marker-drawing helpers below.
_WHITE = (255, 255, 255)
_BLACK = (0, 0, 0)
def draw_mole(image, x, y, colours):
    """Draw a canonical mole marker: white/black rings around a tricolour core."""
    cv2.circle(image, (x, y), 20, _WHITE, -1)
    cv2.circle(image, (x, y), 18, _BLACK, -1)
    # Three concentric colour rings, radii 16, 12, 8.
    for step, colour in enumerate(colours[:3]):
        cv2.circle(image, (x, y), 16 - 4 * step, colour, -1)
def draw_non_canonical_mole(image, x, y, colours):
    """Draw a mole marker on a square backing to flag a non-canonical mole."""
    for half_size, colour in ((20, _WHITE), (18, _BLACK)):
        cv2.rectangle(
            image,
            (x - half_size, y - half_size),
            (x + half_size, y + half_size),
            colour,
            -1,
        )
    draw_mole(image, x, y, colours)
def draw_crosshair(image, x, y):
    """Draw a four-tick crosshair centred on (x, y), white under black."""
    inner, outer = 16, 24
    for thickness, colour in ((3, _WHITE), (2, _BLACK)):
        for dx, dy in ((1, 0), (0, -1), (-1, 0), (0, 1)):
            start = (x + inner * dx, y + inner * dy)
            end = (x + outer * dx, y + outer * dy)
            cv2.line(image, start, end, colour, thickness)
class Display(mel.lib.fullscreenui.ZoomableMixin):
    """Render a zoomable rotomap image with a caption strip underneath."""

    def __init__(self, screen):
        super().__init__()
        self._image_display = screen
        # Full window size in pixels, as (width, height).
        self._rect = numpy.array((screen.width, screen.height))
        title_height, _ = mel.lib.image.measure_text_height_width("abc")
        self._spacer_height = 10
        # Space left for the image after reserving the caption strip.
        self._image_rect = self._rect - numpy.array(
            (0, title_height + self._spacer_height)
        )
        self._title = ""

    def show_current(self, image, overlay):
        """Render `image` (optionally post-processed by `overlay`) plus title."""
        self.zoomable_transform_update(image, self._image_rect)
        image = self.zoomable_transform_render()
        if overlay is not None:
            image = overlay(image, self._transform)
        caption = mel.lib.image.render_text_as_image(self._title)
        image = mel.lib.image.montage_vertical(
            self._spacer_height, image, caption
        )
        self._image_display.show_opencv_image(image)

    def set_title(self, title):
        """Set the caption text shown below the image."""
        self._title = title
def make_composite_overlay(*overlays):
    """Compose *overlays* into a single overlay callable.

    Each overlay is a callable ``(image, transform) -> image``; the returned
    callable applies them left to right, feeding each result into the next.

    :*overlays: The overlay callables to composite.
    :returns: A function which will composite *overlays and return the image.
    """

    def composite(image, transform):
        for overlay in overlays:
            image = overlay(image, transform)
        return image

    return composite
class StatusOverlay:
    """Overlay that paints a status-text banner into the image's top-left."""

    def __init__(self):
        # Empty text means the overlay is a no-op.
        self.text = ""

    def __call__(self, image, transform):
        if self.text:
            text_image = mel.lib.image.render_text_as_image(self.text)
            mel.lib.common.copy_image_into_image(text_image, image, 0, 0)
        return image
class MoleMarkerOverlay:
    """Overlay that draws per-mole tricolour markers, with optional fading."""

    def __init__(self, uuid_to_tricolour):
        self._is_showing_markers = True
        self._is_faded_markers = True
        self._highlight_uuid = None
        self._uuid_to_tricolour = uuid_to_tricolour
        if self._uuid_to_tricolour is None:
            # Default colour scheme derives the tricolour from uuid digits.
            self._uuid_to_tricolour = (
                mel.rotomap.tricolour.uuid_to_tricolour_first_digits
            )
        # Caller assigns the current image's mole list before rendering.
        self.moles = None

    def toggle_markers(self):
        """Show or hide all markers."""
        self._is_showing_markers = not self._is_showing_markers

    def set_highlight_uuid(self, highlight_uuid):
        """Choose the mole uuid to crosshair (None for no highlight)."""
        self._highlight_uuid = highlight_uuid

    def toggle_faded_markers(self):
        """Toggle blending the markers into the image at reduced opacity."""
        self._is_faded_markers = not self._is_faded_markers

    def __call__(self, image, transform):
        if not self._is_showing_markers:
            return image
        highlight_mole = None
        if self._highlight_uuid is not None:
            for m in self.moles:
                if m["uuid"] == self._highlight_uuid:
                    highlight_mole = m
                    break
        # When fading, draw onto a copy and blend it back afterwards.
        marker_image = image
        if self._is_faded_markers:
            marker_image = image.copy()
        for mole in self.moles:
            x, y = transform.imagexy_to_transformedxy(mole["x"], mole["y"])
            if mole is highlight_mole:
                draw_crosshair(marker_image, x, y)
            colours = self._uuid_to_tricolour(mole["uuid"])
            # Confirmed moles get a round marker, unconfirmed a squared one.
            if mole[mel.rotomap.moles.KEY_IS_CONFIRMED]:
                draw_mole(marker_image, x, y, colours)
            else:
                draw_non_canonical_mole(marker_image, x, y, colours)
        if self._is_faded_markers:
            image = cv2.addWeighted(image, 0.75, marker_image, 0.25, 0.0)
        return image
class MarkedMoleOverlay:
    """An overlay to make marked moles obvious, for checking mark positions."""

    def __init__(self):
        # Caller assigns the current image's mole list before rendering.
        self.moles = None
        self._highlight_uuid = None
        # True -> accentuated view; False -> plain blue markers.
        self.is_accentuate_marked_mode = False

    def set_highlight_uuid(self, highlight_uuid):
        """Choose the mole uuid to crosshair (None for no highlight)."""
        self._highlight_uuid = highlight_uuid

    def __call__(self, image, transform):
        if self.is_accentuate_marked_mode:
            return self._draw_accentuated(image, transform)
        else:
            return self._draw_markers(image, transform)

    def _draw_accentuated(self, image, transform):
        # Reveal the moles that have been marked, whilst still showing
        # markers. This is good for verifying that markers are actually
        # positioned on moles.
        mask_radius = 50
        image = image.copy() // 2
        mask = numpy.zeros((*image.shape[:2], 1), numpy.uint8)
        for mole in self.moles:
            x, y = transform.imagexy_to_transformedxy(mole["x"], mole["y"])
            cv2.circle(mask, (x, y), mask_radius, 255, -1)
        # Restore full brightness only inside the circles.
        masked_faded = cv2.bitwise_and(image, image, mask=mask)
        image = cv2.add(masked_faded, image)
        highlight_mole = None
        if self._highlight_uuid is not None:
            for m in self.moles:
                if m["uuid"] == self._highlight_uuid:
                    highlight_mole = m
                    break
        for mole in self.moles:
            x, y = transform.imagexy_to_transformedxy(mole["x"], mole["y"])
            kind = mole.get("kind", None)
            looks_like = mole.get("looks_like", None)
            # Ring colour encodes (confirmed, kind, looks_like);
            # dim blue means unclassified.
            colour = (128, 0, 0)
            if mole[mel.rotomap.moles.KEY_IS_CONFIRMED]:
                colour = (255, 0, 0)
            if kind == "mole":
                if looks_like == "mole":
                    colour = (255, 255, 255)
                elif looks_like == "non-mole":
                    colour = (255, 255, 0)
                elif looks_like == "unsure":
                    colour = (255, 0, 128)
                else:
                    raise Exception(f"Mole looks_like is invalid: {mole}")
            elif kind == "non-mole":
                if looks_like == "mole":
                    colour = (0, 255, 255)
                elif looks_like == "non-mole":
                    colour = (0, 0, 255)
                elif looks_like == "unsure":
                    colour = (128, 0, 255)
                else:
                    raise Exception(f"Mole looks_like is invalid: {mole}")
            cv2.circle(image, (x, y), mask_radius, colour, 2)
            if mole is highlight_mole:
                draw_crosshair(image, x, y)
        return image

    def _draw_markers(self, image, transform):
        # Hide the moles that have been marked, showing markers
        # distinctly from moles. This is good for marking moles that
        # haven't been marked, without worrying about the ones that
        # have been marked.
        for mole in self.moles:
            x, y = transform.imagexy_to_transformedxy(mole["x"], mole["y"])
            draw_mole(image, x, y, [[255, 0, 0], [255, 128, 128], [255, 0, 0]])
        return image
class BoundingAreaOverlay:
    """An overlay to show the bounding area, if any."""

    def __init__(self):
        # Ellipse parameters as understood by mel.lib.ellipsespace.Transform.
        self.bounding_box = None

    def __call__(self, image, transform):
        # NOTE(review): `//=` on a numpy array darkens `image` in place, so
        # the caller's array is mutated even when bounding_box is None —
        # confirm this is intended (other overlays copy first).
        image //= 2
        if self.bounding_box is not None:
            color = (0, 0, 255)
            size = 2
            space = mel.lib.ellipsespace.Transform(self.bounding_box)

            def toimage(point):
                # Ellipse-space -> image -> transformed window coordinates.
                point = space.from_space((point))
                point = transform.imagexy_to_transformedxy(*point)
                return point

            # Outline of the unit square in ellipse space.
            border = [
                toimage((-1, -1)),
                toimage((1, -1)),
                toimage((1, 1)),
                toimage((-1, 1)),
                toimage((-1, -1)),
            ]
            border = numpy.array(border)
            # A small asymmetric figure marking the centre and orientation.
            centre = [
                toimage((0, 0.1)),
                toimage((0, -0.1)),
                toimage((0.05, 0)),
                toimage((0.1, 0)),
                toimage((-0.1, 0)),
                toimage((0, 0)),
                toimage((0, 0.1)),
            ]
            centre = numpy.array(centre)
            cv2.drawContours(image, [border, centre], -1, color, size)
        return image
class EditorMode(enum.Enum):
    """Interaction modes for the rotomap Editor window."""

    edit_mole = 1
    edit_mask = 2
    bounding_area = 3
    mole_mark = 4
    debug_automole = 0
class Editor:
    def __init__(self, directory_list, screen):
        """Build per-directory mole data, wire up overlays, show first image."""
        self._uuid_to_tricolour = mel.rotomap.tricolour.UuidTriColourPicker()
        self.display = Display(screen)
        self.moledata_list = [MoleData(x.image_paths) for x in directory_list]
        self._mode = EditorMode.edit_mole
        self.moledata_index = 0
        self.moledata = self.moledata_list[self.moledata_index]
        # uuid of the mole currently being followed across images, if any.
        self._follow = None
        self._mole_overlay = MoleMarkerOverlay(self._uuid_to_tricolour)
        self.marked_mole_overlay = MarkedMoleOverlay()
        self.bounding_area_overlay = BoundingAreaOverlay()
        self._status_overlay = StatusOverlay()
        self.show_current()
        self.masker_radius = DEFAULT_MASKER_RADIUS
    def set_smaller_masker(self):
        """Halve the mask-painting brush radius."""
        self.masker_radius //= 2
    def set_larger_masker(self):
        """Double the mask-painting brush radius."""
        self.masker_radius *= 2
    def set_default_masker(self):
        """Restore the default mask-painting brush radius."""
        self.masker_radius = DEFAULT_MASKER_RADIUS
    def set_automoledebug_mode(self):
        """Switch to the automatic mole-detection debug view and redraw."""
        self._mode = EditorMode.debug_automole
        self.show_current()
    def set_editmole_mode(self):
        """Switch to mole-editing mode and redraw."""
        self._mode = EditorMode.edit_mole
        self.show_current()
    def set_editmask_mode(self):
        """Switch to skin-mask editing mode and redraw."""
        self._mode = EditorMode.edit_mask
        self.show_current()
    def set_boundingarea_mode(self):
        """Switch to bounding-area display mode and redraw."""
        self._mode = EditorMode.bounding_area
        self.show_current()
    def set_molemark_mode(self):
        """Switch to mole-marking mode and redraw."""
        self._mode = EditorMode.mole_mark
        self.show_current()
    def set_status(self, text):
        """Set the status banner text (shown on the next redraw)."""
        self._status_overlay.text = text
    def visit(self, visit_target_str):
        """Jump to a '<image path>:<mole uuid>' target across all rotomaps."""
        # Expect a string formatted like this:
        #
        #   path/to/jpg:uuid
        #
        # Anything after the expected bits is ignored.
        #
        path, visit_uuid, *_ = visit_target_str.split(":")
        print(path, visit_uuid)
        for _ in range(len(self.moledata_list)):
            if self.moledata.try_jump_to_path(str(path)):
                for m in self.moledata.moles:
                    if m["uuid"] == visit_uuid:
                        self.moledata.get_image()
                        self._follow = visit_uuid
                        self._mole_overlay.set_highlight_uuid(self._follow)
                        self.marked_mole_overlay.set_highlight_uuid(
                            self._follow
                        )
                        self.show_zoomed_display(m["x"], m["y"])
                        return
                # Image found but the uuid wasn't; just show the image.
                self.show_current()
                return
            # Path not in this rotomap; advance to the next one.
            self.moledata_index += 1
            self.moledata_index %= len(self.moledata_list)
            self.moledata = self.moledata_list[self.moledata_index]
        print("Could not find:", path, ":", visit_uuid, file=sys.stderr)
        self.show_current()
def follow(self, uuid_to_follow):
self._follow = uuid_to_follow
self._mole_overlay.set_highlight_uuid(self._follow)
self.marked_mole_overlay.set_highlight_uuid(self._follow)
follow_mole = None
for m in self.moledata.moles:
if m["uuid"] == self._follow:
follow_mole = m
break
if follow_mole is not None:
self.show_zoomed_display(follow_mole["x"], follow_mole["y"])
def skip_to_mole(self, uuid_to_skip_to):
original_index = self.moledata.index()
done = False
while not done:
for m in self.moledata.moles:
if m["uuid"] == uuid_to_skip_to:
return
self.moledata.increment()
self.moledata.get_image()
if self.moledata.index() == original_index:
return
def toggle_markers(self):
self._mole_overlay.toggle_markers()
self.show_current()
def toggle_faded_markers(self):
self._mole_overlay.toggle_faded_markers()
self.show_current()
def set_mask(self, mouse_x, mouse_y, enable):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
value = 255 if enable else 0
radius = self.masker_radius
cv2.circle(self.moledata.mask, (image_x, image_y), radius, value, -1)
self.moledata.save_mask()
self.show_current()
def show_current(self):
self.display.set_title(self.moledata.current_image_path())
image = self.moledata.get_image()
if self._mode is EditorMode.edit_mole:
self._mole_overlay.moles = self.moledata.moles
self.display.show_current(
image,
make_composite_overlay(
self._mole_overlay, self._status_overlay
),
)
elif self._mode is EditorMode.debug_automole:
image = image[:]
image = mel.rotomap.detectmoles.draw_debug(
image, self.moledata.mask
)
self.display.show_current(image, None)
elif self._mode is EditorMode.edit_mask:
mask = self.moledata.mask
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray_image = cv2.cvtColor(gray_image, cv2.COLOR_GRAY2BGR)
gray_image[:, :, 2] = mask
self.display.show_current(gray_image, None)
elif self._mode is EditorMode.bounding_area:
box = self.moledata.metadata.get("ellipse", None)
self.bounding_area_overlay.bounding_box = box
self.display.show_current(image, self.bounding_area_overlay)
elif self._mode is EditorMode.mole_mark:
self.marked_mole_overlay.moles = self.moledata.moles
self.display.show_current(image, self.marked_mole_overlay)
else:
raise Exception("Unknown mode", self._mode)
def show_fitted(self):
self.display.set_fitted()
self.show_current()
def set_zoom_level(self, zoom_level):
self.display.set_zoom_level(zoom_level)
self.show_current()
def show_zoomed(self, mouse_x, mouse_y, zoom_level=None):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
self.display.set_zoomed(image_x, image_y, zoom_level)
self.show_current()
def show_zoomed_display(self, image_x, image_y, zoom_level=None):
self.display.set_zoomed(image_x, image_y, zoom_level)
self.show_current()
def show_prev_map(self):
def transition():
self.moledata_index -= 1
self.moledata_index %= len(self.moledata_list)
self.moledata = self.moledata_list[self.moledata_index]
self._adjusted_transition(transition)
self.show_current()
def show_next_map(self):
def transition():
self.moledata_index += 1
self.moledata_index %= len(self.moledata_list)
self.moledata = self.moledata_list[self.moledata_index]
self._adjusted_transition(transition)
self.show_current()
def show_prev(self):
self._adjusted_transition(self.moledata.decrement)
self.show_current()
def show_next(self):
self._adjusted_transition(self.moledata.increment)
self.show_current()
def _adjusted_transition(self, transition_func):
if self.display.is_zoomed() and "ellipse" in self.moledata.metadata:
pos = self.display.get_zoom_pos()
ellipse = self.moledata.metadata["ellipse"]
pos = mel.lib.ellipsespace.Transform(ellipse).to_space(pos)
transition_func()
self.moledata.ensure_loaded()
if "ellipse" in self.moledata.metadata:
ellipse = self.moledata.metadata["ellipse"]
pos = mel.lib.ellipsespace.Transform(ellipse).from_space(pos)
self.display.set_zoomed(pos[0], pos[1])
else:
transition_func()
def show_next_n(self, number_to_advance):
for i in range(number_to_advance):
self.moledata.increment()
self.moledata.get_image()
self.show_current()
def add_mole(self, mouse_x, mouse_y):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
mel.rotomap.moles.add_mole(self.moledata.moles, image_x, image_y)
self.moledata.save_moles()
self.show_current()
def add_mole_display(self, image_x, image_y, mole_uuid=None):
mel.rotomap.moles.add_mole(
self.moledata.moles, image_x, image_y, mole_uuid
)
self.moledata.save_moles()
self.show_current()
def confirm_mole(self, mouse_x, mouse_y):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
mole_uuid = mel.rotomap.moles.get_nearest_mole_uuid(
self.moledata.moles, image_x, image_y
)
mel.rotomap.moles.set_nearest_mole_uuid(
self.moledata.moles, image_x, image_y, mole_uuid, is_canonical=True
)
self.moledata.save_moles()
self.show_current()
def confirm_all(self):
for m in self.moledata.moles:
m["is_uuid_canonical"] = True
self.moledata.save_moles()
self.show_current()
def set_mole_uuid(self, mouse_x, mouse_y, mole_uuid, is_canonical=True):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
mel.rotomap.moles.set_nearest_mole_uuid(
self.moledata.moles, image_x, image_y, mole_uuid, is_canonical
)
self.moledata.save_moles()
self.show_current()
def get_mole_uuid(self, mouse_x, mouse_y):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
return mel.rotomap.moles.get_nearest_mole_uuid(
self.moledata.moles, image_x, image_y
)
def get_nearest_mole(self, mouse_x, mouse_y):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
nearest_index = mel.rotomap.moles.nearest_mole_index(
self.moledata.moles, image_x, image_y
)
mole = None
if nearest_index is not None:
mole = self.moledata.moles[nearest_index]
return mole
def move_nearest_mole(self, mouse_x, mouse_y):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
mel.rotomap.moles.move_nearest_mole(
self.moledata.moles, image_x, image_y
)
self.moledata.save_moles()
self.show_current()
def remove_mole(self, mouse_x, mouse_y):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
mel.rotomap.moles.remove_nearest_mole(
self.moledata.moles, image_x, image_y
)
self.moledata.save_moles()
self.show_current()
def crud_mole(self, mole_uuid, mouse_x, mouse_y):
image_x, image_y = self.display.windowxy_to_imagexy(mouse_x, mouse_y)
i = mel.rotomap.moles.uuid_mole_index(self.moledata.moles, mole_uuid)
if i is not None:
self.moledata.moles[i]["x"] = image_x
self.moledata.moles[i]["y"] = image_y
else:
mel.rotomap.moles.add_mole(
self.moledata.moles, image_x, image_y, mole_uuid
)
self.moledata.save_moles()
self.show_current()
def remap_uuid(self, from_uuid, to_uuid):
print(f"Remap globally {from_uuid} to {to_uuid}.")
self.moledata.remap_uuid(from_uuid, to_uuid)
self.show_current()
class MoleData:
def __init__(self, path_list):
# Make an instance-specific cache of images. Note that this means that
# mel will need to be re-run in order to pick up changes to mole
# images. This seems to be fine for use-cases to date, only the mole
# data seems to change from underneath really.
@functools.lru_cache()
def load_image(image_path):
return mel.lib.image.load_image(image_path)
self._load_image = load_image
self.moles = []
self.metadata = {}
self.image = None
self.mask = None
self._mask_path = None
self._path_list = path_list
self._list_index = 0
self.image_path = self._path_list[self._list_index]
self._num_images = len(self._path_list)
self._loaded_index = None
self.ensure_loaded()
def get_image(self):
self.ensure_loaded()
return self.image
def reload(self):
self._loaded_index = None
self.ensure_loaded()
def ensure_loaded(self):
if self._loaded_index == self._list_index:
return
image_path = self._path_list[self._list_index]
self.image = self._load_image(image_path)
self.image_path = image_path
self.moles = mel.rotomap.moles.load_image_moles(image_path)
self.metadata = mel.rotomap.moles.load_image_metadata(image_path)
height, width = self.image.shape[:2]
self._mask_path = mel.rotomap.mask.path(image_path)
self.mask = mel.rotomap.mask.load_or_none(image_path)
if self.mask is None:
self.mask = numpy.zeros((height, width), numpy.uint8)
self._loaded_index = self._list_index
def remap_uuid(self, from_uuid, to_uuid):
for image_path in self._path_list:
moles = mel.rotomap.moles.load_image_moles(image_path)
for m in moles:
if m["uuid"] == from_uuid:
m["uuid"] = to_uuid
m[mel.rotomap.moles.KEY_IS_CONFIRMED] = True
mel.rotomap.moles.save_image_moles(moles, image_path)
image_path = self._path_list[self._list_index]
self.moles = mel.rotomap.moles.load_image_moles(image_path)
def decrement(self):
new_index = self._list_index + self._num_images - 1
self._list_index = new_index % self._num_images
def increment(self):
self._list_index = (self._list_index + 1) % self._num_images
def index(self):
return self._list_index
def save_mask(self):
mel.lib.common.write_image(self._mask_path, self.mask)
def save_moles(self):
image_path = self._path_list[self._list_index]
mel.rotomap.moles.normalise_moles(self.moles)
mel.rotomap.moles.save_image_moles(self.moles, image_path)
def current_image_path(self):
return self._path_list[self._list_index]
def try_jump_to_path(self, path):
for i, image_path in enumerate(self._path_list):
if str(path) == str(image_path):
if self._list_index != i:
self._list_index = i
self.ensure_loaded()
return True
return False
# -----------------------------------------------------------------------------
# Copyright (C) 2016-2018 Angelos Evripiotis.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| aevri/mel | mel/rotomap/display.py | display.py | py | 24,857 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "cv2.circle",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "mel.lib.common.lib",
"line_numb... |
13989773677 | # -*- coding: utf-8 -*-
import time
import numpy
from ntplib import NTPClient
from .base import Utils, MultiTasks
from .task import IntervalTask
from .future import ThreadPool
from hagworm.extend.error import NTPCalibrateError
from hagworm.extend.interface import TaskInterface
class _Interface(TaskInterface):
"""NTP客户端接口定义
"""
def start(self):
raise NotImplementedError()
def stop(self):
raise NotImplementedError()
def is_running(self):
raise NotImplementedError()
def calibrate_offset(self):
raise NotImplementedError()
@property
def offset(self):
raise NotImplementedError()
@property
def timestamp(self):
raise NotImplementedError()
class AsyncNTPClient(_Interface):
"""异步NTP客户端类
"""
@classmethod
async def create(cls, host):
client = cls(host)
await client.calibrate_offset()
client.start()
return client
def __init__(self, host, *, version=2, port=r'ntp', timeout=5, interval=3600, sampling=5):
self._settings = {
r'host': host,
r'version': version,
r'port': port,
r'timeout': timeout,
}
self._client = NTPClient()
self._offset = 0
self._thread_pool = ThreadPool(1)
self._sync_task = IntervalTask(self.calibrate_offset, interval)
self._sampling = sampling
def start(self):
return self._sync_task.start()
def stop(self):
return self._sync_task.stop()
def is_running(self):
return self._sync_task.is_running()
async def calibrate_offset(self):
return await self._thread_pool.run(self._calibrate_offset)
def _calibrate_offset(self):
samples = []
host_name = self._settings[r'host']
# 多次采样取中位数,减少抖动影响
for _ in range(self._sampling):
try:
resp = self._client.request(**self._settings)
samples.append(resp.offset)
except Exception as err:
Utils.log.error(f'NTP server {host_name} request error: {err}')
if samples:
self._offset = float(numpy.median(samples))
Utils.log.debug(f'NTP server {host_name} offset median {self._offset} samples: {samples}')
else:
raise NTPCalibrateError(f'NTP server {host_name} not available, timestamp uncalibrated')
@property
def offset(self):
return self._offset
@property
def timestamp(self):
return time.time() + self._offset
class AsyncNTPClientPool(_Interface):
"""异步NTP客户端池,多节点取中位数实现高可用
"""
@classmethod
async def create(cls, hosts):
client_pool = cls()
for host in hosts:
client_pool.append(host)
await client_pool.calibrate_offset()
client_pool.start()
return client_pool
def __init__(self):
self._clients = []
self._running = False
def append(self, host, *, version=2, port='ntp', timeout=5, interval=3600, sampling=5):
client = AsyncNTPClient(host, version=version, port=port, timeout=timeout, interval=interval, sampling=sampling)
self._clients.append(client)
if self._running:
client.start()
def start(self):
for client in self._clients:
client.start()
self._running = True
def stop(self):
for client in self._clients:
client.stop()
self._running = False
def is_running(self):
return self._running
async def calibrate_offset(self):
tasks = MultiTasks()
for client in self._clients:
tasks.append(client.calibrate_offset())
await tasks
@property
def offset(self):
samples = []
for client in self._clients:
samples.append(client.offset)
return float(numpy.median(samples))
@property
def timestamp(self):
return time.time() + self.offset
| wsb310/hagworm | hagworm/extend/asyncio/ntp.py | ntp.py | py | 4,131 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "hagworm.extend.interface.TaskInterface",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "ntplib.NTPClient",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "future.ThreadPool",
"line_number": 68,
"usage_type": "call"
},
{
"api_nam... |
17041944520 | import numpy as np
import typing
import random
import string
import os
import pickle
import inspect
import time
# time stamp + line numbers
def write_log(file, timestamp, function_name, input_ids=[], output_ids=[], frame=None, args=None):
# if function_name == '__getitem__': # handle edge case
# return
if not frame:
frame_info = inspect.stack()[-1]
else:
frame_info = inspect.getframeinfo(frame)
if frame_info:
fileline = ','.join([str(frame_info.filename), str(frame_info.lineno)])
code_context = frame_info.code_context
else:
fileline = ''
code_context = ''
args = str(args)
log = {'time': timestamp, 'filename': fileline, 'context': code_context, 'function_name': function_name, 'input_ids': input_ids,
'output_ids': output_ids, 'args': args}
log = str(log)
log = log + '\n'
file.write(log)
def write_child_log(file, time, parent_ids, child_ids):
if isinstance(parent_ids, list):
parent_ids = ','.join(parent_ids)
if isinstance(child_ids, list):
parent_ids = ','.join(child_ids)
log = '{};relation;{};{}\n'.format(time, parent_ids, child_ids)
file.write(log)
def write_new_log(file, time, id):
log = '{};new;{}\n'.format(time, id)
file.write(log)
def rand_string(N):
return ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(N))
class LoggedNDArray(np.ndarray):
file_name = '/tmp/logs/log.txt'
directory = '/tmp/logs'
next_id = 1
def __new__(cls, input_array):
obj = np.asarray(input_array).view(cls)
return obj
def __array_finalize__(self, obj):
# if obj is None:
self.file = open(self.file_name, 'a+')
if isinstance(obj, LoggedNDArray):
id_ = str(obj.get_id())
self.write_log = getattr(obj, 'write_log', True)
if self.write_log:
write_child_log(self.file, time.time(), id_, str(self.get_id()))
else:
self.write_log = True
write_new_log(self.file, time.time(), str(self.get_id()))
def __getitem__(self, key) -> typing.Any:
if self.write_log:
write_log(self.file, str(time.time()), self.__getitem__.__name__, input_ids=self.get_id(),
args={'key': key})
return super().__getitem__(key)
def __setitem__(self, key, value) -> None:
if self.write_log:
write_log(self.file, str(time.time()), self.__setitem__.__name__, input_ids=self.get_id(),
args={'key': key})
self.file.write(str(self.funct) + " ; " + self.__setitem__.__name__ + " ; " + str(key) + '\n')
return super().__setitem__(key, value)
def get_id(self, index=None):
if not hasattr(self, 'id'):
self.id = LoggedNDArray.next_id
LoggedNDArray.next_id += 1
if index != None:
id_ = str(self.id) + '_' + index
else:
id_ = self.id
id_ = (self.shape, id_)
return id_
def set_write_log(self, value):
self.write_log = value
def take(self, indices, axis=None, out=None, mode='raise'):
if self.write_log:
if out != None:
out = out.view(np.ndarray)
output = super().take(indices, axis, out, mode)
output = output.view(LoggedNDArray)
output.set_write_log(self.write_log)
args = {}
args['indices'] = str(indices)
args['axis'] = str(axis)
args['mode'] = str(mode)
if self.write_log:
write_child_log(self.file, time.time(), str(self.get_id()), str(output.get_id()))
write_log(self.file, str(time.time()), self.take.__name__, input_ids=self.get_id(),
output_ids=output.get_id(), args=args)
return output
else:
return super().take(indices, axis, out, mode)
# self.file.write(str(self.funct) + " ; " + self.take.__name__ + " ; " + str(kwargs) + '\n')
# def __getattr__(self, name):
# if self.write_log:
# write_log(self.file, str(time.time()), self.__getattr__.__name__, input_ids=str(self.get_id()), args = {'name': name})
# print(type(super()))
# return super().__getattr__(name)
def __array_ufunc__(self, ufunc, method, *inputs, out=None, where=True, **kwargs):
args = []
input_ids = []
# input_ids.append(str(self.get_id()))
logged_args = {}
new_nd_arrays = []
for input_ in inputs:
if isinstance(input_, LoggedNDArray):
args.append(input_.view(np.ndarray))
input_ids.append(input_.get_id())
elif isinstance(input_, np.ndarray):
args.append(input_)
id_file = str(id(input_)) + '_' + rand_string(10)
id_ = (input_.shape, id_file)
new_nd_arrays.append((self.file, time.time(), id_))
array_path = os.path.join(self.directory, id_file + '.npy')
with open(array_path, 'w') as file:
np.save(file, input_)
input_ids.append(id_)
else:
args.append(input_)
input_ids.append(input_)
# deal with ufunc methods
if method == 'reduceat' or method == 'at':
if isinstance(inputs[1], LoggedNDArray):
logged_args['indices'] = inputs[1].get_id(rand_string(10))
array_path = os.path.join(self.directory, logged_args['indices'][1] + '.npy')
logged_args['indices'] = str(logged_args['indices'])
input_ids[1] = logged_args['indices']
with open(array_path, 'w') as file:
np.save(file, args[1])
elif isinstance(inputs[1], np.array):
logged_args['indices'] = input_ids[1]
# if indices is a tuple
elif isinstance(inputs[1], tuple):
indices = []
args[1] = []
for index in inputs[1]:
if isinstance(index, LoggedNDArray):
id_ = index.get_id(rand_string(10))
indices.append(str(id_))
array_path = os.path.join(self.directory, id_[1] + '.npy')
arr = index.view(np.ndarray)
args[1].append(arr)
with open(array_path, 'w') as file:
np.save(file, arr)
elif isinstance(index, np.array):
id_file = str(id(index)) + '_' + rand_string(10)
id_ = str((index.shape, id_file))
indices.append(id_)
array_path = os.path.join(self.directory, id_file + '.npy')
args[1].append(index)
with open(array_path, 'w') as file:
np.save(file, index)
else:
id_file = str(id(index)) + '_' + rand_string(10)
indices.append(str(('object', id_file)))
obj_path = os.path.join(self.directory, id_file + '.pickle')
with open(obj_path, 'w') as file:
np.save(file, index)
args[1] = tuple(args[1])
logged_args['indices'] = str(indices)
else:
id_file = str(id(inputs[1])) + '_' + rand_string(10)
logged_args['indices'] = str(('object', id_file))
obj_path = os.path.join(self.directory, id_file + '.pickle')
with open(obj_path, 'w') as file:
pickle.dump(inputs[1], file)
# deal with out argument
if isinstance(out, LoggedNDArray):
outputs = out.view(np.ndarray)
elif isinstance(out, list):
outputs = []
for out_ in out:
if isinstance(out_, LoggedNDArray):
outputs.append(out_.view(np.ndarray))
else:
outputs = out
if not isinstance(outputs, list):
kwargs['out'] = outputs
else:
if outputs != None:
kwargs['out'] = tuple(outputs)
# deal with where argument
if isinstance(where, LoggedNDArray):
w = where.view(np.ndarray)
id_ = where.get_id(rand_string(10))
array_path = os.path.join(self.directory, id_[1] + '.npy')
with open(array_path, 'w') as file:
np.save(file, w)
logged_args['where'] = str(id_)
elif isinstance(where, np.ndarray):
w = where
id_ = str(id(where)) + '_' + rand_string(10)
logged_args['where'] = str((where.shape, id_))
array_path = os.path.join(self.directory, str(id_) + '.npy')
with open(array_path, 'w') as file:
np.save(file, w)
elif where is not True:
w = where
id_file = str(id(where)) + '_' + rand_string(10)
logged_args['where'] = str(('object', id_file))
obj_path = os.path.join(self.directory, id_file + '.pickle')
with open(obj_path, 'w') as file:
pickle.dump(where, file)
else:
w = True
if w is not True:
kwargs['where'] = w
results = super().__array_ufunc__(ufunc, method,
*args, **kwargs)
if results is NotImplemented:
return NotImplemented
if ufunc.nout == 1:
results = (results,)
results_ = []
output_ids = []
if outputs == None:
for result in results:
if isinstance(result, LoggedNDArray):
results_.append(result)
output_ids.append(result.get_id())
elif isinstance(result, np.ndarray):
result_ = result.view(LoggedNDArray)
results_.append(result_)
output_ids.append(result_.get_id())
elif result is None:
pass
else:
results_.append(result)
output_ids.append(result)
else:
if not isinstance(outputs, tuple):
outputs = (outputs,)
for result, output in zip(results, outputs):
if output == None:
if isinstance(result, np.ndarray):
results_.append(result.view(LoggedNDArray))
else:
results_.append(result)
else:
results_.append(output)
output_ids.append(None)
results = tuple(results_)
# write array without output, where, and methods
name = ufunc.__name__ + ',' + method
# these are already saved by their ids in logged_args or output_id
if 'out' in kwargs:
del kwargs['out']
if 'where' in kwargs:
del kwargs['where']
args = kwargs.update(logged_args)
if self.write_log:
write_log(self.file, str(time.time()), name, input_ids=input_ids, output_ids=output_ids, args=args)
if method == 'at':
return
return results[0] if len(results) == 1 else results
| j2zhao/DSClean | ds_clean/logged_array.py | logged_array.py | py | 11,555 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "inspect.stack",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "inspect.getframeinfo",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "random.SystemRandom",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "string.ascii_l... |
19102323120 | # _*_ coding: utf-8 _*_
import numpy as np
n1=30 #students in class A
x1=78.0 #average grade in class A
s1=10.0 #std dev of exam grade in class A
n2=25 #students in class B
x2=85.0 #average grade in class B
s2=15.0 #std dev of exam grade in class B
# the standard error of the difference between in the average
SE=np.sqrt(s1**2/n1+s2**2/n2)
# compute DOF
DF=(n1-1)+(n2-1)
print('SE=',SE,'DF=',DF)
# calculate t-score
tscore=np.abs(((x1-x2)-0)/SE)
print(tscore)
# calculate t-value
from scipy.stats.distributions import t
# set confident level equal c1
c1=0.95
alpha=1-c1
t95=t.ppf(1.0-alpha/2.0,DF)
print(t95)
# set confident level equal c1
c1=0.94
alpha=1-c1
t95=t.ppf(1.0-alpha/2.0,DF)
print(t95)
f=t.cdf(tscore,DF)-t.cdf(-tscore,DF)
print(f)
| ruanyangry/pycse-data_analysis-code | PYSCE-code/40.py | 40.py | py | 851 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.sqrt",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "scipy.stats.distributions.t.ppf",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "scipy.stats.dist... |
72694605863 | import requests
from datetime import datetime
from helpers.db import ExchangeRateDb
class RetrieveHourlyCryptoToUSDData:
def __init__(self):
self.db_path = 'helpers/cryptocurrency_exchange_rate.db'
self.db = ExchangeRateDb(self.db_path)
self.currency = None
def insert_data_to_table(self, exchange_rate):
updated_time = datetime.now().strftime('%Y-%m-%d %H:00:00')
query = f'''
insert into {self.currency}_exchange_rate (date, '{self.currency}')
values ('{updated_time}', '{exchange_rate}')
'''
self.db.execute(query=query)
| madeleinema-cee/walletwatch_python_backend | update/generic_retrieve_exchange_rate_class.py | generic_retrieve_exchange_rate_class.py | py | 635 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "helpers.db.ExchangeRateDb",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "name"
}
] |
9367032792 | import time
import argparse
import numpy as np
import torch
from deeprobust.graph.defense import GCN, ProGNN
from deeprobust.graph.data import Dataset, PrePtbDataset
from deeprobust.graph.utils import preprocess, encode_onehot, get_train_val_test
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true',
default=False, help='debug mode')
parser.add_argument('--only_gcn', action='store_true',
default=False, help='test the performance of gcn without other components')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disables CUDA training.')
parser.add_argument('--seed', type=int, default=15, help='Random seed.')
parser.add_argument('--lr', type=float, default=0.01,
help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=16,
help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
help='Dropout rate (1 - keep probability).')
parser.add_argument('--dataset', type=str, default='cora',
choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
parser.add_argument('--attack', type=str, default='meta',
choices=['no', 'meta', 'random', 'nettack'])
parser.add_argument('--ptb_rate', type=float, default=0.05, help="noise ptb_rate")
parser.add_argument('--epochs', type=int, default=400, help='Number of epochs to train.')
parser.add_argument('--alpha', type=float, default=5e-4, help='weight of l1 norm')
parser.add_argument('--beta', type=float, default=1.5, help='weight of nuclear norm')
parser.add_argument('--gamma', type=float, default=1, help='weight of l2 norm')
parser.add_argument('--lambda_', type=float, default=0, help='weight of feature smoothing')
parser.add_argument('--phi', type=float, default=0, help='weight of symmetric loss')
parser.add_argument('--inner_steps', type=int, default=2, help='steps for inner optimization')
parser.add_argument('--outer_steps', type=int, default=1, help='steps for outer optimization')
parser.add_argument('--lr_adj', type=float, default=0.01, help='lr for training adj')
parser.add_argument('--symmetric', action='store_true', default=False,
help='whether use symmetric matrix')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
if args.cuda:
torch.cuda.manual_seed(args.seed)
if args.ptb_rate == 0:
args.attack = "no"
print(args)
# Here the random seed is to split the train/val/test data, we need to set the random seed to be the same as that when you generate the perturbed graph
data = Dataset(root='/tmp/', name=args.dataset, setting='nettack', seed=15)
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
if args.dataset == 'pubmed':
# just for matching the results in the paper, see details in https://github.com/ChandlerBang/Pro-GNN/issues/2
print("just for matching the results in the paper," + \
"see details in https://github.com/ChandlerBang/Pro-GNN/issues/2")
import ipdb
ipdb.set_trace()
idx_train, idx_val, idx_test = get_train_val_test(adj.shape[0],
val_size=0.1, test_size=0.8, stratify=encode_onehot(labels), seed=15)
import json
splits = {'idx_train': idx_train.tolist(),
'idx_val': idx_val.tolist(),
'idx_test': idx_test.tolist()}
with open(f'splits/{args.dataset}_prognn_splits.json', 'w') as f:
json.dump(splits, f)
| ChandlerBang/Pro-GNN | save_splits.py | save_splits.py | py | 3,762 | python | en | code | 249 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "torch... |
70040826024 | from django.db import models
class IterFieldsValuesModel(models.Model):
def __iter__(self):
for field_name in self._meta.fields:
value = getattr(self, field_name.name)
yield (field_name.name, value)
class Meta:
abstract = True
| luke9642/Poll | questionnaire/models/iter_fields_values_model.py | iter_fields_values_model.py | py | 289 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 4,
"usage_type": "name"
}
] |
7285626466 | # -*- coding: utf-8 -*-
from datetime import datetime
from dateutil.parser import parse
import os
import json
from pyramid.view import view_config
from pyramid.security import remember
from pyramid.httpexceptions import HTTPFound
from pyramid.security import (
Allow,
Everyone,
)
from pyramid.view import (
view_config,
forbidden_view_config,
)
from pyramid.security import (
remember,
forget,
)
from .security import authenticate
from .models import CasaCloud, Machines, LocalPorts
@view_config(context='.models.CasaCloud',
route_name='home',
renderer='templates/index.html',
permission="can_use",
)
def view_home(request):
is_success = True
error_message = ""
login = request.authenticated_userid
local_ports = LocalPorts(request.registry.settings)
machines = Machines(local_ports, request.registry.settings)
max_cpu_cores = int(request.registry.settings["docker_container_max_cores"])
max_memory = int(request.registry.settings["docker_container_max_memory"])
min_days_to_use = int(request.registry.settings["min_days_to_use"])
docker_container_create_lock_file = request.registry.settings["docker_container_create_lock_file"]
docker_container_max_num_containers = int(request.registry.settings["docker_container_max_num_containers"])
images_settings = eval(request.registry.settings["docker_image_names"])
title = request.registry.settings["website_title"]
images = []
for image_key in images_settings:
images.append(image_key)
can_add_machine = True
is_lock_create_machine = False
if os.path.isfile(docker_container_create_lock_file):
lock_data = json.load(open(docker_container_create_lock_file, "r"))
lock_time = lock_data["lock_time"]
lock_time = parse(lock_time)
diff_time = datetime.now() - lock_time
if diff_time.seconds < 10*60:
is_lock_create_machine = True
names, values = machines.search_machines(login)
if len(values) >= docker_container_max_num_containers:
can_add_machine = False
if request.POST:
if "del_machine_port" in request.POST:
del_machine_port = request.POST["del_machine_port"]
machines.remove_machine(login, int(del_machine_port))
else:
if is_success and is_lock_create_machine:
is_success = False
error_message = "The container creation is locked. Please wait for a moment to retry."
if is_success and not can_add_machine:
is_success = False
error_message = "You already have max number of %d machines." % docker_container_max_num_containers
if is_success:
lock_data = {}
lock_data["lock_time"] = datetime.now().isoformat()
json.dump(lock_data, open(docker_container_create_lock_file, "w+"))
cpu_cores = request.POST["cpu_cores"]
memory = request.POST["memory"]
expiry_date = parse(request.POST["expiry_date"])
image = request.POST["image"]
diff_time = datetime.now() - expiry_date
if diff_time.days < min_days_to_use:
additional_options = request.registry.settings.get("docker_container_start_opts", "")
machines.create_machine(login, cpu_cores, memory, expiry_date, image, additional_options)
else:
is_success = False
error_message = "You should use at least %d days" % min_days_to_use
if os.path.isfile(docker_container_create_lock_file):
os.remove(docker_container_create_lock_file)
names, values = machines.search_machines(login)
render_machines = []
for row_values in values:
item = {}
for i, name in enumerate(names):
item[name] = row_values[i]
render_machines.append(item)
return {
"render_machines": render_machines,
"cpu_core_options": range(1, max_cpu_cores + 1),
"memory_options": range(1, max_memory + 1),
"is_success" : is_success,
"error_message": error_message,
"images": images,
"title": title,
}
@view_config(route_name='login',
             renderer='templates/login.html',
             )
@forbidden_view_config(renderer='templates/login.html')
def view_login(request):
    """Render the login form; authenticate and redirect on success."""
    login_url = request.resource_url(request.context, 'login')
    # Never use the login form itself as the came_from target.
    referrer = '/' if request.url == login_url else request.url
    came_from = request.params.get('came_from', referrer)
    message = ''
    login = ''
    password = ''
    if "login" in request.params and "password" in request.params:
        login = request.params['login']
        password = request.params['password']
        if authenticate(request, login, password):
            headers = remember(request, login)
            return HTTPFound(location=came_from,
                             headers=headers)
        message = 'Failed to login...'
    return dict(
        message=message,
        url=request.registry.settings["website_base_url"] + '/login',
        came_from=came_from,
        login=login,
        password=password,
    )
@view_config(context='.models.CasaCloud', name='logout')
def logout(request):
    """Drop the auth session and redirect back to the context resource."""
    return HTTPFound(
        location=request.resource_url(request.context),
        headers=forget(request),
    )
| cati-neuroimaging/casa_cloud | casa_cloud/views.py | views.py | py | 5,714 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "models.LocalPorts",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "models.Machines",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_... |
32352103733 | from service_app.logger import get_logger
from scrapers_app.constants import *
from lxml import html
import requests
import copy
import re
logger = get_logger(__name__)
# attribute == element (each named attribute maps to one page element)
class ZaraItemInfoScraper:
    """Scrape name, sizes, colors and current price from a Zara item page.

    The item page is downloaded once on construction; the pieces of
    information listed in ``elements`` are then located via XPath.
    """

    # Keys naming the pieces of information this scraper extracts.
    NAME = "name"
    SIZES_ON_SITE = "sizes_on_site"
    COLORS_ON_SITE = "colors_on_site"
    PRICE = "current_price"

    # Information that must be present: element name -> candidate XPaths.
    elements = {
        NAME: ['//*[@class = "product-detail-info__name"]'],
        SIZES_ON_SITE: ['//*[@class = "product-detail-size-info__main-label"]'],
        COLORS_ON_SITE: ['//*[@class = "product-detail-color-selector__color-area"]/span'],
        PRICE: ['//*[@class = "price__amount-current"]']
    }

    def __init__(self, item):
        self.item = item
        self.html_tree = self.get_page_html_tree()
        # Information found on the page: element name -> list of lxml nodes.
        self.found_elements = {}
        # XPaths that actually matched: element name -> xpath string.
        self.found_elements_xpaths = {}

    @logger.log_scraper_method
    def find_elements_on_page(self, elements=None):
        """Populate found_elements / found_elements_xpaths for *elements*.

        When several candidate XPaths match, the last matching one wins.
        """
        if elements is None:
            elements = self.elements.keys()
        for element_name in elements:
            for element_xpath in self.elements[element_name]:
                found = self.html_tree.xpath(element_xpath)
                if found:
                    self.found_elements[element_name] = found
                    self.found_elements_xpaths[element_name] = element_xpath

    @logger.log_scraper_method
    def init_item(self, elements=None):
        """Copy the scraped values onto the wrapped item object."""
        if elements is None:
            elements = [self.NAME, self.SIZES_ON_SITE, self.COLORS_ON_SITE, self.PRICE]
        if self.NAME in elements:
            self.item.name = self.get_name()
        if self.SIZES_ON_SITE in elements:
            self.item.sizes_on_site = self.get_sizes()
        if self.COLORS_ON_SITE in elements:
            self.item.colors_on_site = self.get_colors()
        if self.PRICE in elements:
            self.item.current_price = self.get_price()

    def get_page_html_tree(self):
        """Download the item page and parse it into an lxml tree."""
        response = requests.get(self.item.url, headers=HEADERS)
        logger.debug(f"{response}: {self.item.url}")
        return html.fromstring(response.text)

    def get_name(self):
        return self.found_elements[self.NAME][0].text

    def get_sizes(self):
        return [x.text for x in self.found_elements[self.SIZES_ON_SITE]] if self.item.has_sizes else []

    def get_colors(self):
        return [x.text for x in self.found_elements[self.COLORS_ON_SITE]] if self.item.has_colors else []

    def get_price(self):
        # Concatenate every digit run in the price text and parse as int.
        return int("".join(re.findall(r"\d+", self.found_elements[self.PRICE][0].text)))

    @property
    def not_found_elements(self):
        """Element names still missing from the page (optional ones skipped)."""
        # list() already produces a fresh copy; the previous
        # copy.copy(list(...)) was redundant.
        needed_elements = list(self.elements.keys())
        if not self.item.has_sizes:
            needed_elements.remove(self.SIZES_ON_SITE)
        if not self.item.has_colors:
            needed_elements.remove(self.COLORS_ON_SITE)
        return [x for x in needed_elements if x not in self.found_elements]

    @property
    def found_all_elements(self):
        return len(self.not_found_elements) == 0
| Radislav123/discount_waiter | scrapers_app/scrapers/zara_item_info_scraper.py | zara_item_info_scraper.py | py | 3,298 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "service_app.logger.get_logger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "lxml.html.fromstring",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "lxml.h... |
43535999214 | """
Very simple Flask web site, with one page
displaying a course schedule.
"""
import flask
from flask import render_template
from flask import request
from flask import url_for
from flask import jsonify # For AJAX transactions
import json
import logging
# Date handling
import arrow # Replacement for datetime, based on moment.js
import datetime # But we still need time
from dateutil import tz # For interpreting local times
# Our own module
# import acp_limits
###
# Globals
###
app = flask.Flask(__name__)

import CONFIG
import uuid

# Random per-process secret key: sessions do not survive a restart.
app.secret_key = str(uuid.uuid4())
app.debug=CONFIG.DEBUG
app.logger.setLevel(logging.DEBUG)
###
# Pages
###
@app.route("/")
@app.route("/index")
@app.route("/calc")
def index():
    """Render the main calculator page."""
    app.logger.debug("Main page entry")
    template = 'calc.html'
    return flask.render_template(template)
@app.errorhandler(404)
def page_not_found(error):
    """404 handler: remember a linkback target, then show the error page."""
    app.logger.debug("Page not found")
    flask.session['linkback'] = flask.url_for("calc")
    body = flask.render_template('page_not_found.html')
    return body, 404
############### Work around for loading extra js plugins ##############
@app.route("/_moment")
def moment():
    """Serve moment.js via the template engine (plugin-loading workaround)."""
    app.logger.debug("Moment.js Page")
    template = 'moment.js'
    return flask.render_template(template)
@app.route("/_collapse")
def collapse():
    """Serve collapse.js via the template engine (plugin-loading workaround)."""
    app.logger.debug("Collapse.js Page")
    template = 'collapse.js'
    return flask.render_template(template)
@app.route("/_transitions")
def transitions():
    """Serve transition.js via the template engine (plugin-loading workaround)."""
    app.logger.debug("Transition.js Page")
    template = 'transition.js'
    return flask.render_template(template)
@app.route("/_bootdate")
def bootdate():
    """Serve the Bootstrap datetimepicker script (plugin-loading workaround)."""
    app.logger.debug("Bootstrap Datepicker.js Page")
    template = 'bootstrap-datetimepicker.min.js'
    return flask.render_template(template)
@app.route("/_boot")
def boot():
    """Serve bootstrap.min.js via the template engine (plugin-loading workaround)."""
    app.logger.debug("Bootstrap min.js Page")
    template = 'bootstrap.min.js'
    return flask.render_template(template)
######################################################
###############
#
# AJAX request handlers
# These return JSON, rather than rendering pages.
#
###############
@app.route("/_calc_close_times")
def calc_times():
    """
    Calculates close times from miles, using rules
    described at http://www.rusa.org/octime_alg.html.
    Expects one URL-encoded argument, the number of miles.
    """
    app.logger.debug("Got a JSON request")
    miles = request.args.get('miles', 0, type=int)
    # Minimum-speed brackets from the RUSA control-time algorithm.
    # Boundary comparisons replace the O(n) `miles in range(...)` tests.
    if 0 <= miles <= 600:
        return jsonify(result=miles/15)
    elif 601 <= miles <= 999:
        return jsonify(result=miles/11.428)
    elif 1000 <= miles <= 1299:
        return jsonify(result=miles/13.333)
    # NOTE(review): distances outside 0-1299 fall through and return None
    # (an HTTP 500), matching the original behavior — confirm whether an
    # explicit error response is wanted instead.
@app.route("/_calc_open_times")
def calc_open_times():
    """
    Calculates open times from miles, using rules
    described at http://www.rusa.org/octime_alg.html.
    Expects one URL-encoded argument, the number of miles.
    """
    app.logger.debug("Got a JSON request")
    miles = request.args.get('miles', 0, type=int)
    # Maximum-speed brackets. Bug fix: the original mixed response keys
    # ('hours' for the first two brackets, 'result' for the rest), so the
    # client could not read a single key; every bracket now returns 'hours'.
    if 0 <= miles <= 200:
        return jsonify(hours=miles//34)
    elif 201 <= miles <= 400:
        return jsonify(hours=miles//32)
    elif 401 <= miles <= 600:
        return jsonify(hours=miles//30)
    elif 601 <= miles <= 1000:
        return jsonify(hours=miles//28)
    elif 1001 <= miles <= 1300:
        return jsonify(hours=miles//26)
#################
#
# Functions used within the templates
#
#################
@app.template_filter( 'fmtdate' )
def format_arrow_date( date ):
    """Jinja filter: format a date-like value as 'ddd MM/DD/YYYY'."""
    try:
        normal = arrow.get( date )
        return normal.format("ddd MM/DD/YYYY")
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit and KeyboardInterrupt.
        return "(bad date)"
@app.template_filter( 'fmttime' )
def format_arrow_time( time ):
    """Jinja filter: format a time-like value as 'hh:mm'."""
    try:
        # Bug fix: the original called arrow.get(date) with the undefined
        # name `date`, so every call raised NameError and the filter
        # always returned "(bad time)".
        normal = arrow.get( time )
        return normal.format("hh:mm")
    except Exception:
        return "(bad time)"
#############
if __name__ == "__main__":
    import uuid
    # Re-seeds the secret key already set at import time (harmless duplication).
    app.secret_key = str(uuid.uuid4())
    app.debug=CONFIG.DEBUG
    app.logger.setLevel(logging.DEBUG)
    app.run(port=CONFIG.PORT)
| RedMustard/proj3-ajax | app.py | app.py | py | 3,951 | python | en | code | null | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "CONFIG.DEBUG",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "logging.DEBUG",
"line_nu... |
35917171486 | import ipywidgets as widgets
# Measured in g/cm^3; keys are Danish material names shown in the UI.
MATERIAL_DENSITIES = {
    "Aluminium": 2.7,
    "Egetræ": 0.85,
    "Granit": 2.650,
    "Vand": 1.00,
    "Uran": 18.70,
    "Magnesium": 1.74,
    "Messing": 8.40,
    "Candy floss": 0.059,
}
# Dropdown listing each material with its density; the selected *value*
# is the density itself (g/cm^3), not the name.
MaterialDropDown = widgets.Dropdown(
    options=[(f"{name} ({value} g/cm^3)", value) for (name, value) in MATERIAL_DENSITIES.items()],
    description='Materiale: ',
)
# Angle selector, 0-90 degrees in 0.1 steps ("Vinkel" = angle in Danish).
AngleSlider = widgets.FloatSlider(
    value = 30,
    min = 0,
    max = 90,
    step = 0.1,
    description= "Vinkel",
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='.1f',
)
LengthScaleSlider = widgets.FloatSlider(
value = 10,
min = 10,
max = 100,
step = 0.1,
description= "Vinkel",
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.1f',
) | EmilLytthansBoesen/source | opgave_2/user_interface.py | user_interface.py | py | 882 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ipywidgets.Dropdown",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "ipywidgets.FloatSlider",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "ipywidgets.FloatSlider",
"line_number": 36,
"usage_type": "call"
}
] |
5667898056 | from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_restful import Resource, Api
import sqlite3 as lite
app = Flask(__name__)
api = Api(app)
# SQLite database file used by SQLAlchemy; modification tracking disabled
# to avoid the Flask-SQLAlchemy overhead warning.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///covid.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
ma = Marshmallow(app)
class User(db.Model):
    """Temperature check-in record: who, ID, reading, and location."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(32), unique=True)
    IC = db.Column(db.String(32))
    temperature = db.Column(db.String(32))
    location = db.Column(db.String(32))

    def __init__(self, username, IC, temperature, location):
        self.username = username
        self.IC = IC
        self.temperature = temperature
        self.location = location


# Bug fix: db.create_all() used to sit *inside* the class body, i.e. it ran
# while the model class was still being constructed, before the declarative
# metaclass registered the table — so the `user` table was never created.
# Creating tables after the class definition guarantees the table exists.
db.create_all()
class UserSchema(ma.Schema):
    """Marshmallow schema serializing User rows to JSON."""
    class Meta:
        # Fields exposed through the API.
        fields = ('id', 'username', 'IC', 'temperature', 'location')

# Single-object and list serializers.
user_schema = UserSchema()
users_schema = UserSchema(many=True)
class UserManager(Resource):
    """CRUD endpoints for User rows, mounted at /api/users."""

    @staticmethod
    def get():
        """Return every user as a JSON list."""
        users = User.query.all()
        return jsonify(users_schema.dump(users))

    @staticmethod
    def post():
        """Create a new user from the JSON request body."""
        print("I received a post!")
        username = request.json['username']
        IC = request.json['IC']
        temperature = request.json['temperature']
        location = request.json['location']
        user = User(username, IC, temperature, location)
        # Bug fix: Session has no update() method for inserting rows —
        # the original db.session.update(user) raised AttributeError.
        # New objects are registered with add().
        db.session.add(user)
        db.session.commit()
        return jsonify({
            'Message': f'User, {username}, with IC {IC}, {temperature} Celsius inserted at {location}.'
        })

    @staticmethod
    def put():
        """Update an existing user identified by the ?id= query argument."""
        id = request.args.get('id')
        if not id:
            return jsonify({ 'Message': 'Must provide the user ID' })
        user = User.query.get(id)
        # Guard added: query.get returns None for an unknown id, which
        # previously crashed with AttributeError below.
        if user is None:
            return jsonify({ 'Message': f'User {str(id)} not found.' })
        username = request.json['username']
        IC = request.json['IC']
        temperature = request.json['temperature']
        location = request.json['location']
        user.username = username
        user.IC = IC
        user.temperature = temperature
        user.location = location
        db.session.commit()
        return jsonify({
            'Message': f'User, {username}, with IC {IC}, {temperature} Celsius altered at {location}.'
        })

    @staticmethod
    def delete():
        """Delete the user identified by the ?id= query argument."""
        id = request.args.get('id')
        if not id:
            return jsonify({ 'Message': 'Must provide the user ID' })
        user = User.query.get(id)
        # Guard added: deleting None previously raised inside SQLAlchemy.
        if user is None:
            return jsonify({ 'Message': f'User {str(id)} not found.' })
        db.session.delete(user)
        db.session.commit()
        return jsonify({
            'Message': f'User {str(id)} deleted.'
        })
# Mount the CRUD resource and run the dev server when executed directly.
api.add_resource(UserManager, '/api/users')
if __name__ == '__main__':
    app.run(debug=True) | joshloo/iot-pandemic-stack | web-database/flaskapp.py | flaskapp.py | py | 3,161 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask_marshma... |
15185050967 | """Extraction."""
import json
import logging
import time
from pathlib import Path
from typing import Any, List
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup, SoupStrainer
from requests.exceptions import HTTPError
# Staging directory for extracted files (mounted into the Airflow workers).
DATA_DIR = "/opt/airflow/data"
# IMDB dataset dumps to ingest.
IMDB_TABLES = ["title.basics", "title.ratings"]
def _extract_nyt_reviews(url: str, key: str, left_boundary: str, right_boundary: str) -> bool:
    """Extract NYT movie reviews from movie review API.

    Fetch movie reviews in a time frame starting at left_boundary and ending
    at right_boundary. The server only allows for 10 requests per minute so,
    there will be a timeout of one minute in case a 429 status code is
    encountered. The result is dumped as json to ./data.

    Args:
        url: URL for the NYT movie review API.
        key: Key for the NYT movie review API.
        left_boundary: Start date, format must be %Y-%m-%d.
        right_boundary: End date, format must be %Y-%m-%d.

    Returns:
        Boolean indicating if reviews were dumped.
    """
    movies = []
    has_more = True
    offset = 0
    while has_more:
        try:
            response = requests.get(
                url=url + "/reviews/search.json",
                params={
                    "api-key": key,
                    "opening-date": f"{left_boundary}:{right_boundary}",
                    "offset": str(offset),
                },
            )
            response.raise_for_status()
            response_parsed = response.json()
            # Check if response has more results
            has_more = response_parsed["has_more"]
            offset += 20
            results = response_parsed["results"]
            if results is not None:
                movies += results
        except HTTPError as err:
            # Pause for 1 minute in case request limit is reached; the
            # same offset is retried on the next loop iteration.
            if err.response.status_code == 429:
                time.sleep(60)
            else:
                logging.error(err)
    file_name = "nyt-review.json"
    if movies:
        logging.info(f"Fetched {len(movies)} movie reviews. Writing to {file_name}.")
        # Fix: close the file deterministically (the original left the
        # handle open) and derive the path from file_name.
        with open(f"{DATA_DIR}/nyt/{file_name}", "w") as f:
            json.dump(movies, f, indent=4)
    else:
        logging.info("No reviews available.")
    return True if movies else False
def _get_download_links(url: str) -> List[str]:
    """Get download links from url.

    Parse the page and collect every anchor href pointing to a gzipped file.

    Args:
        url: The URL for the site to parse.

    Returns:
        A list of urls.
    """
    response = requests.get(url)
    anchors = BeautifulSoup(response.content, parse_only=SoupStrainer("a"), features="lxml")
    return [a["href"] for a in anchors if hasattr(a, "href") and a["href"].endswith("gz")]
def _extract_imdb_datasets(url: str) -> List[str]:
    """Extract datasets from IMDB.

    Fetch the title.basics and title.ratings datasets from IMDB and dump new
    rows as csv.gz to ./data.

    Args:
        url: URL to get download links via _get_download_links.

    Returns:
        List of dumped table names.
    """
    # Fix: the original reused the name `url` for each download link,
    # shadowing the function parameter; renamed to `link` throughout.
    links = _get_download_links(url)
    links = [link for link in links if any(keep_url in link for keep_url in IMDB_TABLES)]
    tbl_links = {tbl: link for tbl, link in zip(IMDB_TABLES, links)}
    dumped_tbls: List[str] = []
    for tbl, link in tbl_links.items():
        df = pd.read_table(link, header=0, compression="gzip")
        ids_file = f"{DATA_DIR}/imdb/ids/ids.{tbl}.csv"
        if Path(ids_file).exists():
            existing_ids = pd.read_csv(ids_file, header=None).squeeze("columns")
            df = df.loc[~df.tconst.isin(existing_ids)]
        # Append new ids so subsequent runs skip already-ingested titles.
        df.tconst.to_csv(ids_file, header=False, index=False, mode="a")
        # '\\N' encodes missing values
        df = df.where(df != "\\N", other=np.nan)
        n_rows = df.shape[0]
        file_name = f"imdb/tables/{tbl}.csv.gz"
        if n_rows > 0:
            logging.info(f"Fetched {n_rows} new rows for {tbl}. Writing to {file_name}.")
            dumped_tbls.append(tbl)
            df.to_csv(f"{DATA_DIR}/{file_name}", index=False)
        else:
            logging.info(f"No new rows for {tbl}.")
    return dumped_tbls
def _branch_nyt_tests(**context: Any) -> str:
"""Branch for testing.
Skip the data tests if there are no new reviews available.
Args:
context: Airflow context.
Returns:
ID of task to run.
"""
has_results = context["task_instance"].xcom_pull(
task_ids="extract_nyt_reviews", key="return_value"
)
return "run_tests_raw_nyt_reviews" if has_results else "skip_tests_raw_nyt_reviews"
def _branch_nyt_copy(**context: Any) -> str:
"""Branch for copying.
Skip the copy if there are no new reviews available.
Args:
context: Airflow context.
Returns:
ID of task to run.
"""
has_results = context["task_instance"].xcom_pull(
task_ids="extract_nyt_reviews", key="return_value"
)
return "copy_raw_nyt_table" if has_results else "skip_copy_raw_nyt_table"
def _branch_imdb_tests(**context: Any) -> List[str]:
    """Branch for testing IMDB datasets.

    Skip the data tests if there are no new records available.

    Args:
        context: Airflow context.

    Returns:
        IDs of tasks to run.
    """
    dumped = context["task_instance"].xcom_pull(
        task_ids="extract_imdb_datasets", key="return_value"
    )
    tasks = []
    for tbl in IMDB_TABLES:
        suffix = tbl.replace("title.", "")
        prefix = "run" if tbl in dumped else "skip"
        tasks.append(f"{prefix}_tests_raw_imdb_{suffix}")
    return tasks
def _branch_imdb_copy(**context: Any) -> List[str]:
    """Branch for copying IMDB datasets.

    Skip the copy if there are no new records available.

    Args:
        context: Airflow context.

    Returns:
        IDs of tasks to run.
    """
    dumped = context["task_instance"].xcom_pull(
        task_ids="extract_imdb_datasets", key="return_value"
    )
    tasks = []
    for tbl in IMDB_TABLES:
        suffix = tbl.replace("title.", "")
        tasks.append(
            f"copy_raw_imdb_{suffix}_table" if tbl in dumped
            else f"skip_copy_raw_imdb_{suffix}_table"
        )
    return tasks
| albutz/de-movies | dags/extract.py | extract.py | py | 6,534 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "requests.exceptions.HTTPError",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "logging.error",... |
11194234055 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Use this to execute the differential kinematics
controller in our kinecontrol paper.
'''
from __future__ import print_function
import Sofa
import math
import sys, os
import time
import logging
import datetime
import numpy as np
from utils import *
from config import *
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
logger = logging.getLogger(__name__)
# https://www.sofa-framework.org/community/forum/topic/get-the-position-value-from-a-mechanicalobject-point-in-python/
def moveRestPos(rest_pos, pose):
    """Translate every rest position by (dx, dy, dz).

    Returns a SOFA-style whitespace-separated position string with the
    same leading-space layout the original produced.

    Fix: builds the string with join instead of quadratic `+=`
    concatenation in the loop.
    """
    dx, dy, dz = pose
    parts = [' ']
    for point in rest_pos:
        parts.append(' ' + str(point[0] + dx))
        parts.append(' ' + str(point[1] + dy))
        parts.append(' ' + str(point[2] + dz))
    return ''.join(parts)
def rotateRestPos(rest_pos, rx, centerPosY, centerPosZ):
    """Rotate every rest position by rx radians about the x-axis through
    (centerPosY, centerPosZ); returns a SOFA-style position string.

    Fixes: Python-2-only `xrange` replaced by `range`; the loop-invariant
    cos/sin are computed once; the string is built with join instead of
    quadratic `+=` concatenation.
    """
    cos_rx = math.cos(rx)
    sin_rx = math.sin(rx)
    parts = [' ']
    for point in rest_pos:
        y = point[1] - centerPosY
        z = point[2] - centerPosZ
        new_y = y * cos_rx - z * sin_rx + centerPosY
        new_z = y * sin_rx + z * cos_rx + centerPosZ
        parts.append(' ' + str(point[0]))
        parts.append(' ' + str(new_y))
        parts.append(' ' + str(new_z))
    return ''.join(parts)
class controller(Sofa.PythonScriptController):
    '''
    SOFA Python script controller driving the dome scene: reads the dome's
    mechanical states and expands/retracts it from the keyboard.

    For examples, see:
    + Keyboard Control:
        - https://github.com/lakehanne/sofa/blob/master/examples/Tutorials/StepByStep/Dentistry_Python/keyboardControl.py
    + Parallel and SSH Launcher:
        - https://github.com/lakehanne/sofa/blob/master/tools/sofa-launcher/launcher.py
    + OneParticle:
        - https://github.com/lakehanne/sofa/blob/master/tools/sofa-launcher/example.py
    '''
    def initGraph(self, root):
        # move_dist / growth_rate / max_pressure come from the wildcard
        # `config` import at module level.
        self.move_dist = move_dist #(0, .40, 0)
        self.growth_rate = growth_rate #.5 #was .05
        self.max_pressure = max_pressure #100 # was 15
        self.root = root
        dome_all_dofs = self.get_dome_dofs(self.root)
        self.dh_dofs = dome_all_dofs.dh_dofs
        self.cav_dofs = dome_all_dofs.cav_dofs
        self.cover_dofs = dome_all_dofs.cover_dofs

    # domes' mechanical states
    def get_dome_dofs(self, node):
        '''Collect the dome mechanical-state objects; here node is root.'''
        domehead = node.getChild('DomeHead')
        dh_dofs = domehead.getObject('dh_dofs')
        cav_node = domehead.getChild('DomeCavity')
        cav_dofs = cav_node.getObject('dome_cav_dofs')
        cover_node = domehead.getChild('DomeCover')
        cover_dofs = cover_node.getObject('dome_cover_dofs')
        cover_collis_node = domehead.getChild('DomeCoverCollis')
        # NOTE(review): cover_collis_dofs is fetched but not included in
        # the returned Bundle — confirm whether that is intentional.
        cover_collis_dofs = cover_collis_node.getObject('dome_cover_collis_dofs')
        return Bundle(dict(dh_dofs=dh_dofs,
                    cav_dofs=cav_dofs,
                    cover_dofs=cover_dofs
                    ))

    def bwdInitGraph(self, node):
        # find the position at the end of the shape (which has the biggest x coordinate)
        dh_dofs = self.get_dome_dofs(self.root).dh_dofs.position
        max_x, max_y, max_z = 0, 0, 0
        max_idx_x, max_idx_y, max_idx_z = 0, 0, 0
        # Track the per-axis maxima (and their indices) over all dome DOFs.
        for i in range(len(dh_dofs)):
            if dh_dofs[i][0] > max_x:
                max_idx_x = i
                max_x = dh_dofs[i][0]
            if dh_dofs[i][1] > max_y:
                max_idx_y = i
                max_y = dh_dofs[i][1]
            if dh_dofs[i][2] > max_z:
                max_idx_z = i
                max_z = dh_dofs[i][2]
        self.max_vals = Bundle(dict(max_x=max_x, max_y=max_y, max_z=max_z))
        print('dh trans [x,y,z] {}, {}, {}'.format(max_x, max_y, max_z))
        return 0

    def run_traj_plotter(self):
        # NOTE(review): self.traj_plotter / self.is_chart_updated /
        # self.data are never initialized in this class — confirm they are
        # set externally before this is called.
        if self.is_chart_updated:
            self.traj_plotter.update(self.data)
            # time.sleep(.11)
            self.is_chart_updated = False
        return 0

    def deform_positive(self, dofs):
        print('dome head dofs: ', dofs.position)

    def onBeginAnimationStep(self, deltaTime):
        deltaTime += deltaTime
        # obtain associated dofs and cavity dofs
        # NOTE(review): deltaTime is not modified inside this loop, so if
        # deltaTime < 2 on entry the loop never terminates — confirm.
        while(deltaTime < 2):
            self.deform_positive(self.dh_dofs)
        return 0;

    def onEndAnimationStep(self, deltaTime):
        sys.stdout.flush()
        #access the 'position' state vector
        self.bwdInitGraph(self.root)
        return 0;

    def onKeyPressed(self, c):
        '''Expand (UP arrow) or retract (DOWN arrow) the dome rest positions.'''
        self.dt = self.root.findData('dt').value
        incr = self.dt*1000.0
        self.dh_dofs = self.get_dome_dofs(self.root).dh_dofs
        # self.dh_dofs = dome_all_dofs.dh_dofs
        if (ord(c)==19): # UP Key
            print("expanding ...")
            test = moveRestPos(self.dh_dofs.position, (300.0, 300.0, 300.0))
            self.dh_dofs.findData('position').value = test
        if (ord(c)==21): # DOWN Key
            print("retracting ...")
            test = moveRestPos(self.dh_dofs.position, (-300.0, -300.0, -300.0))
            self.dh_dofs.findData('position').value = test
        self.bwdInitGraph(self.root)

    def onLoaded(self, node):
        return 0;

    def reset(self):
        ## Please feel free to add an example for a simple usage in /home/lex/catkin_ws/src/superchicko/sofa/python/xml_2_scn.py
        return 0;

    def onMouseButtonMiddle(self, mouseX,mouseY,isPressed):
        # usage e.g.
        if isPressed :
            print("Control+Middle mouse button pressed at position "+str(mouseX)+", "+str(mouseY))
        return 0;

    def onScriptEvent(self, senderNode, eventName,data):
        ## Please feel free to add an example for a simple usage in /home/lex/catkin_ws/src/superchicko/sofa/python/xml_2_scn.py
        return 0;

    def onMouseButtonRight(self, mouseX,mouseY,isPressed):
        ## usage e.g.
        if isPressed :
            print("Control+Right mouse button pressed at position "+str(mouseX)+", "+str(mouseY))
        return 0;

    def onMouseButtonLeft(self, mouseX,mouseY,isPressed):
        ## usage e.g.
        if isPressed :
            print("Control+Left mouse button pressed at position "+str(mouseX)+", "+str(mouseY))
        return 0;
| robotsorcerer/superchicko | sofa/python/kinecontrol/single_controller.py | single_controller.py | py | 5,485 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 38,... |
19279623611 | import tensorflow as tf
import numpy as np
import random
from agents.AbstractAgent import AbstractAgent
from minigames.utils import state_of_marine, move_to_position
from utils.select_algorithm import choose_algorithm
from utils.replay_buffer import UniformBuffer
class Agent(AbstractAgent):
    """DQN-style agent: epsilon-greedy policy over a main network, trained
    against a periodically-synchronized target network."""

    def __init__(self, env, action_dim, screen_size, method, gamma=0.99, epsilon=1.0, lr=1e-4, loss='mse', batch_size=32,
                 epsilon_decrease=0.001, epsilon_min=0.05, update_target=2000, num_episodes=5000, max_memory=100000):
        super(Agent, self).__init__(screen_size)
        # Reset once to probe the observation shape for network input.
        obs = env.reset()
        screen = np.array(obs.observation['feature_screen'])
        # NOTE(review): np.reshape reinterprets the buffer without
        # transposing — if a channel-last (H, W, C) view of the (C, H, W)
        # screen is intended, np.transpose would be needed; confirm.
        screen = np.reshape(screen, (screen.shape[1], screen.shape[2], screen.shape[0]))
        screen = tf.convert_to_tensor(screen, dtype=tf.float64)
        self.input_dim = screen.shape
        self.action_dim = action_dim
        # Hyperparameters
        self.gamma = gamma
        self.epsilon = epsilon
        self.lr = lr
        self.loss = loss
        self.batch_size = batch_size
        self.epsilon_decrease = epsilon_decrease
        self.epsilon_min = epsilon_min
        self.update_target = update_target
        self.num_episodes = num_episodes
        self.memory_size = max_memory
        self.cur_frame = 0
        # Main and target networks, built by the selected algorithm.
        self.main_nn, self.target_nn, \
        self.optimizer, self.loss_fn = choose_algorithm(method, self.input_dim, self.action_dim,
                                                        self.lr, self.loss)
        # Replay buffer where the agent's experiences are stored.
        self.buffer = UniformBuffer(self.memory_size)

    def step(self, state, pos_marine):
        """Pick an action and translate it into a PySC2 move command."""
        action = self.select_epsilon_greedy_action(state)
        # The chosen action determines how far/where the marine moves.
        destination = move_to_position(action, self.screen_size)
        return action, self._MOVE_SCREEN("now", self._xy_offset(pos_marine, destination[0], destination[1]))

    def state_marine(self, obs):
        """Return (screen tensor, marine position, marine-beacon distance)."""
        # Representation of the beacon and the marine.
        beacon = self.get_beacon(obs)
        marine = self.get_marine(obs)
        dist = np.hypot((beacon.x - marine.x), (beacon.y - marine.y))
        screen = np.array(obs.observation['feature_screen'])
        screen = np.reshape(screen, (screen.shape[1], screen.shape[2], screen.shape[0]))
        state = tf.convert_to_tensor(screen, dtype=tf.float64)
        pos_marine = self.get_unit_pos(marine)
        return state, pos_marine, dist

    def select_army(self, obs):
        """On the first observation, select the army units."""
        if obs.first():
            return self._SELECT_ARMY

    def select_epsilon_greedy_action(self, state, aux_epsilon=1.0):
        """Take a random action with probability epsilon; otherwise the best action."""
        result = tf.random.uniform((1,))
        if result < self.epsilon and result < aux_epsilon:
            return random.choice(range(self.action_dim))  # random action
        else:
            state = np.reshape(state, (1, tf.shape(state)[0].numpy(), tf.shape(state)[1].numpy(), tf.shape(state)[2].numpy()))
            return tf.argmax(self.main_nn.predict(state)[0]).numpy()  # greedy action

    def train_step(self, states, actions, rewards, next_states, dones):
        """Run one training iteration on a batch of transitions."""
        next_qs = self.target_nn.predict(next_states, batch_size=self.batch_size)
        max_next_qs = tf.reduce_max(next_qs, axis=-1)
        # Bellman target; (1 - done) zeroes the bootstrap on terminal steps.
        target = rewards + (1. - dones) * self.gamma * max_next_qs
        with tf.GradientTape() as tape:
            qs = self.main_nn(states)
            # Select only the Q-values of the actions actually taken.
            action_masks = tf.one_hot(actions, self.action_dim)
            masked_qs = tf.reduce_sum(action_masks * qs, axis=-1)
            loss = self.loss_fn(target, masked_qs)
        grads = tape.gradient(loss, self.main_nn.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.main_nn.trainable_variables))
        return loss

    def decrease_epsilon(self):
        """Linearly decay epsilon down to epsilon_min."""
        if self.epsilon > self.epsilon_min:
            self.epsilon -= self.epsilon_decrease
        else:
            self.epsilon = self.epsilon_min

    def copy_weights(self, Copy_from, Copy_to):
        """
        Function to copy weights of a model to other
        """
        variables2 = Copy_from.trainable_variables
        variables1 = Copy_to.trainable_variables
        for v1, v2 in zip(variables1, variables2):
            v1.assign(v2.numpy())

    def save_model(self, filename):
        # NOTE(review): self.learner is never assigned in this class, so
        # this raises AttributeError — confirm the intended attribute.
        self.learner.save_q_table(filename + '/model.pkl')

    def load_model(self, filename):
        # NOTE(review): see save_model — self.learner is undefined here.
        self.learner.load_model(filename + '/model.pkl')
| ericPrimelles/RLProject | agents/Agent.py | Agent.py | py | 4,824 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "agents.AbstractAgent.AbstractAgent",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tensorfl... |
5547518029 | """
Tests for voting 10/07/2021.
"""
import pytest
from scripts.vote_2021_10_07 import (start_vote)
from utils.config import ldo_token_address, lido_dao_acl_address, lido_dao_token_manager_address
PURCHASE_CONTRACT_PAYOUT_ADDRESS = '0x689E03565e36B034EcCf12d182c3DC38b2Bb7D33'

# LDO payout to the Curve rewards distributor (amounts in wei-style 1e18 units).
payout_curve_rewards = {
    'amount': 3_550_000 * (10 ** 18),
    'address': '0x753D5167C31fBEB5b49624314d74A957Eb271709',
}

# LDO payout to the Balancer rewards distributor.
payout_balancer_rewards = {
    'amount': 300_000 * (10 ** 18),
    'address': '0x1dD909cDdF3dbe61aC08112dC0Fdf2Ab949f79D8',
}

# NOTE(review): amount is a string here, unlike the other payouts.
payout_purchase_contract = {
    'amount': '462962962962963400000000', # 462,962.9629629634 * (10 ** 18)
    'address': PURCHASE_CONTRACT_PAYOUT_ADDRESS,
}

grant_role_purchase_contract = {
    'address': PURCHASE_CONTRACT_PAYOUT_ADDRESS,
    'permission_name': 'ASSIGN_ROLE'
}

payout_finance_multisig = {
    'amount': 28_500 * (10 ** 18), # TODO: Check current rate on 1inch before run
    'address': '0x48F300bD3C52c7dA6aAbDE4B683dEB27d38B9ABb',
    'reference': 'Finance multisig transfer to pay a bug bounty'
}
def curve_balance(ldo) -> int:
    """Returns LDO balance of Curve rewards distributor"""
    distributor = payout_curve_rewards['address']
    return ldo.balanceOf(distributor)
def balancer_balance(ldo) -> int:
    """Returns LDO balance of Balancer rewards distributor"""
    distributor = payout_balancer_rewards['address']
    return ldo.balanceOf(distributor)
def purchase_contract_balance(ldo) -> int:
    """Returns LDO balance of purchase contract"""
    holder = payout_purchase_contract['address']
    return ldo.balanceOf(holder)
def finance_multisig_balance(ldo) -> int:
    """Returns LDO balance of finance multisig contract"""
    holder = payout_finance_multisig['address']
    return ldo.balanceOf(holder)
def has_assign_role_permission(acl, token_manager, who) -> int:
    """True iff *who* holds ASSIGN_ROLE on the TokenManager contract."""
    assign_role = token_manager.ASSIGN_ROLE()
    return acl.hasPermission(who, token_manager, assign_role)
@pytest.fixture(scope='module')
def ldo(interface):
    """Returns contract of LDO token."""
    # NOTE(review): `interface` is assumed to be the brownie interface
    # fixture — confirm in conftest.
    return interface.ERC20(ldo_token_address)
@pytest.fixture(scope='module')
def acl(interface):
    """Returns ACL contract wrapped at lido_dao_acl_address."""
    return interface.ACL(lido_dao_acl_address)
@pytest.fixture(scope='module')
def token_manager(interface):
    """Returns TokenManager contract wrapped at lido_dao_token_manager_address."""
    return interface.TokenManager(lido_dao_token_manager_address)
def test_common(
    acl, token_manager, ldo_holder,
    helpers, accounts, dao_voting, ldo
):
    """Perform testing for the whole voting.

    Snapshots recipient balances, executes the vote, then checks that every
    payout landed and that the purchase contract received ASSIGN_ROLE.
    """
    # Balances before the vote is executed.
    curve_balance_before = curve_balance(ldo)
    balancer_balance_before = balancer_balance(ldo)
    purchase_contract_balance_before = purchase_contract_balance(ldo)
    finance_multisig_balance_before = finance_multisig_balance(ldo)
    # The purchase contract must not hold the role before the vote.
    assert not has_assign_role_permission(acl, token_manager, grant_role_purchase_contract['address'])
    vote_id, _ = start_vote({
        'from': ldo_holder
    }, silent=True)
    helpers.execute_vote(
        vote_id=vote_id, accounts=accounts, dao_voting=dao_voting
    )
    # Balances after the vote executed.
    curve_balance_after = curve_balance(ldo)
    balancer_balance_after = balancer_balance(ldo)
    purchase_contract_balance_after = purchase_contract_balance(ldo)
    finance_multisig_balance_after = finance_multisig_balance(ldo)
    curve_inc = curve_balance_after - curve_balance_before
    balancer_inc = balancer_balance_after - balancer_balance_before
    purchase_contract_balance_inc = purchase_contract_balance_after - purchase_contract_balance_before
    finance_multisig_balance_inc = finance_multisig_balance_after - finance_multisig_balance_before
    assert curve_inc == payout_curve_rewards['amount'], 'Failed on Curve'
    assert balancer_inc == payout_balancer_rewards['amount'], 'Failed on Balancer'
    # int() guards against the expected amount being stored as a decimal
    # string (an int == str comparison would silently be False).
    assert purchase_contract_balance_inc == int(payout_purchase_contract['amount']), 'Failed on purchase contract'
    assert has_assign_role_permission(acl, token_manager,
                                      grant_role_purchase_contract['address']), 'Failed on grant ASSIGN_ROLE'
    # Fixed copy-paste: this message previously said 'purchase contract'.
    assert finance_multisig_balance_inc == payout_finance_multisig['amount'], 'Failed on finance multisig'
| lidofinance/scripts | archive/tests/xtest_2021_10_07.py | xtest_2021_10_07.py | py | 4,121 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "utils.config.ldo_token_address",
"line_number": 67,
"usage_type": "argument"
},
{
"api_name": "pytest.fixture",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "utils.config.lido_dao_acl_address",
"line_number": 73,
"usage_type": "argument"
},
... |
18878055040 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from imp import reload
except ImportError:
from importlib import reload
from django.core.urlresolvers import resolve, reverse
from django.http import Http404
from django.test import override_settings
from django.utils.translation import override
import parler.appsettings
from ..models import Category
from ..views import FaqByCategoryView, FaqAnswerView
from .test_base import AldrynFaqTest
class TestFaqByCategoryView(AldrynFaqTest):
    """Integration tests for FaqByCategoryView: context, language fallback,
    old-URL redirect and the category list view."""
    reload_parler_appsettings = True
    def test_view_context(self):
        """Tests that the FaqByCategoryView produces the correct context."""
        category_1 = self.reload(self.category1, "en")
        category_1_url = category_1.get_absolute_url()
        question_1 = self.reload(self.question1, "en")
        request = self.get_page_request(
            page=self.page,
            user=self.user,
            path=category_1_url,
        )
        url_kwargs = resolve(category_1_url).kwargs
        try:
            response = FaqByCategoryView.as_view()(request, **url_kwargs)
        except Http404:
            self.fail('Could not find category')
        self.assertEqualItems(
            response.context_data['object_list'],
            [question_1, ],
        )
    def test_view_context_fallback(self):
        """
        Tests that the FaqByCategoryView produces the correct context
        when requesting a category in an untranslated language.
        """
        category_2 = self.reload(self.category2, "en")
        category_2_url = category_2.get_absolute_url()
        question_2 = self.reload(self.question2, "en")
        request = self.get_page_request(
            page=self.page,
            user=self.user,
            path=category_2_url,
        )
        url_kwargs = resolve(category_2_url).kwargs
        # Enable parler language fallbacks before dispatching the view.
        with override_settings(**self.enabled_parler_fallback_settings):
            reload(parler.appsettings)
            try:
                response = FaqByCategoryView.as_view()(request, **url_kwargs)
            except Http404:
                self.fail('Could not find category')
            self.assertEqualItems(
                response.context_data['object_list'],
                [question_2, ],
            )
    def test_view_old_format_redirect(self):
        """
        Tests that the FaqByCategoryView redirects user
        when accessed with old category url format
        """
        category_1 = self.reload(self.category1, "en")
        category_1_url_new = category_1.get_absolute_url()
        kwargs = {"category_slug": category_1.slug}
        with override('en'):
            category_1_url_name = '{ns}:faq-category'.format(
                ns=self.app_config.namespace
            )
            category_1_url_old = reverse(category_1_url_name, kwargs=kwargs)
        request = self.get_page_request(
            page=self.page,
            user=self.user,
            path=category_1_url_old,
        )
        response = FaqByCategoryView.as_view()(request, **kwargs)
        # Fixed: assertEquals is a long-deprecated alias of assertEqual.
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, category_1_url_new)
    def test_list_view(self):
        """Test category list view to contain a proper set of categories"""
        def _do_test_list_view(language_code):
            # Fetch the list page in `language_code` and expect every active
            # category of the app config to be rendered.
            with override(language_code):
                categories = Category.objects.language(
                    language_code).active_translations(language_code).filter(
                    appconfig=self.app_config)
                url = reverse('{ns}:faq-category-list'.format(
                    ns=self.app_config.namespace))
                response = self.client.get(url, follow=True)
                for category in categories:
                    self.assertContains(response, category.name)
        for language_code in ('en', 'de'):
            _do_test_list_view(language_code)
        # Re-check English with the English-only parler settings applied.
        with override_settings(**self.settings_en):
            reload(parler.appsettings)
            _do_test_list_view('en')
class TestFaqAnswerView(AldrynFaqTest):
    """Integration tests for FaqAnswerView: context, language fallback,
    old-URL redirect and category/question mismatch handling."""
    reload_parler_appsettings = True
    def test_view_context(self):
        """Tests that the FaqAnswerView produces the correct context."""
        question_1 = self.reload(self.question1, "en")
        question_1_url = question_1.get_absolute_url("en")
        url_kwargs = resolve(question_1_url).kwargs
        request = self.get_page_request(
            page=self.page,
            user=self.user,
            path=question_1_url,
        )
        response = FaqAnswerView.as_view()(request, **url_kwargs)
        self.assertEqual(
            response.context_data['object'],
            question_1,
        )
    def test_view_context_fallback(self):
        """
        Tests that the FaqAnswerView produces the correct context
        when requesting a question in an untranslated language.
        """
        # NOTE(review): this reloads self.question1, not self.question2 —
        # likely a copy-paste slip that makes this mirror test_view_context;
        # confirm against the fixtures before changing.
        question_2 = self.reload(self.question1, "en")
        question_2_url = question_2.get_absolute_url("en")
        url_kwargs = resolve(question_2_url).kwargs
        request = self.get_page_request(
            page=self.page,
            user=self.user,
            path=question_2_url,
        )
        with override_settings(**self.enabled_parler_fallback_settings):
            reload(parler.appsettings)
            response = FaqAnswerView.as_view()(request, **url_kwargs)
            self.assertEqual(
                response.context_data['object'],
                question_2,
            )
    def test_view_old_format_redirect(self):
        """
        Tests that the FaqAnswerView redirects the user
        when accessed with the old answer url format.
        """
        category_1 = self.reload(self.category1, "en")
        question_1 = self.reload(self.question1, "en")
        question_1_url_new = question_1.get_absolute_url()
        kwargs = {
            "category_slug": category_1.slug,
            "pk": question_1.pk
        }
        with override('en'):
            url_name = '{ns}:faq-answer'.format(ns=self.app_config.namespace)
            question_1_url_old = reverse(url_name, kwargs=kwargs)
        request = self.get_page_request(
            page=self.page,
            user=self.user,
            path=question_1_url_old,
        )
        response = FaqAnswerView.as_view()(request, **kwargs)
        # Fixed: assertEquals is a long-deprecated alias of assertEqual.
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, question_1_url_new)
    def test_answer_match_category(self):
        """
        Tests that the question id given in url
        belongs to the given category, if not then 404 is raised.
        """
        category_1 = self.reload(self.category1, "de")
        question_2 = self.reload(self.question2, "de")
        # question_2 deliberately does not belong to category_1.
        kwargs = {
            "category_pk": category_1.pk,
            "category_slug": category_1.slug,
            "pk": question_2.pk
        }
        with override('de'):
            url_name = '{ns}:faq-answer'.format(ns=self.app_config.namespace)
            question_2_invalid_url = reverse(url_name, kwargs=kwargs)
        request = self.get_page_request(
            page=self.page,
            user=self.user,
            path=question_2_invalid_url,
        )
        with self.assertRaises(Http404):
            FaqAnswerView.as_view()(request, **kwargs)
| aldryn/aldryn-faq | aldryn_faq/tests/test_views.py | test_views.py | py | 7,363 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "test_base.AldrynFaqTest",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.core.urlresolvers.resolve",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "views.FaqByCategoryView.as_view",
"line_number": 42,
"usage_type": "call"
},
... |
37442560200 | """estres forms"""
from django import forms
from estres.models import *
class EstresForm(forms.ModelForm):
    """ModelForm for configuring a stress-test (estres) run.

    Collects the scenario count, risk horizon, cut-off date and the four
    Excel workbooks that feed an ``EstresModel`` instance. Labels are
    user-facing Spanish strings; file widgets only accept .xlsx uploads.
    """
    class Meta:
        model = EstresModel
        # Model fields exposed on the form, in display order.
        fields = [
            "numero_escenarios",
            "horizonte_riesgo",
            "fechacorte",
            "info_mercado",
            "inc_financiero",
            "inc_biometricos",
            "inc_frecsev",
        ]
        # User-facing (Spanish) labels for each field.
        labels = {
            "numero_escenarios":"Número de escenarios (entero de 1000 a 10000)",
            "horizonte_riesgo":"Horizonte de riesgo",
            "fechacorte":"Fecha de corte",
            "info_mercado":"INFO_MERCADO.xlsx",
            "inc_financiero":"INC_FINANCIERO.xlsx",
            "inc_biometricos":"INC_BIOMETRICOS.xlsx",
            "inc_frecsev":"INC_FRECUENCIA_SEVERIDAD.xlsx",
        }
        # Input widgets; the date picker uses ISO format, file inputs are
        # restricted to Excel workbooks.
        widgets = {
            "numero_escenarios":forms.TextInput(),
            "horizonte_riesgo":forms.Select(),
            "fechacorte":forms.DateInput(format=('%Y-%m-%d'), attrs={'class':'datepicker'}),
            "info_mercado":forms.FileInput(attrs={"accept":".xlsx"}),
            "inc_financiero":forms.FileInput(attrs={"accept":".xlsx"}),
            "inc_biometricos":forms.FileInput(attrs={"accept":".xlsx"}),
            "inc_frecsev":forms.FileInput(attrs={"accept":".xlsx"}),
        }
| adandh/Bedu_RiesgosSeguros | estres/forms.py | forms.py | py | 1,467 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.... |
36121100233 | import logging
import os
from time import time
from abc import abstractmethod, ABC
from pathlib import Path
from typing import Any, Iterator, Optional, Union, List, Dict, Set
from forte.common.configuration import Config
from forte.common.exception import ProcessExecutionException
from forte.common.resources import Resources
from forte.data.base_pack import PackType
from forte.data.data_pack import DataPack
from forte.data.multi_pack import MultiPack
from forte.data.types import ReplaceOperationsType
from forte.pipeline_component import PipelineComponent
from forte.utils.utils import get_full_module_name
__all__ = [
"BaseReader",
"PackReader",
"MultiPackReader",
]
logger = logging.getLogger(__name__)
class BaseReader(PipelineComponent[PackType], ABC):
    r"""The basic data reader class. To be inherited by all data readers.
    Args:
        from_cache: Decide whether to read from cache
            if cache file exists. By default (``False``), the reader will
            only read from the original file and use the cache file path
            for caching, it will not read from the ``cache_directory``.
            If ``True``, the reader will try to read a datapack from the
            caching file.
        cache_directory: The base directory to place the
            path of the caching files. Each collection is contained in one
            cached file, under this directory. The cached location for each
            collection is computed by
            :meth:`~forte.data.base_reader.BaseReader._cache_key_function`.
            .. note::
                A collection is the data returned by
                :meth:`~forte.data.base_reader.BaseReader._collect`.
        append_to_cache: Decide whether to append write
            if cache file already exists. By default (``False``), we
            will overwrite the existing caching file. If ``True``, we will
            cache the datapack append to end of the caching file.
        cache_in_memory: If ``True``, parsed packs are also kept in memory
            so subsequent iterations skip re-parsing the data source.
    """
    def __init__(
        self,
        from_cache: bool = False,
        cache_directory: Optional[str] = None,
        append_to_cache: bool = False,
        cache_in_memory: bool = False,
    ):
        super().__init__()
        self.from_cache = from_cache
        self._cache_directory = cache_directory
        self.component_name = get_full_module_name(self)
        self.append_to_cache = append_to_cache
        self._cache_in_memory = cache_in_memory
        # Becomes True once a full pass populated the in-memory cache.
        self._cache_ready: bool = False
        self._data_packs: List[PackType] = []
        # needed for time profiling of reader
        self._enable_profiling: bool = False
        self._start_time: float = 0.0
        self.time_profile: float = 0.0
    def initialize(self, resources: Resources, configs: Config):
        super().initialize(resources, configs)
        # Clear memory cache so a re-initialized reader starts fresh.
        self._cache_ready = False
        del self._data_packs[:]
    @classmethod
    def default_configs(cls):
        r"""Returns a `dict` of configurations of the reader with default
        values. Used to replace the missing values of input `configs`
        during pipeline construction.
        Here:
        - zip_pack (bool): whether to zip the results. The default value is
          False.
        - serialize_method: The method used to serialize the data. Current
          available options are `json`, `jsonpickle` and `pickle`. Default is
          `json`.
        """
        return {"zip_pack": False, "serialize_method": "json"}
    @staticmethod
    def pack_type():
        # Subclasses declare which pack class they produce.
        raise NotImplementedError
    @abstractmethod
    def _collect(self, *args: Any, **kwargs: Any) -> Iterator[Any]:
        r"""Returns an iterator of data objects, and each individual object
        should contain sufficient information needed to construct or locate
        a data pack in cache.
        For example: `data_source` can be a ``kwarg`` which is the path to a
        file that a reader can take to read and parse a file.
        Args:
            args: Specify the data source.
            kwargs: Specify the data source.
        Returns: Iterator of collections that are sufficient to create one pack.
        """
        raise NotImplementedError
    def parse_pack(self, collection: Any) -> Iterator[PackType]:
        r"""Calls :meth:`_parse_pack` to create packs from the collection.
        This internally setup the component meta data. Users should implement
        the :meth:`_parse_pack` method.
        """
        if collection is None:
            raise ProcessExecutionException(
                "Got None collection, cannot parse as data pack."
            )
        for p in self._parse_pack(collection):
            p.add_all_remaining_entries(self.name)
            yield p
    @abstractmethod
    def _parse_pack(self, collection: Any) -> Iterator[PackType]:
        r"""Returns an iterator of Packs parsed from a collection. Readers
        should implement this class to populate the class input.
        Args:
            collection: Object that can be parsed into a Pack.
        Returns: Iterator of Packs.
        """
        raise NotImplementedError
    def _cache_key_function(self, collection: Any) -> Optional[str]:
        # pylint: disable=unused-argument
        r"""Computes the cache key based on the type of data.
        Args:
            collection: Any object that provides information to identify the
                name and location of the cache file
        """
        return None
    def text_replace_operation(self, text: str) -> ReplaceOperationsType:
        # pylint: disable=unused-argument
        r"""Given the possibly noisy text, compute and return the
        replacement operations in the form of a list of (span, str)
        pairs, where the content in the span will be replaced by the
        corresponding str.
        Args:
            text: The original data text to be cleaned.
        Returns (List[Tuple[Tuple[int, int], str]]):
            the replacement operations.
        """
        return []
    def _get_cache_location(self, collection: Any) -> str:
        r"""Gets the path to the cache file for a collection.
        Args:
            collection: information to compute cache key.
        Returns (Path): file path to the cache file for a Pack.
        """
        # pylint: disable=assignment-from-none
        file_path = self._cache_key_function(collection)
        if file_path is None:
            raise ProcessExecutionException(
                "Cache key is None. You probably set `from_cache` to true but "
                "fail to implement the _cache_key_function"
            )
        return os.path.join(str(self._cache_directory), file_path)
    def _lazy_iter(self, *args, **kwargs):
        # Yields packs either from cache files or by parsing each collection,
        # writing newly parsed packs to the cache when configured.
        for collection in self._collect(*args, **kwargs):
            if self.from_cache:
                for pack in self.read_from_cache(
                    self._get_cache_location(collection)
                ):
                    pack.add_all_remaining_entries()
                    yield pack
            else:
                not_first = False
                for pack in self.parse_pack(collection):
                    # write to the cache if _cache_directory specified
                    if self._cache_directory is not None:
                        self.cache_data(collection, pack, not_first)
                    if not isinstance(pack, self.pack_type()):
                        raise ValueError(
                            f"No Pack object read from the given "
                            f"collection {collection}, returned {type(pack)}."
                        )
                    not_first = True
                    pack.add_all_remaining_entries()
                    yield pack
    def set_profiling(self, enable_profiling: bool = True):
        r"""Set profiling option.
        Args:
            enable_profiling: A boolean of whether to enable profiling
                for the reader or not (the default is True).
        """
        self._enable_profiling = enable_profiling
    def timer_yield(self, pack: PackType):
        r"""Wrapper generator for time profiling. Insert timers around
        'yield' to support time profiling for reader.
        Args:
            pack: DataPack passed from self.iter()
        """
        # Aggregate time cost
        if self._enable_profiling:
            self.time_profile += time() - self._start_time
        yield pack
        # Start timer
        if self._enable_profiling:
            self._start_time = time()
    def iter(self, *args, **kwargs) -> Iterator[PackType]:
        # pylint: disable=protected-access
        r"""An iterator over the entire dataset, giving all Packs processed
        as list or Iterator depending on `lazy`, giving all the Packs read
        from the data source(s). If not reading from cache, should call
        ``collect``.
        Args:
            args: One or more input data sources, for example, most
                DataPack readers accept `data_source` as file/folder path.
            kwargs: Iterator of DataPacks.
        """
        # Start timer
        if self._enable_profiling:
            self._start_time = time()
        if self._cache_in_memory and self._cache_ready:
            # Read from memory
            for pack in self._data_packs:
                if self._check_type_consistency:
                    if hasattr(pack._meta, "record"):
                        self.record(pack._meta.record)
                yield from self.timer_yield(pack)
        else:
            # Read via parsing dataset
            for pack in self._lazy_iter(*args, **kwargs):
                if self._check_type_consistency:
                    if hasattr(pack._meta, "record"):
                        self.record(pack._meta.record)
                if self._cache_in_memory:
                    self._data_packs.append(pack)
                yield from self.timer_yield(pack)
        self._cache_ready = True
    def record(self, record_meta: Dict[str, Set[str]]):
        r"""Modify the pack meta record field of the reader's output. The
        key of the record should be the entry type and values should
        be attributes of the entry type. All the information would be used
        for consistency checking purpose if the pipeline is initialized with
        `enforce_consistency=True`.
        Args:
            record_meta: the field in the datapack for type record that need to
                fill in for consistency checking.
        """
        pass
    def cache_data(self, collection: Any, pack: PackType, append: bool):
        r"""Specify the path to the cache directory.
        After you call this method, the dataset reader will use its
        ``cache_directory`` to store a cache of
        :class:`~forte.data.base_pack.BasePack` read
        from every document passed to read, serialized as one
        string-formatted :class:`~forte.data.base_pack.BasePack`. If the cache file for a given
        ``file_path`` exists, we read the :class:`~forte.data.base_pack.BasePack` from the cache.
        If the cache file does not exist, we will `create` it on our first
        pass through the data.
        Args:
            collection: The collection is a piece of data from the
                :meth:`_collect` function, to be read to produce DataPack(s).
                During caching, a cache key is computed based on the data in
                this collection.
            pack: The data pack to be cached.
            append: Whether to allow appending to the cache.
        """
        if not self._cache_directory:
            raise ValueError("Can not cache without a cache_directory!")
        os.makedirs(self._cache_directory, exist_ok=True)
        # Fixed: _get_cache_location() already prefixes _cache_directory;
        # joining the directory again wrote caches to <dir>/<dir>/<key>
        # while the read path (_lazy_iter -> read_from_cache) reads from
        # <dir>/<key>, so cached packs were never found.
        cache_filename = self._get_cache_location(collection)
        logger.info("Caching pack to %s", cache_filename)
        if append:
            with open(
                cache_filename,
                "a",
                encoding="utf-8",
            ) as cache:
                cache.write(pack.to_string() + "\n")
        else:
            with open(
                cache_filename,
                "w",
                encoding="utf-8",
            ) as cache:
                cache.write(pack.to_string() + "\n")
    def read_from_cache(
        self, cache_filename: Union[Path, str]
    ) -> Iterator[PackType]:
        r"""Reads one or more Packs from ``cache_filename``, and yields Pack(s)
        from the cache file.
        Args:
            cache_filename: Path to the cache file.
        Returns:
            List of cached data packs.
        """
        logger.info("reading from cache file %s", cache_filename)
        with open(cache_filename, "r", encoding="utf-8") as cache_file:
            for line in cache_file:
                pack = DataPack.from_string(line.strip())
                if not isinstance(pack, self.pack_type()):
                    raise TypeError(
                        f"Pack deserialized from {cache_filename} "
                        f"is {type(pack)}, but expect {self.pack_type()}"
                    )
                yield pack
    def finish(self, resource: Resources):
        pass
    def set_text(self, pack: DataPack, text: str):
        r"""Assign the text value to the
        :class:`~forte.data.data_pack.DataPack`. This function will
        pass the ``text_replace_operation`` to the
        :class:`~forte.data.data_pack.DataPack` to conduct
        the pre-processing step.
        Args:
            pack: The :class:`~forte.data.data_pack.DataPack` to assign value for.
            text: The original text to be recorded in this dataset.
        """
        pack.set_text(text, replace_func=self.text_replace_operation)
class PackReader(BaseReader[DataPack], ABC):
    r"""A Pack Reader reads data into :class:`~forte.data.data_pack.DataPack`."""
    @staticmethod
    def pack_type():
        # Concrete pack class produced by this reader family.
        return DataPack
class MultiPackReader(BaseReader[MultiPack], ABC):
    r"""The basic :class:`~forte.data.multi_pack.MultiPack` data reader class.
    To be inherited by all
    data readers which return :class:`~forte.data.multi_pack.MultiPack`.
    """
    @staticmethod
    def pack_type():
        # Concrete pack class produced by this reader family.
        return MultiPack
| asyml/forte | forte/data/base_reader.py | base_reader.py | py | 14,338 | python | en | code | 230 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "forte.pipeline_component.PipelineComponent",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "forte.data.base_pack.PackType",
"line_number": 27,
"usage_type": "name"
},... |
8891981416 | from os import access
from tkinter import image_names
import cv2
import dropbox
import time
import random
# Timestamp of the last snapshot; main() compares against this every loop.
start_time=time.time()
def take_snapshot():
    """Capture one frame from the default webcam and save it as img<N>.png.

    Returns:
        The file name of the saved snapshot.
    Raises:
        RuntimeError: if no frame could be read from the webcam.
    """
    number = random.randint(0, 100)
    video_capture = cv2.VideoCapture(0)
    try:
        ret, frame = video_capture.read()
        if not ret:
            raise RuntimeError("Could not read a frame from the webcam")
        img_name = "img" + str(number) + ".png"
        cv2.imwrite(img_name, frame)
        print("Snapshot taken")
        return img_name
    finally:
        # Fixed: the original returned from inside its loop, so release()
        # and destroyAllWindows() were unreachable and the capture handle
        # leaked on every call.
        video_capture.release()
        cv2.destroyAllWindows()
def upload_files(image_name):
    """Upload the local file `image_name` into the /ary/ folder on Dropbox."""
    # SECURITY NOTE: a hard-coded access token should be moved to an
    # environment variable or secrets store, and this one revoked.
    access_token="sl.BFNXVhLcVxzWNu4AWyBuMBw9q07HWCeS1ifaeAhkH5wibVA8iwoLjtu8wVC-BWZ_dTMYCduOJ1NHUYhq1GMEDxuPeoUiGHtmbkwJuStlCNJnL8wtjFjws_HXPC7eISi4P6PNqoqvzzE"
    file_from = image_name
    file_to = "/ary/" + image_name
    dbx = dropbox.Dropbox(access_token)
    with open(file_from, "rb") as f:
        # Fixed: the Dropbox SDK method is `files_upload`; `file_upload`
        # does not exist and raised AttributeError at runtime.
        dbx.files_upload(f.read(), file_to, mode=dropbox.files.WriteMode.overwrite)
    print("file uploaded")
def main():
    """Take and upload a webcam snapshot every 5 seconds, forever."""
    global start_time
    while True:
        if time.time() - start_time >= 5:
            name = take_snapshot()
            upload_files(name)
            # Fixed: the timer was never reset, so once the first 5 seconds
            # elapsed a snapshot fired on every loop iteration.
            start_time = time.time()
        # Avoid a 100% CPU busy-wait between snapshots.
        time.sleep(0.1)
main()  # start the periodic snapshot/upload loop
| ARYAN0021/PythonProjectsFinal | SecurityWebCam.py | SecurityWebCam.py | py | 1,223 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_numb... |
30864450241 | import torch
from torch.utils.data import Dataset
import numpy as np
from pathlib import Path
from synthesizer.utils.text import text_to_sequence
class SynthesizerDataset(Dataset):
    """Dataset of (text, mel spectrogram, speaker embedding) training triples.

    Metadata lines are pipe-separated; column 1 is the mel file name,
    column 2 the speaker-embedding file name, column 4 a keep-flag and
    column 5 the transcript.
    """
    def __init__(self, metadata_fpath: Path, mel_dir: Path, embed_dir: Path, hparams):
        print("Using inputs from:\n\t%s\n\t%s\n\t%s" % (metadata_fpath, mel_dir, embed_dir))
        with metadata_fpath.open("r") as metadata_file:
            metadata = [line.split("|") for line in metadata_file]
        # Keep only samples whose keep-flag (column 4) is non-zero.
        mel_fnames = [x[1] for x in metadata if int(x[4])]
        mel_fpaths = [mel_dir.joinpath(fname) for fname in mel_fnames]
        embed_fnames = [x[2] for x in metadata if int(x[4])]
        embed_fpaths = [embed_dir.joinpath(fname) for fname in embed_fnames]
        self.samples_fpaths = list(zip(mel_fpaths, embed_fpaths))
        self.samples_texts = [x[5].strip() for x in metadata if int(x[4])]
        self.metadata = metadata
        self.hparams = hparams
        print("Found %d samples" % len(self.samples_fpaths))
    def __getitem__(self, index):
        # Sometimes index may be a list of 2 (not sure why this happens);
        # if so, use its first element.
        # Fixed: `index is list` compared against the `list` type object and
        # was always False; isinstance() performs the intended check.
        if isinstance(index, list):
            index = index[0]
        mel_path, embed_path = self.samples_fpaths[index]
        mel = np.load(mel_path).T.astype(np.float32)
        # Load the speaker embedding.
        embed = np.load(embed_path)
        # Clean the text and convert it to a sequence of symbol ids.
        text = text_to_sequence(self.samples_texts[index], self.hparams.tts_cleaner_names)
        # Convert the list returned by text_to_sequence to a numpy array.
        text = np.asarray(text).astype(np.int32)
        return text, mel.astype(np.float32), embed.astype(np.float32), index
    def __len__(self):
        return len(self.samples_fpaths)
def collate_synthesizer(batch, r, hparams):
    """Collate (text, mel, embed, index) samples into padded batch tensors.

    Text sequences are zero-padded to the longest one in the batch; mel
    spectrograms are padded along the time axis to a multiple of the
    reduction factor ``r`` using the silence value implied by ``hparams``.
    """
    # Pad every character sequence to the longest one in the batch.
    longest_text = max(len(sample[0]) for sample in batch)
    char_batch = np.stack([pad1d(sample[0], longest_text) for sample in batch])
    # Mels are padded to one step past the longest, rounded up so the padded
    # length is divisible by the reduction factor r.
    longest_spec = max(sample[1].shape[-1] for sample in batch) + 1
    if longest_spec % r != 0:
        longest_spec += r - (longest_spec % r)
    # WaveRNN mels are in [0, 1] so zero padding is silence; symmetric
    # (SV2TTS) mels use -max_abs_value as silence instead.
    if hparams.symmetric_mels:
        silence = -1 * hparams.max_abs_value
    else:
        silence = 0
    mel_batch = np.stack(
        [pad2d(sample[1], longest_spec, pad_value=silence) for sample in batch]
    )
    # Speaker embeddings (SV2TTS) and the original dataset indices.
    embed_batch = np.array([sample[2] for sample in batch])
    indices = [sample[3] for sample in batch]
    # Convert the sequence containers (apart from indices) to tensors.
    chars = torch.tensor(char_batch).long()
    mel = torch.tensor(mel_batch)
    embeds = torch.tensor(embed_batch)
    return chars, mel, embeds, indices
def pad1d(x, max_len, pad_value=0):
    """Right-pad the 1-D sequence `x` with `pad_value` up to length `max_len`."""
    tail = max_len - len(x)
    return np.pad(x, (0, tail), mode="constant", constant_values=pad_value)
def pad2d(x, max_len, pad_value=0):
    """Pad the last axis of the 2-D array `x` with `pad_value` up to `max_len` columns."""
    tail = max_len - x.shape[-1]
    return np.pad(x, ((0, 0), (0, tail)), mode="constant", constant_values=pad_value)
| IronIron2121/not_i | modules/synthesizer/synthesizer_dataset.py | synthesizer_dataset.py | py | 4,028 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "numpy.load",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"l... |
74123444263 | #!/usr/bin/env python
"""Test `crtm_api` module."""
from crtm_poll import crtm_api
from aiohttp import ClientSession
from aioresponses import aioresponses
import os
import pytest
class TestAPI:
    """Tests for the `crtm_api.stop_times` logging and fetching helpers."""
    def test_can_log_fetch(self, tmpdir):
        # With a file target, fetch_log appends a CSV line to it.
        fetch_path = 'fetch_log'
        file = tmpdir.join(fetch_path)
        crtm_api.stop_times.fetch_log(file, 'a', 'b', 'c')
        assert file.readlines()[1] == 'a,b,c'
    def test_can_not_log_fetch(self, tmpdir):
        # With None as the log target, no file must be created.
        fetch_path = 'fetch_log'
        file = tmpdir.join(fetch_path)
        crtm_api.stop_times.fetch_log(None, 'a', 'b', 'c')
        assert not os.path.isfile(file)
    @pytest.mark.asyncio
    async def test_can_fetch_ok_stop(self):
        # Mock the CRTM stop-times endpoint and check fetch returns its body.
        with aioresponses() as m:
            m.get('https://www.crtm.es/widgets/api/GetStopsTimes.php?'
                  'codStop=8_17491&orderBy=2&stopTimesByIti=3&type=1',
                  status=200, body='test')
            session = ClientSession()
            fetch_conf = {
                'log': None,
                'timeout': 10,
                'max_connections': 1}
            resp_text = await crtm_api.stop_times.fetch('8_17491', session,
                                                        fetch_conf)
            assert 'test' in resp_text
            await session.close()
| cgupm/crtm_poll | tests/test_stop_times.py | test_stop_times.py | py | 1,317 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "crtm_poll.crtm_api.stop_times.fetch_log",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "crtm_poll.crtm_api.stop_times",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "crtm_poll.crtm_api",
"line_number": 17,
"usage_type": "name"
... |
21186201558 | from datetime import datetime
import pytest
from model_bakery import baker
from mytodo.models import Tarefa
from mytodo.services import tarefa_service
def test_should_get_tarefa_as_pending(db):
    """A freshly created Tarefa starts with status 'pending'."""
    my_tarefa = baker.make(Tarefa, description='Create an ansible deploy script', due_to=datetime.now())
    assert my_tarefa.status == 'pending'
def test_should_get_tarefa_as_done(db):
    """mark_as_done flips a pending Tarefa to status 'done'."""
    my_tarefa = baker.make(Tarefa, description='Create an ansible deploy script', due_to=datetime.now())
    tarefa_updated = tarefa_service.mark_as_done(my_tarefa.id)
    assert tarefa_updated.status == 'done'
def test_should_raise_an_erro_for_invalid_tarefa_id(db):
    """mark_as_done raises RuntimeError for an unknown Tarefa id."""
    invalid_tarefa = 0
    with pytest.raises(RuntimeError) as error:
        tarefa = tarefa_service.mark_as_done(invalid_tarefa)
    assert str(error.value) == f"Tarefa ID: {invalid_tarefa} invalida"
def test_should_mark_as_undone(db):
    """Calling mark_as_done on an already-done Tarefa returns it to 'pending'.

    NOTE(review): the test name says "undone" but it calls mark_as_done, so
    the service apparently toggles the done flag — confirm that is the
    intended contract of tarefa_service.mark_as_done.
    """
    my_tarefa = baker.make(
        Tarefa,
        description='Create an ansible deploy script',
        due_to=datetime.now(),
        done=True)
    tarefa_updated = tarefa_service.mark_as_done(my_tarefa.id)
    assert tarefa_updated.status == 'pending'
| JonathansManoel/todolist | mytodo/tests/test_tarefa_status.py | test_tarefa_status.py | py | 1,160 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "model_bakery.baker.make",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "mytodo.models.Tarefa",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "model_bakery.baker",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "d... |
75287915945 |
from PyQt5.QtWidgets import QLabel,QLineEdit,QPushButton,QMessageBox,QWidget,QApplication,QMainWindow,QTextEdit
from PyQt5.QtGui import QFont
import sys,time
# Module-level message buffers exchanged between the Admin (Chat) and
# User (Chat1) windows: Chat writes `getter`, Chat1 writes `getter2`.
getter=str()
getter2=str()
class FirstWindow(QWidget):
    """Login window: validates login/password and opens the matching chat."""
    def __init__(self):
        super().__init__()
        self.setGeometry(200, 100, 500, 500)
        self.setWindowTitle("Сhinese telegram")
        self.start()
    def font(self, obj):
        """Apply the standard 30pt Times font to a widget."""
        obj.setFont(QFont("Times", 30))
    def start(self):
        """Build the login form widgets."""
        yozuv1 = QLabel("Login:", self)
        yozuv1.setFont(QFont("Times", 25))
        yozuv1.move(50, 60)
        self.log = QLineEdit(self)
        self.log.setFont(QFont("Times", 25))
        self.log.move(150, 60)
        self.log.setPlaceholderText("login kiriting....")
        yozuv2 = QLabel("Parol:", self)
        yozuv2.setFont(QFont("Times", 25))
        yozuv2.move(50, 150)
        self.par = QLineEdit(self)
        self.par.setFont(QFont("Times", 25))
        self.par.move(150, 150)
        self.par.setPlaceholderText("parol kiriting....")
        ok = QPushButton("OK", self)
        ok.setFont(QFont("Times", 50))
        ok.move(200, 250)
        ok.clicked.connect(self.run)
        btn = QPushButton("exit", self)
        btn.clicked.connect(self.hide)
    def hide(self):
        # Fixed: the previous body called self.hide() recursively, producing
        # infinite recursion on click; delegate to the QWidget implementation.
        super().hide()
    def run(self):
        """Check the entered credentials and open the Admin or User chat."""
        # SECURITY NOTE: hard-coded credentials — move to a secure store.
        log_par = [['Admin', '12345'], ['User', '54321']]
        if log_par[0][0] == self.log.text() and log_par[0][1] == self.par.text():
            self.Chat = Chat()
            self.Chat.show()
        if log_par[1][0] == self.log.text() and log_par[1][1] == self.par.text():
            self.Chat1 = Chat1()
            self.Chat1.show()
class Chat(QMainWindow):
    """Admin's chat window: sends into `getter`, displays `getter2`."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        # Back button returns to the login window by hiding this one.
        self.btn=QPushButton("Back",self)
        self.btn.clicked.connect(self.run1)
        self.resize(500, 500)
        self.send()
        self.setWindowTitle("Admin's Chat")
        self.show()
    def run1(self):
        self.hide()
    def send(self):
        # Outgoing message editor.
        self.wn1=QTextEdit(self)
        self.wn1.setGeometry(10,40,480,150)
        self.wn1.setFont(QFont("Times",20))
        self.wn1.setPlaceholderText("Text kiriting...")
        # Incoming messages view, pre-filled from the user's buffer.
        self.wn2=QTextEdit(self)
        self.wn2.setGeometry(10,200,480,150)
        self.wn2.setFont(QFont("Times",20))
        self.wn2.setPlaceholderText("Sizga kelgan xabarlar... \n")
        self.wn2.setText(getter2)
        self.send1=QPushButton("Send💬",self)
        self.send1.setFont(QFont("Times",15))
        self.send1.move(200,400)
        self.send1.clicked.connect(self.sender)
    def sender(self):
        # Publish the typed text into the shared buffer read by Chat1.
        global getter
        getter=self.wn1.toPlainText()
        self.send1.hide()
class Chat1(QMainWindow):
    """User's chat window: sends into `getter2`, displays `getter`."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        # Back button returns to the login window by hiding this one.
        self.btn=QPushButton("Back",self)
        self.btn.clicked.connect(self.run1)
        self.resize(500, 500)
        self.send()
        self.setWindowTitle("User's Chat")
        self.show()
    def run1(self):
        self.hide()
    def send(self):
        # Outgoing message editor.
        self.wn1=QTextEdit(self)
        self.wn1.setGeometry(10,40,480,150)
        self.wn1.setFont(QFont("Times",20))
        self.wn1.setPlaceholderText("Text kiriting...")
        # Incoming messages view, pre-filled from the admin's buffer.
        self.wn2=QTextEdit(self)
        self.wn2.setGeometry(10,200,480,150)
        self.wn2.setFont(QFont("Times",20))
        self.wn2.setPlaceholderText("Sizga kelgan xabarlar... \n")
        self.wn2.setText(getter)
        self.send1=QPushButton("Send💬",self)
        self.send1.setFont(QFont("Times",15))
        self.send1.move(200,400)
        self.send1.clicked.connect(self.sender)
    def sender(self):
        # Publish the typed text into the shared buffer read by Chat.
        global getter2
        getter2=self.wn1.toPlainText()
        self.send1.hide()
# Application entry point: create the login window and run the Qt event loop
# until the last window closes.
app=QApplication(sys.argv)
oyna=FirstWindow()
oyna.show()
sys.exit(app.exec_())
| Golibbek0414/PYTHON | imtihonga.py/class1.py | class1.py | py | 3,837 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QFont",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PyQt... |
40533943010 | import io
import os.path
import pickle
import json
import time
from threading import Thread
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseDownload
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import _G
# Goole Drive File Downloader
class Downloader:
    """Download a single Google Drive file (by file id) on a background thread."""

    def __init__(self, id):
        # Drive file id to fetch; the local filename is resolved later from
        # the file's metadata in setup().
        self.id = id
        self.filename = ""
        self.downloader = None
        # Download progress in percent (0-100), updated by the worker thread.
        self.progress = 0
        self.thread = None
        self.flag_complete = False

    # setup before the download
    def setup(self):
        """Fetch the file's metadata and prepare the media-download request."""
        self.header = _G.GDriveService.files().get(fileId=self.id).execute()
        self.request = _G.GDriveService.files().get_media(fileId=self.id)

    # start download
    def start(self):
        """Open the local output file and launch the download thread."""
        # Strip characters that are not allowed in local file names.
        self.filename = self.header['name'].replace(_G.ForbiddenFileChar, '')
        stream = io.FileIO(self.filename, 'wb')
        self.downloader = MediaIoBaseDownload(stream, self.request)
        # NOTE(review): _chunksize is a private attribute of
        # MediaIoBaseDownload -- this relies on a googleapiclient
        # implementation detail.
        self.downloader._chunksize = _G.DownloaderChunkSize
        self.thread = Thread(target=self.download)
        self.thread.start()

    # async download the file
    def download(self):
        """Worker loop: pull chunks until next_chunk() reports completion."""
        while self.flag_complete is False:
            stat, self.flag_complete = self.downloader.next_chunk()
            self.progress = int(stat.progress() * 100)

    def is_completed(self):
        # True once next_chunk() has returned done=True.
        return self.flag_complete
def get_auth_creds():
    """Return cached Google credentials, or None when no cache exists.

    Also makes sure the ``secret/`` directory is present for later writes.
    """
    if not os.path.exists('secret/'):
        os.mkdir('secret')
    if not os.path.exists(_G.GDriveCredCache):
        return None
    with open(_G.GDriveCredCache, 'rb') as cache_file:
        return pickle.load(cache_file)
def load_creds_json():
    """Load the OAuth client configuration, falling back to the
    CLIPPER_GDRIVE_CREDS environment variable when the file is missing."""
    content = _G.loadfile_with_resuce(_G.GDriveCredFilename, 'r')
    return json.loads(content or os.environ['CLIPPER_GDRIVE_CREDS'])
def start_auth_session(creds):
    """Ensure valid credentials (refreshing or re-authorizing as needed) and
    store the Drive v3 service handle in ``_G.GDriveService``."""
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Expired but refreshable: refresh silently.
            creds.refresh(Request())
        else:
            # No usable credentials: run the interactive OAuth browser flow.
            flow = InstalledAppFlow.from_client_config(load_creds_json(), _G.GDriveScopes)
            creds = flow.run_local_server(port=0)
        # Persist the new or refreshed credentials for the next run.
        with open(_G.GDriveCredCache, 'wb') as file:
            pickle.dump(creds, file)
    _G.GDriveService = build('drive', 'v3', credentials=creds)
def download_data_async(id):
    """Create, prepare and start a Downloader for *id*; return it immediately
    so the caller can poll progress/is_completed()."""
    job = Downloader(id)
    job.setup()
    job.start()
    return job


def init():
    """Authenticate against Google Drive, reusing cached credentials when possible."""
    start_auth_session(get_auth_creds())
{
"api_name": "_G.GDriveService.files",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "_G.GDriveService",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "_G.GDriveService.files",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "_... |
1312451969 | """changing viewer layer names
Revision ID: 99ebe4492cee
Revises: b0b51fd07bfa
Create Date: 2023-06-12 15:34:29.609937
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '99ebe4492cee'
down_revision = 'b0b51fd07bfa'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add the viewer-agnostic column to both tables, then drop the old
    # neuroglancer-specific column.
    op.add_column('datastack', sa.Column('viewer_layer_name', sa.String(length=100), nullable=True))
    op.add_column('image_source', sa.Column('viewer_layer_name', sa.String(length=100), nullable=True))
    # NOTE(review): dropping (rather than renaming) discards any existing
    # ngl_layer_name values -- confirm this is intended.
    op.drop_column('image_source', 'ngl_layer_name')
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): restore the old column and drop the new ones.
    # Values stored in viewer_layer_name are lost on downgrade.
    op.add_column('image_source', sa.Column('ngl_layer_name', sa.VARCHAR(length=100), autoincrement=False, nullable=True))
    op.drop_column('image_source', 'viewer_layer_name')
    op.drop_column('datastack', 'viewer_layer_name')
    # ### end Alembic commands ###
| seung-lab/AnnotationFrameworkInfoService | migrations/versions/99ebe4492cee_changing_viewer_layer_names.py | 99ebe4492cee_changing_viewer_layer_names.py | py | 1,039 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String"... |
31462474999 | # Python built-in modules and packages
from dataclasses import dataclass
from typing import Dict, List, Union, Tuple
# --- Useful type hints ---
TableName = str
DatafieldName = str
PacketType = str
PacketData = Union[int, str, float]
Packet = Dict[DatafieldName, PacketData]
Message = Dict[str, List[Packet]]
RowDB = List[Tuple[DatafieldName, PacketData]]
DbFormat = Dict[str, List[DatafieldName]]

# Database table formats: for each table, the datafields (column names) in
# the order the database expects them.
dbFormats: DbFormat = {
    "gps": [
        "timestamp",
        "date",
        "hour",
        "tbr_serial_id",
        "SLIM_status",
        "longitude",
        "latitude",
        "pdop",
        "FIX",
        "num_sat_tracked",
        "comment",
    ],
    "tbr": [
        "timestamp",
        "date",
        "hour",
        "tbr_serial_id",
        "temperature",
        "temperature_data_raw",
        "noise_avg",
        "noise_peak",
        "frequency",
        "comment",
    ],
    "tag": [
        "timestamp",
        "date",
        "hour",
        "tbr_serial_id",
        "comm_protocol",
        "frequency",
        "tag_id",
        "tag_data",
        "tag_data_raw",
        "tag_data_2",  # DS256
        "tag_data_raw_2",  # DS256
        "snr",
        "millisecond",
        "comment",
    ],
}


@dataclass
class DatabasePacket:
    """One row ready for SQL insertion.

    Derived fields (filled in __post_init__):
      numOfValues -- number of values to insert
      sql_columns -- "(col_1, col_2, ..., col_n)" fragment
      sql_values  -- "(?, ?, ..., ?)" placeholder fragment for safe insertion
    """

    table: TableName
    columns: Tuple[str]
    values: Tuple[PacketData]
    numOfValues: int = 0
    sql_columns: str = ""
    sql_values: str = ""

    def __post_init__(self):
        self.numOfValues = len(self.values)
        # IDIOM FIX: str.join replaces the original manual iterator /
        # StopIteration loop.  It also behaves sanely for an empty column
        # tuple (produces "()" / "()" instead of raising StopIteration and
        # emitting a stray "(?)" placeholder).
        self.sql_columns = f"({', '.join(self.columns)})"
        self.sql_values = f"({', '.join('?' * self.numOfValues)})"
def convert_msg_to_database_format(msg: Message, msgID: int) -> List[DatabasePacket]:
    """Convert a decoded message into a list of DatabasePacket rows.

    For every packet in ``msg.payload``, the datafields listed in
    ``dbFormats`` (and actually present in the packet) are kept, and
    ``message_id`` = *msgID* is prepended so each row can be joined back to
    its source message.

    NOTE(review): despite the ``Message`` (dict) annotation, this reads
    ``msg.payload`` -- the argument is apparently an object with a
    ``payload`` attribute; confirm against the caller.
    """
    dbmsg: List[DatabasePacket] = []
    for packet in msg.payload:  # enumerate() index was unused -- removed
        # Renamed from "type" to avoid shadowing the builtin.
        packet_type: TableName = packet["packetType"]
        dbformat: List[DatafieldName] = dbFormats[packet_type]
        columns, values = [], []  # type: List[str], List[PacketData]
        for datafield in dbformat:
            if datafield in packet:
                columns.append(datafield)
                values.append(packet[datafield])
        # add message_id to packet
        columns.insert(0, "message_id")
        values.insert(0, msgID)
        dbmsg.append(DatabasePacket(packet_type, tuple(columns), tuple(values)))
    return dbmsg
| PerKjelsvik/iof | src/backend/dbmanager/msgconversion.py | msgconversion.py | py | 3,037 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number":... |
28247026448 | import datetime
import time
from playsound import playsound
def set_alarm(alarm_time, sound_file=r"C:\Users\madhu\Downloads\Alarm.mp3.mp3"):
    """Block until the wall clock reaches *alarm_time*, then play a sound.

    alarm_time -- target time formatted exactly as "%H:%M:%S"
    sound_file -- path of the audio file to play; generalized from the
                  previously hard-coded constant (default keeps old behavior)
    """
    while True:
        current_time = datetime.datetime.now().strftime("%H:%M:%S")
        # NOTE(review): with a 1 s sleep between checks, an exact string
        # match can in rare cases skip the target second; kept as-is because
        # a ">=" comparison would mis-trigger after midnight.
        if current_time == alarm_time:
            print("It's an alarm time")
            print("Play the alarm")
            playsound(sound_file)
            break
        time.sleep(1)
# Offer the supported time formats and let the user pick one.
time_formats = ["%H:%M:%S", "%H:%M"]
print("Assigned time formats:")
for i, time_format in enumerate(time_formats, start=1):
    print(str(i) + " " + time_format)
choice = int(input("Enter your choice:"))
selected_format = time_formats[choice - 1]
alarm_time = input("Enter the alarm time(" + selected_format + "):")
# Normalize the entered time to "%H:%M:%S" (strptime also validates input;
# the "%H:%M" branch pads the missing seconds with ":00").
if selected_format == "%H:%M:%S":
    alarm_time = datetime.datetime.strptime(alarm_time, "%H:%M:%S").strftime("%H:%M:%S")
if selected_format == "%H:%M":
    alarm_time = datetime.datetime.strptime(alarm_time, "%H:%M").strftime("%H:%M:%S")
# function call
set_alarm(alarm_time)
| Madhusudhan178/Alarm_clock-py | alarm_clock.py | alarm_clock.py | py | 1,033 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "playsound.playsound",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.s... |
24975211817 | import os
from setuptools import setup, find_packages
from version import get_git_version
def files(folder, exclude=()):
    """Recursively collect (directory, filename) pairs under *folder*.

    exclude -- iterable of path prefixes; any file whose "root/name" path
               starts with one of them is skipped.

    FIXES: the mutable default argument ``[]`` was replaced with a tuple,
    and the inner loop variable no longer shadows this function's own name.
    """
    found_files = []
    for root, dirs, filenames in os.walk(folder):
        for f in filenames:
            if not any(("%s/%s" % (root, f)).startswith(e) for e in exclude):
                found_files.append((root, f))
    return found_files
def flatten_all_files(*dirs, **kwargs):
    """Build setup() ``data_files`` entries for every file under *dirs*.

    Keyword args:
      exclude -- path prefixes to skip (passed through to files())
      root    -- prefix stripped from each found directory when computing
                 the install destination; must occur in every path
    """
    exclude = kwargs.get('exclude', [])
    root = kwargs.get('root', '')
    collected = []
    for directory in dirs:
        for froot, fname in files(directory, exclude):
            prefix_start = froot.find(root)
            assert prefix_start > -1, 'Impossible base root provided. Found files do not match: %s' % root
            destination = froot[prefix_start + len(root):]
            collected.append((destination, ["%s/%s" % (froot, fname)]))
    return collected
# Package the pre-built web UI, skipping the bundled ExtJS sources and the
# sass cache; paths are made relative to the production build root.
data_files = flatten_all_files('build/Onc/production/',
                               exclude=('build/Onc/production/lib/ext-4.2', 'build/Onc/production/.sass-cache'),
                               root='build/Onc/production/'
                               )
# Extra top-level assets installed next to the application.
data_files.append(('', ['favicon.png', 'beep.wav', 'portal.html']))

setup(
    name="opennode.oms.onc",
    version=get_git_version(),
    description="""OpenNode Console application""",
    author="OpenNode Developers",
    author_email="info@opennodecloud.com",
    packages=find_packages(),
    data_files=data_files,
    namespace_packages=['opennode'],
    zip_safe=False,  # we need to serve real files
    entry_points={'oms.plugins': ['onc = opennode.onc.main:OncPlugin']},
    install_requires=[
        "setuptools",  # Redundant but removes a warning
        "opennode.oms.core",
        "opennode.oms.knot",
    ],
    license='GPLv2',
)
| opennode/opennode-console | setup.py | setup.py | py | 1,732 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "os.walk",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "version.get_git_version",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packa... |
28092663576 | from statistics import mean
# Read N (students) and X (subjects), then X lines with N marks each.
num_students, num_subjects = map(int, input().split())
subject_rows = [list(map(float, input().split())) for _ in range(num_subjects)]
# Transposing gives, per student, their marks across all subjects.
for student_marks in zip(*subject_rows):
    print(mean(student_marks))
| Avani18/Hackerrank-Python | 11. Built-Ins/Zipped.py | Zipped.py | py | 188 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "statistics.mean",
"line_number": 12,
"usage_type": "call"
}
] |
74261408425 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import time
import random
from twisted.internet import task
from twisted.internet.protocol import DatagramProtocol
from demo.main import reactor
from demo.proto import ProtocolParser
tasks = {}
finished_tasks = {}
class ClientProtocol(DatagramProtocol):
    """UDP client protocol: periodically sends task requests to the
    dispatcher and records how long each task takes to be answered in the
    module-level ``tasks`` / ``finished_tasks`` dicts."""

    def __init__(self, config):
        # config keys used here: name, dispatcher_address, dispatcher_port.
        self.config = config

    def callback(self):
        """Send one task request (invoked periodically by a LoopingCall)."""
        # Task ids embed the client name, a timestamp and a random suffix so
        # they stay unique across runs.
        task_id = '%s_%i_%i' % (self.config['name'], int(time.time()), random.randint(10, 1000))
        proto = ProtocolParser(self.config['name'], {'task_client_request': True}, task_id)
        # Remember the send time so the turnaround can be computed later.
        tasks[proto.task_id] = time.time()
        print('Send client request %s' % proto.task_id)
        self.transport.write(proto.serialize())

    def startProtocol(self):
        # Lock the transport onto the dispatcher endpoint.
        self.transport.connect(self.config['dispatcher_address'], self.config['dispatcher_port'])

    def stopProtocol(self):
        # NOTE(review): re-registers itself on a fresh ephemeral port when
        # stopped -- presumably to keep the client alive after a disconnect;
        # confirm intent.
        reactor.listenUDP(0, self)

    def datagramReceived(self, datagram, addr):
        """Handle a dispatcher response and record the task's turnaround."""
        proto = ProtocolParser.deserialize(datagram)
        print('Incoming request from %s %s' % addr)
        if proto.task_id is None or proto.task_client_response is False:
            print('Wrong packet')
            return None
        if tasks.get(proto.task_id):
            # Store the duration and mark the task as answered (True).
            finished_tasks[proto.task_id] = time.time() - tasks[proto.task_id]
            tasks[proto.task_id] = True
            print('Incoming response %s. Work time is %.2f sec' % (proto.task_id, finished_tasks[proto.task_id]))
        else:
            print('Wrong task %s' % proto.task_id)
def initial(conf):
    """Start the UDP client: merge *conf* over the defaults, register the
    protocol with the reactor and begin sending periodic task requests."""
    defaults = {
        'name': 'client_%i_%i' % (int(time.time()), random.randint(10, 1000)),
        'dispatcher_address': '127.0.0.1',
        'dispatcher_port': 8000,
        'task_send_timeout': 60,
    }
    defaults.update(conf)
    protocol = ClientProtocol(defaults)
    reactor.listenUDP(0, protocol)
    beat = task.LoopingCall(protocol.callback)
    beat.start(defaults['task_send_timeout'])
def report():
    """Print summary statistics about sent, answered and pending tasks."""
    durations = list(finished_tasks.values())
    pending = sum(1 for state in tasks.values() if state is not True)
    if durations:
        fastest, slowest = min(durations), max(durations)
        average = sum(durations) / len(durations)
    else:
        fastest = slowest = average = 0
    print('Sent tasks: %d' % len(tasks))
    print('Finished tasks: %d' % len(finished_tasks))
    print('Not answered tasks: %d' % pending)
    print('Minimal task time: %.2f' % fastest)
    print('Average task time: %.2f' % average)
    print('Maximal task time: %.2f' % slowest)
| frad00r4/demo_project | demo/modules/client.py | client.py | py | 2,553 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "twisted.internet.protocol.DatagramProtocol",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "d... |
28856814409 | from __future__ import print_function, absolute_import
import sys
import PyQt4.QtGui as QtGui
import PyQt4.QtScript as QtScript
from PyQt4.QtCore import SIGNAL
app = QtGui.QApplication(sys.argv)
from qtreactor import pyqt4reactor
pyqt4reactor.install()
class DoNothing(object):
    """Button-click target: launches the trial runner unless already running."""

    def __init__(self):
        # count is never updated by this class; kept for compatibility.
        self.count = 0
        self.running = False

    def button_click(self):
        if self.running:
            return
        # Imported lazily so constructing the object does not require twisted.
        from twisted.scripts import trial
        trial.run()
def run():
    """Wire a Qt push button to the trial runner through QtScript, then run
    the application event loop until it quits."""
    t = DoNothing()
    engine = QtScript.QScriptEngine()
    button = QtGui.QPushButton()
    # Wrap the C++ widget so it is addressable from script code.
    button = engine.newQObject(button)
    engine.globalObject().setProperty("button", button)
    # Old-style signal connection: clicking the button starts the trial run.
    app.connect(button, SIGNAL("clicked()"), t.button_click)
    # Configure and show the button entirely from script.
    engine.evaluate("button.text = 'Do Twisted Gui Trial'")
    engine.evaluate("button.styleSheet = 'font-style: italic'")
    engine.evaluate("button.show()")
    app.exec_()
    print('fell off the bottom?...')
| ghtdak/qtreactor | qtreactor/gtrial.py | gtrial.py | py | 969 | python | en | code | 50 | github-code | 36 | [
{
"api_name": "PyQt4.QtGui.QApplication",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "qtreactor.pyqt4reacto... |
40961303379 | # coding: utf-8
import argparse
import importlib.util
import inspect
import os
from datetime import datetime
# Accumulated list of detected problems (appended to by problema()).
problemas = []


def problema(mensaje, *args):
    """Record a problem, formatting *mensaje* with *args*."""
    texto = mensaje.format(*args)
    problemas.append(texto)


def validar_tiempo(inicio, fin, tope, mensaje):
    """Record *mensaje* as a problem when fin - inicio exceeds *tope* seconds."""
    segundos = (fin - inicio).total_seconds()
    if segundos > tope:
        problema(mensaje)
def probar_codigo(interactivo=False, saltear_errores=False, resultado_verboso=False, grupo=None):
    """Import the student's submission and run the search test cases,
    recording every detected issue via problema().

    interactivo      -- ask before running each test case
    saltear_errores  -- record exceptions as problems instead of raising
    resultado_verboso -- also print the goal state and full path
    grupo            -- optional subdirectory containing entrega1.py
    """
    # dependencies
    try:
        from simpleai.search.models import SearchNode
    except ImportError:
        problema('No se pudo importar SimpleAI. Se encuentra instalado?')
        return

    # try to import the submission (timed: a slow import usually means the
    # search is being run at module level)
    print('Importando la entrega...')
    try:
        inicio = datetime.now()
        if grupo:
            spec = importlib.util.spec_from_file_location("{}.entrega1".format(grupo),
                                                          "{}/entrega1.py".format(grupo))
            entrega1 = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(entrega1)
        else:
            import entrega1
        fin = datetime.now()
    except ImportError:
        problema('No se pudo encontrar el código python. Probablemente el nombre del archivo .py '
                 'no es correcto, o no está en la raiz del repositorio.')
        return

    validar_tiempo(inicio, fin, 3,
                   'El import de la entrega demora demasiado tiempo, probablemente están '
                   'haciendo búsqueda en el import. Hagan lo del if __name__ ... que se '
                   'recomienda en la consigna.')

    # extract and validate the resolver function's signature
    print('Extrayendo la función resolver...')
    resolver = getattr(entrega1, 'resolver', None)
    if resolver is None:
        problema('El módulo python no define la función resolver.')
        return

    firma_resolver = inspect.getargspec(resolver)
    args = firma_resolver.args
    defaults = firma_resolver.defaults or []
    # The required (non-default) parameters must match the assignment spec.
    if args[:len(args) - len(defaults)] != ['metodo_busqueda', 'franceses', 'piratas']:
        problema('La función resolver no recibe los parámetros definidos en la entrega.')
        return

    # validate resolver behavior and the problem model in general
    print('Probando la resolución de problemas...')
    franceses_consigna = (
        (0, 2),
        (0, 3),
        (1, 2),
        (1, 3),
        (2, 1),
        (2, 2),
        (2, 3),
        (3, 0),
        (3, 1),
        (3, 2),
        (4, 0),
        (4, 1),
        (5, 0),
    )
    piratas_consigna = (
        (4, 4),
        (4, 5),
        (5, 4),
    )

    # metodo_busqueda, franceses, piratas, limite_largo_camino, limite_tiempo
    pruebas = (
        # no frenchmen, 1 pirate
        ('breadth_first', [], [(4, 4)], 14, 2),
        ('depth_first', [], [(4, 4)], 100, 10),
        ('greedy', [], [(4, 4)], 25, 2),
        ('astar', [], [(4, 4)], 14, 2),
        # 3 frenchmen, 2 pirates
        ('breadth_first', [(1, 0), (2, 1), (3, 0)], [(3, 4), (4, 4)], 19, 10),
        ('depth_first', [(1, 0), (2, 1), (3, 0)], [(3, 4), (4, 4)], 200, 30),
        ('greedy', [(1, 0), (2, 1), (3, 0)], [(3, 4), (4, 4)], 50, 10),
        ('astar', [(1, 0), (2, 1), (3, 0)], [(3, 4), (4, 4)], 19, 10),
        # the case from the assignment
        ('breadth_first', franceses_consigna, piratas_consigna, 33, 30),
        ('depth_first', franceses_consigna, piratas_consigna, 500, 60),
        ('greedy', franceses_consigna, piratas_consigna, 60, 30),
        ('astar', franceses_consigna, piratas_consigna, 33, 30),
    )

    for numero_prueba, prueba in enumerate(pruebas):
        metodo_busqueda, franceses, piratas, limite_largo_camino, limite_tiempo = prueba
        print(' Prueba', numero_prueba, ':', metodo_busqueda, 'franceses:', franceses, 'piratas:',
              piratas)
        if not interactivo or input('ejecutar? (Y/n)').strip() in ('y', ''):
            try:
                inicio = datetime.now()
                resultado = resolver(metodo_busqueda=metodo_busqueda,
                                     franceses=franceses,
                                     piratas=piratas)
                fin = datetime.now()

                if isinstance(resultado, SearchNode):
                    print(' largo camino:', len(resultado.path()))
                    print(' estado:', resultado.state)
                    print(' acciones:', [accion for accion, estado in resultado.path()])
                    if resultado_verboso:
                        print(' meta:', repr(resultado.state))
                        print(' camino:', repr(resultado.path()))
                else:
                    print(' resultado:', str(resultado))
                print(' duración:', (fin - inicio).total_seconds())

                # A correct submission must return a SearchNode; None means
                # no solution was found (or the node was not returned).
                if resultado is None:
                    problema('El resultado devuelto por la función resolver en la prueba {} fue '
                             'None, cuando el problema tiene que encontrar solución y se espera '
                             'que retorne el nodo resultante. Puede que la función resolver no '
                             'esté devolviendo el nodo resultante, o que el problema no esté '
                             'encontrando solución como debería.',
                             numero_prueba)
                elif isinstance(resultado, SearchNode):
                    if limite_largo_camino and len(resultado.path()) > limite_largo_camino:
                        problema('El resultado devuelto en la prueba {} excede el largo de camino'
                                 'esperable ({}) para ese problema y método de búsqueda. Es '
                                 'posible que algo no esté bien.',
                                 numero_prueba, limite_largo_camino)
                else:
                    problema('El resultado devuelto por la función resolver en la prueba {} no es '
                             'un nodo de búsqueda.',
                             numero_prueba)

                if limite_tiempo is not None:
                    validar_tiempo(inicio, fin, limite_tiempo,
                                   'La prueba {} demoró demasiado tiempo (más de {} segundos), '
                                   'probablemente algo no está demasiado '
                                   'bien.'.format(numero_prueba, limite_tiempo))
            except Exception as err:
                if saltear_errores:
                    problema('Error al ejecutar prueba {} ({})', numero_prueba, str(err))
                else:
                    raise
def probar_estadisticas(grupo=None):
    """Validate the submitted statistics file (entrega1.txt).

    Expected format: one "caso:int,int,int,int" line per case 1..4.
    Any deviation is recorded via problema().
    """
    # open the statistics file
    print('Abriendo estadísticas...')
    nombre_archivo = 'entrega1.txt'
    if grupo:
        nombre_archivo = os.path.join(grupo, nombre_archivo)
    if not os.path.exists(nombre_archivo):
        problema('No se pudo encontrar el archivo de estadísticas. Probablemente el nombre del '
                 'archivo no es correcto, o no está en la raiz del repositorio.')
        return
    with open(nombre_archivo) as archivo_stats:
        lineas_stats = archivo_stats.readlines()

    # validate contents
    casos = list(range(1, 5))
    casos_pendientes = casos[:]

    for linea in lineas_stats:
        linea = linea.strip()
        if linea:
            try:
                caso, valores = linea.split(':')
                caso = int(caso)
                valores = list(map(int, valores.split(',')))
                if len(valores) != 4:
                    raise ValueError()
                if caso not in casos:
                    problema('Caso desconocido en archivo de estadísticas: {}', caso)
                elif caso not in casos_pendientes:
                    problema('Caso repetido en archivo de estadísticas: {}', caso)
                else:
                    print(' Encontrado caso', caso)
                    print(' Valores:', valores)
                    casos_pendientes.remove(caso)
            except ValueError:
                # FIX: was a bare "except:", which also swallowed
                # KeyboardInterrupt and unrelated bugs.  Malformed lines
                # (bad split/unpack or non-integer values) raise ValueError.
                problema('La siguiente linea de estadísticas no respeta el formato definido: {}',
                         linea)

    if casos_pendientes:
        problema('No se incluyeron las estadísticas de los siguientes casos: {}',
                 repr(casos_pendientes))
def imprimir_resultados():
    """Print the collected problems, when there are any."""
    def listar_cosas(titulo, elementos):
        if not elementos:
            return
        print(titulo + ':')
        for elemento in elementos:
            print('*', elemento)

    listar_cosas('Problemas que es necesario corregir', problemas)
def probar(interactivo=False, saltear_errores=False, resultado_verboso=False, grupo=None):
    """Run every automatic check on a submission and print a summary."""
    separador = '#' * 80
    print(separador)
    if grupo:
        print("Probando grupo", grupo)
    probar_codigo(interactivo, saltear_errores, resultado_verboso, grupo)
    print()
    probar_estadisticas(grupo)
    print()
    print('Pruebas automáticas terminadas!')
    print()
    imprimir_resultados()
    print(separador)
if __name__ == '__main__':
    # Command line flags mirror probar()'s parameters one-to-one.
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', action='store_true', help='Interactivo')
    parser.add_argument('-s', action='store_true', help='Saltear errores')
    parser.add_argument('-v', action='store_true', help='Resultado verboso')
    parser.add_argument('--path', help='Path a la entrega')
    args = parser.parse_args()
    probar(args.i, args.s, args.v, args.path)
| ucse-ia/ucse_ia | 2019/probar_entrega1.py | probar_entrega1.py | py | 9,466 | python | es | code | 5 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "importlib.util.util.spec_from_file_location",
"line_number": 36,
"usage_type": "call"
},
{
... |
17233286907 | import argparse
import torch
import torch.nn as nn
import numpy as np
import os
import pickle
from data_loader import get_loader
from build_vocab import Vocabulary
from model import EncoderCNN, DecoderRNN
from torch.nn.utils.rnn import pack_padded_sequence
from torchvision import transforms
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def main(num_epochs=5, batch_size=128, embed_size=256, hidden_size=512,
         num_layers=1, learning_rate=0.001, log_step=10, save_step=1000):
    """Train the CNN encoder / RNN decoder captioning model on COCO.

    GENERALIZATION: the hyper-parameters were previously hard-coded inline;
    they are now keyword arguments whose defaults reproduce the original
    behavior exactly, so existing ``main()`` callers are unaffected.
    """
    # Create model directory
    if not os.path.exists('models/'):
        os.makedirs('models/')

    # Image preprocessing, normalization for the pretrained resnet
    transform = transforms.Compose([
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406),
                             (0.229, 0.224, 0.225))])

    # Load vocabulary wrapper
    with open('data/vocab.pkl', 'rb') as f:
        vocab = pickle.load(f)

    # Build data loader
    data_loader = get_loader('data/resized2014', 'data/annotations/captions_train2014.json', vocab,
                             transform, batch_size,
                             shuffle=True, num_workers=2)

    # Build the models
    encoder = EncoderCNN(embed_size).to(device)
    decoder = DecoderRNN(embed_size, hidden_size, len(vocab), num_layers).to(device)

    # Loss and optimizer.  Only the decoder plus the encoder's linear and
    # batch-norm layers are optimized (the CNN backbone stays fixed).
    criterion = nn.CrossEntropyLoss()
    params = list(decoder.parameters()) + list(encoder.linear.parameters()) + list(encoder.bn.parameters())
    optimizer = torch.optim.Adam(params, lr=learning_rate)

    # Train the models
    total_step = len(data_loader)
    for epoch in range(num_epochs):
        for i, (images, captions, lengths) in enumerate(data_loader):

            # Set mini-batch dataset
            images = images.to(device)
            captions = captions.to(device)
            # Targets are the captions packed to their true lengths.
            targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]

            # Forward, backward and optimize
            features = encoder(images)
            outputs = decoder(features, captions, lengths)
            loss = criterion(outputs, targets)
            decoder.zero_grad()
            encoder.zero_grad()
            loss.backward()
            optimizer.step()

            # Print log info
            if i % log_step == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Perplexity: {:5.4f}'
                      .format(epoch, num_epochs, i, total_step, loss.item(), np.exp(loss.item())))

            # Save the model checkpoints
            if (i + 1) % save_step == 0:
                torch.save(decoder, os.path.join(
                    'models/', 'decoder-{}-{}.pkl'.format(epoch + 1, i + 1)))
                torch.save(encoder, os.path.join(
                    'models/', 'encoder-{}-{}.pkl'.format(epoch + 1, i + 1)))
if __name__ == '__main__':
main()
| vshantam/ImageCaptioning | train.py | train.py | py | 2,842 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "torch.device",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",... |
19368162206 | #! /usr/bin/python2.7
import rospy
import datetime
import geometry_msgs.msg
CMD_VEL_TOPIC = "/cmd_vel"
def main():
    """Drive forward at a configured speed for a configured time, then stop.

    Reads the private parameters ~drive_speed and ~drive_time and publishes
    Twist messages at 10 Hz on /cmd_vel until the time budget is used up.
    """
    rospy.init_node("drive_forward_node")
    drive_speed = rospy.get_param("~drive_speed")
    drive_time = rospy.get_param("~drive_time")
    # FIX: queue_size was missing, which triggers a rospy deprecation
    # warning and falls back to synchronous publishing.
    twist_publisher = rospy.Publisher(CMD_VEL_TOPIC, geometry_msgs.msg.Twist, queue_size=10)

    rospy.loginfo(
        "Initializing drive forward node with velocity {drive_speed} m/s and drive time {drive_time} seconds".format(
            drive_speed=drive_speed,
            drive_time=drive_time
        )
    )

    drive_forward_message = geometry_msgs.msg.Twist()
    drive_forward_message.linear.x = drive_speed
    stop_message = geometry_msgs.msg.Twist()
    stop_message.linear.x = 0

    drive_start_time = datetime.datetime.now()
    publish_rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        current_drive_time_seconds = (datetime.datetime.now() - drive_start_time).total_seconds()
        if current_drive_time_seconds > drive_time:
            # Time budget used up: command a stop and leave the loop.
            twist_publisher.publish(stop_message)
            rospy.loginfo("Finished driving. Stopping.")
            break
        twist_publisher.publish(drive_forward_message)
        publish_rate.sleep()

    if not rospy.is_shutdown():
        rospy.loginfo("Terminating drive forward node")
if __name__ == "__main__":
main()
| slensgra/robotic-perception-systems-assignment-1 | src/drive_forward_node.py | drive_forward_node.py | py | 1,349 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rospy.init_node",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "rospy.Publisher",
... |
32788623389 | import wx
import re
import Wammu
import Wammu.Events
import Wammu.Utils
import Wammu.Paths
from Wammu.Locales import StrConv, ugettext as _
import wx.lib.mixins.listctrl
# Per-view column definitions: maps a browser type to a pair of tuples --
# the localized column headings, and the matching data keys looked up in
# each value dict shown by the Browser list control.
COLUMN_INFO = {
    'info':
        (
            (
                _('Name'),
                _('Value')
            ),
            (
                'Name',
                'Value'
            ),
        ),
    'contact':
        (
            (
                _('Location'),
                _('Memory'),
                _('Name'),
                _('Number')
            ),
            (
                'Location',
                'MemoryType',
                'Name',
                'Number'
            ),
        ),
    'call':
        (
            (
                _('Location'),
                _('Type'),
                _('Name'),
                _('Number'),
                _('Date')
            ),
            (
                'Location',
                'MemoryType',
                'Name',
                'Number',
                'Date'
            ),
        ),
    'message':
        (
            (
                _('Location'),
                _('Status'),
                _('Number'),
                _('Date'),
                _('Text')
            ),
            (
                'Location',
                'State',
                'Number',
                'DateTime',
                'Text'
            ),
        ),
    'todo':
        (
            (
                _('Location'),
                _('Completed'),
                _('Priority'),
                _('Text'),
                _('Date')
            ),
            (
                'Location',
                'Completed',
                'Priority',
                'Text',
                'Date'
            ),
        ),
    'calendar':
        (
            (
                _('Location'),
                _('Type'),
                _('Start'),
                _('End'),
                _('Text'),
                _('Alarm'),
                _('Recurrence')
            ),
            (
                'Location',
                'Type',
                'Start',
                'End',
                'Text',
                'Alarm',
                'Recurrence'
            ),
        )
}
class FilterException(Exception):
    """Raised when a filtering expression cannot be compiled or applied."""
class Browser(wx.ListCtrl, wx.lib.mixins.listctrl.ListCtrlAutoWidthMixin):
'''
Generic class for browsing values.
'''
    def __init__(self, parent, win, cfg):
        """Create the virtual list control.

        parent -- wx parent window
        win    -- main application window (stored for use by other methods)
        cfg    -- configuration object (stored for use by other methods)
        """
        wx.ListCtrl.__init__(
            self,
            parent,
            -1,
            style=wx.LC_REPORT | wx.LC_VIRTUAL | wx.LC_HRULES | wx.LC_VRULES
        )
        self.win = win
        self.cfg = cfg
        # Currently activated row; -1 means none.
        self.itemno = -1
        # One of the COLUMN_INFO keys (info/contact/call/message/todo/calendar).
        self.type = ''
        # values = rows currently displayed; allvalues = unfiltered rows.
        self.values = []
        self.allvalues = []
        self.sortkey = ''
        # Sort direction multiplier: 1 = ascending, -1 = descending.
        self.sortorder = 1
        self.columns = []
        self.keys = []
        # Row index the context menu was opened on.
        self.popup_index = -1
        # Row attributes: plain/striped backgrounds, each also in italic.
        color = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DLIGHT)
        self.attr1 = wx.ListItemAttr()
        self.attr2 = wx.ListItemAttr()
        self.attr2.SetBackgroundColour(color)
        self.attr3 = wx.ListItemAttr()
        fnt = self.attr3.GetFont()
        fnt.SetStyle(wx.FONTSTYLE_ITALIC)
        self.attr3.SetFont(fnt)
        self.attr4 = wx.ListItemAttr()
        self.attr4.SetBackgroundColour(color)
        self.attr4.SetFont(fnt)
        # Sort-direction arrows shown in the column headers.
        image_list = wx.ImageList(16, 16)
        down_bitmap = wx.Bitmap(Wammu.Paths.MiscPath('downarrow'))
        up_bitmap = wx.Bitmap(Wammu.Paths.MiscPath('uparrow'))
        self.downarrow = image_list.Add(down_bitmap)
        self.uparrow = image_list.Add(up_bitmap)
        self.AssignImageList(image_list, wx.IMAGE_LIST_SMALL)
        wx.lib.mixins.listctrl.ListCtrlAutoWidthMixin.__init__(self)
        # Create IDs for popup menu
        self.popup_id_send = wx.NewId()
        self.popup_id_edit = wx.NewId()
        self.popup_id_message = wx.NewId()
        self.popup_id_contact = wx.NewId()
        self.popup_id_call = wx.NewId()
        self.popup_id_delete = wx.NewId()
        self.popup_id_delete_selection = wx.NewId()
        self.popup_id_duplicate = wx.NewId()
        self.popup_id_reply = wx.NewId()
        self.popup_id_backup_one = wx.NewId()
        self.popup_id_backup_selection = wx.NewId()
        self.popup_id_backup_all = wx.NewId()
        self.BindEvents()
def BindEvents(self):
'''
Bind various event handlers to events we need.
'''
self.Bind(
wx.EVT_LIST_ITEM_SELECTED,
self.OnItemSelected,
self
)
self.Bind(
wx.EVT_LIST_ITEM_ACTIVATED,
self.OnItemActivated,
self
)
self.Bind(
wx.EVT_LIST_KEY_DOWN,
self.OnKey,
self
)
self.Bind(
wx.EVT_LIST_COL_CLICK,
self.OnColClick,
self
)
self.Bind(
wx.EVT_LIST_ITEM_RIGHT_CLICK,
self.OnRightClick,
self
)
self.Bind(
wx.EVT_MENU,
self.OnPopupSend,
id=self.popup_id_send
)
self.Bind(
wx.EVT_MENU,
self.OnPopupEdit,
id=self.popup_id_edit
)
self.Bind(
wx.EVT_MENU,
self.OnPopupMessage,
id=self.popup_id_message
)
self.Bind(
wx.EVT_MENU,
self.OnPopupContact,
id=self.popup_id_contact
)
self.Bind(
wx.EVT_MENU,
self.OnPopupCall,
id=self.popup_id_call
)
self.Bind(
wx.EVT_MENU,
self.OnPopupDelete,
id=self.popup_id_delete
)
self.Bind(
wx.EVT_MENU,
self.OnPopupDeleteSel,
id=self.popup_id_delete_selection
)
self.Bind(
wx.EVT_MENU,
self.OnPopupDuplicate,
id=self.popup_id_duplicate
)
self.Bind(
wx.EVT_MENU,
self.OnPopupReply,
id=self.popup_id_reply
)
self.Bind(
wx.EVT_MENU,
self.OnPopupBackupOne,
id=self.popup_id_backup_one
)
self.Bind(
wx.EVT_MENU,
self.OnPopupBackupSel,
id=self.popup_id_backup_selection
)
self.Bind(
wx.EVT_MENU,
self.OnPopupBackupAll,
id=self.popup_id_backup_all
)
    def ShowHeaders(self):
        '''
        Update which headers and keys are shown (from COLUMN_INFO for the
        current type) and size the columns to fit their content.
        '''
        self.columns = COLUMN_INFO[self.type][0]
        self.keys = COLUMN_INFO[self.type][1]
        cnt = len(self.columns)
        for i in range(cnt):
            self.InsertColumn(i, self.columns[i])

        # resize columns to fit content
        # FIXME: this should be acquired better!
        spc = 10
        maxval = [0] * cnt
        for i in range(cnt):
            size = self.GetTextExtent(StrConv(self.columns[i]))[0]
            # 16 below reserves room for the sort arrow icon
            if size + 16 > maxval[i]:
                maxval[i] = size + 16
        for current in self.values:
            for i in range(cnt):
                size = self.GetTextExtent(StrConv(current[self.keys[i]]))
                if size[0] > maxval[i]:
                    maxval[i] = size[0]
        for i in range(cnt - 1):
            self.SetColumnWidth(i, maxval[i] + spc)
        # The last column absorbs the remaining width (auto-width mixin).
        self.resizeLastColumn(maxval[cnt - 1] + spc)
def Filter(self, text, filter_type):
'''
Filters content of browser by various expressions (type of expression
is defined by filter_type).
'''
if text == '':
self.values = self.allvalues
else:
num = None
if text.isdigit():
num = int(text)
if filter_type == 0:
match = re.compile('.*%s.*' % re.escape(text), re.I)
elif filter_type == 1:
try:
match = re.compile(text, re.I)
except:
raise FilterException('Failed to compile regexp')
elif filter_type == 2:
text = text.replace('*', '__SEARCH_ALL__')
text = text.replace('?', '__SEARCH_ONE__')
text = re.escape(text)
text = text.replace('\\_\\_SEARCH\\_ALL\\_\\_', '.*')
text = text.replace('\\_\\_SEARCH\\_ONE\\_\\_', '.')
match = re.compile('.*%s.*' % text, re.I)
else:
raise Exception('Unsupported filter type %s!' % filter_type)
self.values = [
item for item in self.allvalues
if Wammu.Utils.MatchesText(item, match, num)
]
self.SetItemCount(len(self.values))
self.RefreshView()
self.ShowRow(0)
def Sorter(self, item1, item2):
'''
Compare function for internal list of values.
'''
if self.sortkey == 'Location' and isinstance(item1[self.sortkey], str):
return self.sortorder * cmp(
int(item1[self.sortkey].split(',')[0]),
int(item2[self.sortkey].split(', ')[0]))
elif item1[self.sortkey] is None:
return -self.sortorder
elif item2[self.sortkey] is None:
return self.sortorder
return self.sortorder * cmp(item1[self.sortkey], item2[self.sortkey])
def ShowLocation(self, loc, second=None):
'''
Shows row which is stored on defined location. Search can be extended
by specifiyng second tupe of search attribute and value.
'''
result = Wammu.Utils.SearchLocation(self.values, loc, second)
if result != -1:
self.ShowRow(result)
def ShowRow(self, index):
'''
Activates id-th row.
'''
if (self.GetItemCount() > index and index >= 0 and
self.GetCountPerPage() > 0):
self.itemno = index
while self.GetFirstSelected() != -1:
self.SetItemState(
self.GetFirstSelected(), 0, wx.LIST_STATE_SELECTED
)
self.SetItemState(
index,
wx.LIST_STATE_FOCUSED | wx.LIST_STATE_SELECTED,
wx.LIST_STATE_FOCUSED | wx.LIST_STATE_SELECTED
)
self.EnsureVisible(index)
else:
evt = Wammu.Events.ShowEvent(data=None)
wx.PostEvent(self.win, evt)
def Change(self, newtype, values):
'''
Change type of browser component.
'''
if self.type != '':
self.cfg.Write(
'/BrowserSortKey/%s' % self.type, self.sortkey
)
self.cfg.WriteInt(
'/BrowserSortOrder/%s' % self.type, self.sortorder
)
self.type = newtype
self.values = values
self.allvalues = values
self.sortkey = ''
self.sortorder = 1
self.ClearAll()
self.SetItemCount(len(values))
self.ShowHeaders()
# restore sort order
found = False
readsort = self.cfg.Read('/BrowserSortKey/%s' % self.type)
readorder = self.cfg.ReadInt('/BrowserSortOrder/%s' % self.type)
for i in range(len(self.keys)):
if self.keys[i] == readsort:
if readorder == -1:
self.sortkey = readsort
self.Resort(i)
found = True
if not found:
self.Resort(0)
def Resort(self, col):
'''
Changes sort order of listing.
'''
# remember show item
try:
item = self.values[self.itemno]
except IndexError:
item = None
# find keys and order
nextsort = self.keys[col]
if nextsort == self.sortkey:
self.sortorder = -1 * self.sortorder
else:
self.sortorder = 1
self.sortkey = nextsort
# do the real sort
self.values.sort(self.Sorter)
# set image
for i in range(self.GetColumnCount()):
self.ClearColumnImage(i)
if self.sortorder == 1:
image = self.downarrow
else:
image = self.uparrow
self.SetColumnImage(col, image)
self.RefreshView()
if item is not None:
self.ShowRow(self.values.index(item))
def RefreshView(self):
'''
Refresh displayed items.
'''
if self.GetItemCount() != 0:
top = self.GetTopItem()
if top < 0:
top = 0
count = self.GetCountPerPage()
totalcount = self.GetItemCount()
if count < 0:
count = totalcount
last = min(totalcount - 1, top + count)
self.RefreshItems(top, last)
def OnKey(self, evt):
'''
Key handler which catches delete key for deletion of current item and
R/r key for message reply.
'''
if evt.GetKeyCode() == wx.WXK_DELETE:
self.DoSelectedDelete()
elif evt.GetKeyCode() in [114, 82]:
self.DoReply()
def DoSelectedDelete(self):
'''
Delete selected message.
'''
lst = []
index = self.GetFirstSelected()
while index != -1:
lst.append(self.values[index])
index = self.GetNextSelected(index)
self.DoDelete(lst)
def DoDelete(self, lst):
'''
Send delete event to parent.
'''
evt = Wammu.Events.DeleteEvent(lst=lst)
wx.PostEvent(self.win, evt)
def DoBackup(self, lst):
'''
Send backup event to parent.
'''
evt = Wammu.Events.BackupEvent(lst=lst)
wx.PostEvent(self.win, evt)
def DoReply(self):
'''
Send reply event to parent.
'''
evt = Wammu.Events.ReplyEvent(data=self.values[self.GetFocusedItem()])
wx.PostEvent(self.win, evt)
    def OnRightClick(self, evt):
        '''
        Handle right click - show context menu with correct options for
        current type of listing.
        '''
        # The info listing has no per-row actions.
        if self.type == 'info':
            return
        # Remember which row the menu belongs to; the OnPopup* handlers
        # read self.popup_index later.
        self.popup_index = evt.m_itemIndex
        # make a menu
        menu = wx.Menu()
        # add some items
        # Message rows: send/resend depending on state, reply for
        # received messages, call when a number is attached.
        if self.popup_index != -1 and self.type == 'message':
            if self.values[evt.m_itemIndex]['State'] == 'Sent':
                menu.Append(self.popup_id_send, _('Resend'))
            if self.values[evt.m_itemIndex]['State'] == 'UnSent':
                menu.Append(self.popup_id_send, _('Send'))
            if self.values[evt.m_itemIndex]['State'] in ('Read', 'UnRead'):
                menu.Append(self.popup_id_reply, _('Reply'))
            if self.values[evt.m_itemIndex]['Number'] != '':
                menu.Append(self.popup_id_call, _('Call'))
            menu.AppendSeparator()
        # Contact and call rows can start a message or a call.
        if self.popup_index != -1 and self.type in ['contact', 'call']:
            menu.Append(self.popup_id_message, _('Send message'))
            menu.Append(self.popup_id_call, _('Call'))
        if self.popup_index != -1 and self.type in ['call']:
            menu.Append(self.popup_id_contact, _('Store as new contact'))
            menu.AppendSeparator()
        # Calls and messages cannot be edited in place.
        if self.popup_index != -1 and self.type not in ['call', 'message']:
            menu.Append(self.popup_id_edit, _('Edit'))
        if self.popup_index != -1 and self.type not in ['call']:
            menu.Append(self.popup_id_duplicate, _('Duplicate'))
            menu.AppendSeparator()
        if self.popup_index != -1:
            menu.Append(self.popup_id_delete, _('Delete current'))
            menu.Append(self.popup_id_delete_selection, _('Delete selected'))
            menu.AppendSeparator()
        if self.popup_index != -1:
            menu.Append(self.popup_id_backup_one, _('Backup current'))
            menu.Append(self.popup_id_backup_selection, _('Backup selected'))
            menu.Append(self.popup_id_backup_all, _('Backup all'))
        # Popup the menu. If an item is selected then its handler
        # will be called before PopupMenu returns.
        self.PopupMenu(menu, evt.GetPoint())
def OnPopupDuplicate(self, event):
evt = Wammu.Events.DuplicateEvent(data=self.values[self.popup_index])
wx.PostEvent(self.win, evt)
def OnPopupReply(self, event):
evt = Wammu.Events.ReplyEvent(data=self.values[self.popup_index])
wx.PostEvent(self.win, evt)
def OnPopupSend(self, event):
evt = Wammu.Events.SendEvent(data=self.values[self.popup_index])
wx.PostEvent(self.win, evt)
def OnPopupCall(self, event):
evt = Wammu.Events.CallEvent(data=self.values[self.popup_index])
wx.PostEvent(self.win, evt)
def OnPopupMessage(self, event):
evt = Wammu.Events.MessageEvent(data=self.values[self.popup_index])
wx.PostEvent(self.win, evt)
def OnPopupContact(self, event):
data = self.values[self.popup_index]
data['Location'] = 0
data['MemoryType'] = 'ME'
evt = Wammu.Events.EditEvent(data=data)
wx.PostEvent(self.win, evt)
def OnPopupEdit(self, event):
evt = Wammu.Events.EditEvent(data=self.values[self.popup_index])
wx.PostEvent(self.win, evt)
    def OnPopupDelete(self, event):
        # Context menu: delete just the row the menu was opened on.
        self.DoDelete([self.values[self.popup_index]])
    def OnPopupDeleteSel(self, event):
        # Context menu: delete every selected row.
        self.DoSelectedDelete()
    def OnPopupBackupOne(self, event):
        # Context menu: back up just the row the menu was opened on.
        self.DoBackup([self.values[self.popup_index]])
def OnPopupBackupSel(self, event):
item_list = []
index = self.GetFirstSelected()
while index != -1:
item_list.append(self.values[index])
index = self.GetNextSelected(index)
self.DoBackup(item_list)
    def OnPopupBackupAll(self, event):
        # Context menu: back up the whole listing.
        self.DoBackup(self.values)
    def OnColClick(self, evt):
        # Clicking a header sorts by that column (repeated clicks toggle).
        self.Resort(evt.GetColumn())
def OnItemSelected(self, event):
self.itemno = event.m_itemIndex
evt = Wammu.Events.ShowEvent(data=self.values[event.m_itemIndex])
wx.PostEvent(self.win, evt)
def OnItemActivated(self, event):
evt = Wammu.Events.EditEvent(data=self.values[event.m_itemIndex])
wx.PostEvent(self.win, evt)
def getColumnText(self, index, col):
item = self.GetItem(index, col)
return item.GetText()
def OnGetItemText(self, item, col):
'''
Get item text.
'''
if item >= len(self.values):
return None
return StrConv(self.values[item][self.keys[col]])
def OnGetItemAttr(self, item):
'''
Get item attributes - highlight synced items, make odd and even rows
different.
'''
if self.values[item]['Synced']:
if item % 2 == 1:
return self.attr1
else:
return self.attr2
if item % 2 == 1:
return self.attr3
else:
return self.attr4
| gammu/wammu | Wammu/Browser.py | Browser.py | py | 19,382 | python | en | code | 63 | github-code | 36 | [
{
"api_name": "Wammu.Locales.ugettext",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "Wammu.Locales.ugettext",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "Wammu.Locales.ugettext",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "... |
20163912786 | from django.db import models
from django.contrib.auth.models import User
# Crear Modelos
class Mueble(models.Model):
    """A furniture item offered in the store."""

    nombre = models.CharField(max_length=40)
    modelo = models.CharField(max_length=150)
    descripcion = models.CharField(max_length=250)
    # Bug fix: the default was '' (a string) for a FloatField.
    precio = models.FloatField(default=0)
    # Django storage paths use forward slashes; the original string used
    # unescaped Windows backslashes ("static\core\images\\media").
    # NOTE(review): this changes where new uploads land - confirm against
    # existing media before deploying.
    imagen = models.ImageField(null=True, blank=True, upload_to="static/core/images/media")
    # Bug fix: default was the string 'False', which is truthy.
    oferta = models.BooleanField(default=False)

    def __str__(self):
        return f"{self.id} - {self.nombre}"
class Avatar(models.Model):
    """Profile picture attached to a Django auth user."""
    # Link to the owning user; deleting the user removes the avatar.
    user=models.ForeignKey(User, on_delete=models.CASCADE)
    # Stored in the 'avatares' subfolder of MEDIA_ROOT; optional.
    imagen=models.ImageField(upload_to='avatares',null=True, blank=True)
    def __str__(self):
        return f"{self.user} - {self.imagen}"
class Comentario(models.Model):
    """A visitor comment attached to a furniture item, newest first."""
    # FK to the commented item; 'comentarios' is the reverse accessor.
    comentario = models.ForeignKey(Mueble, related_name='comentarios', on_delete=models.CASCADE, null=True)
    nombre = models.CharField(max_length=40)
    mensaje = models.TextField(null=True, blank=True)
    fechaComentario = models.DateTimeField(auto_now_add=True)
    class Meta:
        # Newest comments first.
        ordering = ['-fechaComentario']
    def __str__(self):
        return '%s - %s' % (self.nombre, self.comentario)
class Carrito(models.Model):
    """Session-backed shopping cart.

    The cart lives entirely in request.session['carrito'] as a dict keyed
    by the product id (as a string).  NOTE(review): this class overrides
    __init__ and never persists rows, so subclassing models.Model looks
    accidental - confirm before changing, as migrations may reference it.
    """
    def __init__(self, request):
        self.request = request
        self.session = request.session
        carrito = self.session.get("carrito")
        if not carrito:
            # First use in this session: start with an empty cart.
            self.session["carrito"] = {}
            self.carrito = self.session["carrito"]
        else:
            self.carrito = carrito
    def agregar(self, producto):
        # Add one unit of the product, creating the entry on first add.
        id = str(producto.id)
        if id not in self.carrito.keys():
            self.carrito[id]={
                "producto_id": producto.id,
                "nombre": producto.nombre,
                "precio": float(producto.precio),
                "acumulado": float(producto.precio),  # running line total
                "imagen":producto.imagen.url,
                "cantidad": 1,
            }
        else:
            self.carrito[id]["cantidad"] += 1
            self.carrito[id]["precio"] = producto.precio
            self.carrito[id]["acumulado"] += producto.precio
        self.guardar_carrito()
    def guardar_carrito(self):
        # Write back and mark the session dirty so Django saves it.
        self.session["carrito"] = self.carrito
        self.session.modified = True
    def eliminar(self, producto):
        # Drop the product's entry entirely, regardless of quantity.
        id = str(producto.id)
        if id in self.carrito:
            del self.carrito[id]
            self.guardar_carrito()
    def restar(self, producto):
        # Remove one unit; the entry disappears when the count reaches 0.
        id = str(producto.id)
        if id in self.carrito.keys():
            self.carrito[id]["cantidad"] -= 1
            self.carrito[id]["precio"] = producto.precio
            self.carrito[id]["acumulado"] -= producto.precio
            if self.carrito[id]["cantidad"] <= 0: self.eliminar(producto)
            self.guardar_carrito()
    def limpiar(self):
        # Empty the whole cart.
        self.session["carrito"] = {}
        self.session.modified = True
| gonzalezmirko/Proyecto-Final-Coder | Proyecto/core/models.py | models.py | py | 2,980 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "... |
10211953658 | from collections import defaultdict
import time
import os
#This class represents a directed graph using adjacency list representation
class Graph:
    '''
    Directed graph (adjacency lists) that can list its strongly connected
    components with Kosaraju's algorithm.  Components are appended to
    Result1.txt, one per line.
    '''

    def __init__(self, vertices):
        # Vertex ids are 0..vertices-1.
        self.V = vertices
        self.graph = defaultdict(list)
        # The output file stays open until printSCCs() finishes.
        self.file = open("Result1.txt", "a")

    def addEdge(self, u, v):
        '''Add a directed edge u -> v.'''
        self.graph[u].append(v)

    def DFSUtil(self, v, visited, adj=None):
        '''Depth-first search from v over *adj* (default: self.graph),
        writing every visited vertex to the output file.

        NOTE(review): recursion depth equals the DFS depth; very large
        inputs can exceed Python's recursion limit.
        '''
        if adj is None:
            adj = self.graph
        visited[v] = True
        self.file.write(str(v) + " ")
        for i in adj[v]:
            if not visited[i]:
                self.DFSUtil(i, visited, adj)

    def fillOrder(self, v, visited, stack):
        '''First DFS pass: push vertices in order of finishing time.'''
        visited[v] = True
        for i in self.graph[v]:
            if not visited[i]:
                self.fillOrder(i, visited, stack)
        # Bug fix: list.append() returns None, so the original
        # "stack = stack.append(v)" rebound the local name to None.
        stack.append(v)

    def printSCCs(self):
        '''Write all strongly connected components to the output file.'''
        stack = []
        visited = [False] * self.V
        for i in range(self.V):
            if not visited[i]:
                self.fillOrder(i, visited, stack)
        # Bug fix: Kosaraju's second pass must run on the TRANSPOSED
        # graph; the original reused self.graph, which only prints
        # reachability sets, not strongly connected components.
        rev = defaultdict(list)
        for u in self.graph:
            for v in self.graph[u]:
                rev[v].append(u)
        visited = [False] * self.V
        while stack:
            i = stack.pop()
            if not visited[i]:
                self.DFSUtil(i, visited, rev)
                self.file.write("\n")
        self.file.close()
#os.system('cls')
# --- Shard the task1.txt edge list across ten Graph instances ---
file = open("task1.txt")
# Skip the descriptive header lines at the top of the edge list.
headers = 4
for index in range(0, headers):
    file.readline()

NUM_VERTICES = 5105044
SHARD_SIZE = 510504  # edges per graph shard
graphs = [Graph(NUM_VERTICES) for _ in range(10)]

count = 0
for value in file.readlines():
    # Each line is "FromNodeId<TAB>ToNodeId".
    value = value.replace('\n', '')
    temp = value.split('\t')
    FromNodeId = temp[0]
    ToNodeId = temp[1]
    # Bug fix: the original used ten independent "if count < ..." blocks,
    # so a single edge was added to several shards and count was
    # incremented up to ten times per line.  Assign each edge to exactly
    # one shard instead.
    shard = min(count // SHARD_SIZE, len(graphs) - 1)
    graphs[shard].addEdge(int(FromNodeId), int(ToNodeId))
    count += 1
file.close()

print("Loading ", end=' ')
for i in range(3):
    time.sleep(0.8)
    print(" . ", end=' ')
os.system('cls')

print("Number of Iterations are ", count)
print("Strongly connected components in graph are given below")
# Only the first three shards are analysed, matching the original script
# (the calls for shards 3..9 were commented out there).
for graph in graphs[:3]:
    graph.printSCCs()

# Show the tail of the result file: skip the bulk of the output first.
fin = open("Result1.txt", 'r')
for index in range(0, 15251652):
    fin.readline()
# Bug fix: the original called print(fin.read()) inside a 753-iteration
# loop; the first call consumes the rest of the file, so the remaining
# iterations printed empty strings.  One read is enough.
print(fin.read())
time.sleep(0.5)
fin.close()
print("End of the procedure .........")
{
"api_name": "collections.defaultdict",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_... |
34892117243 | from django.contrib import admin
from .models import Product, Order, OrderProduct
class ProductAdmin(admin.ModelAdmin):
    # Catalogue admin: core product data in the change list, with
    # price/stock filters and free-text search.
    list_display = ('name', 'price', 'quantity', 'product_image')
    list_filter = ('price', 'quantity')
    search_fields = ('name', 'price')
class OrderAdmin(admin.ModelAdmin):
    """Admin for customer orders: contact data plus date/product filters."""
    list_display = ('date_ordered', 'name', 'email', 'phone', 'address')
    list_filter = ('date_ordered', 'products', 'name')
    # Bug fix: search_fields entries must resolve to text fields; the bare
    # M2M name 'products' fails Django's admin checks, so follow the
    # relation to the product name instead.
    search_fields = ('name', 'email', 'phone', 'address', 'products__name')
class OrderProductAdmin(admin.ModelAdmin):
    """Admin for the order/product line items."""
    list_display = ('order', 'product', 'quantity')
    list_filter = ('order', 'product')
    # Bug fix: foreign keys cannot be searched directly in search_fields;
    # search the related text fields via __ lookups.
    search_fields = ('order__name', 'product__name')
# Wire the model admins into the default admin site.
admin.site.register(Product, ProductAdmin)
admin.site.register(Order, OrderAdmin)
admin.site.register(OrderProduct, OrderProductAdmin)
| rustamovilyos/raw_materials | app/admin.py | admin.py | py | 817 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 11,
"usage_type": "attribute"
... |
13483897568 | import bpy
from bpy import context
import sys
import os
from os.path import exists
from os.path import splitext
# The FBX base name is taken from the last command-line argument
# (everything after Blender's own arguments).
fileArgument = sys.argv[-1]
print("\r\n")
print("Looking for FBX file " + fileArgument + " in working directory:")
print(os.getcwd())

filename = splitext(fileArgument)[0]
# Guard clause: bail out early when the .fbx file is missing.
if not exists(filename + ".fbx"):
    sys.exit("FBX file named " + fileArgument + " was not found.\r\n")
print("FBX file name " + filename + " was found.\r\n")

try:
    os.mkdir(filename)
except OSError as error:
    # Directory already exists (or cannot be created): report and go on.
    print(error)

bpy.ops.wm.save_as_mainfile(filepath=filename + ".blend")

# Start from an empty scene: remove the default cube/camera/light.
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete()
bpy.ops.wm.save_mainfile()

# Import the FBX object.
print("Importing FBX object.")
bpy.ops.import_scene.fbx(filepath=str(filename + ".fbx"))
bpy.ops.wm.save_mainfile()
def ApplyMaterial(material_name, obj):
    """Assign the named material to obj's first slot (append if none).

    Does nothing when no material with that name exists.
    """
    mat = bpy.data.materials.get(material_name)
    if mat is None:
        return
    slots = obj.data.materials
    if slots:
        # Overwrite the first material slot.
        slots[0] = mat
    else:
        slots.append(mat)
def matcher(material):
    """True for base materials, i.e. names without a '.' suffix
    (Blender appends '.001', '.002', ... to duplicates)."""
    return material.name.find('.') == -1
bpy.ops.wm.save_mainfile()
print("Separating unique materials...")
# One collection per base material (names without a '.NNN' suffix).
uniqueMaterials = filter(matcher, bpy.data.materials)
for material in uniqueMaterials:
    bpy.ops.object.select_all(action='DESELECT')
    mat_name = material.name
    col = bpy.data.collections.new(mat_name)
    bpy.context.scene.collection.children.link(col)
    print("Linking " + mat_name + " collection")
    for object in bpy.data.objects:
        # Select only mesh objects
        if object.type == "MESH":
            bpy.context.view_layer.objects.active = object
            # Gets the first material for that object
            objectMaterial = None
            if 0 < len(object.data.materials):
                objectMaterial = object.data.materials[0]
            # if the object's material starts with the name of the current unique material
            # apply the unique material to that object.
            if objectMaterial is not None and objectMaterial.name.startswith(mat_name):
                bpy.context.view_layer.objects.active = object
                col.objects.link(object)
                ApplyMaterial(mat_name, object)
# Detach the default "Collection" now that objects live in the
# per-material collections.
m_col = bpy.data.collections.get("Collection")
bpy.context.scene.collection.children.unlink(m_col)
print("Unique materials separated.")
# ctx = bpy.context.copy()
# ctx['selected_objects'] = col.objects
# col_filename = filename + "/" + mat_name + ".blend"
# bpy.data.libraries.write(col_filename, set(ctx['selected_objects']), fake_user=True)
# hull_col = bpy.data.collections.new(mat_name + "_hulls")
# bpy.context.scene.collection.children.link(hull_col)
# for object in col.objects:
# if object.type == "MESH":
# print("Creating hull: " + object.name + "_hull")
# hull = convex_hull(object.name + "_hull", object)
# if hull is not None:
# hull_col.objects.link(hull)
#
# print("Completed hull: " + object.name + "_hull")
# bpy.ops.wm.save_mainfile()
bpy.ops.wm.save_mainfile()  # persist the per-material collections into the .blend
| lmsorenson/PyGeometry | create_scene_from_fbx.py | create_scene_from_fbx.py | py | 3,235 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_nu... |
30553351838 | '''
statistics.minutes delayed.weather: number of minutes delayed (per month) caused by significant meteorological
conditions that, in the judgment of the carrier, delays or prevents the operation of a flight.
'''
'''
In de opdracht willen ze dat we list_of_airports twee keer gaan gebruiken...
Dit kan natuurlijk niet....
Maar de 'tweede' list_of_airports is nu vliegveld_code =)
'''
import json
from pprint import pprint
# Load the airport records crawled into airports.json.
# (The 'with' block closes the file; the original also called
# infile.close() inside it and pre-initialised the list needlessly.)
with open('airports.json', 'r') as infile:
    list_of_airports = json.load(infile)

# Build (airport code, minutes delayed by weather) pairs.  A second list
# is used because the exercise re-uses the name list_of_airports for the
# raw JSON data.
vliegveld_code = []
for vliegveld in list_of_airports:
    airport_code = vliegveld['airport']['code']  # JSON.airport.code
    min_delayed_weather = vliegveld['statistics']['minutes delayed']['weather']
    vliegveld_code.append((airport_code, min_delayed_weather))
pprint(vliegveld_code)

# Sort by weather delay, largest first.  plain sorted() would sort on
# the airport code, so sort on the tuple's second element; the lambda
# parameter is renamed so it no longer shadows the list itself.
vliegveld_code_sorted = sorted(vliegveld_code, key=lambda entry: entry[1], reverse=True)
pprint(vliegveld_code_sorted)
{
"api_name": "json.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 36,
"usage_type": "call"
}
] |
7994328417 | import pymongo
# Shared MongoDB connection plus handles to the three databases used by
# the shop: catalogue, branch data and order management.
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
products_db = myclient["products"]
branches_db = myclient["branches"]
order_management_db = myclient["order_management"]
# branches = {
# 1: {"name":"Katipunan","phonenumber":"09179990000"},
# 2: {"name":"Tomas Morato","phonenumber":"09179990001"},
# 3: {"name":"Eastwood","phonenumber":"09179990002"},
# 4: {"name":"Tiendesitas","phonenumber":"09179990003"},
# 5: {"name":"Arcovia","phonenumber":"09179990004"},
# }
def get_product(code):
    """Return the product document with the given code (no _id), or None."""
    return products_db["products"].find_one({"code": code}, {"_id": 0})
def get_products():
    """Return every product document (without Mongo _id) as a list."""
    products_coll = products_db["products"]
    # The cursor is iterable; materialise it directly instead of the
    # original manual append loop.
    return list(products_coll.find({}, {"_id": 0}))
def get_branch(code):
    """Return the branch document with the given code, or None."""
    return branches_db["branches"].find_one({"code": code})
def get_branches():
    """Return every branch document as a list."""
    branch_coll = branches_db["branches"]
    # Materialise the cursor directly instead of the original append loop.
    return list(branch_coll.find({}))
def get_user(username):
    """Return the customer document for the given username, or None."""
    return order_management_db['customers'].find_one({"username": username})
def create_order(order):
    """Persist a new order document; returns the InsertOneResult."""
    orders_coll = order_management_db['orders']
    # Bug fix: Collection.insert() was deprecated and removed in
    # PyMongo 4; insert_one() is the supported single-document API.
    return orders_coll.insert_one(order)
| mjimlee/Flask-ecommerce | digitalcafe/database.py | database.py | py | 1,431 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 3,
"usage_type": "call"
}
] |
27977157667 | import os
from django.core.exceptions import ValidationError
def validate_recording_file_extension(value):
    """Django field validator: reject uploads that are not audio files.

    *value* is an UploadedFile/FieldFile; only its .name is inspected.
    Raises ValidationError for unsupported extensions.
    """
    # splitext -> (root, ext); [1] is the extension including the dot.
    extension = os.path.splitext(value.name)[1]
    # NOTE(review): '.wave' is an unusual spelling; the common extension
    # is '.wav', so accept both (backward compatible widening).
    valid_extensions = {'.mp3', '.ogg', '.wav', '.wave'}
    if extension.lower() not in valid_extensions:
        raise ValidationError(u'Unsupported file extension')
{
"api_name": "os.path.splitext",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.core.exceptions.ValidationError",
"line_number": 12,
"usage_type": "call"
}
] |
30349073082 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds Paper.time_final (FloatField, default 0).
    # Applied migrations should not be edited by hand; add a new one.
    # NOTE: FloatField ignores max_length; the argument is superfluous.
    dependencies = [
        ('paper', '0010_auto_20160131_0616'),
    ]
    operations = [
        migrations.AddField(
            model_name='paper',
            name='time_final',
            field=models.FloatField(max_length=255, default=0),
        ),
    ]
| sychen1121/paper_label | website/paper/migrations/0011_paper_time_final.py | 0011_paper_time_final.py | py | 414 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 14,
"usage_type": "call"
},
{
... |
73103994024 | import datetime
import jwt
from api import app, db
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy import text as sa_text
from sqlalchemy_utils.types.password import PasswordType
class User(db.Model):
    """Application user with JWT helpers for auth tokens."""

    # UUID primary key generated server side (needs the uuid-ossp
    # PostgreSQL extension for uuid_generate_v4()).
    id = db.Column(UUID(as_uuid=True), primary_key=True, server_default=sa_text("uuid_generate_v4()"))
    name = db.Column(db.String(120), unique=True)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # Transparently hashed by sqlalchemy_utils; md5_crypt is kept only to
    # verify legacy hashes and is re-hashed with pbkdf2 on use.
    password = db.Column(PasswordType(
        schemes=[
            'pbkdf2_sha512',
            'md5_crypt'
        ],
        deprecated=['md5_crypt']
    ))

    def to_dict(self):
        """Public, serialisable view of the user (never the password)."""
        return {
            "id": self.id,
            "name": self.name,
            "email": self.email
        }

    @staticmethod
    def encode_auth_token(user_id):
        """Create a signed JWT for user_id, valid for 10 days.

        Returns the encoded token, or the raised exception object on
        failure (kept for backward compatibility with existing callers).
        """
        try:
            payload = {
                'exp': datetime.datetime.utcnow() + datetime.timedelta(days=10),
                'iat': datetime.datetime.utcnow(),
                'sub': user_id
            }
            return jwt.encode(
                payload,
                app.config.get('SECRET_KEY'),
                algorithm='HS256'
            )
        except Exception as e:
            return e

    @staticmethod
    def decode_auth_token(auth_token):
        """Validate a JWT and return its payload dict, or an error string."""
        try:
            # Bug fix: pin the accepted algorithms.  Without this PyJWT >= 2
            # raises a TypeError, and older versions are vulnerable to
            # algorithm-confusion attacks.
            payload = jwt.decode(
                auth_token,
                app.config.get('SECRET_KEY'),
                algorithms=['HS256']
            )
            return payload
        except jwt.ExpiredSignatureError:
            return 'Signature expired. Please log in again.'
        except jwt.InvalidTokenError:
            return 'Invalid token. Please log in again.'
| Basile-Lequeux/PerfectTripApi | api/models/User.py | User.py | py | 1,587 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "api.db.Model",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "api.db",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "api.db.Column",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "api.db",
"line_number": 10,
... |
34954108287 |
import logging
import apache_beam as beam
from etl_operations.models.remittances import RemittanceSchema
from etl_operations.transforms.left_join import LeftJoin
def filter_remittance(remittance):
    """Return True when the remittance passes RemittanceSchema validation."""
    validation_errors = RemittanceSchema().validate(remittance)
    is_valid = validation_errors == {}
    logging.info(f'{validation_errors} - {is_valid} - {remittance}')
    return is_valid
class TransformRemittance(beam.DoFn):
    """Normalise selected remittance fields to strings (falsy -> None)
    and emit the record serialised through RemittanceSchema."""

    def process(self, remittance):
        schema = RemittanceSchema()
        parsed_id = str(remittance['id']) if remittance['id'] else None
        parsed_created = str(remittance['created_via_app_version']) if remittance['created_via_app_version'] else None
        # Bug fix: this previously stringified remittance['id'] again
        # (copy-paste), clobbering receipt_times with the record id.
        parsed_receipt_times = str(remittance['receipt_times']) if remittance['receipt_times'] else None
        remittance.update({
            'id': parsed_id,
            'created_via_app_version': parsed_created,
            'receipt_times': parsed_receipt_times,
        })
        yield schema.dump(remittance)
class MergeRemittancesUsers(beam.PTransform):
    """Composite transform joining user data onto remittances.

    Input is a (remittances PCollection, users PCollection) pair.  The
    users collection is left-joined four times - as creator, customer,
    teller and processor - each join prefixing the user columns with the
    corresponding role name.
    """

    def expand(self, p):
        remittances, users = p
        with_creator = (
            (remittances, users)
            | 'MergeRemittancesCreatedUsers' >> LeftJoin('created_by_user', 'id', 'created_by_user_')
        )
        with_customer = (
            (with_creator, users)
            | 'MergeRemittancesCustomers' >> LeftJoin('customer', 'id', 'customer_')
        )
        with_teller = (
            (with_customer, users)
            | 'MergeRemittancesTellers' >> LeftJoin('teller', 'id', 'teller_')
        )
        return (
            (with_teller, users)
            | 'MergeRemittancesUserProcessors' >> LeftJoin('user_processor', 'id', 'user_processor_')
        )
| luisarboleda17/etls_valiu | etl_operations/etl_operations/transforms/remittances.py | remittances.py | py | 2,088 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "etl_operations.models.remittances.RemittanceSchema",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "apache_beam.DoFn",
"line_number": 15,
"usage_type": "attribute"
},
{
... |
6829356831 | import re
import time
from os import environ
from datetime import datetime
from mkdocs.config import config_options
from mkdocs.plugins import BasePlugin
from .gitinfo import GitInfo
class GitShowHistoryLogPlugin(BasePlugin):
    """MkDocs plugin that replaces a {{ git_show_history_log }}
    placeholder with a Markdown table of the page's recent commits."""

    config_scheme = (
        ('max_number_of_commits', config_options.Type(int, default=5)),
    )

    # Placeholder accepts optional whitespace inside the braces.
    PLACEHOLDER = re.compile(r"\{\{(\s)*git_show_history_log(\s)*\}\}", re.IGNORECASE)

    def __init__(self):
        self.enabled = True
        self.from_git = GitInfo()

    def on_page_markdown(self, markdown, page, config, files):
        if not self.enabled:
            return markdown
        match = self.PLACEHOLDER.search(markdown)
        if match is None:
            # No placeholder on this page: skip the git lookups entirely.
            return markdown
        commits = self.from_git.get_commits_for_file(
            page.file.abs_src_path, self.config['max_number_of_commits'])
        rows = []
        for commit in commits:
            author = str(commit.author)
            date = time.strftime('%Y-%m-%d, %H:%M:%S', time.gmtime(commit.committed_date))
            # First line of the commit message only.
            msg = commit.message.partition('\n')[0]
            tag = str(self.from_git.get_tag_for_commit(commit))
            rows.append("| " + tag + " | " + author + " | " + date + " | " + msg + " |\n")
        table = ("| Version | Author | When | Message |\n"
                 "|---------|--------|------|---------|\n" + "".join(rows))
        # Bug fix: the original located the insertion point with
        # str.find('{{ git_show_history_log }}'), which broke whenever the
        # author's spacing differed from that literal (find() returned -1
        # and the rows landed at a bogus offset).  Splice at the actual
        # regex match instead.
        return markdown[:match.start()] + table + markdown[match.end():]
| pawelsikora/mkdocs-git-show-history-log-plugin | mkdocs_git_show_history_log_plugin/plugin.py | plugin.py | py | 1,784 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "mkdocs.plugins.BasePlugin",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "mkdocs.config.config_options.Type",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "mkdocs.config.config_options",
"line_number": 11,
"usage_type": "name"
},
... |
6667647383 | from flask import request, jsonify, abort, Blueprint
import requests
import json
from app import models
from .authRoutines import *
likeRoutes = Blueprint('likesBp', __name__)
# {authToken: xxxx, like: 0, betId: xxxx}
# {authToken: xxxx, like: 1, betId: xxxx}
@likeRoutes.route('/like/update', methods=['POST'])
def like_update():
    """Create or remove a like for a bet.

    Expects JSON {authToken, like (1 to add, 0 to remove), betId}.
    Returns a JSON {result, success|error} payload.
    """
    authClass = authBackend()
    if request.method != 'POST':
        # The route only registers POST, so this is effectively
        # unreachable; kept from the original as a safety net.
        return jsonify({'result': True, 'Fail': 'Use POST'}), 400
    payload = json.loads(request.data.decode())
    email = authClass.decode_jwt(payload['authToken'])
    # Bug fix: validate the token BEFORE touching the database; the
    # original ran a user query with email=False when decoding failed.
    if email is False:
        return jsonify({'result': False, 'error': 'Failed Token'}), 400
    user = db.session.query(models.User).filter_by(email=email).first()
    bet = db.session.query(models.Bet).filter_by(id=payload['betId']).first()
    if bet is None:
        return jsonify({'result': False, 'error': 'Bet Doesn\'t Exist'}), 400
    like = db.session.query(models.Likes).filter_by(user_id=user.id, bet_id=bet.id).first()
    if payload['like'] == 1:
        if like is None:
            like = models.Likes(bet.id, user.id)
            like.save()
            return jsonify({'result': True, 'success': 'Like Created'}), 200
        return jsonify({'result': True, 'success': 'Like Already in DB'}), 200
    if like is not None:
        like.delete()
        return jsonify({'result': True, 'success': 'Like Removed'}), 200
    return jsonify({'result': True, 'success': 'Like Did not Exist'}), 200
| ThreeOhSeven/Backend | app/likesBp.py | likesBp.py | py | 1,828 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "json.loads",
... |
42522310312 | #WebCrawler é uma ferramenta de captura de informações em sites, cadastrando e salvando o que acha que seja mais relevante por meio de palavras chave
#Importa operadores matematicos
import operator
#Biblioteca de manipulação de estruturas do python
from collections import Counter
from bs4 import BeautifulSoup
import requests
def start(url):
    """Download *url*, collect the lowercased words from every
    ``div.entry_content`` block, and hand them to clean_wordList()."""
    source_code = requests.get(url).text
    soup = BeautifulSoup(source_code, 'html.parser')
    word_list = []
    for block in soup.findAll('div', {'class': 'entry_content'}):
        for token in block.text.lower().split():
            word_list.append(token)
    clean_wordList(word_list)
# Remove unwanted punctuation symbols from every word.
def clean_wordList(wordList):
    """Strip punctuation characters from each word and forward the
    non-empty results to create_dictionary()."""
    symbols = '!@#$%^&*()_-+={[]}|\\;:"<>?/,.'
    clean_list = []
    for raw_word in wordList:
        word = raw_word
        for symbol in symbols:
            # Delete the symbol wherever it appears.
            word = word.replace(symbol, '')
        if word:
            clean_list.append(word)
    create_dictionary(clean_list)
# Walk the list and report the most repeated words.
def create_dictionary(clean_list):
    """Count word frequencies in *clean_list*, print every word sorted by
    ascending count, print the 10 most common, and return that top-10 list
    of (word, count) pairs.

    Bug fix: the original did ``word_count[word] += 1`` for *unseen* words,
    which raised KeyError on the very first word processed.
    """
    word_count = {}
    for word in clean_list:
        if word in word_count:
            word_count[word] += 1
        else:
            word_count[word] = 1  # was `+= 1`: KeyError for new keys
    for key, value in sorted(word_count.items(), key=operator.itemgetter(1)):
        print("%s: %s" % (key, value))
    top = Counter(word_count).most_common(10)
    print(top)
    return top
if __name__ == "__main__":
    # Entry point: crawl a sample GeeksforGeeks page and print its
    # word-frequency statistics.
    start('https://www.geeksforgeeks.org/python-programming-language/?ref=leftbar')
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "collections.Counte... |
73451722984 | '''
Lets implement CutAndPaste augmentation
This augmentations can be added as an augmentation in the DataGenerators, but for the sake of keeping this project
simple I am doing this separately and then performing other-augmentations.
This can be considered as the first augmentation of Albumentation augmentations.
ref : https://arxiv.org/pdf/2012.07177.pdf
LETSS DO IT !! CutAndPaste is no longer plagiarizing
'''
import cv2
import argparse
import base64
import json
import numpy as np
import random
import pandas as pd
from tqdm import tqdm
import os
import os.path as osp
from labelme import utils
class CopyAndPaste:
    """Copy-and-paste augmentation (ref. arXiv:2012.07177).

    Pastes randomly resized source images (and their masks) onto random
    background images, writes the composites/masks to disk next to the
    input directory, and records a train/test split in ``log_meta.csv``.

    NOTE(review): ``cv2.imread`` is called with ``cv2.COLOR_BGR2RGB`` as its
    second argument, but that slot expects an imread *flag*, not a colour
    conversion code -- confirm the intended read mode.
    """
    def __init__(self, input_dir, background_dir):
        # Directory layout is derived from input_dir's parent:
        #   <parent>/json_mask -- labelme-derived masks, <parent>/mask -- PNG masks
        self.input_dir = input_dir
        self.json_mask_dir = osp.join(osp.dirname(self.input_dir), 'json_mask')
        self.mask_dir = osp.join(osp.dirname(self.input_dir), 'mask')
        self.background_dir = background_dir
        # default can be changed anytime
        self.augmentation_copies = 10   # composites generated per source image
        self.w_J_test_size = 3          # test images containing a "J"
        self.wo_J_test_size = 2         # test images without a "J" ('no' in name)
        self.img_sz = 256               # final composite size (pixels)
    def augment_images(self):
        """Split files into train/test, augment every training image, and
        write the image/group log to ``log_meta.csv``."""
        # creating a random-test set for no leakage from training
        test_samples = []
        # Files whose name contains 'no' are treated as "without J" samples.
        w_Js = [w_J for w_J in os.listdir(self.json_mask_dir) if 'no' not in w_J]
        wo_Js = [wo_J for wo_J in os.listdir(self.json_mask_dir) if 'no' in wo_J]
        test_samples += list(np.random.choice(w_Js, size=self.w_J_test_size, replace=False))
        test_samples += list(np.random.choice(wo_Js, size=self.wo_J_test_size, replace=False))
        imgs = []
        grps = []
        for img_f in tqdm(os.listdir(self.input_dir)):
            # 'CAP_' prefixed files are previous augmentation outputs: skip.
            if 'CAP' not in img_f:
                if img_f in os.listdir(self.mask_dir):
                    imgs.append(img_f.replace('.json', ''))
                    if img_f not in test_samples:
                        grps.append('train')
                        img, mask = self.get_img_n_mask(img_f)
                        imgs, grps = self.create_augmentations(img, mask, imgs, grps,
                                                img_name=img_f.replace('.png', ''))
                    else:
                        grps.append('test')
        df = pd.DataFrame()
        df['images'] = imgs
        df['group'] = grps
        df.to_csv(osp.join(osp.dirname(self.input_dir), 'log_meta.csv'), index=False)
    def get_img_n_mask(self, img_f):
        """Load the image and its same-named grayscale mask from disk."""
        img_ = cv2.imread(osp.join(self.input_dir, img_f), cv2.COLOR_BGR2RGB)
        mask_ = cv2.imread(osp.join(self.mask_dir, img_f), cv2.IMREAD_GRAYSCALE)
        return img_, mask_
    def create_augmentations(self, img, mask, img_list, group_list, img_name):
        """Paste (img, mask) onto ``augmentation_copies`` random backgrounds,
        save each composite + mask, and append their names/groups to the
        running lists, which are returned."""
        # first lets select 10-images at random from the background
        background_imgs = list(np.random.choice(os.listdir(self.background_dir),
                                                size=self.augmentation_copies,
                                                replace=False))
        for idx, background_img in enumerate(background_imgs):
            '''
            There are two ways of doing we can add J in the same location as it is in the original image
            but that noob-level lets resize the images before pasting them on top of background and then
            un-masking the pizels which are not labeled as J.
            '''
            bg_img = cv2.imread(osp.join(self.background_dir, background_img), cv2.COLOR_BGR2RGB)
            bg_img = cv2.resize(bg_img, (self.img_sz, self.img_sz))
            if len(bg_img.shape) < 3:
                # Grayscale background: replicate to 3 channels.
                bg_img = np.repeat(bg_img[..., np.newaxis], 3, axis=2)
            # lets resize the og-image anywhere in between 180-256 (256 final desired size)
            random_sz = np.random.randint(180, 256)
            re_img = cv2.resize(img, (random_sz, random_sz))
            re_mask = cv2.resize(mask.astype('uint8'), (random_sz, random_sz))[..., np.newaxis]
            # now lets find a patch in the background-image
            x_init = np.random.randint(0, self.img_sz - random_sz)
            y_init = np.random.randint(0, self.img_sz - random_sz)
            bg_mask_img = np.zeros((self.img_sz, self.img_sz, 1))
            # Pixels where the mask is non-zero are copied from the source.
            ix_, iy_, _ = np.where(re_mask != 0)
            bg_patch = bg_img[x_init:(x_init+random_sz), y_init:(y_init+random_sz), :]
            bg_patch[ix_, iy_, :] = re_img[ix_, iy_, :]
            bg_img[x_init:(x_init + random_sz), y_init:(y_init + random_sz), :] = bg_patch
            if 'no' not in img_name:
                # Only "with J" images contribute foreground to the new mask.
                bg_mask_img[x_init:(x_init + random_sz), y_init:(y_init + random_sz), :] = re_mask
            # saving the mask
            cv2.imwrite(osp.join(self.mask_dir, f'CAP_{img_name}_{idx}.png'), bg_mask_img)
            # saving the image
            cv2.imwrite(osp.join(self.input_dir, f'CAP_{img_name}_{idx}.png'), bg_img)
            img_list.append(f'CAP_{img_name}_{idx}')
            group_list.append('train')
        return img_list, group_list
if __name__ == "__main__":
    # CLI: python copy_and_paste_augmentation.py <input_dir> <background_dir>
    parser = argparse.ArgumentParser()
    parser.add_argument("input_dir")
    parser.add_argument("background_dir")
    args = parser.parse_args()
    input_dir = args.input_dir
    background_dir = args.background_dir
    # initialize and create masks for where J exists
    augment = CopyAndPaste(input_dir, background_dir)
    # Create masks
    augment.augment_images()
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number"... |
18355189069 | from flask import Flask, render_template, request, redirect, session, flash, url_for
from mysqlconnection import connectToMySQL # import the function that will return an instance of a connection
import re # the regex module
from flask_bcrypt import Bcrypt
import sys;
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
SCHEMA_NAME = "favoritebooks"
app = Flask(__name__)
app.secret_key ='asdfeeffefa' 'keep it secret, keep it safe' # set a secret key for security purposes
bcrypt = Bcrypt(app) # we are creating an object called bcrypt,
# which is made by invoking the function Bcrypt with our app as an argument
#603-
#There routes: / /register /login /books - /addbook /doFavorite/<bookID> /books/<bookID> - /updateDescription<bookID> /unfavorite/<bookID> /logout
# / is the root route and renders the registration/login page
# /register seems to be needed to catch the forms sent from the register side of the reglogPW.html page (typically the index page)
# redirects to /books if success, or back to / if fail
# /login seems to be needed to catch the forms sent from the login side of the reglogPW.html page (typically the index page)
# redirects to /books if success, or back to / if fail
# /books is about rendering the allBooks.html page....the success page
# /addbook is aobut adding a book to books table
# /doFavorite/<bookID> receives form from allBooks.html and oneBook.html, performs a favorting action in favoites table database and redirects to /books
# /books/<bookID> is about rendering the oneBook.html page....
# /updateDescription<bookID>
# /unfavorite>/<bookID> receives form from oneBook.html, performs action in database and redirects to /books
# /logout would do just that: log the user out, and send some place safe and useful...like the root route which then renders the reglogPW.html
#
#there are three html pages that look different only by flash messages and personalization such as using users name and populated data fields
#reglogFav.html or index.html is used for registration and login WH users_new
#allBooks.html or books_index.html is used for displaying All books whether favorited or not This is a success page WH: call it books_index
#oneBook.html or book_show displays single book WH: books_show
@app.route('/')
def index():
    """Render the combined registration/login page.

    Seeds session['form'] with empty sticky-form values so the template can
    always repopulate the registration fields. (Removed a stray ``pass``
    and debug prints from the original.)
    """
    if "form" not in session:
        session['form'] = {
            "fname_submitted": "",
            "lname_submitted": "",
            "email_submitted": ""
        }
    return render_template("reglogFav.html")  # registration + login form
@app.route('/register', methods=['POST'])
def register():
    """Validate and create a new user, then log them in.

    Redirects to /books on success, or back to / with flash messages on
    any validation failure.

    Bug fixes vs. the original:
    - invalid email, short password, and mismatched confirmation now set
      ``valid = False`` (previously they only flashed a message and the
      user was registered anyway);
    - the password-length message now matches the 8-character check
      (it claimed 7);
    - the password hash is no longer printed to the console.
    """
    # Sticky form values so the template can repopulate fields on failure.
    session['form'] = {
        "fname_submitted": request.form['fname_submitted'],
        "lname_submitted": request.form['lname_submitted'],
        "email_submitted": request.form["email_submitted"]
    }
    valid = True
    if len(request.form['fname_submitted']) < 2 or not request.form['fname_submitted'].isalpha():
        valid = False
        flash("Name must be all alpha and at least 2 characters.", "register")
    if len(request.form['lname_submitted']) < 2 or not request.form['lname_submitted'].isalpha():
        valid = False
        flash("Last name must be all alpha and at least 2 characters.", "register")
    if not EMAIL_REGEX.match(request.form["email_submitted"]):
        valid = False
        flash("Invalid email address!", "register")
    if len(request.form['pw_submitted']) < 8:
        valid = False
        flash("Password must be at least 8 characters.", "register")
    if request.form['pw_submitted'] != request.form['pwconf_submitted']:
        valid = False
        flash("Confirmation password did not match.", "register")
    # Reject duplicate registrations for an already-known email.
    mysql = connectToMySQL(SCHEMA_NAME)
    query = "select * from users where email=%(em)s;"
    data = {"em": request.form['email_submitted']}
    matching_users = mysql.query_db(query, data)
    if len(matching_users) > 0:
        valid = False
        flash("Entered email already exists. Maybe you are already registered. Else use another email address.", "register")
    if not valid:
        return redirect("/")
    pw_hash = bcrypt.generate_password_hash(request.form['pw_submitted'])
    mysql = connectToMySQL(SCHEMA_NAME)
    query = "INSERT INTO users (fname, lname, email, pw_hash, created_at, updated_at) VALUES(%(fname_bydata)s, %(lname_bydata)s, %(email_bydata)s, %(pw_has_bydata)s, NOW(), NOW());"
    data = {
        "fname_bydata": request.form["fname_submitted"],
        "lname_bydata": request.form["lname_submitted"],
        "email_bydata": request.form["email_submitted"],
        "pw_has_bydata": pw_hash
    }
    new_user_id = mysql.query_db(query, data)
    session['user_id'] = new_user_id
    session['session_fname'] = request.form["fname_submitted"]
    flash("You have been successfully registered", "success")
    session.pop('form')  # clear sticky form values on success
    return redirect('/books')
# end /register route
# end /register route
@app.route('/login', methods=['POST'])
def login():
    """Authenticate a user by email and password.

    Redirects to /books on success, back to / with a flash on failure.

    Bug fixes vs. the original: removed an unused (and expensive)
    ``bcrypt.generate_password_hash`` call on every login attempt, and
    removed debug prints that leaked the plaintext password to the console.
    """
    login_email = request.form['log_email_submitted']
    login_pw = request.form['log_pw_submitted']
    # Look up the candidate user by email.
    mysql = connectToMySQL(SCHEMA_NAME)
    query = "select email, pw_hash, id, fname from users where email=%(em)s;"
    data = {"em": login_email}
    result = mysql.query_db(query, data)
    if len(result) < 1:
        # Unknown email: same message as a bad password (no user enumeration).
        flash("You could not be logged in", "login")
        return redirect("/")
    matched_hashed_pw = result[0]['pw_hash']
    if bcrypt.check_password_hash(matched_hashed_pw, login_pw):
        session['user_id'] = result[0]['id']
        session['session_fname'] = result[0]['fname']
        flash("You were logged in", "login")
        return redirect('/books')
    flash("You could not be logged in", "login")
    return redirect("/")
# End /login route
# End /login route
@app.route('/books')
def books():
    """Show all books with who added/favorited them; requires login.

    Acts as the post-login landing page and a bookmarkable entry point.
    (Debug prints removed from the original.)
    """
    if 'user_id' not in session:
        # Not logged in: bounce back to the registration/login page.
        flash("You must be logged in to enter this website", "login")
        return redirect("/")
    mysql = connectToMySQL(SCHEMA_NAME)
    query = """select books.id AS 'books_ID', books.title AS 'books_title', books.added_by_id AS 'book_added_by_id', favorites.fav_by_id AS 'who_faved_book',
    users.fname AS 'fname who added', users.lname AS 'lname who added'
    from books
    join users ON books.added_by_id=users.id
    join favorites on favorites.book_id=books.id;"""
    allBooks = mysql.query_db(query)
    return render_template("allBooks.html", allBooks=allBooks)
# end /books route
# end /books route
@app.route('/addbook', methods=['POST'])
def addbook():
    """Add a book to the books table and auto-favorite it for the submitter.

    Validates title/description, inserts the book attributed to the
    logged-in user, then records an automatic favorite. (Debug prints
    removed from the original.)
    """
    valid = True
    if len(request.form['sentbooktitle']) < 1:
        valid = False
        flash("Title must not be blank", "addfavbook")
    if len(request.form['sentbookdescription']) < 5:
        valid = False
        flash("Please include at least 5 characters in description", "addfavbook")
    if not valid:
        return redirect('/books')
    # Insert the book, attributed to the logged-in user.
    query = """insert into books (added_by_id, title, description ) VALUES (%(currentloggedidbydata)s, %(booktitlebydata)s, %(bookdescriptionbydata)s);"""
    data = {
        "currentloggedidbydata": session['user_id'],
        "booktitlebydata": request.form['sentbooktitle'],
        "bookdescriptionbydata": request.form['sentbookdescription']
    }
    mysql = connectToMySQL(SCHEMA_NAME)
    new_book_id = mysql.query_db(query, data)
    # The submitter automatically favorites their own book.
    query = """insert into favorites (book_id, fav_by_id) VALUES (%(newbookbydata)s, %(favedbyidbydata)s);"""
    data = {
        "newbookbydata": new_book_id,
        "favedbyidbydata": session['user_id']
    }
    mysql = connectToMySQL(SCHEMA_NAME)
    mysql.query_db(query, data)
    return redirect('/books')
# End /doFavorite route
@app.route('/doFavorite/<bookID>', methods=['GET'])
def doFavorite(bookID):
    """Mark book *bookID* as a favorite of the logged-in user, then return
    to the book list. (Debug prints removed from the original.)"""
    query = """insert into favorites (book_id, fav_by_id) VALUES (%(newbookbydata)s, %(favedbyidbydata)s);"""
    data = {
        "newbookbydata": bookID,
        "favedbyidbydata": session['user_id']
    }
    mysql = connectToMySQL(SCHEMA_NAME)
    mysql.query_db(query, data)
    return redirect('/books')
# End /doFavorite route
@app.route('/books/<bookID>', methods=['POST'])
def booksOne(bookID):
    """Render the single-book page.

    Bug fixes vs. the original: the route variable ``<bookID>`` must be
    accepted as a function parameter (Flask raised TypeError otherwise),
    and the template name must be a string literal -- the original
    ``render_template(oneBook.html)`` raised NameError.
    """
    return render_template("oneBook.html")
@app.route('/updateDescription<bookID>', methods=['POST'])
def updateDesc():
    # TODO: not yet implemented -- should update a book's description.
    # NOTE(review): the route lacks a '/' before <bookID> and the view takes
    # no bookID parameter; confirm the intended URL shape before wiring up.
    pass
@app.route('/unfavorite/<bookID>', methods=['POST'])
def unFav():
    # TODO: not yet implemented -- should remove the logged-in user's
    # favorite for <bookID> and redirect to /books.
    pass
#return redirect ("/books")
# End /doFavorite route
#return redirect ("/books")
# End /doFavorite route
@app.route('/logout', methods=['GET'])
def logout():
    """Clear the session and return to the login page with a flash.
    (Debug print removed from the original.)"""
    session.pop('user_id')
    session.pop('session_fname')
    flash("You have been logged out", "logout")
    return redirect("/")
# end logout route
if __name__ == "__main__":
    app.run(debug=True)  # development server; disable debug in production
{
"api_name": "re.compile",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask_bcrypt.Bcrypt",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_n... |
5209753399 | import os
import bs4
import requests
import re
import time
def main():
    """Crawl the Gutenberg author-browse pages for letters 'a' to 'c',
    printing the author links found on each, with a polite 2s pause.

    Bug fix: the original bare ``except:`` also swallowed KeyboardInterrupt
    and SystemExit; narrowed to ``except Exception``.
    """
    # from alphabet "a" to "c"
    alphabetical_list = "abc"
    for char in alphabetical_list:
        try:
            url = "https://www.gutenberg.org/browse/authors/{}".format(char)
            site = pull_site(url)
            authors = scrape_author(site)
            print(authors)
        except Exception:
            # Skip letters whose page could not be fetched/parsed.
            continue
        time.sleep(2)  # be polite to the server
def pull_site(url):
    """Fetch *url* and return the HTTP response, raising on error status."""
    response = requests.get(url)
    response.raise_for_status()
    return response
def scrape_author(site):
    """Return the author paths linked from a browse-by-letter page.

    Bug fix: the original pattern ``r'^/browse/authors/.*$'`` had no
    capture group, so ``link_to_text.group(1)`` raised IndexError on the
    first match; the path segment is now captured.
    """
    soup = bs4.BeautifulSoup(site.text, 'html.parser')
    authors = []
    for a in soup.find_all('a', href=True):
        link_to_text = re.search(r'^/browse/authors/(.+)$', a['href'])
        if link_to_text:
            authors.append(link_to_text.group(1))
    return authors
def scrape_bookid(site):
    """Collect the numeric ebook ids from links like ``/ebooks/14269``."""
    soup = bs4.BeautifulSoup(site.text, 'html.parser')
    ids_found = []
    for anchor in soup.find_all('a', href=True):
        # e.g. https://www.gutenberg.org/ebooks/14269
        match = re.search(r'^/ebooks/(\d+)$', anchor['href'])
        if match is not None:
            ids_found.append(match.group(1))
    return ids_found
def download_books(book_id):
    """Fetch the plain-text ebook for *book_id* from the Gutenberg cache."""
    # e.g. http://www.gutenberg.org/cache/epub/14269/pg14269.txt
    text_url = "https://www.gutenberg.org/cache/epub/{0}/pg{0}.txt".format(book_id)
    reply = requests.get(text_url)
    reply.raise_for_status()
    return reply.text
def save(book_id, book_data, book_folder="/Users/admin/Desktop/books"):
    """Write *book_data* to ``book<book_id>.txt`` inside *book_folder*.

    The folder (including parents) is created if missing. *book_folder* is
    now a parameter so callers are not tied to the original hard-coded
    desktop path; the default preserves the old behavior.
    """
    # makedirs replaces mkdir: handles missing parents and existing dirs.
    os.makedirs(book_folder, exist_ok=True)
    book_name = "book{}.txt".format(book_id)
    full_path = os.path.join(book_folder, book_name)
    with open(full_path, 'w', encoding='utf-8') as fout:
        fout.write(book_data)
if __name__ == '__main__':
    # Script entry point: scrape author listings for letters a-c.
    main()
| zabuchan/web_scraping | show_gutenberg_authors.py | show_gutenberg_authors.py | py | 1,662 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.sleep",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_numbe... |
7537125263 | from __future__ import print_function
import six
from six.moves.html_parser import HTMLParser
from collections import defaultdict
from itertools import count
HTMLP = HTMLParser()
class SugarEntry:
    """Define an entry of a SugarCRM module.

    Field values are fetched lazily from the server on first access and
    cached in ``_fields``; assignments are tracked in ``_dirty_fields``
    until ``save()`` pushes them back to SugarCRM.
    """

    # Stable per-(module, entry-id) hash values, allocated on first use.
    # Works on Python 2 (`.next`) and Python 3 (`.__next__`).
    _hashes = defaultdict(count(1).next if hasattr(count(1), 'next') else count(1).__next__)

    def __init__(self, module, fmap = None):
        """Represents a new or an existing entry.

        Keyword arguments:
        module -- SugarModule object the entry belongs to
        fmap -- optional initial mapping of field name -> value
        """
        # Keep a reference to the parent module.
        self._module = module
        # Mapping 'field_name' => value for every valid field retrieved.
        self._fields = {}
        self._dirty_fields = []
        # Allow initial fields in constructor.
        if fmap is not None:
            self._fields.update(fmap)
        # Make sure that the 'id' field is always defined.
        if 'id' not in list(self._fields.keys()):
            self._fields['id'] = ''

    def __hash__(self):
        return self._hashes['%s-%s' % (self._module._name, self['id'])]

    def __unicode__(self):
        return "<SugarCRM %s entry '%s'>" % \
            (self._module._name.rstrip('s'), self['name'])

    def __str__(self):
        # Bug fix: the original `return str(self).encode('utf-8')` invoked
        # __str__ recursively and never terminated. Delegate to __unicode__.
        return self.__unicode__()

    def __contains__(self, key):
        return key in self._module._fields

    def _retrieve(self, fieldlist, force = False):
        """Fetch the given fields from the server into the local cache.

        Unless *force* is set, fields already cached are skipped entirely.
        """
        qstring = "%s.id = '%s'" % (self._module._table, self['id'])
        if not force:
            fieldlist = set(fieldlist) - set(self._fields.keys())
            if not fieldlist:
                return
        res = self._module._connection.get_entry_list(self._module._name,
                                                      qstring, '', 0,
                                                      list(fieldlist), 1, 0)
        if not res['entry_list'] or not res['entry_list'][0]['name_value_list']:
            # Entry not found server-side: blank out the requested fields.
            for field in fieldlist:
                self[field] = ''
            return
        for prop, obj in list(res['entry_list'][0]['name_value_list'].items()):
            if obj['value']:
                self[prop] = HTMLP.unescape(obj['value'])
            else:
                self[prop] = ''

    def __getitem__(self, field_name):
        """Return the value of the field 'field_name' of this SugarEntry.

        Keyword arguments:
        field_name -- name of the field to be retrieved. Supports a tuple
                      of fields, in which case the return is a tuple.
        """
        if isinstance(field_name, tuple):
            self._retrieve(field_name)
            return tuple(self[n] for n in field_name)
        if field_name not in self._module._fields:
            raise AttributeError("Invalid field '%s'" % field_name)
        if field_name not in self._fields:
            self._retrieve([field_name])
        return self._fields[field_name]

    def __setitem__(self, field_name, value):
        """Set the value of a field of this SugarEntry.

        Keyword arguments:
        field_name -- name of the field to be updated
        value -- new value for the field
        """
        if field_name not in self._module._fields:
            raise AttributeError("Invalid field '%s'" % field_name)
        self._fields[field_name] = value
        if field_name not in self._dirty_fields:
            self._dirty_fields.append(field_name)

    def save(self):
        """Save this entry in the SugarCRM server.

        If the 'id' field is blank, it creates a new entry and sets the
        'id' value.
        """
        # If 'id' wasn't blank, it's added to the list of dirty fields; this
        # way the entry will be updated in the SugarCRM connection.
        if self['id'] != '':
            self._dirty_fields.append('id')
        # nvl is the name_value_list, which has the list of attributes.
        nvl = [dict(name=field, value=self[field])
               for field in set(self._dirty_fields)]
        # Use the API's set_entry to update the entry in SugarCRM.
        result = self._module._connection.set_entry(self._module._name, nvl)
        try:
            self._fields['id'] = result['id']
        except (KeyError, TypeError):
            # Bug fix: a bare `except:` also hid programming errors; only
            # tolerate a response without an 'id', surfacing it for debugging.
            print(result)
        self._dirty_fields = []
        return True

    def relate(self, *related, **kwargs):
        """
        Relate this SugarEntry with other Sugar Entries.

        Positional Arguments:
          related -- Secondary SugarEntry Object(s) to relate to this entry.
        Keyword arguments:
          relateby -> iterable of relationship names. Should match the
                      length of *secondary. Defaults to secondary
                      module table names (appropriate for most
                      predefined relationships).
        """
        self._module._connection.relate(self, *related, **kwargs)

    def get_related(self, module, fields = None, relateby = None, links_to_fields = None):
        """Return the related entries in another module.

        Keyword arguments:
        module -- related SugarModule object (or its name)
        fields -- field names to fetch for each related record (default: ['id'])
        relateby -- custom relationship name (defaults to module.lower())
        links_to_fields -- Allows retrieval of related fields from additional
                           related modules for retrieved records.

        Bug fixes vs. the original: ``basestring`` (a NameError on Python 3,
        in a module that already uses ``six``) is replaced with
        ``six.string_types``, and the inner record loop now unescapes the
        current ``rfield`` instead of the stale outer-loop ``val``.
        """
        if fields is None:
            fields = ['id']
        if links_to_fields is None:
            links_to_fields = []
        connection = self._module._connection
        # Accomodate retrieval of modules by name.
        if isinstance(module, six.string_types):
            module = connection[module]
        result = connection.get_relationships(self._module._name,
                                              self['id'],
                                              relateby or module._name.lower(),
                                              '',  # Where clause placeholder.
                                              fields,
                                              links_to_fields)
        entries = []
        for idx, elem in enumerate(result['entry_list']):
            entry = SugarEntry(module)
            for name, field in list(elem['name_value_list'].items()):
                val = field['value']
                entry._fields[name] = HTMLP.unescape(val) if isinstance(val, six.string_types) else val
            entry.related_beans = defaultdict(list)
            linked = result['relationship_list'][idx]
            for relmod in linked:
                for record in relmod['records']:
                    relentry = {}
                    for fname, fmap in record.items():
                        rfield = fmap['value']
                        relentry[fname] = HTMLP.unescape(rfield) if isinstance(rfield, six.string_types) else rfield
                    entry.related_beans[relmod['name']].append(relentry)
            entries.append(entry)
        return entries
| gddc/python_webservices_library | sugarcrm/sugarentry.py | sugarentry.py | py | 7,119 | python | en | code | 46 | github-code | 36 | [
{
"api_name": "six.moves.html_parser.HTMLParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "itertools.count",
"line_number": 11,
"usage_type": "call"
},
{
"api_name"... |
36896022139 | import dns.exception
import dns.name
import dns.resolver
public_enum_domain = dns.name.from_text('e164.arpa.')
def from_e164(text, origin=public_enum_domain):
    """Convert an E.164 number in textual form into a Name object whose
    value is the ENUM domain name for that number.

    @param text: an E.164 number in textual form.
    @type text: str
    @param origin: The domain in which the number should be constructed.
    The default is e164.arpa.
    @type: dns.name.Name object or None
    @rtype: dns.name.Name object
    """
    digits = [ch for ch in text if ch.isdigit()]
    digits.reverse()
    return dns.name.from_text('.'.join(digits), origin=origin)
def to_e164(name, origin=public_enum_domain, want_plus_prefix=True):
    """Convert an ENUM domain name into an E.164 number.

    @param name: the ENUM domain name.
    @type name: dns.name.Name object.
    @param origin: A domain containing the ENUM domain name. The
    name is relativized to this domain before being converted to text.
    @type: dns.name.Name object or None
    @param want_plus_prefix: if True, add a '+' to the beginning of the
    returned number.
    @rtype: str
    """
    if origin is not None:  # idiom fix: was `not origin is None`
        name = name.relativize(origin)
    # Every label must be a single digit, otherwise this is not a valid
    # ENUM name.
    dlabels = [d for d in name.labels if (d.isdigit() and len(d) == 1)]
    if len(dlabels) != len(name.labels):
        raise dns.exception.SyntaxError('non-digit labels in ENUM domain name')
    dlabels.reverse()
    text = ''.join(dlabels)
    if want_plus_prefix:
        text = '+' + text
    return text
def query(number, domains, resolver=None):
    """Look for NAPTR RRs for the specified number in the specified domains.

    e.g. lookup('16505551212', ['e164.dnspython.org.', 'e164.arpa.'])

    Tries each domain in order and returns the first NAPTR answer; raises
    dns.resolver.NXDOMAIN if no domain yields one.
    """
    if resolver is None:
        resolver = dns.resolver.get_default_resolver()
    for domain in domains:
        # NOTE(review): `unicode` makes this Python-2-only; under Python 3
        # this line raises NameError -- confirm the target interpreter.
        if isinstance(domain, (str, unicode)):
            domain = dns.name.from_text(domain)
        qname = dns.e164.from_e164(number, domain)
        try:
            return resolver.query(qname, 'NAPTR')
        except dns.resolver.NXDOMAIN:
            # Not in this domain; try the next one.
            pass
    raise dns.resolver.NXDOMAIN
| RMerl/asuswrt-merlin | release/src/router/samba-3.6.x/lib/dnspython/dns/e164.py | e164.py | py | 2,142 | python | en | code | 6,715 | github-code | 36 | [
{
"api_name": "dns.exception.name.from_text",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "dns.exception.name",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "dns.exception",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "dns.e... |
41308544644 | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, GLib, GObject
from random import randint
from threading import Thread
from time import sleep
import i3
def do_nothing(*args):
    """No-op callback used to swallow GTK signals.

    Bug fix: the original defined ``do_nothing()`` and then immediately
    shadowed it with ``do_nothing(a, b)``, making the zero-argument form
    unreachable. A single *args variant supports both call shapes.
    """
    return
# Ties a Gtk button to a specific i3 window id so a click can jump straight
# to that window. There are probably better ways to do this...
class WindowButton():
    """Pairs a Gtk.Button with an i3 window id; clicking focuses that
    window and closes the overview."""

    def __init__(self, a_id, a_window):
        self.window = a_window
        self.id = a_id
        self.button = Gtk.Button()

    def clicked(self, widget):
        """Button signal handler: focus the remembered i3 window, then
        close the overview window."""
        i3.focus(id=self.id)
        self.window._close_window()
# the class that handles the Window
class mywindow:
def __init__(self):
#needed because the whole window is running in a seperate thread from the loop that reads the fifo
Gdk.threads_init()
GObject.threads_init()
Thread(target=self._init_helper).start()
# the real __init__ that gets started as a new Thread
def _init_helper(self):
self.win = Gtk.Window()
# important for my i3 config. it gets set to fullscreen by that
self.win.set_role("i3-overview")
self.win.connect("delete-event", do_nothing)
self.open = False
#initial setup for the window components
self.populate_window()
Gtk.main()
def populate_window(self):
#top-level boxes stacking horizontally
self.mbox = Gtk.Box(spacing=6, orientation=1)
self.tree_grid = Gtk.Grid()
self.tree_grid.override_background_color(0,Gdk.RGBA(0,0,0,1))
self.mbox.pack_start(self.tree_grid, True, True, 0)
self.win.add(self.mbox)
# this adds a big fat exit button to the bottom of the window
#bbox = Gtk.Box(spacing=6, )
#exit_but = Gtk.Button(label="exit")
#exit_but.connect("clicked", self.exit_button_click)
#bbox.pack_start(exit_but, True, True, 0)
#self.mbox.pack_end(bbox, True, True, 0)
#this creates the tree of labels/buttons
self._create_tree()
def _create_tree(self):
#clean the tree-box from all children
for child in self.tree_grid.get_children():
self.tree_grid.remove(child)
#get the current tree layout
tree = i3.get_tree()
# the top level of the trees are the displays
num_displays = len(tree["nodes"]) - 1 # ignore the __i3 thingy
display_counter = 0
for display in tree["nodes"]:
if "__i3" in display["name"]: # ignores the __i3 thingy. i think it contains the i3bar
continue
# every display gets his own label on the top
disp_label = Gtk.Label(label=display["name"])
disp_label.override_background_color(0, Gdk.RGBA(0.8,0,0,1))
disp_label.override_color(0, Gdk.RGBA(1,1,1,1))
display_grid = Gtk.Grid() #every display gets its own grid, so we can present them tidely
display_grid.override_background_color(0, Gdk.RGBA(0,0,0,1))
spacer = Gtk.Label(label="Hah")
spacer.override_background_color(0, Gdk.RGBA(0,0,0,1)) # needed because grids dont support spacing
spacer.override_color(0, Gdk.RGBA(0,0,0,1))
row = 0
if display_counter > num_displays / 2 - 1:
row = 1
line = display_counter % (num_displays / 2)
self.tree_grid.attach(disp_label, line, row*3, 1 , 1)
self.tree_grid.attach(display_grid, line, row*3+1, 1 , 1)
self.tree_grid.attach(spacer, line, row*3 + 2, 1 , 1)
for cont in display["nodes"]:
if "content" == cont["name"]: #each display has content and top/bottom docker. we only want the content
ws_counter = 0
num_ws = len(cont["nodes"])
for workspace in cont["nodes"]:
if len(workspace["nodes"]) == 0:
continue
# every workspace gets his own label on the top
label = Gtk.Label()
label.set_label(workspace["name"])
label.override_color(0,Gdk.RGBA(1,1,1,1))
label.override_background_color(0,Gdk.RGBA(0,0.1,0.6,0.6))
grid = Gtk.Grid()
next_level_box = Gtk.Box(spacing=0, ) # here is the place where the containers/windows get added
grid.attach(label,0,0,1,1)
grid.attach(next_level_box,0,1,1,1);
spacerh = Gtk.Label(label="Hah") # needed because grids dont support spacing
spacerv = Gtk.Label(label="Hah") # needed because grids dont support spacing
spacerh.override_background_color(0, Gdk.RGBA(0,0,0,1))
spacerv.override_background_color(0, Gdk.RGBA(0,0,0,1))
spacerh.override_color(0, Gdk.RGBA(0,0,0,1))
spacerv.override_color(0, Gdk.RGBA(0,0,0,1))
# partion the workspaces into three rows (and in my case maximum 3 lines)
row = 0
if ws_counter > num_ws / 3 - 1:
row = 1
if ws_counter > (num_ws*2) / 3 - 1:
row = 2
line = ws_counter % (num_ws / 3)
display_grid.attach(grid, line*2, row*2, 1 , 1)
display_grid.attach(spacerh, line*2, row*2 + 1, 1 , 1)
display_grid.attach(spacerv, line*2 + 1, row*2, 1 , 1)
self._rec_tree_func(workspace, next_level_box, 0)
ws_counter += 1
display_counter += 1
def _rec_tree_func(self, root, parent_box, level):
#decide wether the leave is a container or a window
for leave in root["nodes"] :
if len(leave["nodes"]) == 0:
label = str(leave["name"]).split("-")[-1] # only display the text after the last dash. in most cases the programs name
button = WindowButton(leave["window"], self)
button.button.set_label(label)
button.button.connect("clicked", button.clicked) #jumps to the window and closes the overview
parent_box.pack_start(button.button, True, True, 0)
button.button.override_background_color(0,Gdk.RGBA(0,0,0,1))
button.button.override_color(0,Gdk.RGBA(1,1,1,1))
else:
# generating some nice grey tones for the labels for better differentiation
label = Gtk.Label()
label.override_color(0,Gdk.RGBA(1,1,1,1))
r = 0.7 - 0.1*level
label.override_background_color(0,Gdk.RGBA(r,r,r,1))
if leave["name"]: #sometimes the containers do not have names. defaulting to "container"
label.set_label(leave["name"])
else:
label.set_label("container")
grid = Gtk.Grid()
next_level_box = Gtk.Box(spacing=0, ) # here is the place for the next level of recursion
grid.attach(label,0,0,1,1)
grid.attach(next_level_box,0,1,1,1);
parent_box.pack_start(grid, True, True, 0)
self._rec_tree_func(leave, next_level_box, level + 1) # start next level of recursion only if we didnt operate on a window
# wouldnt make much of a difference but ya know
# not needed anymore. leaving it still
def exit_button_click(self, button):
self._close_window()
#open window from within the thread
def _open_window(self):
self._create_tree()
self.win.show_all()
self.open = True
#open window from outside the thread
def open_window(self):
Gdk.threads_enter()
self._open_window()
Gdk.threads_leave()
#closing window from within the thread
def _close_window(self):
self.win.hide()
self.open = False
#closing window from outside the thread
def close_window(self):
Gdk.threads_enter()
self._close_window()
Gdk.threads_leave()
#toggel the window from within the Thread
def _toggle_window(self):
if(self.open):
self._close_window()
else:
self._open_window()
#toggel the window from outside the Thread
def toggle_window(self):
if(self.open):
self.close_window()
else:
self.open_window()
#exit the Gtk loop
def exit(self):
Gtk.main_quit()
| KillingSpark/i3-Overview | mywindow.py | mywindow.py | py | 8,918 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gi.require_version",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk.Button",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gtk",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "i3.focus... |
11686525581 | from pynamodb.models import Model
from pynamodb.attributes import (
UnicodeAttribute,
UnicodeSetAttribute,
NumberAttribute,
BooleanAttribute,
MapAttribute,
UTCDateTimeAttribute
)
from datetime import datetime
class WatchingList(Model):
"""
References:
https://pynamodb.readthedocs.io/en/latest/index.html
Notes:
Be aware that the models defined here should be consistent with
dynamo stack in cdk.
"""
class Meta:
table_name = 'watching-list'
region = 'us-east-1'
ticker_symbol = UnicodeAttribute(hash_key=True)
| tanlin2013/stockbot | .aws/stack/lib/dynamo/table.py | table.py | py | 603 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pynamodb.models.Model",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "pynamodb.attributes.UnicodeAttribute",
"line_number": 26,
"usage_type": "call"
}
] |
42152798528 | #! Lianjia_Sold/sync2es.py
# synchronize data in MongoDB to ElasticSearch with updating item
from pymongo import MongoClient
from datetime import datetime
from uuid import uuid1
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
# import json
class MongoSyncEs(object):
def __init__(self):
self.es_node = '198.181.46.127:9200'
self.es_index = 'crawler.sold.v2'
self.es_type = 'info'
self.mongo_uri = 'mongodb://mongo:mongo2018@140.143.237.148:27020/?replicaSet=rs-27020'
self.mongo_db = 'scrapy-lianjia_sold'
self.count = 0
self.query = datetime.now().date().strftime('%Y-%m-%d')
def connection_mongo(self):
conn = MongoClient(self.mongo_uri)
db = conn[self.mongo_db]
return db
def connection_es(self):
es = Elasticsearch([self.es_node])
if not es.indices.exists(index=self.es_index): es.indices.create(index=self.es_index)
return es
def mongo_data_process(self, data):
# format data collected from mongo
if data['浏览'] == 'NA':
data['浏览'] = 0
if data['挂牌价格'] == 'NA':
data['挂牌价格'] = 0
if data['基本属性']['建成年代'] == '未知':
data['基本属性']['建成年代'] = 0
else:
data['基本属性']['建成年代'] = int(data['基本属性']['建成年代'])
if data['基本属性']['建筑面积']:
data['基本属性']['建筑面积'] = float(data['基本属性']['建筑面积'][:-1])
else:
data['基本属性']['建筑面积'] = float(0)
if not data['基本属性']['产权年限'] == '未知':
data['基本属性']['产权年限'] = int(data['基本属性']['产权年限'][:-1])
else:
data['基本属性']['产权年限'] = 0
if not data['小区概况']['年代'] == '未知':
data['小区概况']['年代'] = int(data['小区概况']['年代'][:-1])
else:
data['小区概况']['年代'] = 0
if data['小区概况']['楼栋总数']:
data['小区概况']['楼栋总数'] = int(data['小区概况']['楼栋总数'][:-1])
if data['成交时间']:
data['成交时间'] = data['成交时间'].replace('.', '-')
return data
def es_data_create(self, data):
doc = {
'_op_type': 'create',
'_index': self.es_index,
'_type': self.es_type,
'_id': uuid1(),
'_source': self.mongo_data_process(data)
}
yield doc
def es_pipeline_datetime(self, es):
id = 1
es.ingest.put_pipeline(
id=id,
body={
"description": "crawler.lianjia",
"processors": [
{
"date": {
"field": "initial_time",
"target_field": "@timestamp",
"formats": ["Y-M-d H:m:s"],
"timezone": "Asia/Shanghai"
}
}
]
}
)
return id
def start(self):
db = self.connection_mongo()
es = self.connection_es()
with open('sync_data.txt', 'a') as f:
f.write('+++{}\n'.format(datetime.now()))
for collection in ['sh-sold', 'su-sold']:
cursor = db[collection].find({'initial_time':{'$regex':self.query}},
{'_id':0,'基本属性.套内面积':0,'基本属性.供暖方式':0,'小区概况.hid':0,
'小区概况.rid':0,'小区概况.其他户型':0,'小区概况.在售链接':0,'小区概况.成交链接':0,
'小区概况.小区详情':0,'小区概况.出租链接':0})
for data in cursor:
bulk(es, self.es_data_create(data), pipeline=self.es_pipeline_datetime(es))
self.count += 1
f.write(data.get('房源链接')+'\n')
f.write('+++total data: {}\n'.format(self.count))
task = MongoSyncEs()
task.start()
| feelingu1314/lianjia | lianjia_sold/lianjia_sold/sync2es.py | sync2es.py | py | 4,288 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "elasticse... |
1985519465 | import kivy
from kivy.app import App
from kivy.lang import Builder
from kivy.properties import NumericProperty, ObjectProperty, StringProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.recycleview import RecycleView
from kivy.uix.screenmanager import Screen, ScreenManager, WipeTransition
import bank
import calc
# Database modules.
import database
import user
from bank import init_bank
from database import main
kivy.require("1.11.1")
from os import listdir
kv_path = "./kv/"
for kv in listdir(kv_path):
Builder.load_file(kv_path + kv)
class LoginScreen(Screen):
"""This' the first screen (1)
This screen will display the login. Both, signup and login options.
Args:
Screen (Screen): A new window.
"""
username = StringProperty()
password = StringProperty()
def login(self):
# If True, go to menu screen, otherwise show a popup.
if database.db.validate_user(
self.username,
self.password,
):
# Create an instance for the current user; with his info start
# running the bank.
user.init_user(self.username, self.password)
init_bank(user.user)
sm.transition.direction = "up"
sm.current = "menu"
else:
popup_msg(
msg="Ups! It seems like you haven't created any "
+ "account yet\nTry to create a new one first!",
status=False,
)
def sign_up(self):
"""Go to sign up screen."""
sm.transition.direction = "right"
sm.current = "sign_up"
class SignupScreen(Screen):
"""Here, the user will be able to create a new account. After that, he'll go
to menu screen immediately.
Args:
Screen (Screen): A different screen for signing up.
"""
username = StringProperty()
password = StringProperty()
def add_new_user(self):
if database.db.create_new_user(
self.username,
self.password,
):
popup_msg(
func=self.go_to_menu, msg="User created successfully!", status=True
)
# After sign up as log in, creates a new user and run the bank.
user.init_user(self.username, self.password)
init_bank(user.user)
else:
popup_msg(
msg="Ups! We've caught a bug!\nPlease send an issue with"
+ " an extend description of this annoying bug!",
status=False,
)
def go_to_menu(self, *args):
sm.transition.direction = "up"
sm.current = "menu"
class MenuScreen(Screen):
"""This' the second screen (2)
# Will display the different available options to the user.
Args:
Screen (Screen): The screen.
"""
pass
class TransactionScreen(Screen):
"""This' the third screen (3)
Args:
Screen (Screen): The screen.
"""
user_id = ObjectProperty(None)
cash = ObjectProperty(None)
object = StringProperty()
def make_transaction(self):
try:
bank.bank.cash_transaction(int(self.user_id.text), float(self.cash.text))
popup_msg(msg="Transaccion completada!", status=True)
except Exception as e:
print(e)
popup_msg(msg=str(e))
class StatusScreen(Screen):
"""Screen for displying the info of the actual user only.
Args:
Screen (Screen): The screen.
"""
deposit_count = ObjectProperty(rebind=True)
loan_count = ObjectProperty(rebind=True)
deposit_total = ObjectProperty(None)
loan_total = ObjectProperty(None)
euros = ObjectProperty(None)
dollars = ObjectProperty(None)
object = ObjectProperty(None)
def show_data(self):
"""Get the data from the bank and then shows it to the current user."""
labels = (
self.deposit_count,
self.loan_count,
self.deposit_total,
self.loan_total,
self.euros,
self.dollars,
self.object,
)
data = bank.bank.load_data_user()
try:
for label, data in zip(labels, data):
label.text = str(data) if not isinstance(data, float) else f"{data:.6}"
except Exception as e:
popup_msg(msg=str(e))
class ConverterScreen(Screen):
input_amount = NumericProperty()
lbl_convert = ObjectProperty(None)
def __init__(self, **kw):
super().__init__(**kw)
self.spinner_value_from = None
self.spinner_value_to = None
def set_spinner_value_from(self, spinner):
self.spinner_value_from = spinner.text
def set_spinner_value_to(self, spinner):
self.spinner_value_to = spinner.text
def get_match_currency(self):
DO: str = "Dominican pesos"
USD: str = "Dollars"
EUR: str = "Euros"
if self.spinner_value_from == USD:
if self.spinner_value_to == EUR:
return calc.dollars_to_euros
elif self.spinner_value_to == DO:
return calc.dollars_to_dop
elif self.spinner_value_from == EUR:
if self.spinner_value_to == USD:
return calc.euros_to_dollars
elif self.spinner_value_to == DO:
return calc.euros_to_dop
elif self.spinner_value_from == DO:
if self.spinner_value_to == USD:
return calc.dop_to_dollars
elif self.spinner_value_to == EUR:
return calc.dop_to_euros
else:
popup_msg()
if self.spinner_value_from == self.spinner_value_to:
self.lbl_convert.text = str(round(self.input_amount, 2))
def do_convertion(self):
conditions = (
self.spinner_value_from is not None,
self.spinner_value_to is not None,
)
if all(conditions):
action = self.get_match_currency()
if action:
value = action(self.input_amount)
self.lbl_convert.text = str(value)
else:
self.lbl_convert.text = "0.0"
print(f"## From: {self.spinner_value_from} To: {self.spinner_value_to}")
class SaveObjectScreen(Screen):
pass
# The screen's manager; to change between different screens
class Manager(ScreenManager):
pass
class RV(RecycleView):
"""For containing the menu's buttons.
Args:
RecycleView (RecycleView): The RecycleView to be used.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.data = [
{"text": "Realizar un deposito", "on_press": MyLayout.show_deposit},
{"text": "Tomar un prestamo", "on_press": MyLayout.show_loan},
{"text": "Transacciones", "on_press": MyLayout.show_transaction},
{"text": "Consulta de estado", "on_press": MyLayout.show_status},
{"text": "Pago de prestamo", "on_press": MyLayout.show_payment},
{"text": "Cambio de divisas", "on_press": MyLayout.show_converter},
{"text": "Guardar un objeto", "on_press": MyLayout.show_save_object},
]
class MyLayout(BoxLayout):
"""For being used with the popups.
Args:
BoxLayout (BoxLayout): The layout to be used.
"""
message = ObjectProperty(None)
amount = StringProperty()
button = ObjectProperty(None)
def __init__(self, msg: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self.message.text = msg
@staticmethod
def show_deposit():
layout = MyLayout("Enter the amount to be saved.")
popup_msg(content=layout, title="Make deposit")
layout.button.text = "Save deposit!"
layout.button.bind(on_press=layout.do_deposit)
def do_deposit(self, *args):
try:
bank.bank.make_deposit(float(self.amount))
popup_msg(msg="Deposito realizado con exito!", status=True)
# else:
# popup_msg()
except Exception as e:
popup_msg(msg=str(e))
@staticmethod
def show_loan():
layout = MyLayout("Enter the needed cash.")
popup_msg(content=layout, title="Make a loan")
layout.button.text = "Receive the loan!"
layout.button.bind(on_press=layout.make_loan)
def make_loan(self, *args):
try:
bank.bank.make_loan(float(self.amount))
popup_msg(msg="Prestamo recibido!", status=True)
except Exception as e:
popup_msg(msg=str(e))
@staticmethod
def show_transaction():
sm.current = "transaction"
@staticmethod
def show_status():
sm.get_screen("status").show_data()
sm.current = "status"
@staticmethod
def show_payment():
layout = MyLayout(f"Debes {bank.bank.get_total_loan:.6}")
popup_msg(content=layout, title="Payment")
layout.button.text = "Pay loan!"
layout.button.bind(on_press=layout.make_payment)
def make_payment(self, *args):
try:
bank.bank.pay_loan(float(self.amount))
popup_msg(msg="Payment done!", status=True)
except Exception as e:
popup_msg(msg=str(e))
@staticmethod
def show_converter():
sm.current = "converter"
@staticmethod
def show_save_object():
sm.current = "save_object"
# Create the screen manager.
sm = ScreenManager(transition=WipeTransition())
# Used to run the program. This class must be one method (build) and return it.
class BankManagementApp(App):
def build(self):
# A tuple with the different screens
screens = (
LoginScreen(name="login"),
SignupScreen(name="sign_up"),
MenuScreen(name="menu"),
TransactionScreen(name="transaction"),
StatusScreen(name="status"),
ConverterScreen(name="converter"),
SaveObjectScreen(name="save_object"),
)
for i in screens:
sm.add_widget(i)
return sm
def popup_msg(
func=lambda *args: None,
msg: str = "Ups! A bug caught!",
status: bool = False,
content=None,
title: str = None,
*args,
**kwargs,
):
"""Display a popup depending in the given optional arguments.
Args:
func (def, optional): The function to be bind (on_dismiss). Defaults to None.
msg (str, optional): The menssage to show. Defaults to "Ups! A bug caught!".
status (bool, optional): True for done; False to error. Defaults to True.
content (Layout): The layout to be used by the popup. If no passed a
label will be used.
title (str): For the title of the popup. If no passed a title will be
chose depending the status (Error or Done).
"""
# Set the title.
if title is not None:
popup_title = title
else:
if status:
popup_title = "Done!"
else:
popup_title = "Error!"
# Create the predefined label, to be used if any content didn't be passed.
lbl = Label(
text=msg,
italic=True,
font_size=20,
halign="justify",
)
title_size = 20
title_align = "center"
title_color = 1, 0, 0, 0.8
# Create a new popup.
popup = Popup(
title=popup_title,
content=content if content is not None else lbl,
title_size=title_size,
size_hint=(0.8, 0.65),
title_align=title_align,
title_color=title_color,
on_dismiss=func,
)
popup.open()
# Run the app.
if __name__ == "__main__":
main()
app = BankManagementApp()
app.run()
| djose1164/bank-management-system | src/main.py | main.py | py | 11,784 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "kivy.require",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "kivy.lang.Builder.load_file",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "kivy.lang.Builder... |
40861536656 | import numpy as np
from dataclasses import dataclass
import random
from image_multi_thresholding.base import _between_class_var, _image_probabilities
"""
Find thresholds of the gray levels using shuffled frog-leaping algorithm with between
class variance as fitness function.
"""
def _is_valid_frog(frog, L):
return (len(set(frog)) == len(frog) and frog[0] != 0 and frog[-1] != L-1)
@dataclass()
class SFLOptions:
"""Options to be passed to the threshold_sfl function."""
number_memeplex: int = 4
"""Number of memeplexes."""
number_frog: int = 10
"""Number of frogs in each memeplex."""
number_evolution: int = 10
"""Total of replication in memeplex evolution."""
def threshold_sfl(
img: np.ndarray,
k: int,
iter: int = 100,
options: SFLOptions = SFLOptions()):
"""Find thresholds of the gray levels using shuffled frog-leaping algorithm.
Uses between class variance as a fitness function. SFLOptions has default recommended
values for this algorithm, but you can change them by creating a new instance of it
with your preferred values.
**Arguments**:
img: A 2D numpy.ndarray containing the pixel values of the image.
k: Number of thresholds to find.
iter: Number of iterations for the algorithm.
options: If set, overrides the default options for the algorithm.
**Typical usage example**:
img = base.load_image('/route/to/image.png')
options = SFLOptions(
number_memeplex = 42
)
thresholds = threshold_sfl(
img = img,
k = 10,
options = options
)
"""
prob = _image_probabilities(img)
L = len(prob)
pop_size = options.number_memeplex * options.number_frog
frogs = np.array([[random.randint(0, L)
for _ in range(k)] for _ in range(pop_size)])
frogs.sort()
bcv = np.array([_between_class_var(prob, frog)
if _is_valid_frog(frog, L) else 0 for frog in frogs])
best_bcv = max(bcv)
best_global_frog = frogs[np.argmax(bcv)]
counter = 0
while counter < iter:
sort_indeces = np.flip(np.argsort(bcv))
sorted_frogs = np.array([frogs[i] for i in sort_indeces])
all_frogs = []
for m in range(options.number_memeplex):
memeplex_frogs = np.array(
[sorted_frogs[n+m*options.number_frog] for n in range(options.number_frog)], dtype=np.int16)
evolution = 0
while evolution < options.number_evolution:
bcv_memeplex = np.array([_between_class_var(prob, frog)
if _is_valid_frog(frog, L) else 0 for frog in memeplex_frogs])
best_frog = memeplex_frogs[np.argmax(bcv_memeplex)]
worst_frog = memeplex_frogs[np.argmin(bcv_memeplex)]
worst_position = np.argmin(bcv_memeplex)
new_worst_frog = np.sort(np.array(
worst_frog + random.random()*(best_frog - worst_frog), dtype=np.int16))
if _is_valid_frog(new_worst_frog, L):
if _between_class_var(prob, new_worst_frog) > _between_class_var(prob, worst_frog):
memeplex_frogs[worst_position] = new_worst_frog
else:
new_worst_frog = np.sort(
worst_frog + random.random()*(best_global_frog - worst_frog)).astype(np.int16)
if _is_valid_frog(new_worst_frog, L) and _between_class_var(prob, new_worst_frog) > _between_class_var(prob, worst_frog):
memeplex_frogs[worst_position] = new_worst_frog
else:
memeplex_frogs[worst_position] = np.array(
[random.random() * (L-1) for _ in range(k)])
evolution = evolution + 1
if len(all_frogs) == 0:
all_frogs = memeplex_frogs
else:
all_frogs = np.concatenate((all_frogs, memeplex_frogs))
bcv = np.array([_between_class_var(prob, frog)
if _is_valid_frog(frog, L) else 0 for frog in all_frogs])
best_bcv = max(bcv)
best_global_frog = all_frogs[np.argmax(bcv)]
counter += 1
return sorted(best_global_frog.tolist())
| image-multithresholding/Image-multithresholding | src/image_multi_thresholding/threshold/sfl.py | sfl.py | py | 4,404 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "dataclasses.dataclass",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "image_multi_thresholding.base._image_probabilities",
"line_number": 59,
"usage_type": "call"
}... |
31489463671 | from flask import Flask
from flask import render_template
from flask import request
from urllib.parse import quote
from urllib.request import urlopen
import json
app = Flask(__name__)
OPEN_WEATHER_URL = "http://api.openweathermap.org/data/2.5/weather?q={0}&units=metric&APPID={1}"
OPEN_WEATHER_KEY = '36c794142f7b54ffe6765a3276168f2d'
OPEN_NEWS_URL = "http://newsapi.org/v2/everything?q={0}&from=2021-01-01&sortBy=publishedAt&apiKey={1}"
OPEN_NEWS_KEY = 'c52994d166e04d2483ab223e8abc68b7'
OPEN_COVID_URL = "http://newsapi.org/v2/everything?q=tesla&from=2021-01-01&sortBy=publishedAt&apiKey=c52994d166e04d2483ab223e8abc68b7"
@app.route('/')
def index():
city = request.args.get('city')
if not city:
city = 'bangkok'
weather = get_weather(city,OPEN_WEATHER_KEY)
url1 = OPEN_COVID_URL
data1 = urlopen(url1).read()
parsed1 = json.loads(data1)
articles = parsed1['articles']
desc = []
news = []
img = []
link = []
for i in range(1,6):
myarticles = articles[i]
news.append(myarticles['title'])
desc.append(myarticles['content'])
img.append(myarticles['urlToImage'])
link.append(myarticles['url'])
mylist = zip(news, desc, img, link)
return render_template('index.html', weather= weather, context= mylist)
def get_weather(city,API_KEY):
query = quote(city)
url = OPEN_WEATHER_URL.format(city, API_KEY)
data = urlopen(url).read()
parsed = json.loads(data)
weather = None
if parsed.get('weather'):
description = parsed['weather'][0]['description']
temperature = parsed['main']['temp']
city = parsed['name']
pressure = parsed['main']['pressure']
humidity = parsed['main']['humidity']
wind = parsed['wind']['speed']
icon = parsed['weather'][0]['icon']
country = parsed['sys']['country']
weather = {'description': description,
'temperature': temperature,
'city': city,
'country': country,
'pressure': pressure,
'humidity': humidity,
'wind': wind,
'icon': icon
}
return weather
@app.route('/news')
def news():
news = request.args.get('news')
if not news:
news = 'covid-19'
news_list = get_news(news,OPEN_NEWS_KEY)
return render_template('news.html', context = news_list)
def get_news(news,NEWS_KEY):
query_news = quote(news)
url_news = OPEN_NEWS_URL.format(news,NEWS_KEY)
data_news = urlopen(url_news).read()
parsed_news = json.loads(data_news)
articles_news = parsed_news['articles']
desc = []
news = []
link = []
for i in range(len(articles_news)):
myarticles_news = articles_news[i]
news.append(myarticles_news['title'])
desc.append(myarticles_news['content'])
link.append(myarticles_news['url'])
mylist = zip(news,desc,link)
return mylist
@app.route('/about')
def about():
return render_template('about.html')
| pacharasiri/news-app-61102010154 | app.py | app.py | py | 3,173 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "flask.reque... |
17795520961 | import math
from typing import List
class Solution:
def minEatingSpeed(self, piles: List[int], h: int) -> int:
def can_eat(k: int) -> bool:
ans = 0
for pile in piles:
ans += math.ceil(pile / k)
return ans <= h
left = 1
right = 10 ** 9
while left < right:
mid = (left + right) >> 1
if not can_eat(mid):
left = mid + 1
else:
right = mid
return left
| fastso/learning-python | leetcode_cn/solved/pg_875.py | pg_875.py | py | 517 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "math.ceil",
"line_number": 11,
"usage_type": "call"
}
] |
36494224748 | import os
import json
import platform
from os import path
from time import sleep
import winsound
from win10toast import ToastNotifier
toaster = ToastNotifier()
# assets
APP_ICO = path.join("assets","app.ico")
COFFEE_ICO = path.join("assets","coffee.ico")
TAUNT_WAV= path.join("assets","taunt.wav")
JSDATA:dict
def load_json():
with open("appdata.json") as jsfile:
return json.load(jsfile)
def update_json(data:dict):
with open("appdata.json","w") as jsfile:
json.dump(data,jsfile,indent=2)
#notifier
def _notify(msg, icon=COFFEE_ICO, title=None,Soundfile =TAUNT_WAV ):
toaster.show_toast(title=title if title else "Notification",
msg=msg,
icon_path=icon,
threaded = True)
if Soundfile:
winsound.PlaySound(Soundfile,flags=winsound.SND_FILENAME)
def sed_alert():
dt = load_json()
if dt['sedentary_alert']:
interval_secs = dt["interval"] * 6
sleep(interval_secs)
_notify(
msg="Blink Your eyes",)
sed_alert()
| roshansai24081/Sedentary-alert | _app.py | _app.py | py | 1,172 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "win10toast.ToastNotifier",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"lin... |
34696107762 | #!/usr/bin/env python3
# coding : utf-8
# @author : Francis.zz
# @date : 2023-07-28 15:30
# @desc : 使用网页的session key访问chat gpt
from revChatGPT.V1 import Chatbot
import json
"""
使用 `pip install --upgrade revChatGPT` 安装依赖包
使用文档说明:https://github.com/CoolPlayLin/ChatGPT-Wiki/blob/master/docs/ChatGPT/V1.md
1. 可以使用用户名密码、session_token或者access_token 3种方式访问,但是不能同时存在
config参数:
{
"email" - "OpenAI 账户邮箱",
"password" - "OpenAI 账户密码",
"session_token" - "<session_token>"
"access_token" - "<access_token>"
"proxy" - "<proxy_url_string>",
"paid" - True/False #是不是plus帐户
}
2. 用户名密码方式不支持谷歌和微软账号注册的
3. https://chat.openai.com/api/auth/session 获取access_token。
在chat.openai.com的 cookie 中找到__Secure-next-auth.session-token。access_token和session-token使用一个就行了
"""
chatbot = Chatbot(config=json.load(open("D:\\qiyu-work\\chatgpt_auth.json")))
def start_chat():
print('Welcome to ChatGPT CLI')
while True:
prompt = input('> ')
response = ""
for data in chatbot.ask(
prompt
):
response = data["message"]
print(response)
if __name__ == "__main__":
start_chat()
| zzfengxia/python3-learn | gpt/chatgpt_conversion.py | chatgpt_conversion.py | py | 1,355 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "revChatGPT.V1.Chatbot",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 28,
"usage_type": "call"
}
] |
2440750792 | #!/bin/env python3
from typing import Optional, TypeVar
from sqlmodel import SQLModel, Field
class MedicationLinkBase(SQLModel):
medication_id : Optional[int] = Field(
default=None,
foreign_key="medication.id"
)
class MedicationLinkBaseWithRequiredID(SQLModel):
medication_id : int = Field(
foreign_key="medication.id"
)
class MedicationLinkBaseAsPrimaryKey(SQLModel):
medication_id : Optional[int] = Field(
default=None,
foreign_key="medication.id",
primary_key=True
)
class MedicationLinkBaseAsPrimaryKeyWithRequiredID(SQLModel):
medication_id : int = Field(
foreign_key="medication.id",
primary_key=True
)
T_MedicationLink_TypeVar = TypeVar(
"T_MedicationLink_TypeVar",
bound=MedicationLinkBase|MedicationLinkBaseAsPrimaryKey|MedicationLinkBaseWithRequiredID|MedicationLinkBaseAsPrimaryKeyWithRequiredID
)
| shlomo-Kallner/poppy_backend_assessment | src/poppy_s/lib/models/base/medications.py | medications.py | py | 924 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlmodel.SQLModel",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sqlmodel.Field",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sqlmodel.SQLModel",
... |
30164407430 | #!/usr/bin/python3
"""
File: test_file_storage.py
"""
import unittest
import json
class TestFileStorage(unittest.TestCase):
"""File Storage Test"""
def test_json_load(self):
with open("file.json") as fd:
d = json.load(fd)
self.assertEqual(isinstance(d, dict), True)
def test_file(self):
with open("file.json") as fd:
self.assertTrue(len(fd.read()) > 0)
if __name__ == '__main__':
unittest.main()
| peterkthomas/AirBnB_clone | tests/test_models/test_engine/test_file_storage.py | test_file_storage.py | py | 474 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "unittest.main",
"line_number": 23,
"usage_type": "call"
}
] |
335981878 | import os, wx, atexit
class StickyNotes():
def __init__(self):
self.save_exists = False
self.list = [""]
self.list_to_save = []
def run(self):
self.check_file()
for line in self.list:
frame = StickyFrame(None, 'Sticky Note', line)
def check_file(self):
if not os.path.exists("sn_save.txt"):
f = file("sn_save.txt", "w+")
if os.stat("sn_save.txt").st_size != 0:
self.list = open("sn_save.txt").read().split("///%")
def exit_save(self):
to_save = "///%".join(self.list_to_save)
file_ = open('sn_save.txt', 'w')
file_.write(to_save)
file_.close()
class StickyFrame(wx.Frame):
def __init__(self, parent, title, words=""):
wx.Frame.__init__(self, parent, title=title, size=(400,300))
self.control = wx.TextCtrl(self, style=wx.TE_MULTILINE, value=words)
self.Show(True)
filemenu= wx.Menu()
menuAbout = filemenu.Append(wx.ID_ABOUT,"&New Note","Opens a New Note")
menuExit = filemenu.Append(wx.ID_EXIT,"Save and E&xit"," Terminate the program")
menuBar = wx.MenuBar()
menuBar.Append(filemenu,"&File") # Adding the "filemenu" to the MenuBar
self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
self.Bind(wx.EVT_MENU, self.OnExit, menuExit)
self.Bind(wx.EVT_MENU, self.OnAbout, menuAbout)
self.Show(True)
def OnExit(self,e):
s.list_to_save.append(self.control.Value)
self.Close(True) # Close the frame.
def OnAbout(self,e):
frame = StickyFrame(None, 'Sticky Note')
app = wx.App(False)
s = StickyNotes()
s.run()
app.MainLoop()
atexit.register(s.exit_save)
| Ghrehh/stickynotes | stickynotes.py | stickynotes.py | py | 1,750 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.stat",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "wx.Frame",
"line_number": 28,... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.