seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
26023690910 | import matplotlib.pyplot as plt
import numpy as np
# Sample x densely from -10 to 10 so the curves look smooth.
x=np.arange(-10,10,0.01)
# Both denominators stay in [1, 3], so neither curve has a pole.
y=1/(np.sin(x)+2)
z=1/(np.cos(x)+2)
plt.plot(x,y,x,z) # draw both curves in one figure
fig2,(axs1,axs2)=plt.subplots(2,1) # allocate two axes laid out in a (2, 1) grid
axs1.plot(x,y)
axs2.plot(x,z) # plot each curve separately on its own axes
plt.show()
| suanhaitech/pythonstudy2023 | Wangwenbin/Matplotlib1.py | Matplotlib1.py | py | 388 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "numpy.arange",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_numb... |
29099471877 | from openpyxl import load_workbook, Workbook
from openpyxl.formatting.rule import ColorScaleRule
from openpyxl.styles import PatternFill, Font
def _cal_writer_final_report(barcode, ws_report, all_data, init_row, init_col, report_output):
    """Write one plate's calculation summary (avg/stdev per state, plus z-prime) to the report sheet.

    :param barcode: plate barcode, written bold+underlined as the section headline
    :param ws_report: worksheet the section is written into
    :param all_data: plate data; only all_data["calculations"] is read here
    :param init_row: first row of the section
    :param init_col: reference column; the headline goes one column to its left
    :param report_output: per-method/state boolean flags controlling what is written
    :return: the worksheet and the row just below the written section
    """
    row_counter = init_row
    ws_report.cell(column=-1 + init_col, row=row_counter, value=barcode).font = Font(b=True, underline="single")
    row_counter += 1
    for plate_analysed in all_data["calculations"]:
        # Removing other calculations than avg and stdev
        if plate_analysed != "other_data":
            # Checks to see if the overview of avg and stv should be included
            if report_output[plate_analysed]["overview"]:
                # Writes the analysed method in, if the overview is set to true
                ws_report.cell(column=-1 + init_col, row=row_counter, value=plate_analysed).font = Font(b=True)
                # row_counter += 1
            for state in all_data["calculations"][plate_analysed]:
                if report_output[plate_analysed][state]:
                    ws_report.cell(column=init_col, row=row_counter, value=state).font = Font(b=True)
                    for calc in all_data["calculations"][plate_analysed][state]:
                        # Writes avg and stdev including values
                        ws_report.cell(column=init_col + 1, row=row_counter, value=calc)
                        ws_report.cell(column=init_col + 2, row=row_counter,
                                       value=all_data["calculations"][plate_analysed][state][calc])
                        row_counter += 1
        else:
            if report_output["z_prime"]:
                ws_report.cell(column=init_col, row=row_counter,
                               value="z-Prime").font = Font(b=True)
                try:
                    ws_report.cell(column=init_col + 2, row=row_counter,
                                   value=all_data["calculations"][plate_analysed]["z_prime"])
                except KeyError:
                    # z_prime can be absent when it was never calculated for this run.
                    ws_report.cell(column=init_col + 2, row=row_counter,
                                   value="Z-Prime is not calculated for the plates")
                row_counter += 1
    # Leave one blank row between plate sections.
    row_counter += 1
    return ws_report, row_counter
def _well_writer_final_report(ws, hits, final_report_setup, init_row):
    """Write every hit well, grouped per barcode/method/threshold split, to the well-info sheet.

    Each barcode gets its own four-column-wide group; the row counter is reset
    to ``init_row`` for every barcode so the groups align horizontally.
    The worksheet is mutated in place; nothing is returned.

    :param ws: worksheet to write into
    :param hits: nested dict hits[barcode][method][split][well] -> value
    :param final_report_setup: supplies the per-method flags and the
        "pora_threshold" min/max written next to each split headline
    :param init_row: row every barcode group starts at
    """
    indent_col = 1
    row_counter = init_row
    for barcode in hits:
        # Writes headline for data inserts to see where the data is coming from
        ws.cell(column=indent_col, row=row_counter, value=barcode).font = Font(b=True, underline="single")
        row_counter += 1
        for method in hits[barcode]:
            if final_report_setup["methods"][method]:
                # writes method
                ws.cell(column=indent_col, row=row_counter, value=method).font = Font(b=True)
                row_counter += 1
                for split in hits[barcode][method]:
                    # Split headline with its min/max threshold values.
                    ws.cell(column=indent_col, row=row_counter, value=split).font = Font(b=True)
                    ws.cell(column=indent_col+1, row=row_counter,
                            value=final_report_setup["pora_threshold"][split]["min"]).font = \
                        Font(underline="single")
                    ws.cell(column=indent_col+2, row=row_counter,
                            value=final_report_setup["pora_threshold"][split]["max"]).font = \
                        Font(underline="single")
                    row_counter += 1
                    for well in hits[barcode][method][split]:
                        ws.cell(column=indent_col + 1, row=row_counter, value=well)
                        ws.cell(column=indent_col + 2, row=row_counter,
                                value=hits[barcode][method][split][well])
                        row_counter += 1
        # Next barcode starts a new column group back at the top row.
        indent_col += 4
        row_counter = init_row
def _get_data(all_plate_data, final_report_setup):
data_calc_dict = {}
temp_hits = {}
plate_counter = 0
all_states = []
all_methods = []
for barcode in all_plate_data:
plate_counter += 1
temp_hits[barcode] = {}
data_calc_dict[barcode] = {}
for method in all_plate_data[barcode]["plates"]:
if method != "other_data":
if method not in all_methods:
all_methods.append(method)
if final_report_setup["methods"][method]:
temp_hits[barcode][method] = {"low": {}, "mid": {}, "high": {}}
for well in all_plate_data[barcode]["plates"][method]["wells"]:
if well in all_plate_data[barcode]["plates"][method]["sample"]:
for split in final_report_setup["pora_threshold"]:
temp_well_value = all_plate_data[barcode]["plates"][method]["wells"][well]
if float(final_report_setup["pora_threshold"][split]["min"]) < float(temp_well_value) < \
float(final_report_setup["pora_threshold"][split]["max"]):
temp_hits[barcode][method][split][well] = temp_well_value
for method in all_plate_data[barcode]["calculations"]:
data_calc_dict[barcode][method] = {}
if method != "other_data":
for state in all_plate_data[barcode]["calculations"][method]:
if state not in all_states:
all_states.append(state)
data_calc_dict[barcode][method][state] = {}
for calc in all_plate_data[barcode]["calculations"][method][state]:
data_calc_dict[barcode][method][state][calc] = \
all_plate_data[barcode]["calculations"][method][state][calc]
else:
for other_calc in all_plate_data[barcode]["calculations"][method]:
data_calc_dict[barcode][method][other_calc] = \
all_plate_data[barcode]["calculations"][method][other_calc]
return temp_hits, data_calc_dict, plate_counter, all_states, all_methods
def _ws_creator(wb, name):
return wb.create_sheet(f"{name}_Matrix")
def _matrix_writer(ws, data_calc_dict, state, plate_counter, all_methods):
    """Write a plate-vs-plate comparison matrix per method for avg and stdev.

    For each method two blocks are laid out side by side: avg on the left and
    stdev shifted right by ``plate_counter + spacer`` columns. The collected
    values are then converted into percentage matrices by _matrix_calculator.

    :param ws: worksheet to write into
    :param data_calc_dict: calculations per barcode/method/state
    :param state: the state (e.g. "sample") the matrix is built for
    :param plate_counter: number of plates; sizes and offsets the blocks
    :param all_methods: methods to produce a matrix block for
    """
    init_row = 2
    init_col = 2
    spacer = 4
    # Leftmost column of the stdev block, to the right of the avg block.
    col_stdev = init_col + plate_counter + spacer
    col_counter = init_col + 1
    row_counter = init_row + 1
    col_stdev_counter = col_stdev + 1
    row_offset = init_row
    for method in all_methods:
        temp_avg_list = []
        temp_stdev_list = []
        # Remember this method's matrix origin before the counters advance.
        mw_col = col_counter
        mw_row = row_counter
        mw_col_stdev = col_stdev_counter
        for barcodes in data_calc_dict:
            # Writes Plate names in row and clm for avg
            ws.cell(column=init_col - 1, row=row_counter, value=barcodes).font = Font(b=True)
            ws.cell(column=col_counter, row=row_offset - 1, value=barcodes).font = Font(b=True)
            # Writes Plate names in row and clm for stdev
            ws.cell(column=col_stdev - 1, row=row_counter, value=barcodes).font = Font(b=True)
            ws.cell(column=col_stdev_counter, row=row_offset - 1, value=barcodes).font = Font(b=True)
            # Only the first method entry triggers the headline/value writes.
            for index_method, _ in enumerate(data_calc_dict[barcodes]):
                if index_method == 0:
                    # Writes method for avg
                    ws.cell(column=init_col, row=row_offset - 1, value=method).font = Font(b=True)
                    # Writes method for stdev
                    ws.cell(column=col_stdev, row=row_offset - 1, value=method).font = Font(b=True)
                    if method != "other_data":
                        for calc in data_calc_dict[barcodes][method][state]:
                            temp_value = data_calc_dict[barcodes][method][state][calc]
                            # gets avg values
                            if calc == "avg":
                                ws.cell(column=init_col, row=row_offset, value=calc).font = Font(b=True)
                                ws.cell(column=init_col, row=row_counter, value=temp_value)
                                ws.cell(column=col_counter, row=row_offset, value=temp_value)
                                temp_avg_list.append(temp_value)
                            elif calc == "stdev":
                                ws.cell(column=col_stdev, row=row_offset, value=calc).font = Font(b=True)
                                ws.cell(column=col_stdev, row=row_counter, value=temp_value)
                                ws.cell(column=col_stdev_counter, row=row_offset, value=temp_value)
                                temp_stdev_list.append(temp_value)
            # Sets offset for next loop, for writing headlines the right place
            col_counter += 1
            row_counter += 1
            col_stdev_counter += 1
        # calculate the % difference between avg for each plate
        _matrix_calculator(ws, mw_row, mw_col, temp_avg_list)
        # calculate the % difference between stdev for each plate
        _matrix_calculator(ws, mw_row, mw_col_stdev, temp_stdev_list)
        # makes sure that next loop is writen below the first method. One method per row, with avg and stdev for each.
        col_stdev = init_col + plate_counter + spacer
        col_counter = init_col + 1
        row_counter += spacer
        col_stdev_counter = col_stdev + 1
        row_offset += (plate_counter + spacer)
def _matrix_calculator(ws, row, col, temp_data_list):
start_row = row
start_col = col
for index_x, _ in enumerate(temp_data_list):
for index_y, _ in enumerate(temp_data_list):
try:
temp_value = (float(temp_data_list[index_x]) / float(temp_data_list[index_y])) * 100
except ZeroDivisionError:
temp_value = "Na"
ws.cell(column=start_col + index_x, row=start_row + index_y, value=temp_value)
def _z_prime(ws, data_calc_dict):
    """Write the per-plate z-prime values and their all-vs-all percentage matrix.

    :param ws: worksheet to write into
    :param data_calc_dict: per-barcode calculations; reads
        data_calc_dict[barcode]["other_data"]["z_prime"]
    """
    init_row = 2
    init_col = 2
    col_counter = init_col + 1
    row_counter = init_row + 1
    z_prime_list = []
    for barcodes in data_calc_dict:
        # Writes Plate names
        ws.cell(column=init_col-1, row=row_counter, value=barcodes).font = Font(b=True)
        ws.cell(column=col_counter, row=init_row-1, value=barcodes).font = Font(b=True)
        # Writes values for Z-Prime
        z_prime = data_calc_dict[barcodes]["other_data"]["z_prime"]
        ws.cell(column=init_col, row=row_counter, value=z_prime)
        ws.cell(column=col_counter, row=init_row, value=z_prime)
        col_counter += 1
        row_counter += 1
        z_prime_list.append(z_prime)
    col_counter = init_col + 1
    row_counter = init_row + 1
    for index_x, _ in enumerate(z_prime_list):
        for index_y, _ in enumerate(z_prime_list):
            try:
                temp_value = (z_prime_list[index_x] / z_prime_list[index_y]) * 100
            except ZeroDivisionError:
                # BUGFIX: a plate with z-prime 0 used to crash the report;
                # mirror _matrix_calculator and write "Na" instead.
                temp_value = "Na"
            ws.cell(column=col_counter + index_x, row=row_counter + index_y, value=temp_value)
def bio_final_report_controller(analyse_method, all_plate_data, output_file, final_report_setup):
    """Build the final bio-report workbook and save it to *output_file*.

    Creates the "Full report" calculation overview, the "Well Info" hit
    listing, one matrix sheet per enabled state and the "Z-Prime" sheet.

    :param analyse_method: NOTE(review): not used anywhere in this function --
        confirm whether it should influence the report contents.
    :param all_plate_data: data and calculations keyed by plate barcode
    :param output_file: path the workbook is saved to
    :param final_report_setup: flags/thresholds controlling report contents
    """
    wb = Workbook()
    ws_report = wb.active
    ws_report.title = "Full report"
    ws_well_info = wb.create_sheet("Well Info")
    ws_z_prime = wb.create_sheet("Z-Prime")
    # ws_minimum = wb.create_sheet("Minimum")
    # ws_maximum = wb.create_sheet("Maximum")
    init_row = 2
    init_col = 2
    row = init_row
    col = init_col
    # calc overview:
    for index, barcode in enumerate(all_plate_data):
        ws, row_counter = _cal_writer_final_report(barcode, ws_report, all_plate_data[barcode], row, col,
                                                   final_report_setup["calc"])
        # Writes 5 plates horizontal, before changing rows.
        col += 5
        if index % 5 == 0 and index > 0:
            row += row_counter
            col = init_col
    # gets data:
    temp_hits, data_calc_dict, plate_counter, all_states, all_methods = _get_data(all_plate_data, final_report_setup)
    # write well data
    _well_writer_final_report(ws_well_info, temp_hits, final_report_setup, init_row)
    # writes Matrix of data:
    # inside guard ! ! ! !
    # NOTE(review): leftover debug output -- consider removing.
    print(all_states)
    for states in all_states:
        if final_report_setup["full_report_matrix"][states]:
            _matrix_writer(_ws_creator(wb, states), data_calc_dict, states, plate_counter, all_methods)
    # writes Z-prime
    if final_report_setup["full_report_matrix"]["z_prime"]:
        _z_prime(ws_z_prime, data_calc_dict)
    wb.save(output_file)
| ZexiDilling/structure_search | report_setup.py | report_setup.py | py | 12,580 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "openpyxl.styles.Font",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Font",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Font",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "openpy... |
28653090658 | ##### Native libraries
##### Python Libraries
import numpy as np
from IPython.core import debugger
breakpoint = debugger.set_trace
##### Local libraries
import Utils_Data
from Timer import Timer
##### NOTE: To download the full dataset (which will take about 30 hours on wifi maybe less on ethernet)
##### set the filename_urls to train.npy, set num_labels to 14951, set the
##### for loop iterations to data_urls_train.size
##### Path to datasets
##### Path to datasets
path_urls = '../../data/image_retrieval/image_recognition/'
save_path = path_urls + 'images/'
# train.npy is the full dataset; the train_*.npy files are smaller subsets.
filename_urls = 'train.npy'
##### Dataset format parameters
## Number of labels to use out of all the available ones
## For train.npy (max = 14951)
## For train_100.npy (max = 79)
## For train_1000.npy (max = 692)
## For train_10000.npy (max = 3487)
num_labels=50
## Percent of entries to place in train set
train_size=0.9
## Percent of entries to place in test set
test_size=0.1
# breakpoint()
##### Load dataset
dataset = np.load(path_urls+filename_urls)
##### Split dataset in train and test containing the specified number of classes
## The following function returns all entries sorted for both train and test sets.
(data_urls_train, labels_train, imgid_train, data_urls_test, labels_test, imgid_test) = Utils_Data.FormatDataset(dataset, num_labels=num_labels, train_size=train_size, test_size=test_size)
########## DOWNLOAD TRAINING SET ##############
#### UNCOMMENT THE FOLLOWING SNIPPET TO DOWNLOAD THE TRAIN SET
# n_images = data_urls_train.size
# ##### Downloads Train set
# for i in range(0,n_images):
#     with Timer('Download Image Time'):
#         print("Image {} out of {}".format(i, n_images))
#         # image = Utils_Data.DownloadAndSaveImage(url=data_urls_train[i],out_dir=save_path,imgid=imgid_train[i])
#         image = Utils_Data.DownloadResizeAndSave(url=data_urls_train[i],out_dir=save_path,imgid=imgid_train[i])
########## DOWNLOAD TEST SET ##############
#### UNCOMMENT THE FOLLOWING SNIPPET TO DOWNLOAD THE TEST SET
n_images = data_urls_test.size
##### Downloads Test set
# NOTE(review): train and test images share save_path -- confirm imgids cannot collide.
for i in range(0,n_images):
    with Timer('Download Image Time'):
        print("Image {} out of {}".format(i, n_images))
        # image = Utils_Data.DownloadAndSaveImage(url=data_urls_train[i],out_dir=save_path,imgid=imgid_train[i])
        image = Utils_Data.DownloadResizeAndSave(url=data_urls_test[i],out_dir=save_path,imgid=imgid_test[i])
| BradleyAllanDavis/760-project | data_download/Example_DownloadDataset.py | Example_DownloadDataset.py | py | 2,412 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "IPython.core.debugger.set_trace",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "IPython.core.debugger",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "numpy.load",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "U... |
17430806092 | #!/usr/bin/python
# https://www.udemy.com/course/complete-python-developer-zero-to-mastery/
# 256. Building A Flask Server
# https://flask.palletsprojects.com/en/1.1.x/quickstart/
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types
# https://swapi.dev/ - Star Wars API server
# http://www.mashup-template.com/templates.html - Free HTML templates
# https://html5up.net/ - Free HTML templates
# https://robohash.org/ - Robot generating API
# We need to run:
# $ source ./venv/bin/activate
# $ export FLASK_APP=server.py
# $ export FLASK_ENV=development
# $ flask run
import os
import datetime
import csv
from flask import Flask, render_template, request, send_from_directory, redirect
app = Flask(__name__)  # WSGI application instance all route decorators attach to
@app.route('/')
def my_home():
    """Render the site home page."""
    # BUGFIX: removed leftover debug `print(render_template('index.html'))`,
    # which rendered the template a second time on every request.
    return render_template('index.html')
# Serve any template whose filename appears directly in the URL
# (e.g. /about.html renders templates/about.html).
@app.route('/<string:page_name>')
def html_page(page_name):
    """Render the template named by the URL path segment.

    NOTE(review): page_name comes straight from the URL; a missing template
    raises TemplateNotFound (HTTP 500) -- confirm that is acceptable.
    """
    return render_template(page_name)
# https://flask.palletsprojects.com/en/1.1.x/quickstart/#accessing-request-data
@app.route('/submit_form', methods=['POST', 'GET'])
def submit_form():
    """Persist a submitted contact form to the CSV database and redirect.

    Returns an error string for GET requests or when saving fails.
    """
    if request.method == 'POST':
        try:
            data = request.form.to_dict()
            write_to_csv(data)
            return redirect('/thankyou.html')
        # BUGFIX: was a bare `except:`, which also swallowed system-exiting
        # exceptions like KeyboardInterrupt/SystemExit.
        except Exception:
            return 'did not save to database'
    else:
        return 'something went wrong'
# https://flask.palletsprojects.com/en/1.1.x/patterns/favicon/
@app.route('/favicon.ico')
def favicon():
    """Serve the favicon from static/assets with the proper MIME type."""
    return send_from_directory(os.path.join(app.root_path, 'static', 'assets'),
                        'favicon.ico', mimetype='image/vnd.microsoft.icon')
def write_to_file(data):
    """Append one timestamped contact-form message to database.txt.

    :param data: mapping with "email", "subject" and "message" keys
    """
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    record = ", ".join([timestamp, data["email"], data["subject"], data["message"]])
    with open('database.txt', mode='a') as database:
        database.write(record + "\n")
def write_to_csv(data):
    """Append one timestamped contact-form message to database.csv.

    :param data: mapping with "email", "subject" and "message" keys
    """
    with open('database.csv', newline='', mode='a') as database:
        date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        email = data["email"]
        subject = data["subject"]
        message = data["message"]
        # BUGFIX: QUOTE_NONE with no escapechar raises csv.Error whenever a
        # field contains the delimiter, so any message with a comma failed to
        # save; QUOTE_MINIMAL quotes such fields instead.
        message_writer = csv.writer(database, delimiter=',',
                                    quotechar='"', quoting=csv.QUOTE_MINIMAL)
        message_writer.writerow([date, email, subject, message])
| olexandrch/UdemyCompletePythonDeveloper | Sec.19 Web Development with Python/portfolio/server.py | server.py | py | 2,518 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "flask.render_... |
19250585752 | # Modules which need to be installed
import irc.bot
from dotenv import load_dotenv
load_dotenv()
# Setup / included imports
import os
import commands
import asyncio
# Command prefix chat messages must start with (read from .env).
prefix = os.getenv('COMMANDPREFIX')
# Make sure the Twitch credentials have been added to the .env file
# NOTE(review): os.getenv returns None (not "") when a variable is absent
# entirely, so this check only catches empty values defined in .env --
# confirm whether missing variables should also abort.
if os.getenv('TWITCHUSERNAME') == "" or os.getenv('TWITCHTOKEN') == "":
    print("Please input your Twitch credentials in the .env file.")
    exit(0)
# Login to IRC as the streamer and listen for commands in Twitch Chat
class TwitchListener(irc.bot.SingleServerIRCBot):
    """IRC bot that joins a Twitch chat channel and dispatches prefixed commands."""

    def __init__(self, username, token, channel):
        # The token is passed as the third element of the server spec,
        # i.e. as the IRC server password.
        self.token = token
        self.channel = '#' + channel
        server = 'irc.chat.twitch.tv'
        port = 6667
        # Login to Twitch IRC
        print('Connecting to Twitch IRC: ' + server + ' on port ' + str(port))
        irc.bot.SingleServerIRCBot.__init__(self, [(server, port, token)], username, username)

    # Join this streamer's Twitch Chat channel
    def on_welcome(self, c, e):
        print('Joining ' + self.channel)
        c.join(self.channel)

    # Listen for messages, and if they start with the prefix, try to execute them as commands
    def on_pubmsg(self, c, e):
        if e.arguments[0][:1] == prefix:
            # Strip the prefix from the first word and treat it as the command name.
            cmd = e.arguments[0].split(' ')[0][1:]
            commands.handleCommand(cmd)
        return
# Instantiate the bot with credentials from .env; the username is reused as
# the channel name, i.e. the bot joins its own chat.
bot = TwitchListener(str(os.getenv('TWITCHUSERNAME')), str(os.getenv('TWITCHTOKEN')), str(os.getenv('TWITCHUSERNAME')))
bot.start() | R2D2VaderBeef/SectorsEdgeStreamControl | main.py | main.py | py | 1,555 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "irc.bot.bot",
"line_number"... |
42597128032 | import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn
df = pd.read_csv("insurance.csv")
# One-hot encode the categorical "region" column.
tem = pd.get_dummies(df["region"])
df.drop("region", axis=1, inplace=True)
df = pd.concat([df, tem], axis=1)
print(df.head(10))
# Use descriptive names instead of shadowing the built-in ``map``.
smoker_map = {"yes": 1, "no": 0}
df["smoker"] = df["smoker"].map(smoker_map)
sex_map = {"female": 0, "male": 1}
df["sex"] = df["sex"].map(sex_map)
print(df.head(10))
plt.figure(figsize=(20, 20))
sns.heatmap(df.corr(), annot=True, cmap="coolwarm", linewidths=2)
plt.show()
x = df["smoker"]
# NOTE(review): the target column is referred to as both "expenses" (here)
# and "charges" (below); only one can exist in insurance.csv -- confirm the
# actual column name against the dataset.
y = df["expenses"]
plt.figure(figsize=(12, 9))
plt.scatter(x, y)
plt.xlabel("Non Smoker Vs Smoker")
plt.ylabel("Charges")
Y = df["charges"]
X = df.drop("charges", axis=1)
from sklearn.model_selection import train_test_split
# Splitting the data into 85% for training and 15% for testing
x_train, x_test, y_train, y_test = train_test_split(X, Y, random_state=1, test_size=0.15)
from sklearn.linear_model import LinearRegression
# Training a multiple linear regression model
reg = LinearRegression().fit(x_train, y_train)
y_pred = reg.predict(x_test)
from sklearn.metrics import r2_score
# BUGFIX: bare expressions (notebook style) discard their result in a .py
# script, so the metrics were computed but never shown -- print them instead.
print("Linear regression R^2:", r2_score(y_test, y_pred))
from sklearn.preprocessing import PolynomialFeatures
# Expand the features to a 3rd-degree polynomial basis.
pol = PolynomialFeatures(degree=3)
P_X = pol.fit_transform(X)
# Train the same linear model on the polynomial features.
x_train, x_test, y_train, y_test = train_test_split(P_X, Y, random_state=1, test_size=0.15)
reg = LinearRegression().fit(x_train, y_train)
y_pred = reg.predict(x_test)
print("Polynomial regression R^2:", r2_score(y_test, y_pred))
# Cross-validating the score to check and avoid overfitting
from sklearn.model_selection import cross_val_score
c = cross_val_score(reg, P_X, Y, cv=4)
print("Cross-validation scores:", c)
# Final Mean Accuracy
print("Mean accuracy after cross validation is:", c.mean() * 100, end="%")
| manav88/Medical-cost-prediction | med_cost.py | med_cost.py | py | 1,956 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figu... |
34905834189 | import shutil
import tempfile
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from ..forms import PostForm
from ..models import Post
User = get_user_model()
# Temporary media root so uploaded test files never touch the real MEDIA_ROOT.
TEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)
# Minimal valid GIF byte payload used as an image-upload fixture.
small_gif = (
    b'\x47\x49\x46\x38\x39\x61\x02\x00'
    b'\x01\x00\x80\x00\x00\x00\x00\x00'
    b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
    b'\x00\x00\x00\x2C\x00\x00\x00\x00'
    b'\x02\x00\x01\x00\x00\x02\x02\x0C'
    b'\x0A\x00\x3B'
)
@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)
class PostFormTests(TestCase):
    """Tests for creating and editing posts via PostForm (media in a temp dir)."""

    @classmethod
    def setUpTestData(cls):
        # Shared fixtures: a user, an uploaded test GIF and an unbound form.
        cls.user = User.objects.create_user(username='username')
        cls.uploaded = SimpleUploadedFile(
            name='small.gif',
            content=small_gif,
            content_type='image/gif'
        )
        cls.form = PostForm()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        # Remove the temporary MEDIA_ROOT created for this test run.
        shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)

    def setUp(self):
        self.authorized_client = Client()
        self.authorized_client.force_login(self.user)

    def test_create_post(self):
        """A valid form creates a Post record."""
        # Count the existing Post records.
        posts_count = Post.objects.count()
        form_data = {
            'text': 'Тестовый пост',
            'image': PostFormTests.uploaded,
        }
        self.uploaded.seek(0)
        # Send the POST request.
        response = self.authorized_client.post(
            reverse('posts:post_create'),
            data=form_data,
            follow=True
        )
        # Check that the redirect happened.
        self.assertRedirects(response, reverse(
            'posts:profile',
            kwargs={'username': 'username'})
        )
        # Check that the number of posts increased.
        self.assertEqual(Post.objects.count(), posts_count + 1)
        # Check that a record with the expected id was created.
        self.assertTrue(
            Post.objects.filter(
                text='Тестовый пост',
                pk=1,
                image='posts/small.gif',
            ).exists()
        )

    def test_edit_post(self):
        """Editing a post updates its text and redirects to the detail page."""
        Post.objects.create(
            text='Тестовый пост',
            author=self.user,
            pk=1,
            image=self.uploaded,
        )
        form_data = {
            'text': 'Тестовый пост изменился',
        }
        # Send the POST request.
        response = self.authorized_client.post(
            reverse('posts:post_edit', kwargs={'post_id': 1}),
            data=form_data,
            follow=True
        )
        post_changed = Post.objects.get(pk=1)
        # Check that the redirect kept the same post id.
        self.assertRedirects(response, reverse(
            'posts:post_detail',
            kwargs={'post_id': 1})
        )
        self.assertEqual(post_changed.text, 'Тестовый пост изменился')
class CommentFormTests(TestCase):
    """Tests for adding comments to posts."""

    @classmethod
    def setUpTestData(cls):
        cls.user = User.objects.create_user(username='username')
        cls.guest_client = Client()
        cls.authorized_client = Client()
        cls.authorized_client.force_login(cls.user)
        Post.objects.create(
            text='Тестовый пост',
            author=cls.user,
            pk=1,
        )

    def test_add_comment(self):
        """Only an authorized user can comment on posts."""
        form_data = {
            'text': 'Тестовый комментарий',
        }
        response1 = self.authorized_client.post(
            reverse('posts:add_comment', kwargs={'post_id': 1}),
            data=form_data,
            follow=True
        )
        response2 = self.guest_client.post(
            reverse('posts:add_comment', kwargs={'post_id': 1}),
            data=form_data,
            follow=True
        )
        # The authorized client is redirected to the post detail page...
        self.assertRedirects(response1, reverse(
            'posts:post_detail',
            kwargs={'post_id': 1})
        )
        # ...while the anonymous client is redirected to the login page.
        self.assertRedirects(response2,
                             '/auth/login/?next=/posts/1/comment/'
                             )
| DianaKab/hw05_final_new | yatube/posts/tests/test_forms.py | test_forms.py | py | 4,715 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.BASE_DIR",
"line_number": 14,
"usage_type": "attribute"
},
{... |
6453212033 | from django.conf.urls import url
from testuser import views
# URL namespace used when reversing these routes (e.g. "test:login").
app_name = 'test'
urlpatterns = [
    # url(r'^$',views.logout, name = 'logout'),
    url(r'^$',views.loginIndex, name = 'loginIndex'),  # landing / login index page
    url(r'^login/$',views.login, name = 'login'),  # login form handler
    # url(r'^signUp/$',views.signup, name = 'signup'),
    # url(r'^forgotPass/$',views.forgot, name = 'forgot'),
    # url(r'^login/check/$',views.loginCheck, name = 'logincheck'),
    # url(r'^signUp/check/$',views.signupCheck, name = 'signupcheck'),
]
| baivarn-tjr/SYOT-python | SYOT/testuser/urls.py | urls.py | py | 503 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "testuser.views.loginIndex",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "testuser.views",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "djang... |
45017345126 | #!/usr/bin/env python3
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# DESCRIPTION:
#
# CALL SAMPLE:
# ~/data/solarity/sit-raspi/modbus/direct_marketing_interface.py --host_ip '192.168.0.34' --host_mac '00:90:E8:7B:76:9C' -v -t
#
# REQUIRE
#
# CALL PARAMETERS:
# 1)
#
# @author: Philippe Gachoud
# @creation: 20200408
# @last modification:
# @version: 1.0
# @URL: $URL
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# INCLUDES
try:
import sys
import os, errno
sys.path.append(os.path.join(os.path.dirname(__file__), '../lib')) #the way to import directories
from sit_logger import SitLogger
from pymodbus.constants import Endian
from sit_modbus_device import SitModbusDevice #from file_name import ClassName
from sit_modbus_register import SitModbusRegister
from inverter_manager import InverterManager
#import sitmodbus#, SitModbusRegister
import logging # http://www.onlamp.com/pub/a/python/2005/06/02/logging.html
from logging import handlers
import argparse
#sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'pysunspec'))
from datetime import datetime, date, time, timedelta
except ImportError as l_err:
print("ImportError: {0}".format(l_err))
raise l_err
class DirectMarketerInterface(InverterManager):
    """Modbus/TCP interface to the direct-marketer output-limitation registers.

    Exposes the four output-limitation registers (device doc p.44) of an SMA
    Inverter Manager and a small CLI (see init_arg_parse) to read them.
    """

    # CONSTANTS
    DEFAULT_SLAVE_ADDRESS = 200

    def __init__(self, a_slave_address=DEFAULT_SLAVE_ADDRESS):
        """Parse CLI args, initialise the modbus connection and the registers.

        :param a_slave_address: default modbus unit id; may be overridden with
            the -u/--slave_address CLI option.
        """
        try:
            self.init_arg_parse()
            l_slave_address = self.DEFAULT_SLAVE_ADDRESS
            if self._args.slave_address:
                if self.valid_slave_address(self._args.slave_address):
                    self._slave_address = int(self._args.slave_address)
                    # NOTE(review): l_slave_address is not updated here, so the
                    # CLI value never reaches super().__init__ -- confirm intent.
            super().__init__(l_slave_address, a_port=self.DEFAULT_MODBUS_PORT, an_ip_address=self._args.host_ip)
            self._logger = SitLogger().new_logger(__name__, self._args.host_mac)
            self._init_sit_modbus_registers()
        except OSError as l_e:
            # BUGFIX: this handler referenced an undefined name `e` (NameError)
            # and "%"-formatted a string without a placeholder (TypeError).
            # It also used self._logger, which does not exist yet when the
            # logger setup itself raised (e.g. rolling file appender failure),
            # so report via print instead.
            print('init-> OSError, probably rollingfileAppender: %s' % (l_e))
            if l_e.errno != errno.ENOENT:
                raise l_e
        except Exception as l_e:
            print('Error in init: %s' % (l_e))
            raise l_e

    def _init_sit_modbus_registers(self):
        """Initializes self._sit_modbus_registers with the four registers of doc p.44."""
        self.add_modbus_register('OutLimitPerc', 'Specified output limitation through direct marketer n% (0-10000)', 1, SitModbusRegister.REGISTER_TYPE_INT_16, SitModbusRegister.ACCESS_MODE_RW, 'uint16')
        self.add_modbus_register('OutLimitPercMan', 'Manual output limitation that has been set via Sunspec Modbus', 2, SitModbusRegister.REGISTER_TYPE_INT_16, SitModbusRegister.ACCESS_MODE_R, 'uint16')
        self.add_modbus_register('OutLimitPercIoBox', 'Output limitation through the electric utility company that has been set via the IO box.', 3, SitModbusRegister.REGISTER_TYPE_INT_16, SitModbusRegister.ACCESS_MODE_R, 'uint16')
        self.add_modbus_register('OutLimitMin', 'Minimum of all output limitations. The nominal PV system power is derated to this value.', 4, SitModbusRegister.REGISTER_TYPE_INT_16, SitModbusRegister.ACCESS_MODE_R, 'uint16')

    def init_arg_parse(self):
        """Define and parse the command-line arguments into self._args."""
        self._parser = argparse.ArgumentParser(description='Actions with Inverter Manager through TCP')
        self._parser.add_argument('-v', '--verbose', help='increase output verbosity', action="store_true")
        self._parser.add_argument('-t', '--test', help='Runs test method', action="store_true")
        self._parser.add_argument('-u', '--slave_address', help='Slave address of modbus device', nargs='?')
        l_required_named = self._parser.add_argument_group('required named arguments')
        l_required_named.add_argument('-i', '--host_ip', help='Host IP', nargs='?', required=True)
        l_required_named.add_argument('-m', '--host_mac', help='Host MAC', nargs='?', required=True)
        self._args = self._parser.parse_args()

    def execute_corresponding_args(self):
        """Execute the actions requested by the parsed CLI arguments."""
        if self._args.verbose:
            self._logger.setLevel(logging.DEBUG)
        else:
            # BUGFIX: both branches previously set DEBUG, which made the
            # --verbose flag a no-op.
            self._logger.setLevel(logging.INFO)
        if self._args.test:
            self.test()

    def test(self):
        """Connect, read every configured modbus register and disconnect."""
        try:
            self.connect()
            self.read_all_sit_modbus_registers()
            print("################# BEGIN #################")
            print("################# END #################")
        except Exception as l_e:
            self._logger.exception("Exception occured: %s" % (l_e))
            print('Error: %s' % (l_e))
            raise l_e
        finally:
            self.disconnect()
"""
Main method
"""
def main():
#logging.basicConfig(level=logging.DEBUG, stream=sys.stdout, filemode="a+", format="%(asctime)-15s %(levelname)-8s %(message)s")
logger = logging.getLogger(__name__)
try:
l_obj = DirectMarketerInterface()
l_obj.execute_corresponding_args()
# l_id.test()
pass
except KeyboardInterrupt:
logger.exception("Keyboard interruption")
except Exception:
logger.exception("Exception occured")
finally:
logger.info("Main method end -- end of script")
if __name__ == '__main__':
main()
| phgachoud/sty-pub-raspi-modbus-drivers | sma/direct_marketing_interface.py | direct_marketing_interface.py | py | 8,844 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
17169060948 | from django.urls import path
from . import views
# URL routes for the Batch app. Route purposes below are inferred from the
# view identifiers -- confirm against views.py.
urlpatterns=[
    path('sub/',views.SubjectVW,name='sub'),                    # subject view
    path('trainer/',views.TrainerVW,name='trainer'),            # trainer view
    path('profile/',views.TranierDisplay,name='profile'),       # trainer profile display
    path('batchvw/',views.BatchVW,name='batchvw'),              # batch view
    path('bdisplay/',views.BatchDisplay,name='bdisplay'),       # batch display
    path('trainerupdate/<pk>/',views.TrainerUP,name='trainerupdate'),  # update trainer by primary key
    path('Home/',views.Home,name='Home'),                       # home page
]
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
24923354544 | from pytesseract import Output
import pytesseract
import argparse
import imutils
import cv2

# Rotate an image from disk by a given angle and write the result back.
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--image", required=True, help="path to input image")
parser.add_argument("-o", "--output", required=False, help="path to output image. override if not given.")
parser.add_argument("-a", "--angle", required=True, help="rotation angle", type=int)
opts = vars(parser.parse_args())

source_img = cv2.imread(opts["image"])
if source_img is None:
    exit("Thats not an image =(")

# Rotate without clipping corners, then decide where to write the result.
angle = opts["angle"]
result = imutils.rotate_bound(source_img, angle=opts["angle"])

destination = opts.get("output")
if destination:
    message = f"Saving rotated image (by {angle} degrees) into: {destination}"
else:
    destination = opts["image"]
    message = f"Overwriting rotated image (by {angle} degrees) into: {destination}"
print(message)
cv2.imwrite(destination, result)
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "imutils.rotate_bound",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
... |
40761579635 | """Module containing the Fetcher Class to get financial
data of given ticker using the yfinance package."""
from datetime import datetime, timedelta
import yfinance as yf
import pandas as pd
class Fetcher:
    """Fetches historical market data for a ticker via yfinance.

    Responsibilities:
      1. Validate the CLI arguments before the instance is created.
      2. Pull the price history and reject empty / single-point results.

    Attributes:
        _ticker: ticker symbol string.
        _start_date: first day of the requested window (datetime).
        _end_date: last requested day plus one day, compensating a
            yfinance off-by-one (it tracks until one day before `end`).
    """

    def __init__(self, args: dict) -> None:
        fetcher_args = self._check_args_validity(args)
        self._ticker = fetcher_args["ticker"]
        self._start_date = fetcher_args["start_date"]
        self._end_date = fetcher_args["end_date"]

    def _check_args_validity(self, args: dict) -> dict:
        """Validate the CLI arguments and normalise them for yfinance.

        Args:
            args: dict with keys 'ticker', 'b' (begin date, YYYYMMDD) and
                'e' (end date, YYYYMMDD, or None meaning "now").

        Returns:
            dict with 'ticker', 'start_date' and 'end_date' keys.

        Raises:
            ValueError: if a date is malformed, the start date is in the
                future, or the end date precedes the start date.
        """
        ticker = args["ticker"]
        # strptime raises ValueError on malformed date strings.
        start_date = datetime.strptime(args["b"], "%Y%m%d")
        end_date = datetime.strptime(args["e"], "%Y%m%d") \
            if args["e"] is not None else datetime.now()
        # Compensate for a yfinance quirk: the API tracks until one day
        # before the requested end date.
        end_date += timedelta(days = 1)
        if start_date > datetime.now():
            raise ValueError("Start date cannot be after current time")
        if start_date > end_date:
            raise ValueError("End Date is earlier than Start Date")
        return {
            "ticker": ticker,
            "start_date": start_date,
            "end_date": end_date,
        }

    def fetch_data(self) -> pd.DataFrame:
        """Download the price history and sanity-check it.

        Returns:
            DataFrame of financial data ordered from earliest to latest.

        Raises:
            ValueError: when the start date is too distant for yfinance.
            Exception: when no data point (or only one) is available.
        """
        tracker = yf.Ticker(self._ticker)
        try:
            data: pd.DataFrame = tracker.history(
                start = self._start_date,
                end = self._end_date
            )[self._start_date : self._end_date]
            if len(data) == 0:
                raise Exception("No data available for given ticker.")
            if len(data) == 1:
                raise Exception("Only 1 data point seen. Check time period.")
            return data
        # OverflowError can be raised when yfinance converts very old
        # dates to epoch seconds (see yfinance/base.py); suggest a more
        # recent start date instead.
        except OverflowError as err:
            raise ValueError(
                "Start date too distant. Try a start date within 50 years."
            ) from err
        # NOTE: the original re-raised BaseException via a no-op
        # `except BaseException as err: raise err` clause; other
        # exceptions now simply propagate, which is equivalent.
| webclinic017/YSC4228-QuantFin | scrape_mkt_data/tools/fetcher.py | fetcher.py | py | 3,536 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 43,
"usage_type": "call"
},
{
"api_name"... |
29541301753 | from django.contrib import admin, messages
from .models import Poll, Question, Choice
class ChoiceInline(admin.StackedInline):
    # Edit answer choices inline on the parent admin page.
    model = Choice
    extra = 0  # no blank extra forms by default


class QuestionInline(admin.StackedInline):
    # Edit questions inline; the question type cannot be changed here.
    model = Question
    readonly_fields = ['question_type']
    extra = 0
class PollAdmin(admin.ModelAdmin):
    """Admin for Poll; save_model drives the status/timestamp lifecycle."""
    inlines = [QuestionInline]
    fieldsets = [
        (None, {'fields': ['name', 'slug', 'status', 'description']}),
        ('DATE INFO', {'fields': [('starting', 'finished')]})
    ]
    prepopulated_fields = {'slug': ('name',)}
    # Timestamps are set only by the lifecycle logic below.
    readonly_fields = [
        'starting',
        'finished',
    ]

    def save_model(self, request, obj, form, change):
        """Save Model override for access control of the poll"""
        import datetime

        def get_message(msg, type):
            # Flash a message to the admin user; a single '!' is appended.
            messages.add_message(
                request,
                type,
                f'{msg}!'
            )
        # BUG FIX: callers previously passed strings that already ended
        # with '!', so users saw doubled punctuation ("Poll has started!!").
        if not obj.starting and obj.status == 'IN_PROGRESS':
            obj.starting = datetime.datetime.now()
            get_message('Poll has started', messages.SUCCESS)
            obj.save()
        if obj.starting and not obj.finished and obj.status == 'FINISHED':
            obj.finished = datetime.datetime.now()
            get_message('Poll has finished', messages.SUCCESS)
            obj.save()
        if not (obj.starting or obj.finished) and obj.status != 'WAITING':
            # Status advanced without timestamps being set: force it back.
            obj.status = 'WAITING'
            get_message('Woo Wee Woo Waa! Error', messages.ERROR)
            obj.save()
        if not obj.id:
            # A new object that matched no branch above still needs saving.
            obj.save()
class QuestionAdmin(admin.ModelAdmin):
    # Answer choices are edited inline under each question.
    inlines = [ChoiceInline]
    # def save_model(self, request, obj, form, change):
        # When Admin choose type of the question is text, answer choices are removing
        # choices = Choice.objects.filter(question=obj)
        # if obj.question_type == '1' and choices:
        #     choices.delete()
        # obj.save()


# Register the models with the default admin site.
admin.site.register(Poll, PollAdmin)
admin.site.register(Question, QuestionAdmin)
admin.site.register(Choice)
| RamilPowers/poll_app | api/admin.py | admin.py | py | 2,063 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.admin.StackedInline",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "models.Choice",
"line_number": 6,
"usage_type": "name"
},
{
"api_name"... |
17549816996 | from flask import Flask, render_template, request
from tensorflow.keras.layers import Dense, Embedding, Bidirectional, LSTM, Concatenate, Dropout
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras import Input, Model
import gensim
import numpy as np
import BahdanauAttention #모델.py 불러오기
from konlpy.tag import Mecab
import pickle
import tensorflow as tf
import re
lstm_model = BahdanauAttention.BahdanauAttention(64)  # attention layer from BahdanauAttention.py

app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False  # keep Korean text intact in JSON responses

# Pre-trained resources: word2vec embeddings, Korean morphological analyser,
# and the fitted Keras tokenizer.
wv_model = gensim.models.Word2Vec.load('model/aihub_review_6.model')
mecab = Mecab(dicpath=r"C:\mecab\mecab-ko-dic")  # Mecab dictionary path (Windows install)
tokenizer = pickle.load(open('model/tokenizer.pickle','rb'))

############ model section
max_len = 100
EMBEDDING_DIM = 100
sequence_input = Input(shape=(max_len,), dtype='int32')

VOCAB_SIZE = len(tokenizer.index_word) + 1
EMBEDDING_DIM = 100
embedding_matrix = np.zeros((VOCAB_SIZE, EMBEDDING_DIM))

# Walk the tokenizer's vocabulary and copy each word's 100-dim word2vec vector.
for word, idx in tokenizer.word_index.items():
    embedding_vector = wv_model[word] if word in wv_model else None
    if embedding_vector is not None:
        embedding_matrix[idx] = embedding_vector

embedded_sequences = Embedding(VOCAB_SIZE,
                               EMBEDDING_DIM,
                               input_length=max_len,
                               weights=[embedding_matrix], # seed weights with the word2vec matrix
                               trainable=False # keep the embedding layer frozen during training
                               )(sequence_input)
# embedded_sequences = Embedding(vocab_size, 128, input_length=max_len, mask_zero = True)(sequence_input)

# Two stacked bidirectional LSTMs; the second also returns its states for attention.
lstm = Bidirectional(LSTM(64, dropout=0.5, return_sequences=True))(embedded_sequences)
lstm, forward_h, forward_c, backward_h, backward_c = Bidirectional(
    LSTM(64, dropout=0.5, return_sequences=True, return_state=True))(lstm)

state_h = Concatenate()([forward_h, backward_h]) # hidden state
state_c = Concatenate()([forward_c, backward_c]) # cell state

attention = lstm_model # attention layer (weight size 64) built above
context_vector, attention_weights = attention(lstm, state_h)
dense1 = Dense(20, activation="relu")(context_vector)
dropout = Dropout(0.5)(dense1)
output = Dense(1, activation="sigmoid")(dropout)
model = Model(inputs=sequence_input, outputs=output)
model.load_weights('model/best_model.h5')

# Candidate Korean stopwords -- NOTE(review): this list is defined but never
# applied in sentiment_predict below; confirm whether filtering was intended.
stopwords = ['도', '는', '다', '의', '가', '이', '은', '한', '에', '하', '고', '을', '를', '인', '듯', '과', '와', '네', '들', '듯', '지', '임', '게', '만', '게임', '겜', '되', '음', '면']
def sentiment_predict(new_sentence):
    """Return the model's abusive-language probability for *new_sentence* (0..1, 2 decimals)."""
    # Strip everything except Korean characters and spaces.
    new_sentence = re.sub(r'[^ㄱ-ㅎㅏ-ㅣ가-힣 ]','', new_sentence)
    new_sentence = mecab.morphs(new_sentence) # tokenize into morphemes
    # NOTE(review): the original comment said "stopword removal", but this
    # copies every token unchanged -- the module-level `stopwords` list is
    # never used. Confirm whether filtering was intended.
    new_sentence = [word for word in new_sentence]
    encoded = tokenizer.texts_to_sequences([new_sentence]) # integer-encode
    pad_new = pad_sequences(encoded, maxlen = max_len,padding='post') # pad to fixed length
    score = float(model.predict(pad_new)) # model outputs a single sigmoid probability
    return round(score, 2)
    # if(score > 0.5):
    #     print("{:.2f}% likely to be abusive language".format(score * 100))
    # else:
    #     print("{:.2f}% likely to be non-abusive".format((1 - score) * 100))
@app.route('/', methods=['GET','POST'])
def test():
    """Render the landing page with the comment input form."""
    return render_template('user.html')


@app.route('/post', methods=['GET','POST'])
def post():
    """Form-post endpoint: score the submitted text and render the result page."""
    original_test = request.form['test']
    score = sentiment_predict(original_test)
    return render_template('post.html', score=score)
@app.route('/ajax_model', methods=['GET','POST'])
def ajax_model():
    """AJAX endpoint: score the JSON payload and return the percentage as text."""
    payload_text = request.json['send_data']
    return str(sentiment_predict(payload_text) * 100)


if __name__ == '__main__':
    app.run()
{
"api_name": "BahdanauAttention.BahdanauAttention",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "gensim.models.Word2Vec.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_n... |
42549531170 | ### This file has been adopted from
### https://github.com/openlawlibrary/pygls/blob/master/examples/json-extension/server/server.py
import asyncio
from bisect import bisect
from cromwell_tools import api as cromwell_api
from cromwell_tools.cromwell_auth import CromwellAuth
from cromwell_tools.utilities import download
from functools import wraps
from pygls.features import (
CODE_ACTION,
DEFINITION,
REFERENCES,
TEXT_DOCUMENT_DID_OPEN,
TEXT_DOCUMENT_DID_CHANGE,
TEXT_DOCUMENT_DID_SAVE,
TEXT_DOCUMENT_WILL_SAVE,
WORKSPACE_DID_CHANGE_CONFIGURATION,
WORKSPACE_DID_CHANGE_WATCHED_FILES,
)
from pygls.server import LanguageServer
from pygls.types import (
CodeActionParams,
ConfigurationItem,
ConfigurationParams,
Diagnostic,
DiagnosticSeverity,
DidChangeConfigurationParams,
DidOpenTextDocumentParams,
DidChangeTextDocumentParams,
DidSaveTextDocumentParams,
WillSaveTextDocumentParams,
TextDocumentPositionParams,
DidChangeWatchedFiles,
FileChangeType,
MessageType,
Location,
Position,
Range,
)
from os import environ, name as platform, pathsep
from pathlib import Path
import re, sys
from requests import HTTPError
from threading import Timer
from time import sleep
from typing import Callable, Dict, Iterable, List, Set, Tuple, Union
from urllib.parse import urlparse
import WDL
from WDL import SourceNode, SourcePosition, Lint
PARSE_DELAY_SEC = 0.5 # delay parsing of WDL until no more keystrokes are sent


class Server(LanguageServer):
    # Identifiers for the configuration section and custom commands.
    SERVER_NAME = 'wdl'
    CONFIG_SECTION = SERVER_NAME
    CMD_RUN_WDL = SERVER_NAME + '.run'

    def __init__(self):
        super().__init__()
        # Per-workspace-folder cache of directories containing .wdl files.
        self.wdl_paths: Dict[str, Set[str]] = dict()
        # Per-document map: struct type id -> position of its definition.
        self.wdl_types: Dict[str, Dict[str, SourcePosition]] = dict()
        # Per-document map: reference position -> definition position.
        self.wdl_defs: Dict[str, Dict[SourcePosition, SourcePosition]] = dict()
        # Per-document map: definition position -> all reference positions.
        self.wdl_refs: Dict[str, Dict[SourcePosition, List[SourcePosition]]] = dict()
        # Per-document sorted list of every node position, for bisect lookup.
        self.wdl_symbols: Dict[str, List[SourcePosition]] = dict()
        # Workflow ids the user asked to abort (consumed by run_wdl's loop).
        self.aborting_workflows: Set[str] = set()

    def catch_error(self, log = False):
        """Decorator factory: report any exception raised by the wrapped handler.

        With log=True the error goes to the client log; otherwise it is
        shown as a popup message. The wrapped call then returns None.
        """
        def decorator(func: Callable):
            @wraps(func)
            def wrapper(*args, **kwargs):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    if log:
                        self.show_message_log(str(e), MessageType.Error)
                    else:
                        self.show_message(str(e), MessageType.Error)
            return wrapper
        return decorator


server = Server()
def _get_client_config(ls: Server):
    # Ask the client for the 'wdl' configuration section (blocking call).
    config = ls.get_configuration(ConfigurationParams([
        ConfigurationItem(section=Server.CONFIG_SECTION)
    ])).result()
    return config[0]
# https://gist.github.com/walkermatt/2871026
def debounce(delay_sec: float, id_arg: Union[int, str]):
    """Postpone execution of the wrapped function until *delay_sec* seconds
    have elapsed without another invocation sharing the same key.

    The key is read from positional argument index ``id_arg`` when it is an
    int, otherwise from the keyword argument named ``id_arg``. Pending
    timers are kept on the wrapper in its ``timers`` dict.
    """
    def decorator(func: Callable):
        @wraps(func)
        def debounced(*args, **kwargs):
            timers = getattr(debounced, 'timers', None)
            if timers is None:
                timers = dict()
                debounced.timers = timers
            key = args[id_arg] if isinstance(id_arg, int) else kwargs[id_arg]
            pending = timers.get(key)
            if pending is not None:
                pending.cancel()
            fresh = Timer(delay_sec, lambda: func(*args, **kwargs))
            timers[key] = fresh
            fresh.start()
        return debounced
    return decorator
@debounce(PARSE_DELAY_SEC, 1)
def parse_wdl(ls: Server, uri: str):
    """Parse and validate a WDL document (debounced per uri); publish diagnostics."""
    ls.show_message_log('Validating ' + uri, MessageType.Info)
    diagnostics, wdl = _parse_wdl(ls, uri)
    ls.publish_diagnostics(uri, diagnostics)
    ls.show_message_log(
        '{} {}'.format('Valid' if wdl else 'Invalid', uri),
        MessageType.Info if wdl else MessageType.Warning
    )


def _parse_wdl(ls: Server, uri: str):
    """Load and typecheck the document; return (diagnostics, doc-or-None).

    On success the per-document navigation caches (types, defs, refs,
    symbols) are refreshed and lint warnings are returned as diagnostics.
    """
    try:
        paths = _get_wdl_paths(ls, uri)
        doc = asyncio.run(
            WDL.load_async(uri, path=paths, read_source=_read_source(ls))
        )
        types = _get_types(doc.children, dict())
        ls.wdl_types[uri] = types
        ls.wdl_defs[uri], ls.wdl_refs[uri] = _get_links(doc.children, types, dict(), dict())
        # Kept sorted so _find_symbol can bisect into the list.
        ls.wdl_symbols[uri] = sorted(_get_symbols(doc.children, []))
        return list(_lint_wdl(ls, doc)), doc
    except WDL.Error.MultipleValidationErrors as errs:
        return [_diagnostic_err(e) for e in errs.exceptions], None
    except WDLError as e:
        return [_diagnostic_err(e)], None
    except Exception as e:
        # Unexpected failure: log it but publish no diagnostics.
        ls.show_message_log(str(e), MessageType.Error)
        return [], None


def _read_source(ls: Server):
    """Build a read_source callback that serves WDL text from open editor buffers."""
    async def read_source(uri: str, path, importer):
        uri = await WDL.resolve_file_import(uri, path, importer)
        if uri.startswith('/'):
            uri = 'file://' + uri
        source = ls.workspace.get_document(uri).source
        return WDL.ReadSourceResult(source_text=source, abspath=uri)
    return read_source
def _get_symbols(nodes: Iterable[SourceNode], symbols: List[SourcePosition]):
    """Depth-first collect the SourcePosition of every node into *symbols*.

    The accumulator list is mutated in place and also returned.
    """
    for current in nodes:
        symbols.append(current.pos)
        _get_symbols(current.children, symbols)
    return symbols
# find SourcePosition as the minimum bounding box for cursor Position
def _find_symbol(ls: Server, uri: str, p: Position):
    """Return the smallest cached SourcePosition containing cursor *p*, or None."""
    if uri not in ls.wdl_symbols:
        return
    symbols = ls.wdl_symbols[uri]
    best_score = (sys.maxsize, sys.maxsize)
    best_sym: SourcePosition = None
    # LSP positions are 0-based; WDL SourcePositions are 1-based.
    line = p.line + 1
    col = p.character + 1
    # Bisect to the first candidate near the cursor's line (assumes
    # SourcePosition sorts by start location -- matches the sorted()
    # in _parse_wdl).
    min_pos = SourcePosition(uri, uri, line, 0, line, 0)
    i = bisect(symbols, min_pos)
    while i < len(symbols):
        sym = symbols[i]
        if sym.line > line or (sym.line == line and sym.column > col):
            # Symbol starts after the cursor: no later symbol can contain it.
            break
        elif sym.end_line > line or (sym.end_line == line and sym.end_column >= col):
            # Symbol spans the cursor; prefer the tightest (smallest) span.
            score = (sym.end_line - sym.line, sym.end_column - sym.column)
            if score <= best_score:
                best_score = score
                best_sym = sym
        i += 1
    return best_sym
def _get_types(nodes: Iterable[SourceNode], types: Dict[str, SourcePosition]):
    """Collect struct type definitions: type_id -> definition position."""
    for node in nodes:
        if isinstance(node, WDL.StructTypeDef):
            types[node.type_id] = node.pos
        _get_types(node.children, types)
    return types


def _get_links(
    nodes: Iterable[SourceNode],
    types: Dict[str, SourcePosition],
    defs: Dict[SourcePosition, SourcePosition],
    refs: Dict[SourcePosition, List[SourcePosition]],
):
    """Build go-to-definition and find-references maps for the document tree.

    defs: reference position -> definition position.
    refs: definition position -> list of reference positions.
    """
    for node in nodes:
        source: SourcePosition = None
        if isinstance(node, WDL.Call):
            # A call site links to the callee task/workflow definition.
            source = node.callee.pos
        elif isinstance(node, WDL.Decl) and isinstance(node.type, WDL.Type.StructInstance):
            # A struct-typed declaration links to the struct definition.
            source = types[node.type.type_id]
        elif isinstance(node, WDL.Expr.Ident):
            ref = node.referee
            if isinstance(ref, WDL.Tree.Gather):
                # Scatter-gathered values: link to the ultimate declaration.
                source = ref.final_referee.pos
            else:
                source = ref.pos
        if source is not None:
            defs[node.pos] = source
            refs.setdefault(source, []).append(node.pos)
        _get_links(node.children, types, defs, refs)
    return defs, refs
# Either a single definition position or the list of reference positions.
SourceLinks = Union[SourcePosition, List[SourcePosition]]


def _find_links(ls: Server, uri: str, pos: Position, links: Dict[str, Dict[SourcePosition, SourceLinks]]):
    """Look up the link entry (definition or references) for the symbol under *pos*."""
    symbol = _find_symbol(ls, uri, pos)
    if (symbol is None) or (uri not in links):
        return
    symbols = links[uri]
    if symbol in symbols:
        return symbols[symbol]


def _find_def(ls: Server, uri: str, pos: Position):
    # Go-to-definition: a single Location, or None when nothing matches.
    link = _find_links(ls, uri, pos, ls.wdl_defs)
    if link is not None:
        return Location(link.abspath, _get_range(link))


def _find_refs(ls: Server, uri: str, pos: Position):
    # Find-references: a list of Locations, or None when nothing matches.
    links = _find_links(ls, uri, pos, ls.wdl_refs)
    if links is not None:
        return [Location(link.abspath, _get_range(link)) for link in links]
def _lint_wdl(ls: Server, doc: WDL.Document):
    """Yield lint warnings for *doc* as Warning-severity diagnostics."""
    _check_linter_path()
    warnings = Lint.collect(Lint.lint(doc, descend_imports=False))
    _check_linter_available(ls)
    for pos, _, msg, _ in warnings:
        yield _diagnostic(msg, pos, DiagnosticSeverity.Warning)


def _check_linter_path():
    """On POSIX, ensure /usr/local/bin is on PATH so shellcheck can be found.

    The PATH surgery runs at most once per process (guarded by a
    function attribute).
    """
    if getattr(_check_linter_path, 'skip', False):
        return
    LOCAL_BIN = '/usr/local/bin'
    PATH = environ['PATH'].split(pathsep)
    if platform == 'posix' and LOCAL_BIN not in PATH:
        environ['PATH'] = pathsep.join([LOCAL_BIN] + PATH)
    _check_linter_path.skip = True


def _check_linter_available(ls: Server):
    """Warn the user (once) if the WDL linter could not find shellcheck."""
    if getattr(_check_linter_available, 'skip', False):
        return
    if not Lint._shellcheck_available:
        ls.show_message('''
        WDL task command linter is not available on the system PATH.
        Please install ShellCheck and/or add it to the PATH:

        https://github.com/koalaman/shellcheck#installing
        ''', MessageType.Warning)
    _check_linter_available.skip = True
def _get_wdl_paths(ls: Server, wdl_uri: str, reuse_paths = True) -> List[str]:
    """Collect directories containing .wdl files, used for import resolution.

    Scans every workspace folder that contains *wdl_uri*; results are
    cached per folder unless reuse_paths is False.
    """
    ws = ls.workspace
    if ws.folders:
        ws_uris = [f for f in ws.folders if wdl_uri.startswith(f)]
    elif ws.root_uri:
        ws_uris = [ws.root_uri]
    else:
        ws_uris = []
    wdl_paths: Set[str] = set()
    for ws_uri in ws_uris:
        if reuse_paths and (ws_uri in ls.wdl_paths):
            ws_paths = ls.wdl_paths[ws_uri]
        else:
            ws_paths: Set[str] = set()
            ws_root = Path(urlparse(ws_uri).path)
            # Record the parent directory of every .wdl file in the tree.
            for p in ws_root.rglob('*.wdl'):
                ws_paths.add(str(p.parent))
            ls.wdl_paths[ws_uri] = ws_paths
        wdl_paths.update(ws_paths)
    return list(wdl_paths)
# Exception types raised by miniwdl for user-facing document problems.
WDLError = (WDL.Error.ImportError, WDL.Error.SyntaxError, WDL.Error.ValidationError)


def _diagnostic(msg: str, pos: SourcePosition = None, severity = DiagnosticSeverity.Error):
    # Wrap a message plus source position into an LSP Diagnostic.
    return Diagnostic(_get_range(pos), msg, severity=severity)


def _get_range(p: SourcePosition = None):
    """Convert a 1-based WDL SourcePosition into a 0-based LSP Range.

    With no position, return a range covering the whole first line
    (used for document-level diagnostics).
    """
    if p is None:
        return Range(
            Position(),
            Position(0, sys.maxsize),
        )
    else:
        return Range(
            Position(p.line - 1, p.column - 1),
            Position(p.end_line - 1, p.end_column - 1),
        )


def _diagnostic_err(e: WDLError):
    # Append the chained cause (if any) to the diagnostic message.
    cause = ': {}'.format(e.__cause__) if e.__cause__ else ''
    msg = str(e) + cause
    return _diagnostic(msg, e.pos)
@server.thread()
@server.feature(TEXT_DOCUMENT_DID_OPEN)
@server.catch_error()
def did_open(ls: Server, params: DidOpenTextDocumentParams):
    # Validate as soon as a document is opened.
    parse_wdl(ls, params.textDocument.uri)


@server.thread()
@server.feature(TEXT_DOCUMENT_DID_CHANGE)
@server.catch_error()
def did_change(ls: Server, params: DidChangeTextDocumentParams):
    # Re-validate on every edit; parse_wdl itself is debounced.
    parse_wdl(ls, params.textDocument.uri)


@server.thread()
@server.feature(TEXT_DOCUMENT_DID_SAVE)
@server.catch_error()
def did_save(ls: Server, params: DidSaveTextDocumentParams):
    pass  # no save-specific behavior


@server.thread()
@server.feature(TEXT_DOCUMENT_WILL_SAVE)
@server.catch_error()
def will_save(ls: Server, params: WillSaveTextDocumentParams):
    pass  # no pre-save behavior


@server.feature(WORKSPACE_DID_CHANGE_CONFIGURATION)
def did_change_configuration(ls: Server, params: DidChangeConfigurationParams):
    pass  # configuration is read on demand via _get_client_config


@server.thread()
@server.feature(WORKSPACE_DID_CHANGE_WATCHED_FILES)
@server.catch_error()
def did_change_watched_files(ls: Server, params: DidChangeWatchedFiles):
    # Rebuild the import-path cache when .wdl files are created or deleted.
    for change in params.changes:
        if change.type in [FileChangeType.Created, FileChangeType.Deleted] and \
            change.uri.endswith('.wdl'):
            _get_wdl_paths(ls, change.uri, reuse_paths=False)


@server.thread()
@server.feature(DEFINITION)
@server.catch_error()
def goto_definition(ls: Server, params: TextDocumentPositionParams):
    return _find_def(ls, params.textDocument.uri, params.position)


@server.thread()
@server.feature(REFERENCES)
@server.catch_error()
def find_references(ls: Server, params: TextDocumentPositionParams):
    return _find_refs(ls, params.textDocument.uri, params.position)


class RunWDLParams:
    # Payload carried by the wdl.run command's arguments.
    def __init__(self, wdl_uri: str):
        self.wdl_uri = wdl_uri


@server.feature(CODE_ACTION)
@server.catch_error()
def code_action(ls: Server, params: CodeActionParams):
    # Offer a single "Run WDL" code action for the current document.
    return [{
        'title': 'Run WDL',
        'kind': Server.CMD_RUN_WDL,
        'command': {
            'command': Server.CMD_RUN_WDL,
            'arguments': [RunWDLParams(params.textDocument.uri)],
        },
    }]
@server.thread()
@server.command(Server.CMD_RUN_WDL)
@server.catch_error()
def run_wdl(ls: Server, params: Tuple[RunWDLParams]):
    """Submit the document's workflow to Cromwell and poll until terminal.

    Progress is streamed to the client via window/progress notifications;
    on failure, Cromwell error details are published as diagnostics.
    """
    wdl_uri = params[0].wdl_uri
    wdl_path = urlparse(wdl_uri).path
    _, wdl = _parse_wdl(ls, wdl_uri)
    if not wdl:
        return ls.show_message('Unable to submit: WDL contains error(s)', MessageType.Error)
    config = _get_client_config(ls)
    # NOTE(review): no-auth Cromwell connection -- confirm this matches the
    # deployment's security expectations.
    auth = CromwellAuth.from_no_authentication(config.cromwell.url)
    workflow = cromwell_api.submit(
        auth, wdl_path, raise_for_status=True,
    ).json()
    id = workflow['id']
    title = 'Workflow {} for {}'.format(id, wdl_path)
    _progress(ls, 'start', {
        'id': id,
        'title': title,
        'cancellable': True,
        'message': workflow['status'],
    })
    status: str = ''
    while True:
        # Only react when Cromwell reports a new status.
        if status != workflow['status']:
            status = workflow['status']
            if status == 'Succeeded':
                message_type = MessageType.Info
            elif status in ('Aborting', 'Aborted'):
                message_type = MessageType.Warning
            elif status == 'Failed':
                message_type = MessageType.Error
            else:
                # Non-terminal status: report progress and keep polling.
                _progress(ls, 'report', {
                    'id': id,
                    'message': status,
                })
                continue
            # Terminal status: close the progress bar, notify, publish
            # failure diagnostics (None when the workflow succeeded).
            _progress(ls, 'done', {
                'id': id,
            })
            message = '{}: {}'.format(title, status)
            ls.show_message(message, message_type)
            diagnostics = _parse_failures(wdl, id, auth)
            return ls.publish_diagnostics(wdl_uri, diagnostics)
        sleep(config.cromwell.pollSec)
        # Honor a pending user cancellation before the next status poll.
        if id in ls.aborting_workflows:
            workflow = cromwell_api.abort(
                id, auth, raise_for_status=True,
            ).json()
            ls.aborting_workflows.remove(id)
            continue
        try:
            workflow = cromwell_api.status(
                id, auth, raise_for_status=True,
            ).json()
        except HTTPError as e:
            # Transient polling error: log and retry on the next cycle.
            ls.show_message_log(str(e), MessageType.Error)
def _progress(ls: Server, action: str, params):
    # Custom window/progress/{start,report,done} notifications to the client.
    ls.send_notification('window/progress/' + action, params)


@server.feature('window/progress/cancel')
def abort_workflow(ls: Server, params):
    # Mark the workflow for abort; run_wdl's polling loop performs it.
    ls.aborting_workflows.add(params.id)
def _parse_failures(wdl: WDL.Document, id: str, auth: CromwellAuth):
    """Turn a failed Cromwell workflow's metadata into diagnostics.

    Returns None when the workflow did not fail; otherwise one diagnostic
    per failed call attempt (anchored at the call site, with stderr
    appended) or a single document-level diagnostic when no calls were
    recorded.
    """
    workflow = cromwell_api.metadata(
        id, auth,
        includeKey=['status', 'executionStatus', 'failures', 'stderr'],
        expandSubWorkflows=True,
        raise_for_status=True,
    ).json()
    if workflow['status'] != 'Failed':
        return
    calls = workflow['calls']
    if calls:
        diagnostics: List[Diagnostic] = []
        elements = wdl.workflow.elements  # NOTE(review): unused local variable
        for call, attempts in calls.items():
            for attempt in attempts:
                if attempt['executionStatus'] == 'Failed':
                    pos = _find_call(wdl.workflow.elements, wdl.workflow.name, call)
                    failures = _collect_failures(attempt['failures'], [])
                    # Append the task's stderr when it could be downloaded.
                    stderr = _download(attempt['stderr'])
                    if stderr is not None:
                        failures.append(stderr)
                    msg = '\n\n'.join(failures)
                    diagnostics.append(_diagnostic(msg, pos))
        return diagnostics
    else:
        failures = _collect_failures(workflow['failures'], [])
        msg = '\n\n'.join(failures)
        return [_diagnostic(msg)]
class CausedBy:
def __init__(self, causedBy: List['CausedBy'], message: str):
self.causedBy = causedBy
self.message = message
def _collect_failures(causedBy: List[CausedBy], failures: List[str]):
for failure in causedBy:
if failure['causedBy']:
_collect_failures(failure['causedBy'], failures)
failures.append(failure['message'])
return failures
# Element types that can appear in a workflow body.
WorkflowElements = List[Union[WDL.Decl, WDL.Call, WDL.Scatter, WDL.Conditional]]


def _find_call(elements: WorkflowElements, wf_name: str, call_name: str):
    """Locate the source position of the call named '<wf_name>.<alias>', or None."""
    found: SourcePosition = None
    for el in elements:
        if found:
            break
        elif isinstance(el, WDL.Call) and '{}.{}'.format(wf_name, el.name) == call_name:
            found = el.pos
        elif isinstance(el, WDL.Conditional) or isinstance(el, WDL.Scatter):
            # Recurse into nested conditional/scatter bodies.
            found = _find_call(el.elements, wf_name, call_name)
    return found


@server.catch_error(log=True)
def _download(url: str):
    # Fetch a (possibly remote) file's content as text; errors are logged
    # by catch_error and the call then returns None.
    return str(download(url), 'utf-8')
| broadinstitute/wdl-ide | server/wdl_lsp/server.py | server.py | py | 17,170 | python | en | code | 38 | github-code | 6 | [
{
"api_name": "pygls.server.LanguageServer",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
... |
33225318672 | import torch
import torch.nn as nn
from torch.utils.data import Dataset
import h5py
import numpy as np
import utils.io as io
from datasets.hico_constants import HicoConstants
from datasets import metadata
import sys
import random
class HicoDataset(Dataset):
'''
Args:
subset: ['train', 'val', 'train_val', 'test']
'''
data_sample_count = 0 # record how many times to process data sampling
def __init__(self, data_const=HicoConstants(), subset='train', data_aug=False, sampler=None, test=False):
super(HicoDataset, self).__init__()
self.data_aug = data_aug
self.data_const = data_const
self.test = test
self.subset_ids = self._load_subset_ids(subset, sampler)
self.sub_app_data = self._load_subset_app_data(subset)
self.sub_spatial_data = self._load_subset_spatial_data(subset)
self.word2vec = h5py.File(self.data_const.word2vec, 'r')
self.sub_pose_feat = self._load_subset_pose_data(subset)
def _load_subset_ids(self, subset, sampler):
global_ids = io.load_json_object(self.data_const.split_ids_json)
bad_det_ids = io.load_json_object(self.data_const.bad_faster_rcnn_det_ids)
# skip bad instance detection image with 0-1 det
# !NOTE: How to reduce the number of bad instance detection images
subset_ids = [id for id in global_ids[subset] if id not in bad_det_ids['0']+bad_det_ids["1"]]
if sampler:
# import ipdb; ipdb.set_trace()
''' when changing the model, use sub-dataset to quickly show if there is something wrong '''
subset_ids = random.sample(subset_ids, int(len(subset_ids)*sampler))
return subset_ids
def _load_subset_app_data(self, subset):
print(f'Using {self.data_const.feat_type} feature...')
if subset == 'train' or subset == 'val' or subset == 'train_val':
return h5py.File(self.data_const.hico_trainval_data, 'r')
elif subset == 'test':
return h5py.File(self.data_const.hico_test_data, 'r')
else:
print('Please double check the name of subset!!!')
sys.exit(1)
def _load_subset_spatial_data(self, subset):
if subset == 'train' or subset == 'val' or subset == 'train_val':
return h5py.File(self.data_const.trainval_spatial_feat, 'r')
elif subset == 'test':
return h5py.File(self.data_const.test_spatial_feat, 'r')
else:
print('Please double check the name of subset!!!')
sys.exit(1)
def _load_subset_pose_data(self, subset):
if subset == 'train' or subset == 'val' or subset == 'train_val':
return h5py.File(self.data_const.trainval_keypoints_feat, 'r')
elif subset == 'test':
return h5py.File(self.data_const.test_keypoints_feat, 'r')
else:
print('Please double check the name of subset!!!')
sys.exit(1)
def _get_obj_one_hot(self,node_ids):
num_cand = len(node_ids)
obj_one_hot = np.zeros([num_cand,80])
for i, node_id in enumerate(node_ids):
obj_idx = int(node_id)-1
obj_one_hot[i,obj_idx] = 1.0
return obj_one_hot
def _get_word2vec(self,node_ids):
word2vec = np.empty((0,300))
for node_id in node_ids:
vec = self.word2vec[metadata.coco_classes[node_id]]
word2vec = np.vstack((word2vec, vec))
return word2vec
def _get_interactive_label(self, edge_label):
interactive_label = np.zeros(edge_label.shape[0])
interactive_label = interactive_label[:, None]
valid_idxs = list(set(np.where(edge_label==1)[0]))
if len(valid_idxs) > 0:
# import ipdb; ipdb.set_trace()
interactive_label[valid_idxs,:] = 1
return interactive_label
@staticmethod
def displaycount():
print("total times to process data sampling:", HicoDataset.data_sample_count)
# def get_verb_one_hot(self,hoi_ids):
# num_cand = len(hoi_ids)
# verb_one_hot = np.zeros([num_cand,len(self.verb_to_id)])
# for i, hoi_id in enumerate(hoi_ids):
# verb_id = self.verb_to_id[self.hoi_dict[hoi_id]['verb']]
# verb_idx = int(verb_id)-1
# verb_one_hot[i,verb_idx] = 1.0
# return verb_one_hot
def __len__(self):
return len(self.subset_ids)
    def __getitem__(self, idx):
        """Assemble one training sample (dict of arrays) for the idx-th global id.

        Pulls appearance, spatial and pose features from the pre-opened HDF5
        handles; adds detection boxes/scores only in test mode; optionally
        applies the data-augmentation sampler with probability ~0.5.
        """
        global_id = self.subset_ids[idx]
        data = {}
        single_app_data = self.sub_app_data[global_id]
        single_spatial_data = self.sub_spatial_data[global_id]
        single_pose_data = self.sub_pose_feat[str(global_id)]
        data['roi_labels'] = single_app_data['classes'][:]
        # NOTE(review): h5py Dataset.value is deprecated/removed in h5py >= 3;
        # would need `[()]` there — kept as-is, confirm the pinned h5py version.
        data['node_num'] = single_app_data['node_num'].value
        data['edge_labels'] = single_app_data['edge_labels'][:]
        data['features'] = single_app_data['feature'][:]
        data['spatial_feat'] = single_spatial_data[:]
        data['word2vec'] = self._get_word2vec(data['roi_labels'])
        data['pose_to_human'] = single_pose_data['pose_to_human'][:]
        data['pose_to_obj_offset'] = single_pose_data['pose_to_obj_offset'][:]
        if self.test:
            # Extra fields used only for evaluation / visualization.
            data['global_id'] = global_id
            data['img_name'] = global_id + '.jpg'
            data['det_boxes'] = single_app_data['boxes'][:]
            data['roi_scores'] = single_app_data['scores'][:]
        if self.data_aug:
            # Augment roughly half of the samples.
            thresh = random.random()
            if thresh > 0.5:
                data = self._data_sampler(data)
        return data
# for inference
    def sample_date(self, global_id):
        """Build one fully-populated sample dict by global id (inference path).

        Unlike __getitem__, this always includes boxes/scores/keypoints.
        NOTE(review): the name looks like a typo for "sample_data"; kept
        unchanged because external callers may rely on it.
        """
        data = {}
        single_app_data = self.sub_app_data[global_id]
        single_spatial_data = self.sub_spatial_data[global_id]
        single_pose_data = self.sub_pose_feat[str(global_id)]
        data['global_id'] = global_id
        data['img_name'] = global_id + '.jpg'
        data['det_boxes'] = single_app_data['boxes'][:]
        data['roi_labels'] = single_app_data['classes'][:]
        data['roi_scores'] = single_app_data['scores'][:]
        # NOTE(review): `.value` is removed in h5py >= 3 (use `[()]`) — confirm version.
        data['node_num'] = single_app_data['node_num'].value
        data['edge_labels'] = single_app_data['edge_labels'][:]
        data['features'] = single_app_data['feature'][:]
        data['spatial_feat'] = single_spatial_data[:]
        data['word2vec'] = self._get_word2vec(data['roi_labels'])
        data['pose_to_human'] = single_pose_data['pose_to_human'][:]
        data['pose_to_obj_offset'] = single_pose_data['pose_to_obj_offset'][:]
        data['keypoints'] = single_app_data['keypoints'][:]
        return data
# for DatasetLoader
def collate_fn(batch):
    """Stack a list of per-image sample dicts into one batch dict for DataLoader.

    List-valued fields (ids, labels, boxes, node counts, ...) stay Python
    lists; dense numeric fields are concatenated along axis 0 and wrapped in
    torch.FloatTensor.  Optional fields ('global_id' group, 'keypoints') are
    gathered only when present in the samples.
    """
    all_keys = ('global_id', 'img_name', 'det_boxes', 'roi_labels', 'roi_scores',
                'node_num', 'edge_labels', 'features', 'spatial_feat', 'word2vec',
                'pose_to_human', 'pose_to_obj_offset', 'keypoints')
    always_keys = ('roi_labels', 'node_num', 'edge_labels', 'features',
                   'spatial_feat', 'word2vec', 'pose_to_human', 'pose_to_obj_offset')
    test_keys = ('global_id', 'img_name', 'det_boxes', 'roi_scores')
    tensor_keys = ('edge_labels', 'features', 'spatial_feat', 'word2vec',
                   'pose_to_human', 'pose_to_obj_offset')

    batch_data = {key: [] for key in all_keys}
    for sample in batch:
        for key in always_keys:
            batch_data[key].append(sample[key])
        if 'global_id' in sample:
            for key in test_keys:
                batch_data[key].append(sample[key])
        if 'keypoints' in sample:
            batch_data['keypoints'].append(sample['keypoints'])

    for key in tensor_keys:
        batch_data[key] = torch.FloatTensor(np.concatenate(batch_data[key], axis=0))
    return batch_data
{
"api_name": "torch.utils.data.Dataset",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "datasets.hico_constants.HicoConstants",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 30,
"usage_type": "call"
},
{
"api_name... |
3618688983 | import graphviz as gv
from graphvizual import *
class Edge:
    """A weighted directed edge from node_0 to node_1."""

    def __init__(self, node_0, node_1, weight):
        self.node_0 = node_0
        self.node_1 = node_1
        self.weight = weight
class Graph_0:
    """Edge-list digraph with an ad-hoc Dijkstra-style shortest-path search.

    Edges are kept globally sorted by weight (bubble sort after each insert).
    NOTE(review): nodes_values() uses 100 as "infinity", so results are only
    correct while every real path distance stays below 100.
    """
    def __init__(self):
        self.list_edges =[]
    def add_edge(self,start,end,weight):
        # Insert a directed edge, then re-sort the whole edge list by weight.
        self.list_edges.append(Edge(start,end,weight))
        self.buble_sort()
        return self
    def list_nodes(self):
        # Every distinct node label, in first-seen order.
        # (Local name `list` shadows the builtin; left unchanged here.)
        list=[]
        for i in self.list_edges:
            if i.node_0 not in list:
                list.append(i.node_0)
            if i.node_1 not in list:
                list.append(i.node_1)
        return list
    def buble_sort(self):
        # In-place bubble sort of self.list_edges by ascending weight.
        length = len(self.list_edges) - 1
        sorted = False
        while not sorted:
            sorted = True
            for element in range(0, length):
                if self.list_edges[element].weight > self.list_edges[element + 1].weight:
                    sorted = False
                    hold = self.list_edges[element + 1]
                    self.list_edges[element + 1] = self.list_edges[element]
                    self.list_edges[element] = hold
        return self
    def making_friends(self,node):
        # Outgoing edges of `node`.
        list=[]
        for i in self.list_edges:
            if i.node_0==node:
                list.append(i)
        return list
    def print_list_edges(self):
        # Debug helper: print the edges as [start, end, weight] triples.
        list_e=[]
        for i in self.list_edges:
            list_e.append([i.node_0,i.node_1,i.weight])
        print(list_e)
    def sort_friends(self,friends):
        # Bubble-sort a list of Edge objects by ascending weight, in place.
        length = len(friends) - 1
        sorted = False
        while not sorted:
            sorted = True
            for element in range(0,length):
                if friends[element].weight > friends[element + 1].weight:
                    sorted = False
                    hold = friends[element + 1]
                    friends[element + 1] = friends[element]
                    friends[element] = hold
        return friends
    def creating_antecendents(self):
        # Predecessor map, initialised to 0 meaning "no predecessor yet".
        antecendents = {}
        for i in self.list_nodes():
            antecendents[str(i)]=0
        return(antecendents)
    def nodes_values(self):
        # Tentative distance per node; 100 plays the role of infinity (see class NOTE).
        nodes_values = {}
        for i in self.list_nodes():
            nodes_values[str(i)] = 100
        return (nodes_values)
    def dijkstra_alg(self,node_start,node_end):
        """Shortest path from node_start to node_end.

        Returns the path as a list of [start, end, weight] triples (may be
        empty when no predecessor chain was recorded).  The sentinel
        `roar = 20` is the author's "stop" flag: it is set as soon as
        node_end is reached.
        """
        nodes_values=self.nodes_values()
        antecendents=self.creating_antecendents()
        nodes_values[node_start]=0
        list_visited_nodes=[str(node_start)]
        list_visited_edges=[]
        friends=[]
        roar=1
        while roar!=20:
            # NOTE(review): list_visited_nodes grows while being iterated;
            # Python's for-loop does pick up the appended items.
            for k in list_visited_nodes:
                if roar==20:
                    break
                friends_i=self.making_friends(k)
                # Relax every outgoing edge of k.
                for i in friends_i:
                    if i.weight+nodes_values[str(i.node_0)]<nodes_values[str(i.node_1)]:
                        nodes_values[i.node_1]=nodes_values[i.node_0]+i.weight
                        antecendents[i.node_1]=i.node_0
                for i in friends_i:
                    if i not in friends:
                        friends.append(i)
                self.sort_friends(friends)
                # Visit frontier edges in weight order until node_end shows up.
                for k in friends:
                    if k not in list_visited_edges and k.node_1 not in list_visited_nodes and k.node_0 !=node_end:
                        list_visited_edges.append(k)
                        if k.node_0 not in list_visited_nodes:
                            list_visited_nodes.append(k.node_0)
                            if k.node_0==node_end:
                                roar=20
                                break
                        if k.node_1 not in list_visited_nodes:
                            list_visited_nodes.append(k.node_1)
                            if k.node_1==node_end:
                                roar=20
                                break
        # Walk the predecessor chain backwards, rebuilding the path triples.
        node=node_end
        path_d=[]
        while antecendents[node] != 0:
            node_ant=antecendents[node]
            for i in list_visited_edges:
                if i.node_0==node_ant and i.node_1==node:
                    path_d.insert(0,[i.node_0,i.node_1,i.weight])
                    break
            node=node_ant
        return path_d
    def drawing(self,path):
        """Render the full graph, then one PNG per path step with the taken edges in red.

        NOTE(review): the edge list `list_e` is hard-coded rather than derived
        from self.list_edges, so this only draws the demo graph from __main__.
        """
        Drawing = gv.Digraph(format='png')
        list_e = [['B', 'C', 1], ['C', 'E', 1], ['E', 'A', 2], ['A', 'B', 3], ['D', 'E', 3], ['A', 'D', 3], ['C', 'D', 5], ['B', 'D', 6]]
        for item in list_e:
            node_00 = str(item[0])
            node_11 = str(item[1])
            wei = str(item[2])
            Drawing.edge(node_00, node_11, wei, color='black')
        Drawing = apply_styles(Drawing, styles)
        start = Drawing.render(filename=str(10))
        Drawing.render(view=True)
        list = []
        # NOTE(review): `Graph` presumably comes from `from graphvizual import *`
        # (or graphviz.Graph); this instance is immediately replaced inside the loop.
        Drawing = Graph(format='png')
        for i in range(1, len(path) + 1):
            print(path)
            Drawing = gv.Digraph(format='png')
            list.append([str(path[i - 1][0]), str(path[i - 1][1]), str(path[i - 1][2])])
            for item in list_e:
                node_00 = str(item[0])
                node_11 = str(item[1])
                wei = str(item[2])
                # Edges already on the path (either direction) are drawn red.
                if [node_00, node_11, wei] in list:
                    Drawing.edge(node_00, node_11, wei, color='red')
                elif [node_11, node_00, wei] in list:
                    Drawing.edge(node_00, node_11, wei, color='red')
                else:
                    Drawing.edge(node_00, node_11, wei, color='black')
            Drawing = apply_styles(Drawing, styles)
            # NOTE(review): render() returns a filename, so the loop variable `i`
            # is clobbered here; harmless only because it is re-bound by `range`.
            i = Drawing.render(filename=str(i))
            Drawing.render(view=True)
if __name__ == "__main__":
    # Demo: build a small weighted digraph, run the Dijkstra search from C to B,
    # and render the step-by-step path images with graphviz.
    d=Graph_0()
    d.add_edge('A', 'B', 3)
    d.add_edge('B', 'C', 1)
    d.add_edge('B', 'D', 6)
    d.add_edge('C', 'E', 1)
    d.add_edge('C', 'D', 5)
    d.add_edge('D', 'E', 3)
    d.add_edge('E', 'A', 2)
    d.add_edge('A', 'D', 3)
    path=d.dijkstra_alg('C','B')
    d.drawing(path)
| AnnaPiatek/Graph | Dijkstra.py | Dijkstra.py | py | 6,638 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "graphviz.Digraph",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "graphviz.Digraph",
"line_number": 151,
"usage_type": "call"
}
] |
38090331621 | from .base_page import BasePage
from .locators import ProductPageLocators
from selenium.common.exceptions import NoAlertPresentException
from selenium.webdriver.common.by import By
import math
import webbrowser
class ProductPage(BasePage):
    """Page object for a product page: add-to-basket flow plus basket checks."""
    def go_product_basket_add(self):
        # Click the "add to basket" button on the product page.
        self.browser.find_element(*ProductPageLocators.BTN_ADD_BASKET).click()
    def solve_quiz_and_get_code(self):
        """Solve the site's math-challenge alert and print the promo code.

        The challenge text contains a number x; the expected answer is
        log(|12 * sin(x)|), typed back into the alert.  A second alert with
        the code may or may not appear.
        """
        alert = self.browser.switch_to.alert
        x = alert.text.split(" ")[2]
        answer = str(math.log(abs((12 * math.sin(float(x))))))
        alert.send_keys(answer)
        alert.accept()
        try:
            alert = self.browser.switch_to.alert
            alert_text = alert.text
            print(f"Your code: {alert_text}")
            alert.accept()
        except NoAlertPresentException:
            print("No second alert presented")
    def should_be_name_product(self):
        # NOTE(review): is_element_present presumably returns a bool, so this
        # compares two presence flags rather than the product/message texts —
        # confirm against BasePage before relying on this assertion.
        product_name = self.is_element_present(*ProductPageLocators.PRODUCT_NAME)
        message = self.is_element_present(*ProductPageLocators.CONFIRM_MESSAGE)
        assert product_name == message, "Наименование товара отсутсвует в корзине"
    def should_be_price_product(self):
        # Product price must appear inside the basket total text.
        product_price = self.browser.find_element(*ProductPageLocators.PRODUCT_PRICE).text
        message_price = self.browser.find_element(*ProductPageLocators.PRICE_BASKET).text
        assert product_price in message_price, "Цена товара не соответствует цене в корзине"
    def should_not_be_success_message(self):
        # Negative check: success banner must not be present right now.
        assert self.is_not_element_present(*ProductPageLocators.SUCCESS_MESSAGE), \
            "Success message is presented, but should not be"
    def should_not_be_success_message_disappeared(self):
        # Negative check: success banner must disappear within the wait window.
        assert self.is_disappeared(*ProductPageLocators.SUCCESS_MESSAGE), \
            "Success message is presented, but should not be"
| Pavel-OG/project_selenium_course_final_block | pages/product_page.py | product_page.py | py | 1,916 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "base_page.BasePage",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "locators.ProductPageLocators.BTN_ADD_BASKET",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "locators.ProductPageLocators",
"line_number": 11,
"usage_type": "name"... |
40310342893 | import time
import random
import pandas as pd
import multiprocessing as mp
import numpy as np
import os
import torch
import torch.nn.functional as F
import copy
from .utils import strToListF
from .models import makeDataSet_Vec
from .utils import strToListF, colorizar, getSTime
# models
from .drlearning import Agent_DQL, ExperienceReplay, ICM as ICM_DQL
class VecDataEnvironment:
    '''RL environment over a CSV of labelled vectors: the agent fills a fixed-size
    "backpack" of prototype vectors; the episode reward is the nearest-prototype
    classification accuracy on the (eval) data.  If this environment returns
    done=True, reset it before stepping again or errors may appear.'''
    VERY_BAD_REWARD = -1.
    def __init__(self, data_path, eval_path=None, max_backpack_size=200, vname='vecs', lname='is_humor', frmethod='acc', rdata_weval=False):
        # vname: CSV column holding space-separated floats; lname: label column.
        self.data = pd.read_csv(data_path)
        self.data_eval = None
        self.max_backpack_size = max_backpack_size
        self.vec_size = len(self.data.loc[0,vname].split())
        self.vname = vname
        self.lname = lname
        self.done = False
        self.backpack = []
        self.backpack_l = []
        self.pos_gone = None
        # Iteration order over data rows; only the first iter_modulo entries
        # are sampled per episode.
        self.iterator = [i for i in range(len(self.data))]
        self.iter_modulo = len(self.data)
        self.iter_pos = None
        self.current_vector = None
        self.final_reward = None
        self.frmethod = frmethod
        if eval_path is not None:
            self.data_eval = pd.read_csv(eval_path)
        if rdata_weval:
            self.resetIterator(True)
    def mulIterModulo(self, mul=1.0):
        # Grow (or shrink) the per-episode sample window, capped at the data size.
        tmp = int(self.iter_modulo * mul)
        self.iter_modulo = min(len(self.data), tmp)
        self.iter_pos = None
    def resetIterator(self, use_reduced=False, porsion=1.):
        """Rebuild the row-iteration order.

        use_reduced=True front-loads, for each eval row, its nearest same-label
        train row (cached in data/itEnvRed.npy, computed in parallel otherwise).
        """
        if not use_reduced:
            self.iterator = [i for i in range(len(self.data))]
            self.iter_modulo = int(len(self.data) * porsion)
            self.iter_pos = 0
        else:
            print ('# Reducing Data trick')
            file_path = os.path.join('data', 'itEnvRed.npy')
            if os.path.isfile(file_path):
                # Cached reduced index list.
                rel = np.load(file_path)
                self.iterator = rel.tolist()
                self.iter_modulo = len(self.iterator)
                del rel
                ides = dict([(i,1) for i in self.iterator])
                # Append the remaining rows after the reduced prefix.
                for i in range(len(self.data)):
                    if i not in ides:
                        self.iterator.append(i)
                del ides
                print (' Taken from', colorizar(file_path))
            else:
                # Compute nearest-neighbour reduction in parallel chunks.
                cnt = mp.cpu_count()
                pool = mp.Pool(cnt)
                dx = int(len(self.data_eval) / cnt )
                dx = [(i*dx, i*dx + dx + (0 if i != cnt-1 else len(self.data_eval) % cnt)) for i in range(cnt)]
                label_list = pool.map(self.reduceData, dx)
                del pool
                del cnt
                del dx
                ides = {}
                for li in label_list:
                    for v in li:
                        ides.update({v:1})
                del label_list
                self.iterator = [ v for v in ides ]
                self.iter_modulo = len(self.iterator)
                save = np.array(self.iterator, dtype=np.int64)
                np.save(file_path, save)
                del save
                for i in range(len(self.data)):
                    if i not in ides:
                        self.iterator.append(i)
                del ides
    def reduceData(self, ini_fin):
        """Worker: for eval rows in [ini, fin), return the index of the nearest
        train row with the same label (Euclidean distance).

        Runs in a forked process; deleting the dataframes at the end only
        affects the worker's copy.
        """
        sol = []
        for k in range(ini_fin[0],ini_fin[1]):
            vec = np.array(strToListF(self.data_eval.loc[k, self.vname]), dtype=np.float32)
            lab = int(self.data_eval.loc[k, self.lname])
            ide, dist = None, None
            for i in range(len(self.data)):
                curr_vec = np.array(strToListF(self.data.loc[i, self.vname]), dtype=np.float32)
                curr_lab = int(self.data.loc[i, self.lname])
                if lab != curr_lab: continue
                distance = np.sqrt(((curr_vec - vec) ** 2).sum()).item()
                if dist is None or dist > distance:
                    dist = distance
                    ide = i
            sol.append(ide)
        del self.data_eval
        del self.data
        return sol
    def __next(self):
        # Yield the next (vector, label) pair; (None, None) + done=True at the
        # end of the episode window, which also triggers final-reward computation.
        if self.iter_pos is None:
            self.iter_pos = 0
            # Shuffle the active window and the remainder independently.
            selection_part = self.iterator[:self.iter_modulo]
            other_part = self.iterator[self.iter_modulo:]
            random.shuffle(selection_part) # RANDOMIZE
            random.shuffle(other_part)
            self.iterator = selection_part + other_part
        self.iter_pos += 1
        if (self.iter_pos >= len(self.iterator)) or ((self.iter_pos % self.iter_modulo == 0) and self.iter_pos > 0):
            self.done = True
            self.__calculate_final_R()
            return None, None
        i = self.iterator[self.iter_pos]
        cad = strToListF(self.data.loc[i, self.vname])
        lab = int(self.data.loc[i, self.lname])
        return cad, lab
    def export_prototypes(self, file_list, label_list, silense=False):
        '''Export the backpack vectors to .npy files, one file per label.

        file_list: [f1:str, ..., fn:str]; label_list: [l1:int, ..., ln:int].
        Vectors whose stored label equals li are saved to fi + ".npy".'''
        for file_, label_ in zip(file_list, label_list):
            if not silense:
                print ('# Exporting prototypes to', colorizar(os.path.basename(file_)))
            expo = []
            for v,l in zip(self.backpack, self.backpack_l):
                if l != label_: continue
                expo.append(v.reshape(1,-1))
            # NOTE(review): np.concatenate raises on an empty list, i.e. when no
            # backpack slot carries this label — confirm callers guarantee both labels.
            expo = np.concatenate(expo, axis=0)
            np.save(file_+'.npy', expo)
    def proto_cmp_data_csv(self, ini_fin):
        ''' Worker used with multiprocessing to label data rows with the prototypes.
        ini_fin: pair (ini:int, fin:int), rows accessed as data.loc[i, vname] for i in [ini, fin).
        Returns the nearest-prototype label per row; stops early if the backpack is empty.'''
        sol = []
        for i in range(ini_fin[0], ini_fin[1]):
            vec = None
            if self.data_eval is not None:
                vec = np.array(strToListF(self.data_eval.loc[i, self.vname]), dtype=np.float32)
            else:
                vec = np.array(strToListF(self.data.loc[i, self.vname]), dtype=np.float32)
            min_val, l_min = None, None
            for v, l in zip(self.backpack, self.backpack_l):
                if l is None : continue
                # Euclidean distance to the prototype.
                current_value = np.sqrt(((v - vec) ** 2).sum())
                if min_val is None or min_val > current_value:
                    min_val = current_value
                    l_min = l
            if l_min is None:
                break
            sol.append(l_min)
        # Worker-local cleanup (forked copies only).
        del self.data
        if self.data_eval is not None:
            del self.data_eval
        return np.array(sol, np.int32) # check this later, the int32 ------------------------------------------ OJO -----------------
    def __calculate_final_R(self):
        '''Compute the episode's final reward (nearest-prototype accuracy on the
        eval data, or on the train data when no eval set was given); computed in
        parallel over row chunks.  An empty backpack yields VERY_BAD_REWARD.'''
        cnt = mp.cpu_count()
        pool = mp.Pool(cnt)
        if self.data_eval is not None:
            dx = int(len(self.data_eval) / cnt )
            dx = [(i*dx, i*dx + dx + (0 if i != cnt-1 else len(self.data_eval) % cnt)) for i in range(cnt)]
        else:
            dx = int(len(self.data) / cnt )
            dx = [(i*dx, i*dx + dx + (0 if i != cnt-1 else len(self.data) % cnt)) for i in range(cnt)]
        label_list = pool.map(self.proto_cmp_data_csv, dx)
        del pool
        label_list = np.concatenate(label_list, axis=0)
        if label_list.shape[0] <= 0:
            # The backpack is empty!
            self.final_reward = self.VERY_BAD_REWARD
            return
        if self.data_eval is not None:
            original_label = np.array(self.data_eval[self.lname].tolist(), dtype=np.int32)
        else:
            original_label = np.array(self.data[self.lname].tolist(), dtype=np.int32)
        if self.frmethod == 'acc':
            self.final_reward = ((label_list == original_label).sum() / original_label.shape[0]).item()
        del label_list
        del original_label
    def __reset_backpack(self):
        # (Re)fill the backpack with zero vectors and clear the stored labels.
        if len(self.backpack) <= 0:
            for _ in range(self.max_backpack_size):
                self.backpack.append(np.array([0 for _ in range(self.vec_size)], dtype=np.float32))
                self.backpack_l.append(None)
        else:
            for k in range(self.max_backpack_size):
                self.backpack[k] = np.array([0 for _ in range(self.vec_size)], dtype=np.float32)
                self.backpack_l[k] = None
    def __makeState(self):
        # Advance the iterator and package the observation; the incoming vector
        # is all-zeros once the episode is done.
        self.current_vector = self.__next()
        backP = np.stack(self.backpack, axis=0)
        if self.done:
            vecI = np.zeros(self.vec_size, dtype=np.float32)
        else:
            vecI = np.array(self.current_vector[0], dtype=np.float32)
        return (backP, vecI)
    def reset(self):
        ''' Return the pair: a np.array of shape (max_backpack_size, vec_size) and a np.array of shape (vec_size).
        They are: (backpack state, incoming vector from data). '''
        self.done = False
        self.final_reward = None
        if (self.iter_pos is not None) and (self.iter_pos >= len(self.iterator)):
            self.iter_pos = None
        self.__reset_backpack()
        s,v = self.__makeState()
        return s,v
    def step(self, action:int):
        ''' Return four objects: \n
        \t BackPack State, Incoming Vector from data, reward, done \n
        \t types: np.array(max_backpack_size, vec_size), np.array (vec_size), float, bool \n
        action == max_backpack_size means "discard"; anything smaller stores the
        current vector/label into that backpack slot.'''
        if action < 0 or action > self.max_backpack_size:
            raise ValueError('ERROR in action input variable, action: {} not in [0,{}]'.format(action,self.max_backpack_size))
        self.pos_gone = action if action < self.max_backpack_size else None
        reward = 0.
        if action < self.max_backpack_size:
            self.backpack[action] = np.array(self.current_vector[0], dtype=np.float32)
            self.backpack_l[action] = int(self.current_vector[1])
        s,v = self.__makeState()
        if self.final_reward is not None: reward += self.final_reward
        return s, v, reward, self.done
def prepareBackpackState(blist, vec):
    """Append *vec* as one extra row below the backpack matrix and return a torch tensor.

    blist: (B, D) array; vec: (D,) array -> tensor of shape (B+1, D).
    """
    stacked = np.concatenate([blist, vec.reshape(1, -1)], axis=0)
    return torch.from_numpy(stacked)
def __policy_dql(qvalues, nactions=12,eps=None):
    # Action selection for the DQL agent.
    #   eps given -> epsilon-greedy: uniform-random action with prob eps, else argmax.
    #   eps None  -> stochastic policy: sample from softmax over normalized Q-values.
    # NOTE(review): F.softmax/F.normalize with dim=0 assume qvalues' action axis
    # is dim 0; Qmodel(state1) output shape is not visible here — confirm.
    with torch.no_grad():
        if eps is not None:
            if torch.rand(1) < eps:
                return torch.randint(low=0,high=nactions, size=(1,))
            else:
                return torch.argmax(qvalues)
        else:
            return torch.multinomial(F.softmax(F.normalize(qvalues), dim=0), num_samples=1)
def __minibatch_train_dql(Qmodel, Qtarget, qloss, replay, params, DEVICE, icm=None):
    """One DQL minibatch: build bootstrapped targets from the target net (plus an
    optional ICM intrinsic reward) and return (forward_err, inverse_err, q_loss)."""
    state1_batch, action_batch, reward_batch, state2_batch = replay.get_batch()
    action_batch = action_batch.view(action_batch.shape[0],1).to(device=DEVICE)
    reward_batch = reward_batch.view(reward_batch.shape[0],1).to(device=DEVICE)
    state1_batch = state1_batch.to(device=DEVICE)
    state2_batch = state2_batch.to(device=DEVICE)
    forward_pred_err , inverse_pred_err = 0., 0.
    # NOTE(review): `reward` aliases reward_batch, so the += below mutate that
    # tensor in place — harmless only if replay.get_batch() returns fresh copies.
    reward = reward_batch
    if icm is not None:
        # Intrinsic curiosity bonus, scaled by 1/eta; detached so ICM gradients
        # flow only through its own loss terms.
        forward_pred_err , inverse_pred_err = icm(state1_batch, action_batch, state2_batch)
        i_reward = (1. / float(params['eta'])) * forward_pred_err
        reward += i_reward.detach()
    # Bootstrap from the target network on the next state.
    qvals = Qtarget(state2_batch)
    reward += float(params['gamma']) * torch.max(qvals)
    reward_pred = Qmodel(state1_batch)
    reward_target = reward_pred.clone()
    # Overwrite only the Q-value of the taken action with the target reward.
    indices = torch.stack((torch.arange(action_batch.shape[0]).to(device=DEVICE),action_batch.squeeze().to(device=DEVICE)), dim=0)
    indices = indices.tolist()
    reward_target[indices] = reward.squeeze()
    q_loss = 1e5 * qloss(F.normalize(reward_pred), F.normalize(reward_target.detach()))
    return forward_pred_err, inverse_pred_err, q_loss
def __loss_fn(q_loss, inverse_loss, forward_loss, params):
    # Blend the ICM losses, (1-beta)*inverse + beta*forward, then add the
    # lambda-scaled Q loss.
    # NOTE(review): the call site passes (q_loss, forward_pred_err,
    # inverse_pred_err, params), so `inverse_loss` actually receives the forward
    # error and vice versa — confirm which weighting was intended.
    loss_ = (1 - float(params['beta'])) * inverse_loss
    loss_ += float(params['beta']) * forward_loss
    loss_ = loss_.mean()
    loss = loss_ + float(params['lambda']) * q_loss
    return loss
# params (data_path, lcolumn, vcolumn, param)
def __prototypes_with_dql(params):
    """Train a DQL agent (optionally with ICM curiosity) to pick prototype vectors.

    Side effects: saves the best backpack to data/{pos,neg}_center.npy, the best
    models to pts/, and the loss history to out/dql_losses.npy.
    """
    print ('# Start:','Deep Q Learning algorithm. Relax, this will take a wille.')
    BACKPACK_SIZE, EPS = int(params['max_prototypes']), float(params['eps'])
    EPOCHS, LR, BSIZE = int(params['epochs']), float(params['lr']), int(params['batch_size'])
    DMODEL = int(params['d_model'])
    target_refill, i_targetFill = int(params['target_refill']), 0
    use_icm = params['ICM']
    losses = []
    # First 2/5 of the epochs use the softmax policy, the rest epsilon-greedy.
    switch_to_eps_greedy = int(EPOCHS * (2/5))
    env = VecDataEnvironment(params['data_path'], eval_path=params['eval_data_path'], max_backpack_size=BACKPACK_SIZE, vname=params['vcolumn'], lname=params['lcolumn'], rdata_weval=bool(params['reduced_data_prototypes']))
    DEVICE = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
    # max_len: 5000 cap (previously BACKPACK_SIZE+11); leaves headroom so the
    # model can adapt as the backpack grows.
    max_len = min(5000, BACKPACK_SIZE+100)
    Qmodel = Agent_DQL(BACKPACK_SIZE+1, nhead=int(params['nhead']),nhid=int(params['nhid']),d_model=DMODEL,nlayers=int(params['n_layers']), max_len=max_len,dropout=float(params['dropout']))
    qloss = torch.nn.MSELoss().to(device=DEVICE)
    # Target network + experience replay.
    Qtarget = copy.deepcopy(Qmodel).to(device=DEVICE)
    Qtarget.load_state_dict(Qmodel.state_dict())
    replay = ExperienceReplay(N=int(params['memory_size']), batch_size=BSIZE)
    all_model_params = list(Qmodel.parameters())
    icm = None
    if use_icm:
        icm = ICM_DQL(BACKPACK_SIZE+1, DMODEL*(BACKPACK_SIZE+1), DMODEL, max_len=max_len, forward_scale=1., inverse_scale=1e4, nhead=int(params['nhead']),hiden_size=int(params['nhid']),nlayers=int(params['n_layers']), dropout=float(params['dropout']))
        all_model_params += list(icm.parameters())
        icm.train()
    opt = torch.optim.Adam(lr=LR, params=all_model_params)
    Qmodel.train()
    greater_reward = -(2**30)
    greater_reward_c = greater_reward
    # 'distribution_train' ("a-b-c", percentages summing to 100) schedules the
    # epochs at which the environment's sample window doubles.
    triple_sch = [float(i) / 100. for i in params['distribution_train'].split('-')]
    for i in range(1,len(triple_sch)): triple_sch[i] += triple_sch[i-1]
    if abs(triple_sch[-1] - 1.) > 1e-9:
        raise ValueError("Parameter 'distribution_train' most add 100, but has {}.".format(triple_sch[-1]*100.))
    pos_tr = 0
    for i in range(EPOCHS):
        print('# Epoch {}/{} {}'.format(i+1, EPOCHS, 'with eps' if i >= switch_to_eps_greedy else 'with softmax policy'))
        while pos_tr < len(triple_sch) and int(EPOCHS * triple_sch[pos_tr]) <= i+1:
            env.mulIterModulo(2.0)
            pos_tr += 1
        all_obj_seeit = False
        state1 = prepareBackpackState(*env.reset()).unsqueeze(0).to(device=DEVICE)
        acc_reward = 0.
        it_episode = 0
        init_time = time.time()
        while not all_obj_seeit:
            it_episode += 1
            print ('\r  It {} with reward {:.4f} | {}'.format(it_episode, acc_reward, getSTime(time.time()-init_time)), end=' ')
            opt.zero_grad()
            q_val_pred = Qmodel(state1)
            # Softmax policy at the beginning, epsilon-greedy afterwards.
            if i >= switch_to_eps_greedy:
                action = int(__policy_dql(q_val_pred, nactions=BACKPACK_SIZE+1,eps=EPS))
            else:
                action = int(__policy_dql(q_val_pred, nactions=BACKPACK_SIZE+1))
            back_state, vec_state , e_reward, done = env.step(action)
            state2 = prepareBackpackState(back_state, vec_state).unsqueeze(0).to(device=DEVICE)
            replay.add_memory(state1, action, e_reward, state2)
            acc_reward += e_reward
            all_obj_seeit = done
            if not done:
                state1 = state2
            # Skip optimisation until the replay buffer can fill a minibatch.
            if len(replay.memory) < BSIZE:
                continue
            forward_pred_err, inverse_pred_err, q_loss = __minibatch_train_dql(Qmodel, Qtarget, qloss, replay, params, DEVICE, icm=icm)
            loss = __loss_fn(q_loss, forward_pred_err, inverse_pred_err, params)
            loss_list = (q_loss.mean().item(), forward_pred_err.flatten().mean().item(), inverse_pred_err.flatten().mean().item())
            losses.append(loss_list)
            loss.backward()
            opt.step()
            # Refresh the target network every `target_refill` optimisation steps.
            i_targetFill += 1
            if i_targetFill % target_refill == 0:
                i_targetFill = 0
                Qtarget.load_state_dict(Qmodel.state_dict())
        # Keep the best backpack seen so far (any schedule phase)...
        if greater_reward_c < acc_reward:
            greater_reward_c = acc_reward
            env.export_prototypes(file_list = [os.path.join('data','pos_center'), os.path.join('data','neg_center')], label_list = [1, 0])
        # ...but only checkpoint the models once the full sample window is active.
        if greater_reward <= acc_reward and (pos_tr >= len(triple_sch)):
            greater_reward = acc_reward
            Qmodel.save(os.path.join('pts', 'dql_model.pt'))
            if icm is not None:
                icm.save(os.path.join('pts', 'icm_model.pt'))
        print ('\r  It {} with reward:{:.4f} | {}'.format(it_episode, acc_reward, getSTime(time.time()-init_time)), end='\n')
    losses_ = np.array(losses)
    np.save(os.path.join('out', 'dql_losses.npy'), losses_)
    del icm
    del opt
    del replay
    # NOTE: a final greedy evaluation pass over the best checkpoint used to live
    # here; it was commented out and has been removed for clarity.
    print ('# Ending:','Deep Q Learning algorithm')
    del env
def extractPrototypes(method, params):
    """Apply a method to extract prototypes from data.

    method: the method used to select prototypes; must be one of
        'dql' or 'dql-intrinsic'.
    params: dict of overrides for the defaults below.  'data_path' (and
        optionally 'eval_data_path') point to .csv files with at most the
        columns [vcolumn, lcolumn]; vcolumn holds space-separated floats
        (a vector) and lcolumn the label in {0, 1}.

    Prints an error (instead of raising) when *method* is unknown.
    """
    __paramu = {'intrinsic':True, 'lambda':0.1, 'eta':1.0, 'gamma':0.2, 'eps':0.15, 'beta':0.2,
            'lcolumn':'is_humor', 'vcolumn':'vecs', 'max_prototypes':20, # 200
            'batch_size':10, 'lr':0.001, 'epochs':20, 'memory_size':50}
    __paramu.update(params)
    methods_ = [('dql', __prototypes_with_dql), ('dql-intrinsic', __prototypes_with_dql)]
    for mname, fun in methods_:
        if method == mname:
            fun(__paramu)
            return
    # BUG FIX: methods_ holds 2-tuples, but the original unpacked them with
    # three targets (`for s,_,_ in methods_`), so reaching this error path
    # raised ValueError instead of printing the intended message.
    print ('ERROR::extractPrototypes Method parameter', '\''+method+'\'', 'is not in [', ' , '.join(['\''+s+'\'' for s,_ in methods_]), '] !!!!')
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numb... |
35712406484 |
from global_variables import stop_event
from hatch_controller import hc
from beamer.mqtt import mqtt_client, fsmQueue, TRAPPE_TOPIC, HDMI_TOPIC
from beamer.hdmi import hdmi_relay
import logging
import time
MQTT_OPEN = b"OPEN"
MQTT_CLOSE = b"CLOSE"
MQTT_STOP = b"STOP"
class State():
    """Base class for cover FSM states.

    Instantiation stamps the entry time, logs the transition and runs the
    subclass entry hook.  str()/repr() are the concrete class name.
    """

    def __init__(self):
        self.enter_time = time.time()
        logging.info(f'COVER: Current state: {str(self)}')
        self.on_enter()

    def on_enter(self) -> None:
        """Entry hook; subclasses publish MQTT status / drive the hatch here."""
        pass

    def update(self, mqtt_command=""):
        """Default transition: ignore the command and stay in this state."""
        return self

    def __repr__(self):
        return str(self)

    def __str__(self):
        return type(self).__name__
class Open(State):
    """Hatch fully open; only a CLOSE command moves us out."""

    def on_enter(self):
        mqtt_client.publish(f"{TRAPPE_TOPIC}/state", "open")

    def update(self, mqtt_command=""):
        return Closing() if mqtt_command == MQTT_CLOSE else self
class Closed(State):
    """Hatch fully closed; only an OPEN command moves us out."""

    def on_enter(self):
        mqtt_client.publish(f"{TRAPPE_TOPIC}/state", "closed")

    def update(self, mqtt_command=""):
        return Opening() if mqtt_command == MQTT_OPEN else self
class Stopped(State):
    """Motion halted mid-travel; waits for an OPEN or CLOSE command."""

    def on_enter(self):
        hc.stop()
        mqtt_client.publish(f"{TRAPPE_TOPIC}/state", "stopped")
        logging.info(f"Stopped at {hc.get_position()}")

    def update(self, mqtt_command=""):
        if mqtt_command == MQTT_CLOSE:
            return Closing()
        if mqtt_command == MQTT_OPEN:
            return Opening()
        return self
class Opening(State):
    """Hatch travelling toward opened_position."""

    def on_enter(self):
        mqtt_client.publish(f"{TRAPPE_TOPIC}/state", "opening")
        hc.set_target_position(hc.opened_position)

    def update(self, mqtt_command=""):
        if mqtt_command == MQTT_CLOSE:
            return Closing()
        if mqtt_command == MQTT_STOP:
            return Stopped()
        return self
class Closing(State):
    """Hatch travelling toward closed_position."""

    def on_enter(self) -> None:
        mqtt_client.publish(f"{TRAPPE_TOPIC}/state", "closing")
        hc.enable_control()
        hc.set_target_position(hc.closed_position)
        return

    def update(self, mqtt_command=""):
        if mqtt_command == MQTT_OPEN:
            return Opening()
        if mqtt_command == MQTT_STOP:
            # NOTE: Stopped.on_enter publishes "stopped" again; this extra
            # publish is kept to preserve the original behavior exactly.
            mqtt_client.publish(f"{TRAPPE_TOPIC}/state", "stopped")
            return Stopped()
        return self
class CoverStateMachine():
    """Drives the hatch-cover FSM: polls position and MQTT commands every 50 ms."""

    def __init__(self) -> None:
        self.state = Closed()

    def control_loop(self):
        """Run until the global stop_event is set.

        Each iteration: snap to Closed/Open when the hatch reached an end
        position, poll one pending MQTT message (HDMI relay set, or a cover
        command forwarded to the current state), then let the state decide
        the next transition.
        """
        while not stop_event.is_set():
            if hc.target_position_reached():
                # Only (re-)enter an end state on an actual transition;
                # otherwise on_enter would republish its MQTT status every 50 ms.
                if hc.get_position() <= hc.closed_position + 1:
                    if not isinstance(self.state, Closed):
                        self.state = Closed()
                elif hc.get_position() >= hc.opened_position - 10:
                    if not isinstance(self.state, Open):
                        self.state = Open()
            mqtt_command = ""
            # BUG FIX: `fsmQueue.not_empty` is a threading.Condition object and
            # is therefore always truthy, so the original check always passed
            # and fsmQueue.get() then blocked this loop until a message arrived.
            if not fsmQueue.empty():
                mqtt_msg = fsmQueue.get()
                if mqtt_msg.topic == f"{HDMI_TOPIC}/set":
                    if mqtt_msg.payload == b"ON":
                        hdmi_relay.enable()
                        mqtt_client.publish(f"{HDMI_TOPIC}/state", b"ON")
                    elif mqtt_msg.payload == b"OFF":
                        hdmi_relay.disable()
                        mqtt_client.publish(f"{HDMI_TOPIC}/state", b"OFF")
                elif mqtt_msg.topic == f"{TRAPPE_TOPIC}/set":
                    mqtt_command = mqtt_msg.payload
                    logging.info(f"command: {mqtt_command}")
            self.state = self.state.update(mqtt_command)
            time.sleep(50 * 1e-3)  # 50 ms loop
coverFSM = CoverStateMachine()
| clementnuss/hatch_controller | beamer/beamer_state_machine.py | beamer_state_machine.py | py | 3,618 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "time.time",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "beamer.mqtt.mqtt_client.publish",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "beamer.mqtt.mq... |
10711597654 | from youtubesearchpython import VideosSearch
import os
import glob
# __ _ _
# / \ | | | |
# / \ | | /\ | | /\ _ _
# / /\ \ | |/ / | |/ / | | | |
# / ____ \ | |\ \ | |\ \ | |_| |
#/__/ \__\ |_| \_\ |_| \_\ \___/
#
# Copyright of Akash, 2021
# https://www.github.com/akkupy
# https://t.me/akkupy
def yt_music(song_name, chat_id, msg_id, bot):
    """Search YouTube for ``song_name``, download the top hit's audio with
    youtube-dl and send it to ``chat_id`` (replying to ``msg_id``) via ``bot``.

    Falls back to sending the plain video link when the audio cannot be
    uploaded (e.g. the file is too large for Telegram).
    """
    yt_url = ""  # pre-bind: the original left this unbound when the search
    # itself failed, so the outer handler raised NameError.
    try:
        videosSearch = VideosSearch(song_name, limit=1)
        first_result = videosSearch.result()["result"]
        yt_url = first_result[0]["link"]
        yt_title = first_result[0]["title"]
        yt_pub_time = first_result[0]["publishedTime"]
        yt_duration = first_result[0]["duration"]
        if not os.path.isdir("./music/"):
            os.makedirs("./music/")
        yt_song = (
            f'youtube-dl --force-ipv4 -q -o "./music/{yt_title}.%(ext)s" --extract-audio --audio-format mp3 --audio-quality 128k '
            + yt_url
        )
        os.system(yt_song)
        # youtube-dl leaves either a .webm (no ffmpeg post-processing) or
        # an .mp3 behind; pick whichever exists. An IndexError here
        # (nothing downloaded) is handled by the outer except clause,
        # matching the original control flow.
        matches = glob.glob("./music/*.webm") or glob.glob("./music/*.mp3")
        song_path = matches[0]  # renamed: the original shadowed builtin `dir`
        capy = f"**Song Name ➠** `{yt_title}` \n**Published On ➠** `{yt_pub_time}` \n**Duration ➠** `{yt_duration}` \n**Link ➠** `{yt_url}`"
        if os.path.exists(song_path):
            # The original had two byte-identical `dir`/`dir1` branches;
            # they are collapsed into one.
            try:
                bot.sendChatAction(chat_id=chat_id, action="upload_audio")
                # Context manager closes the handle before the file is
                # removed (the original leaked the open file object).
                with open(song_path, 'rb') as audio_file:
                    bot.send_audio(audio=audio_file, caption=capy,
                                   chat_id=chat_id, reply_to_message_id=msg_id)
            except Exception:
                bot.sendMessage(chat_id=chat_id, text="Audio Size is too large,Check the link below",
                                reply_to_message_id=msg_id)
                bot.sendMessage(chat_id=chat_id, text=yt_url, reply_to_message_id=msg_id)
            finally:
                os.remove(song_path)
        else:
            bot.sendChatAction(chat_id=chat_id, action="typing")
            bot.sendMessage(chat_id=chat_id, text="Song Not Found!", reply_to_message_id=msg_id)
    except Exception:
        bot.sendChatAction(chat_id=chat_id, action="typing")
        bot.sendMessage(chat_id=chat_id, text="Unable to retreive the Song :( Check out the link", reply_to_message_id=msg_id)
        # The original used `yt_url is not ""` — an identity comparison
        # against a literal (SyntaxWarning, unreliable). Truthiness check
        # instead.
        if yt_url:
            bot.sendMessage(chat_id=chat_id, text=yt_url, reply_to_message_id=msg_id)
| akkupy/Sara-Bot | Modules/Yt_music.py | Yt_music.py | py | 3,160 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "youtubesearchpython.VideosSearch",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.makedi... |
21325441820 | import os
from setuptools import setup
# Directory containing this setup.py; used to resolve README and package files.
basedir = os.path.dirname(__file__)
def readme():
    """Return the contents of README.rst (used as the long description)."""
    readme_path = os.path.join(basedir, "README.rst")
    with open(readme_path) as readme_file:
        return readme_file.read()
# Load package metadata (name, version, author, ...) from __about__.py by
# exec'ing it into a dict, so the package itself is never imported at
# build time.
about = {}
with open(os.path.join(basedir, "pysyncgateway", "__about__.py")) as f:
    exec(f.read(), about)
# Package definition; metadata fields come from the `about` dict loaded above.
setup(
    name=about["__name__"],
    version=about["__version__"],
    description=about["__description__"],
    long_description=readme(),
    url="https://github.com/constructpm/pysyncgateway",
    author=about["__author__"],
    author_email=about["__email__"],
    license="Apache License 2.0",
    install_requires=["requests>=2.23.0", "six>=1.13"],
    packages=["pysyncgateway"],
    python_requires=">=3.5, <4",
    zip_safe=False,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python",
    ],
)
| constructpm/pysyncgateway | setup.py | setup.py | py | 1,004 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9... |
28845108173 | from django.views.generic import View
from .forms import CorrectionSendingForm
from apps.article_review.models import Review
from django.contrib import messages
from django.shortcuts import redirect
# Create your views here.
from apps.correction_reception.models import ArticleCorrection
# * Importar los modelos
class CorrectionSendingView(View):
    """Marks a referee's review as sent; when every review of the
    assignment has been sent, completes the assignment, creates the
    ArticleCorrection record and notifies the article's author by e-mail.
    """

    def post(self, request, *args, **kwargs):
        # * get review
        review = Review.objects.get(pk=kwargs['pk'])
        form = CorrectionSendingForm(request.POST)
        if form.is_valid():
            val = form.cleaned_data.get('btn')
            if val == 'Si':
                review.enviado = True
                review.save()
                assignment = review.assignment
                # Distinct loop variable: the original reused ``review``
                # here, silently rebinding the object fetched above.
                other_reviews = Review.objects.filter(assignment=assignment)
                if any(not r.enviado for r in other_reviews):
                    messages.success(
                        request, 'Se ha cargado la corrección. Se notificará al autor cuando se hayan cargado todas las correcciones pendientes por los otros arbitros.')
                    return redirect('core_dashboard:dashboard')
                # All reviews sent: complete the assignment and notify.
                assignment.completed = True
                assignment.save()
                ArticleCorrection.objects.get_or_create(article=assignment.article)
                messages.success(
                    request, 'Se ha enviado la corrección y se ha notificado al autor.')
                from django.contrib.sites.shortcuts import get_current_site
                from django.core.mail import EmailMessage
                from django.urls import reverse
                email = EmailMessage(
                    subject='Artículo arbitrado',
                    body=f'Estimado(a) {review.assignment.article.author.user.get_full_name()},\n\n'
                    f'Le informamos que el artículo {review.assignment.article.title} ha sido arbitrado y tiene correciones pendientes por realizar.\n\n'
                    f'Para acceder al artículo puede verlo en su tablero de actividades, por favor ingrese a la siguiente dirección:\n\n'
                    f'{get_current_site(request).domain + reverse("core_dashboard:dashboard")}\n\n'
                    f'Atentamente,\n\n'
                    f'Comité Editorial de Ciencia y Tecnología',
                    from_email='jonathan90090@gmail.com',
                    to=[review.assignment.article.author.user.email]
                )
                email.send()
                return redirect('core_dashboard:dashboard')
            else:
                return redirect('core_dashboard:dashboard')
        # BUG FIX: the original fell off the end and returned None (an
        # HTTP 500) when the form failed validation; redirect instead.
        return redirect('core_dashboard:dashboard')
| HetairoiElite/cienciatec | apps/correction_sending/views.py | views.py | py | 3,096 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "django.views.generic.View",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "apps.article_review.models.Review.objects.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "apps.article_review.models.Review.objects",
"line_number": 17,
"u... |
42629975620 | from unittest import TestCase
import os
from yapic_io.connector import io_connector
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from yapic_io import TiffConnector, Dataset, PredictionBatch
import pytest
from tifffile import memmap
# Directory of this test module; test fixture paths are resolved relative to it.
base_path = os.path.dirname(__file__)
class TestPredictionBatch(TestCase):
    """Tests for PredictionBatch: tile-position enumeration, batch
    slicing, and writing probability maps back through the connector.

    Fixture images live under ../test_data/tiffconnector_1; the
    6width4height3slices_rgb.tif image has shape z=3, x=6, y=4 with 3
    channels. Sizes below are (z, x, y) tile shapes.
    """

    @pytest.fixture(autouse=True)
    def setup(self, tmpdir):
        # pytest tmpdir is used as the connector's save path for probmaps.
        self.tmpdir = tmpdir.strpath

    def test_computepos_1(self):
        """1x1x1 tiles: one position per voxel, all positions unique."""
        img_path = os.path.abspath(os.path.join(
            base_path,
            '../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
        label_path = os.path.join(base_path, '/path/to/nowhere')
        c = TiffConnector(img_path, label_path)
        d = Dataset(c)
        size = (1, 1, 1)
        batch_size = 1
        p = PredictionBatch(d, batch_size, size)
        self.assertEqual(len(p._all_tile_positions), 6 * 4 * 3)
        tilepos = [(p[0], tuple(p[1])) for p in p._all_tile_positions]
        self.assertEqual(len(tilepos),
                         len(set(tilepos)))

    def test_computepos_2(self):
        """Tile covering the whole image yields a single position at origin."""
        img_path = os.path.abspath(os.path.join(
            base_path,
            '../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
        label_path = os.path.join(base_path, '/path/to/nowhere')
        c = TiffConnector(img_path, label_path)
        d = Dataset(c)
        size = (3, 6, 4)
        batch_size = 1
        p = PredictionBatch(d, batch_size, size)
        val = [(0, (0, 0, 0))]
        for pos, valpos in zip(p._all_tile_positions, val):
            assert_array_equal(pos[1], np.array(valpos[1]))
            self.assertEqual(pos[0], valpos[0])

    def test_computepos_3(self):
        """Two z-slices per tile: positions stacked along z."""
        img_path = os.path.abspath(os.path.join(
            base_path,
            '../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
        label_path = os.path.join(base_path, '/path/to/nowhere')
        c = TiffConnector(img_path, label_path)
        d = Dataset(c)
        size = (2, 6, 4)
        batch_size = 1
        p = PredictionBatch(d, batch_size, size)
        val = [(0, (0, 0, 0)), (0, (1, 0, 0))]
        for pos, valpos in zip(p._all_tile_positions, val):
            assert_array_equal(pos[1], np.array(valpos[1]))
            self.assertEqual(pos[0], valpos[0])

    def test_getitem_1(self):
        """Last batch is allowed to be smaller than batch_size."""
        img_path = os.path.abspath(os.path.join(
            base_path,
            '../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
        label_path = os.path.join(base_path, '/path/to/nowhere')
        c = TiffConnector(img_path, label_path)
        d = Dataset(c)
        size = (1, 6, 4)
        batch_size = 2
        p = PredictionBatch(d, batch_size, size)
        # batch size is 2, so the first 2 tiles go with the first batch
        # (size two), the third tile is in the second batch. the second
        # batch has only size 1 (is smaller than the specified batch size),
        # because it contains the rest.
        self.assertEqual(len(p), 2)
        self.assertEqual(p[0].pixels().shape, (2, 3, 1, 6, 4))
        self.assertEqual(p[1].pixels().shape, (1, 3, 1, 6, 4))

    def test_getitem_2(self):
        """A batch size equal to the tile count yields a single batch."""
        img_path = os.path.abspath(os.path.join(
            base_path,
            '../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
        label_path = os.path.join(base_path, '/path/to/nowhere')
        c = TiffConnector(img_path, label_path)
        d = Dataset(c)
        size = (1, 6, 4)
        batch_size = 3
        p = PredictionBatch(d, batch_size, size)
        # batch size is 3, this means all 3 templates fit in one batch
        self.assertEqual(len(p), 1)
        self.assertEqual(p[0].pixels().shape, (3, 3, 1, 6, 4))

    def test_current_tile_positions(self):
        """Each batch exposes the tile positions it currently covers."""
        img_path = os.path.abspath(os.path.join(
            base_path,
            '../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
        label_path = os.path.join(base_path, '/path/to/nowhere')
        c = TiffConnector(img_path, label_path)
        d = Dataset(c)
        size = (1, 6, 4)
        batch_size = 2
        p = PredictionBatch(d, batch_size, size)
        val = [(0, (0, 0, 0)), (0, (1, 0, 0))]
        for pos, valpos in zip(p[0].current_tile_positions, val):
            assert_array_equal(pos[1], np.array(valpos[1]))
            self.assertEqual(pos[0], valpos[0])
        val = [(0, (2, 0, 0))]
        for pos, valpos in zip(p[1].current_tile_positions, val):
            assert_array_equal(pos[1], np.array(valpos[1]))
            self.assertEqual(pos[0], valpos[0])

    def test_put_probmap_data(self):
        """Probmap data for each batch can be written without error."""
        img_path = os.path.abspath(os.path.join(
            base_path,
            '../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
        label_path = os.path.join(base_path, '/path/to/nowhere')
        c = TiffConnector(img_path, label_path, savepath=self.tmpdir)
        d = Dataset(c)
        size = (1, 6, 4)
        batch_size = 1
        p = PredictionBatch(d, batch_size, size)
        data = np.ones((1, 2, 1, 6, 4))
        p[0].put_probmap_data(data)
        p[1].put_probmap_data(data)
        p[2].put_probmap_data(data)

    def test_put_probmap_data_2(self):
        """Written tile values land at the correct z/x/y offsets on disk."""
        img_path = os.path.abspath(os.path.join(
            base_path,
            '../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
        label_path = os.path.join(base_path, '/path/to/nowhere')
        c = TiffConnector(img_path, label_path, savepath=self.tmpdir)
        d = Dataset(c)
        size = (1, 2, 2)
        batch_size = 1
        p = PredictionBatch(d, batch_size, size)
        pixel_val = 0
        for mb in p:
            # Distinct constant per tile so placement is verifiable below.
            pixel_val += 10
            data = np.ones((1, 2, 1, 2, 2)) * pixel_val
            mb.put_probmap_data(data)
        pixelmap = memmap(os.path.join(self.tmpdir,
                                       '6width4height3slices_rgb_class_1.tif'))
        # zslice 0
        val_0 = np.array([[10., 10., 30., 30., 50., 50.],
                          [10., 10., 30., 30., 50., 50.],
                          [20., 20., 40., 40., 60., 60.],
                          [20., 20., 40., 40., 60., 60.]])
        assert_array_almost_equal(pixelmap[0, :, :, 0], val_0)
        # zslice 1
        val_1 = np.array([[70., 70., 90., 90., 110., 110.],
                          [70., 70., 90., 90., 110., 110.],
                          [80., 80., 100., 100., 120., 120.],
                          [80., 80., 100., 100., 120., 120.]])
        assert_array_almost_equal(pixelmap[1, :, :, 0], val_1)
        # zslice 2
        val_2 = np.array([[130., 130., 150., 150., 170., 170.],
                          [130., 130., 150., 150., 170., 170.],
                          [140., 140., 160., 160., 180., 180.],
                          [140., 140., 160., 160., 180., 180.]])
        assert_array_almost_equal(pixelmap[2, :, :, 0], val_2)

    def test_put_probmap_data_3(self):
        """Multi-image dataset with labels: batched writes succeed."""
        img_path = os.path.abspath(os.path.join(
            base_path, '../test_data/tiffconnector_1/im/*'))
        label_path = os.path.abspath(os.path.join(
            base_path, '../test_data/tiffconnector_1/labels/*'))
        c = TiffConnector(img_path, label_path, savepath=self.tmpdir)
        d = Dataset(c)
        size = (1, 3, 4)
        batch_size = 2
        p = PredictionBatch(d, batch_size, size)
        data = np.ones((2, 3, 1, 3, 4))
        p[0].put_probmap_data(data)
        data = np.ones((2, 3, 1, 3, 4))
        p[1].put_probmap_data(data)
        data = np.ones((2, 3, 1, 3, 4))
        p[2].put_probmap_data(data)

    def test_put_probmap_data_when_no_labels_available(self):
        """Without label files, per-class probmap tiffs are still created."""
        img_path = os.path.abspath(os.path.join(
            base_path, '../test_data/tiffconnector_1/im/*'))
        c = io_connector(img_path, '', savepath=self.tmpdir)
        d = Dataset(c)
        size = (1, 3, 4)
        batch_size = 2
        p = PredictionBatch(d, batch_size, size)
        data = np.ones((2, 2, 1, 3, 4))
        p[0].put_probmap_data(data)
        data = np.ones((2, 2, 1, 3, 4))
        p[1].put_probmap_data(data)
        data = np.ones((2, 2, 1, 3, 4))
        p[2].put_probmap_data(data)
        val = ['40width26height3slices_rgb_class_1.tif',
               '40width26height3slices_rgb_class_2.tif']
        self.assertEqual(sorted(os.listdir(self.tmpdir)), val)

    def test_put_probmap_data_multichannel_label(self):
        """Multichannel label images: 6-class probmap data is accepted."""
        img_path = os.path.abspath(os.path.join(
            base_path, '../test_data/tiffconnector_1/im/*'))
        label_path = os.path.abspath(os.path.join(
            base_path, '../test_data/tiffconnector_1/labels_multichannel/*'))
        c = TiffConnector(img_path, label_path, savepath=self.tmpdir)
        d = Dataset(c)
        original_labels = c.original_label_values_for_all_images()
        res = c.calc_label_values_mapping(original_labels)
        d = Dataset(c)
        size = (1, 3, 4)
        batch_size = 1
        p = PredictionBatch(d, batch_size, size)
        data = np.ones((1, 6, 1, 3, 4))
        p[0].put_probmap_data(data)

    def test_prediction_loop(self):
        """End-to-end: iterate all batches and write mock classifier output."""
        # mock classification function
        def classify(pixels, value):
            return np.ones(pixels.shape) * value
        # define data locations
        pixel_image_dir = os.path.join(
            base_path, '../test_data/tiffconnector_1/im/*.tif')
        label_image_dir = os.path.join(
            base_path, '../test_data/tiffconnector_1/labels/*.tif')
        tile_size = (1, 5, 4)  # size of network output layer in zxy
        padding = (0, 0, 0)  # padding of network input layer in zxy,
        # in respect to output layer
        # Make training_batch mb and prediction interface p with
        # TiffConnector binding.
        c = TiffConnector(pixel_image_dir,
                          label_image_dir, savepath=self.tmpdir)
        p = PredictionBatch(Dataset(c), 2, tile_size, padding_zxy=padding)
        self.assertEqual(len(p), 255)
        self.assertEqual(p.labels, {1, 2, 3})
        # classify the whole bound dataset
        for counter, item in enumerate(p):
            pixels = item.pixels()  # input for classifier
            mock_classifier_result = classify(pixels, counter)
            # pass classifier results for each class to data source
            item.put_probmap_data(mock_classifier_result)

    def test_pixel_dimensions(self):
        """set_pixel_dimension_order reorders the batch axes as requested."""
        img_path = os.path.abspath(os.path.join(
            base_path, '../test_data/tiffconnector_1/im/*'))
        c = io_connector(img_path, '', savepath=self.tmpdir)
        d = Dataset(c)
        size = (1, 5, 4)
        batch_size = 2
        p = PredictionBatch(d, batch_size, size)[0]
        print(p.pixels().shape)
        self.assertEqual((2, 3, 1, 5, 4), p.pixels().shape)
        p.set_pixel_dimension_order('bzxyc')
        self.assertEqual((2, 1, 5, 4, 3), p.pixels().shape)
| yapic/yapic_io | yapic_io/tests/test_prediction_batch.py | test_prediction_batch.py | py | 10,948 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"... |
17609793311 | from django import http
import six
from django.db.models import ProtectedError
from rest_framework import views, exceptions, status
from rest_framework.exceptions import UnsupportedMediaType
from rest_framework.response import Response
from backpack.serializers_bcv1 import BadgeConnectErrorSerializer
from entity.serializers import V2ErrorSerializer, Rfc7591ErrorSerializer
from entity.authentication import CSRFPermissionDenied
def exception_handler(exc, context):
    """Custom DRF exception handler that formats errors per API version.

    The version is read from the URL kwargs ('v1' by default):
    - 'v2' / 'rfc7591': build a structured error payload (description,
      field_errors, validation_errors) via the matching serializer.
    - 'bcv1': Badge Connect style payload (status code + status text).
    - anything else: fall through to DRF's default handler (v1), with a
      special case for ProtectedError.
    Returning None tells DRF to produce a standard 500 response.
    """
    version = context.get('kwargs', {}).get('version', 'v1')
    if version in ['v2', 'rfc7591']:
        description = 'miscellaneous error'
        field_errors = {}
        validation_errors = []
        # Order matters: subclasses (e.g. ParseError, ValidationError) are
        # matched before the generic APIException fallback below.
        if isinstance(exc, exceptions.ParseError):
            description = 'bad request'
            validation_errors = [exc.detail]
            response_code = status.HTTP_400_BAD_REQUEST
        elif isinstance(exc, exceptions.ValidationError):
            description = 'bad request'
            # DRF ValidationError.detail may be a list, dict or string.
            if isinstance(exc.detail, list):
                validation_errors = exc.detail
            elif isinstance(exc.detail, dict):
                field_errors = exc.detail
            elif isinstance(exc.detail, six.string_types):
                validation_errors = [exc.detail]
            response_code = status.HTTP_400_BAD_REQUEST
        elif isinstance(exc, (exceptions.AuthenticationFailed, exceptions.NotAuthenticated)):
            description = 'no valid auth token found'
            response_code = status.HTTP_401_UNAUTHORIZED
        elif isinstance(exc, CSRFPermissionDenied):
            description = 'no valid csrf token found'
            response_code = status.HTTP_401_UNAUTHORIZED
        elif isinstance(exc, (http.Http404, exceptions.PermissionDenied)):
            # 404 for both cases so callers cannot probe for entity existence.
            description = 'entity not found or insufficient privileges'
            response_code = status.HTTP_404_NOT_FOUND
        elif isinstance(exc, ProtectedError):
            # ProtectedError.args is (message, protected_objects).
            description, protected_objects = exc.args
            response_code = status.HTTP_400_BAD_REQUEST
        elif isinstance(exc, UnsupportedMediaType):
            description = exc.detail
            validation_errors = [exc.detail]
            response_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
        elif isinstance(exc, exceptions.APIException):
            # NOTE(review): exc.detail may be a string here, not a dict —
            # the serializer presumably tolerates that; confirm.
            field_errors = exc.detail
            response_code = exc.status_code
        else:
            # Unrecognized exception, return 500 error
            return None
        if version == 'v2':
            serializer = V2ErrorSerializer(
                instance={}, success=False, description=description,
                field_errors=field_errors, validation_errors=validation_errors
            )
        else:
            serializer = Rfc7591ErrorSerializer(
                instance={}, field_errors=field_errors, validation_errors=validation_errors
            )
        return Response(serializer.data, status=response_code)
    elif version == 'bcv1':
        # Badge Connect errors
        error = None
        status_code = status.HTTP_400_BAD_REQUEST
        status_text = 'BAD_REQUEST'
        if isinstance(exc, exceptions.ParseError):
            error = exc.detail
        elif isinstance(exc, exceptions.ValidationError):
            error = exc.detail
            status_text = 'REQUEST_VALIDATION_ERROR'
        elif isinstance(exc, exceptions.PermissionDenied):
            status_code = status.HTTP_401_UNAUTHORIZED
            status_text = 'PERMISSION_DENIED'
        elif isinstance(exc, (exceptions.AuthenticationFailed, exceptions.NotAuthenticated)):
            status_code = status.HTTP_401_UNAUTHORIZED
            status_text = 'UNAUTHENTICATED'
        elif isinstance(exc, exceptions.MethodNotAllowed):
            status_code = status.HTTP_405_METHOD_NOT_ALLOWED
            status_text = 'METHOD_NOT_ALLOWED'
        serializer = BadgeConnectErrorSerializer(instance={},
                                                 error=error,
                                                 status_text=status_text,
                                                 status_code=status_code)
        return Response(serializer.data, status=status_code)
    else:
        # Use the default exception-handling logic for v1
        if isinstance(exc, ProtectedError):
            description, protected_objects = exc.args
            return Response(description, status=status.HTTP_400_BAD_REQUEST)
        return views.exception_handler(exc, context)
| reedu-reengineering-education/badgr-server | apps/entity/views.py | views.py | py | 4,487 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "rest_framework.exceptions.ParseError",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.exceptions",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 26,
"usag... |
75163509946 | from flask import Blueprint, render_template, redirect, url_for, flash
from flask_security import current_user
from flask_babel import gettext
from . import route
from dxc.app.models.job.forms import JobForm, JobReportForm
from dxc.services import api_job, api_report
bp = Blueprint('job', __name__, template_folder='templates', static_folder='static', url_prefix='/job')
@route(bp, '/new', methods=['GET', 'POST'])
def create_job():
    """Show the job creation form; create the job and redirect on valid submit."""
    form = JobForm()
    if not form.validate_on_submit():
        return render_template('job/create.html', form=form)
    # Attach the current user only when someone is actually logged in.
    creator = current_user if current_user.get_id() is not None else None
    job = api_job.create(user=creator, **form.data)
    return redirect(url_for('.detail_job', job_id=job.id))
#----------------------------------------------------------------------
@bp.route('/<int:job_id>', methods=['GET'])
def detail_job(job_id):
    """Show a single job and increment its read counter (404 if missing)."""
    job = api_job.get_or_404(job_id)
    # NOTE(review): read-modify-write counter is not atomic under
    # concurrent requests — confirm whether api_job.update handles that.
    api_job.update(job, read_count = job.read_count + 1)
    return render_template('job/detail.html', job=job)
#----------------------------------------------------------------------
@bp.route('/jobs/<int:page>', methods=['GET'])
@bp.route('/jobs/', methods=['GET'])
def list_job(page=None):
    """List active (status=1) jobs, newest first.

    ``page`` falls back to 1 when absent or out of range.
    """
    # PEP 8: compare against None with `is`, not `==` (original used `== None`).
    if page is None or page <= 0:
        page = 1
    jobs = api_job.get_latest_page_filterby(page, status=1)
    return render_template('job/list.html', jobs=jobs)
#----------------------------------------------------------------------
@bp.route('/report/<int:job_id>', methods=['GET', 'POST'])
def report_job(job_id):
    """Show the report form for a job and file the report on valid submit."""
    report_form = JobReportForm()
    if report_form.validate_on_submit():
        api_report.create(job_id=job_id, **report_form.data)
        flash(gettext(u'Thanks for your report. We will check it soon.'))
        return redirect(url_for('.list_job'))
    return render_template('job/report.html', job_id=job_id, report_form=report_form)
#----------------------------------------------------------------------
@bp.route('/reports/<int:job_id>', methods=['GET'])
def list_report(job_id):
    """Show all reports filed against one job."""
    reported_job = api_job.get(job_id)
    return render_template('job/report_list.html',
                           job=reported_job,
                           reports=reported_job.reports)
@route(bp, '/profile/published_jobs/','/profile/published_jobs/<int:status>/','/profile/published_jobs/<int:status>/<int:page>', methods=['GET'])
def list_publisedjobs(status=1, page=1):
    """List jobs published by the current user, filtered by status (paged).

    NOTE(review): function name has a typo ('publised'); renaming would
    change the Flask endpoint and break any url_for references — left as is.
    """
    jobs = api_job.get_latest_page_filterby(page=page, per_page=2, status=status, user_id=current_user.id)
    return render_template('job/profile_publishedjobs.html', jobs=jobs, status=status)
| cash2one/Luyasi-Flask | dxc/app/frontend/job.py | job.py | py | 2,646 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Blueprint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "dxc.app.models.job.forms.JobForm",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask_security.current_user.get_id",
"line_number": 16,
"usage_type": "call"
},
{
... |
69976456507 | import logging
from typing import Callable, List
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .coordinator import UpdateCoordinator
from homeassistant.helpers.entity import DeviceInfo, async_generate_entity_id
from .const import (
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 1
async def async_setup_entry(
    hass: HomeAssistantType,
    entry: ConfigEntry,
    async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
    """Set up Dolphin switch entities based on a config entry.

    For every device known to the coordinator this registers one sabbath
    switch, one fixed-temperature switch and six shower-count switches.
    """
    coordinator: UpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
    switches = []
    # Iterate the mapping directly; the original `.keys()` call was redundant.
    for device in coordinator.data:
        switches.append(ShabbatSwitch(hass=hass, coordinator=coordinator, device=device))
        switches.append(FixedTemperature(hass=hass, coordinator=coordinator, device=device))
        # One switch per preset shower count (1..6).
        switches.extend(
            DropSwitch(hass=hass, coordinator=coordinator, index=index, device=device)
            for index in range(1, 7)
        )
    async_add_entities(switches)
class DropSwitch(CoordinatorEntity, SwitchEntity):
    """Switch that starts a manual heating session sized for ``index`` showers."""

    def __init__(self, hass, coordinator, index, device):
        CoordinatorEntity.__init__(self, coordinator)
        self._hass = hass
        self._id = index                 # shower count (1..6) this switch represents
        self._coordinator = coordinator
        self._device = device
        self._is_on = False              # True while a session started by this switch runs
        # The original passed `None or f"..."` — the f-string always wins,
        # so the dead `None or` was dropped.
        self.entity_id = async_generate_entity_id(
            DOMAIN + ".{}", f"{device}_drop{index}", hass=hass)

    @property
    def unique_id(self):
        return self.entity_id

    @property
    def name(self):
        """Label showing the shower count and its configured temperature."""
        shower_temps = self._coordinator.data[self._device].showerTemperature
        # PEP 8: use `is not None`, not `!= None` (fixed from original).
        if shower_temps is not None and len(shower_temps) > self._id - 1:
            showerTemperature = shower_temps[self._id - 1]['temp']
        else:
            showerTemperature = None
        if self._id == 1:
            return f"{self._id} Shower - {showerTemperature}°C"
        return f"{self._id} Showers - {showerTemperature}°C"

    @property
    def icon(self):
        return "mdi:shower"

    @property
    def available(self):
        """Return availability.

        Unavailable during sabbath or fixed-temperature mode, while a
        session not started by this switch is heating, or when no preset
        exists for this shower count.
        """
        data = self._coordinator.data[self._device]
        if data.shabbat:
            return False
        if data.power and not self._is_on:
            return False
        if data.fixedTemperature:
            return False
        if data.showerTemperature is not None:
            if len(data.showerTemperature) > self._id - 1:
                return True
        return False

    @property
    def is_on(self):
        # Reset the local flag once the heater reports powered-off.
        if not self._coordinator.data[self._device].power:
            self._is_on = False
        return self._is_on

    @property
    def device_info(self) -> DeviceInfo:
        """Return the device info."""
        return DeviceInfo(
            identifiers={
                (DOMAIN, self._device)
            },
        )

    async def async_turn_on(self):
        current_temp = self._coordinator.data[self._device].temperature
        drop_temperature = self._coordinator.data[self._device].showerTemperature[self._id - 1]['temp']
        # Only start heating when the target is above the current water
        # temperature and no session is already running.
        if current_temp <= drop_temperature and not self._coordinator.data[self._device].power:
            await self._coordinator.dolphin.turnOnManually(self._coordinator.dolphin._user, drop_temperature,
                                                           self._device)
            self._is_on = True
            await self.coordinator.async_request_refresh()
            self.async_write_ha_state()

    async def async_turn_off(self):
        await self._coordinator.dolphin.turnOffManually(self._coordinator.dolphin._user, self._device)
        self._is_on = False
        await self.coordinator.async_request_refresh()
        self.async_write_ha_state()
class ShabbatSwitch(CoordinatorEntity, SwitchEntity):
    """Switch toggling the water heater's sabbath mode."""

    def __init__(self, hass, coordinator, device):
        CoordinatorEntity.__init__(self, coordinator)
        self._hass = hass
        self._coordinator = coordinator
        self._device = device
        # Dropped the dead `None or f"..."` expression from the original.
        self.entity_id = async_generate_entity_id(
            DOMAIN + ".{}", f"{device}_sabbath_mode", hass=hass)

    @property
    def unique_id(self):
        return self.entity_id

    @property
    def name(self):
        return "Sabbath mode"

    @property
    def icon(self):
        return "mdi:star-david"

    @property
    def device_info(self) -> DeviceInfo:
        """Return the device info."""
        return DeviceInfo(
            identifiers={
                (DOMAIN, self._device)
            },
            name=self.name,
        )

    @property
    def is_on(self):
        return self._coordinator.data[self._device].shabbat

    async def async_turn_on(self):
        await self._coordinator.dolphin.enableShabbat(self._coordinator.dolphin._user, self._device)
        # Update the local cache immediately rather than waiting for the
        # next coordinator refresh.
        self._coordinator.data[self._device].shabbat = True
        self.async_write_ha_state()

    async def async_turn_off(self):
        await self._coordinator.dolphin.disableShabbat(self._coordinator.dolphin._user, self._device)
        self._coordinator.data[self._device].shabbat = False
        self.async_write_ha_state()
class FixedTemperature(CoordinatorEntity, SwitchEntity):
    """Switch toggling the heater's fixed-temperature mode."""

    def __init__(self, hass, coordinator, device):
        CoordinatorEntity.__init__(self, coordinator)
        self._hass = hass
        self._coordinator = coordinator
        self._device = device
        # Dropped the dead `None or f"..."` expression from the original.
        self.entity_id = async_generate_entity_id(
            DOMAIN + ".{}", f"{device}_fixed_temperature", hass=hass)

    @property
    def unique_id(self):
        return self.entity_id

    @property
    def name(self):
        return "Fixed temperature"

    @property
    def icon(self):
        return "mdi:home-thermometer-outline"

    @property
    def device_info(self) -> DeviceInfo:
        """Return the device info."""
        return DeviceInfo(
            identifiers={
                (DOMAIN, self._device)
            },
            name=self.name,
        )

    @property
    def is_on(self):
        return self._coordinator.data[self._device].fixedTemperature

    async def async_turn_on(self):
        # Hold the currently configured target temperature.
        await self._coordinator.dolphin.turnOnFixedTemperature(self._coordinator.dolphin._user, self._device,
                                                               self._coordinator.data[self._device].targetTemperature)
        self._coordinator.data[self._device].fixedTemperature = True
        await self.coordinator.async_request_refresh()
        self.async_write_ha_state()

    async def async_turn_off(self):
        await self._coordinator.dolphin.turnOffFixedTemperature(self._coordinator.dolphin._user, self._device)
        self._coordinator.data[self._device].fixedTemperature = False
        await self.coordinator.async_request_refresh()
        self.async_write_ha_state()
| 0xAlon/dolphin | custom_components/dolphin/switch.py | switch.py | py | 7,201 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "homeassistant.helpers.typing.HomeAssistantType",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "homeassistant.config_entries.ConfigEntry",
"line_number": 21,
"usage_typ... |
16478653737 | import mxnet as mx
import time
import gluoncv as gcv
from gluoncv.utils import try_import_cv2
cv2 = try_import_cv2()
# Webcam object-detection demo: grab frames from camera 0, run an SSD
# detector, and draw the boxes until 'q' is pressed.
net = gcv.model_zoo.get_model(
    # good, fast
    'ssd_512_mobilenet1.0_coco',
    # 'ssd_512_mobilenet1.0_voc',
    # 'ssd_512_mobilenet1.0_voc_int8',
    #
    # 'yolo3_mobilenet1.0_coco',
    # 'yolo3_mobilenet1.0_voc',
    # too slow...
    # 'faster_rcnn_resnet50_v1b_voc', # too slow...
    # 'faster_rcnn_fpn_syncbn_resnest50_coco', # too slow...
    pretrained=True)
# Hybridize for faster (symbolic) inference.
net.hybridize()
cap = cv2.VideoCapture(0)
# Give the camera a moment to initialize before reading frames.
time.sleep(1)
while(True):
    ret, frame = cap.read()
    k = cv2.waitKey(1)
    if k == ord('q'):
        break
    # OpenCV delivers BGR; the model expects RGB.
    frame = mx.nd.array(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)).astype('uint8')
    # Preprocessing must match the chosen model family (SSD here).
    rgb_nd, frame = gcv.data.transforms.presets.ssd.transform_test(
        frame, short=512, max_size=700
    )
    # rgb_nd, frame = gcv.data.transforms.presets.yolo.transform_test(
    #     frame, short=512, max_size=700
    # )
    # rgb_nd, frame = gcv.data.transforms.presets.rcnn.transform_test(
    #     frame, short=512, max_size=700
    # )
    class_IDs, scores, bounding_boxes = net(rgb_nd)
    # Overlay detections on the (transformed) frame and display it.
    img = gcv.utils.viz.cv_plot_bbox(frame,
                                     bounding_boxes[0],
                                     scores[0],
                                     class_IDs[0],
                                     class_names=net.classes)
    gcv.utils.viz.cv_plot_image(img)
    cv2.waitKey(1)
cap.release()
cv2.destroyAllWindows() | ODN418/progmates_works | glouncv/detect.py | detect.py | py | 1,506 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "gluoncv.utils.try_import_cv2",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "gluoncv.model_zoo.get_model",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "gluoncv.model_zoo",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_... |
10721085289 | '''
Collection of helper function for the EDA notebooks
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pycountry
def getCorrPairs(corr):
    '''Return the variable pairs sorted by absolute correlation (descending).

    The upper triangle (including the diagonal) is masked so every pair
    appears exactly once; masked entries sort last as NaN. The input
    frame is copied first, so the caller's correlation matrix is left
    untouched.
    '''
    # BUG FIX: the original wrote NaNs into the caller's DataFrame in place.
    corr = corr.copy()
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    corr[mask] = np.nan
    pairs = corr.abs().unstack()
    pairs = pairs.sort_values(ascending = False)
    return pairs
''' Imputes a predictor timeSeries'''
def imputeTS(timeSeries):
    """Dispatch to the imputation strategy matching the series' name."""
    # Capacity series are step functions, so they are padded rather than
    # interpolated.
    if 'capacity' in timeSeries.name:
        return _imputeCapacity(timeSeries)
    return _imputeGeneric(timeSeries)
''' Imputes a generic time-series by interpolation or year-ahead, year-prior values '''
def _imputeGeneric(timeSeries,
                   hoursInWeek = 24 * 1,
                   hoursInYear = 24 * 364):
    # NOTE(review): hoursInWeek = 24 * 1 is 24 hours (one day), despite
    # the parameter name and the comment below saying one week — confirm
    # whether the intended value is 24 * 7.
    # hoursInYear = 24 * 364 is 52 whole weeks, presumably to keep the
    # weekly pattern aligned when shifting — TODO confirm.
    # Interpolate at most 1 week forwards/backwards in time
    timeSeries = timeSeries.interpolate(
        method = 'time',
        limit = hoursInWeek,
        limit_area = 'inside',
        limit_direction = 'both')
    # Roll-back one year and impute remaining blocks (fills in gaps mostly at the beginning of the time-series)
    timeSeries = timeSeries.combine_first(timeSeries.shift(-hoursInYear))
    # Roll-forward one year and impute (fills in gaps mostly at the end of the time-series)
    timeSeries = timeSeries.combine_first(timeSeries.shift(hoursInYear))
    # Re-interpolate any nans remaining
    timeSeries = timeSeries.interpolate(
        method = 'time',
        limit_area = 'inside',
        limit_direction = 'both')
    return timeSeries
''' Imputes capacity timeseries by padding'''
def _imputeCapacity(timeSeries):
    """Forward-fill gaps: capacity is a step function that holds its last value.

    Uses ffill() because fillna(method='pad') is deprecated since pandas 2.1.
    """
    return timeSeries.ffill()
''' Plots original / imputed time-series'''
def plotImputation(originalTS, imputedTS, withMean = False, hoursInMonth = 24 * 7 * 4):
    """Plot the original series with only the imputed values overlaid.

    Parameters:
        originalTS   -- series before imputation (NaNs where data is missing)
        imputedTS    -- series after imputation
        withMean     -- also draw a rolling monthly mean of the imputed series
        hoursInMonth -- rolling-window length (hours) for the monthly mean
    """
    # BUG FIX: operate on a copy — the original blanked out the caller's
    # imputed series in place.
    imputedTS = imputedTS.copy()
    # Keep only the values that were actually imputed, so the overlay
    # shows where gaps were filled.
    imputedTS[~originalTS.isnull()] = np.nan
    plt.figure(figsize = (15, 3))
    plt.plot(originalTS, linewidth = 0.5)
    plt.plot(imputedTS, linewidth = 0.5)
    if withMean:
        monthMean = imputedTS.rolling(hoursInMonth).mean()
        plt.plot(monthMean, color = 'k')
        plt.legend(['Original', 'Imputed', 'Monthly avg. (rolling)'], ncol = 3)
    else:
        plt.legend(['Original', 'Imputed'], ncol = 2)
    plt.title(originalTS.name + ' Imputed')
    return
''' Fixes information for the areas.csv dataframe '''
def makeAreaMetadata(df):
df = df.where(pd.notnull(df), None)
countries, a2Codes, mapCodes, pAreas, bZones, cAreas, mAreas = [], [], [], [], [], [], []
for _, row in df.iterrows():
a2code = row['area ID'].split('_')[0]
if a2code == 'CS': country = 'SerbiaMontenegro' # Does not exist in pycountry
else: country = pycountry.countries.get(alpha_2 = a2code).name
mapcode = a2code
primary_area = country + '_default'
bidZone = country + '_default'
control_area = country + '_default'
market_area = country + '_default'
if row['country'] is None: countries.append(country)
else: countries.append(row['country'])
if row['ISO 3166-1 alpha-2'] is None: a2Codes.append(a2code)
else: a2Codes.append(row['ISO 3166-1 alpha-2'])
if row['MapCode ENTSO-E'] is None: mapCodes.append(mapcode)
else: mapCodes.append(row['MapCode ENTSO-E'])
if row['primary AreaName ENTSO-E'] is None: pAreas.append(primary_area)
else: pAreas.append(row['primary AreaName ENTSO-E'])
if row['bidding zone'] is None: bZones.append(bidZone)
else: bZones.append(row['bidding zone'])
if row['control area'] is None: cAreas.append(control_area)
else: cAreas.append(row['control area'])
if row['market balance area'] is None: mAreas.append(market_area)
else: mAreas.append(row['market balance area'])
df['country'] = countries
df['ISO 3166-1 alpha-2'] = a2Codes
df['MapCode ENTSO-E'] = mapCodes
df['primary AreaName ENTSO-E'] = pAreas
df['bidding zone'] = bZones
df['control area'] = cAreas
df['market balance area'] = mAreas
return df
''' Returns areaIDs per concept type'''
def _getAreas(primaryConcept, df):
return df[df['primary concept'] == primaryConcept]['area ID'].unique().tolist()
''' Checks if a column name appears in a list of area codes and returns area code'''
def areaID(fieldName, conceptType, df):
for area in _getAreas(conceptType, df):
if isinstance(area, str):
if area in fieldName:
return area
return None | Miltos-90/EU_Electricity_Price_Forecasting | src/eda_utils.py | eda_utils.py | py | 5,183 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros_like",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.triu_indices_from",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
... |
7901768963 | from collections import Counter
import logging
def find(list, value):
try:
return list.index(value)
except ValueError:
return None
class DefaultSorter(object):
def __init__(self, langs='all', weight=1):
logging.info("Available languages: {}".format(langs))
self.langs = langs.split(',')
def bestfn(self, subentry):
idx = find(self.langs, subentry['SubLanguageID'])
value = idx if idx is not None else len(self.langs)
return value
def _similarity(a, b):
make_pairs = lambda l: (l[i:i+1] for i in xrange(len(l)-1))
tc = lambda counter: sum(count for count in counter.values())
sa = Counter(make_pairs(a))
sb = Counter(make_pairs(b))
return 2.0 * tc(sa & sb) / (tc(sa) + tc(sb))
class SimilaritySorter(DefaultSorter):
def __init__(self, langs='all'):
super(SimilaritySorter, self).__init__(langs)
self.movie = ''
def bestfn(self, subentry):
value = super(SimilaritySorter, self).bestfn(subentry)
sn = subentry['SubFileName']
similarity = _similarity(sn[:sn.rindex('.')], self.movie)
logging.info("{}: Similarity is {}, lang {}".format(
subentry['SubFileName'], similarity, subentry['SubLanguageID']))
return 1.1 * value + 1 - similarity
| luisguilherme/framboise | framboise/sorting.py | sorting.py | py | 1,318 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "logging.info",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.info",
... |
17493539514 | #This file "drives" the car by calling all the required files
#outputs plots of the dynamic/vibration models
import Beeman, car_2014, chassis_2014, driver_sally, ff_2014_5, ff_2014_7, get_DM, get_FF, get_Jx, get_Jy, get_LR, get_MM, get_SD, get_SM, get_cg, motor_2014, speed_bump, suspension_front_2014, suspension_rear_2014, trajectory, wheel_front_2014, wheel_rear_2014
import numpy as np, math
import matplotlib.pyplot as plt
#creating arguments into Beeman
ff = ff_2014_7.ff_data
ffmatrix, ffobject = get_FF.get_FF(ff['t_in'],ff)
X0 = get_SD.get_SD(ff['model'],ff['car'])
DOF = X0.shape[0]
V0 = np.zeros((DOF,1))
A0 = np.zeros((DOF,3))
M = get_MM.get_MM(ff['model'],ff['car'])
C = get_DM.get_DM(ff['model'],ff['car'])
K = get_SM.get_SM(ff['model'],ff['car'])
#create data
T7, X7, V7, A7 = Beeman.Beeman(X0,V0,A0,M,C,K,get_FF.get_FF,ffobject)
#Heave
plt.plot(T7,X7[:,0])
plt.show()
#Pitch
| brandontran14/CarSimulation | driving.py | driving.py | py | 897 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ff_2014_7.ff_data",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "get_FF.get_FF",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "get_SD.get_SD",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"... |
6166864226 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 6 13:11:44 2017
@author: Francesco
"""
import serial
import numpy as np
import time
PORT = "COM10"
BAUD = 115200
port = serial.Serial(PORT,BAUD,timeout=1)
START = 1
#BUNDLE SHAPE: |!|!|!|CH0_msb|CH0_lsb|ch1_msb|ch1_lsb|......|ch7_lsb|!|!|!|
NUM_CHANNELS = 8
END_BUNDLE_BYTE = 3
BYTE_PER_CHANNEL = 2 #two bytes to represent int
BUNDLE_LENGTH = NUM_CHANNELS*BYTE_PER_CHANNEL
data = np.zeros(NUM_CHANNELS)
graph_data = open('test_100Hz.txt','w')
print("Gathering recordings for dataset")
movement_time = 2 #2.5 seconds for each movement
sample_time = 0.01 # 100Hz sample frequency
num_samples = int(movement_time/sample_time)
#num_samples = 300
counter = 0
while(START):
try:
#print("Flushing")
#port.flushInput()
movement = input("\n\
0: wrist up\n\
1: wrist down\n\
2: wrist rotation out\n\
3: wrist rotation inside\n\
4: hand open\n\
5: hand closed\n")
if(movement == 's'):
graph_data.close()
print(port.inWaiting())
port.close()
break
#start communication, for some reason with utf-8 it works
#start_time = time.time()
elapsed = 0
counter = 0
starttime = time.time()
while(elapsed < 2):
port.write('s'.encode('utf-8'))
a = port.read(END_BUNDLE_BYTE)
#print(a)
if(a.decode("raw_unicode_escape") == '!!!'):
temp = port.read(BUNDLE_LENGTH)
#unpack values and put them in "data"
for channel in range(0,NUM_CHANNELS):
value = (temp[channel*BYTE_PER_CHANNEL]<<8)|(temp[channel*BYTE_PER_CHANNEL + 1 ])
graph_data.write(str(value))
graph_data.write(',')
#print(value)
#start a new line in the file
graph_data.write(movement+'\n')
#wait the sample time to get a new value
#time.sleep(sample_time)
elapsed = time.time() - starttime
#è allineato con l'if
#perchè deve aumentare il counter solo quando scrive
#counter += 1
#port.write('o'.encode('utf-8'))
#print(port.inWaiting())
#write the separator between one movement and the other
graph_data.write('-\n')
#any character except 's' is ok to stop the communication
#port.write('o'.encode('utf-8'))
print("Movement Acquired - Elapsed Time: %d"%movement_time)
except KeyboardInterrupt:
print("Closing")
port.close()
graph_data.close()
break
| FrancesoM/UnlimitedHand-Learning | python_side/read_bytes_over_serial.py | read_bytes_over_serial.py | py | 3,070 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "serial.Serial",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 82... |
3971463654 | from flask import Flask, request, jsonify, render_template, send_file
import os
import csv
import json
import base64
import pickle
import logging
from utils import (set_license_key_in_config, get_license_key_from_config,
get_dynamodb_table, license_key_is_valid)
# Configure the logging level
logging.basicConfig(level=logging.INFO)
# Get the logger for the current module
logger = logging.getLogger(__name__)
# Create a handler that writes log messages to a file
handler = logging.FileHandler('error.log')
# Create a formatter that specifies the format of the log messages
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
PORT = 8323
# For Production
# app = Flask(__name__,
# static_folder='frontend/static/', static_url_path='/static/',
# template_folder='frontend/templates')
# For Development
app = Flask(__name__,
static_folder='../frontend/static/', static_url_path='/static/',
template_folder='../frontend/templates')
base_path = os.path.join(app.root_path, 'frontend', 'media')
def remove_uploaded_background_images():
import glob
pattern = os.path.join(base_path, 'Background-Image*.png')
# Using glob to find all matching files
for file_path in glob.glob(pattern):
if os.path.exists(file_path):
os.remove(file_path)
print(f"Removed: {file_path}")
@app.route('/admin/')
def view_admin_page():
try:
license_key = get_license_key_from_config()
csv_file_path = os.path.join(app.root_path, 'participants.csv')
if os.path.exists(csv_file_path):
os.remove(csv_file_path)
# Remove uploaded images
remove_uploaded_background_images()
return render_template('adminPage.html', license_key=license_key)
except Exception as e:
logger.error(e)
print(e)
# raise(e)
return "An Error Occurred!"
@app.route('/home/')
def view_home_screen():
try:
pickle.dump(dict(request.args), open('admin_conf.pkl', 'wb'))
return render_template('main.html')
except Exception as e:
print(e)
logger.error(e)
return 'An error occurred'
@app.route('/saveCSVData/', methods=['POST'])
def save_csv():
try:
if request.method == 'POST':
csv_data = request.get_data()
# Decode the bytes to string
csv_data_str = csv_data.decode('utf-8').replace('\\n', '\n').replace('\\r', '').strip('"').replace('\\', '').replace('\\\\','').replace('X', '')
# Remove extra quotes
csv_data_str = csv_data_str.replace('\"', '')
# Split the string into a list of lines
csv_data_lines = csv_data_str.splitlines()
numbers = [line.split(',')[0] for line in csv_data_lines[1:]]
names = [f"{line.split(',')[1]} {line.split(',')[2]}" for line in csv_data_lines[1:]]
if len(set(numbers)) != len(numbers):
return jsonify({"error": "All numbers provided in the table must be unique."}), 400
# Numbers input handling
for idx, number in enumerate(numbers):
if not number:
return jsonify({"error": f"The number at [ROW # {idx + 1}] cannot be empty."}), 400
if len(number) > 4:
return jsonify({"error": f"The length of {number} at [ROW # {idx + 1}] cannot be more than 4 letters."}), 400
if not number.isdigit():
return jsonify({"error": f"{number} at [ROW # {idx + 1}] is not a valid digit/number."}), 400
# NAMES must not be empty, input handling
for idx, name in enumerate(names):
if name == ' ':
return jsonify({"error": f"The name at [ROW # {idx + 1}] cannot be empty."}), 400
new_csv_data_lines = []
for line in csv_data_lines[1:]:
line = line.rstrip(',')
if line:
cell = line.split(',')[0]
if not cell.isdigit():
continue
new_csv_data_lines.append(line)
csv_data_lines = new_csv_data_lines
if len(csv_data_lines) < 50:
return jsonify({"error": "Participants cannot be less than 50"}), 400
if len(csv_data_lines) > 300:
return jsonify({"error": "Participants cannot be more than 300"}), 400
# Open a file in write mode
with open('participants.csv', newline='', mode='w') as file:
writer = csv.writer(file)
# Write each line to the CSV file
for line in csv_data_lines:
writer.writerow(line.split(','))
return jsonify({"success": f"File has been saved at: participants.csv"})
else:
return jsonify({"error": "POST request required."}), 400
except Exception as e:
logger.error(e)
return jsonify({'error': 'An error occurred'}), 500
@app.route('/getCSVData/')
def view_saved_csv():
try:
file_path = 'participants.csv'
if os.path.exists(file_path):
data_list = []
with open(file_path, newline='') as f:
csv_data = csv.reader(f)
# headers = next(csv_data, None) # returns the headers or `None` if the input is empty
headers = ['assign-number', 'first-name', 'last-name', 'date-added']
if headers:
for row in csv_data:
data_list.append({headers[i]: value for i, value in enumerate(row)})
return jsonify({"data": data_list})
else:
return jsonify({"error": "File not found"}), 404
except Exception as e:
logger.error(e)
return jsonify({'error': 'An error occurred'}), 500
@app.route('/getAdminConf/')
def get_admin_conf():
try:
obj = pickle.load(open('admin_conf.pkl', 'rb'))
return jsonify(obj)
except Exception as e:
logger.error(e)
return jsonify({'error': 'An error occurred'}), 500
@app.route('/saveImage/', methods=['POST'])
def save_image():
try:
if request.method == 'POST':
data = json.loads(request.get_data(as_text=True))
image_name = data.get('image_name', None)
img_data = data['image'].split(',')[1] # Split off the header, keep only the actual image content
img_data = base64.b64decode(img_data)
file_path = os.path.join(base_path, 'frontend', f'media', f'{image_name}.png') # Or where you want to save it
if image_name == 'Logo':
file_path = os.path.join(fr'media\{image_name} Uploaded.png') # Or where you want to save it
elif image_name == None:
# Background Image Uploaded
file_path = get_image_path_name()
with open(file_path, 'wb') as f:
f.write(img_data)
return jsonify({"message": "Image saved successfully.", 'file_path': file_path})
else:
return jsonify({"error": "Wrong method type."})
except Exception as e:
logger.error(e)
return jsonify({'error': 'An error occurred'}), 500
@app.route('/validateLicenseKey/<string:licenseKey>/', methods=['POST'])
def view_validate_license_key(licenseKey):
try:
table = get_dynamodb_table()
licenseCreatedDate = license_key_is_valid(licenseKey, table)
# If license exists, write/update it to config file
if licenseCreatedDate:
set_license_key_in_config(licenseKey, licenseCreatedDate)
return jsonify({'success': 'License Key successfully validated'}), 200
return jsonify({'error': 'License Key couldn\'t be validated'}), 404
except Exception as e:
from traceback import print_exc
logger.error(e)
print_exc()
return jsonify({'error': str(e)}), 500
@app.route('/licenseKeyIsValid/')
def view_license_key_validated():
license_key = get_license_key_from_config()
if license_key:
return jsonify({'success': 'License Key is validated!'}), 200
return jsonify({'error': 'Please enter a valid License Key in order to use this software.'}), 400
@app.route('/media/<filename>')
def get_media_file(filename):
return send_file(os.path.join('../frontend', 'media', filename))
def get_image_path_name():
# Background Image Uploaded
file_name = 'Background-Image'
extension = '.png'
counter = 0
# Loop to find the next available file name
while True:
if counter == 0:
file_path = os.path.join(base_path, f'{file_name}{extension}')
else:
file_path = os.path.join(base_path, f'{file_name} {counter}{extension}')
# Check if file already exists
if not os.path.exists(file_path):
break # Exit loop if file does not exist
counter += 1
return file_path
if __name__ == "__main__":
app.run(port=PORT)
| TahirAlauddin/KonnectedReverseRaffle | mac_server/konnected-server.py | konnected-server.py | py | 9,148 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.FileH... |
42972033220 | from subprocess import Popen, PIPE
import sys
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from config import config
from theme import theme
class PluginGui:
_route = None
_button = None
_target = None
def __init__(self, parent, route):
self._route = route
self._button = tk.Frame(parent)
g = {'row': 1, 'column': 1, 'sticky': tk.NSEW}
self._button_open = ttk.Button(self._button)
self._button_open.grid(g)
self._button_open.configure(command=self._load_route)
self._button_open.bind('<Double-Button-1>', self._clear_route)
self._button_theme = tk.Label(self._button)
self._button_theme.grid(g)
self._button_theme.bind('<Double-Button-1>', self._clear_route)
theme.register_alternate((self._button_open, self._button_theme), g)
theme.button_bind(self._button_theme, self._load_route)
self._target = tk.Label(parent, text='', anchor=tk.W)
self._target.bind('<Button-1>', self._to_clipboard)
self.update_ui()
def get_ui(self):
return (self._button, self._target)
def update_ui(self):
waypoints = len(self._route)
if waypoints == 0:
self._button_open['text'] = ' Open '
self._target['text'] = 'no waypoints'
else:
self._button_open['text'] = f'{waypoints}'
self._target['text'] = self._route.next()
self._button_theme['text'] = self._button_open['text']
self._to_clipboard()
def _to_clipboard(self, event=None):
if len(self._route) == 0:
return
target = self._route.next()
if sys.platform == "linux" or sys.platform == "linux2":
command = Popen(["xclip", "-selection", "c"], stdin=PIPE)
command.communicate(input=target.encode(), timeout=1)
else:
self._parent.clipboard_clear()
self._parent.clipboard_append(target)
self._parent.update()
def _clear_route(self, event=None):
self._route.clear()
self.update_ui()
def _load_route(self, event=None):
if len(self._route) > 0:
return
ftypes = [
('All supported files', '*.csv *.txt'),
('CSV files', '*.csv'),
('Text files', '*.txt'),
]
logdir = config.get_str('journaldir',
default=config.default_journal_dir)
filename = filedialog.askopenfilename(initialdir=logdir,
filetypes=ftypes)
if self._route.load(filename):
self.update_ui()
| pwerken/EDMC_Waypoints | plugin_gui.py | plugin_gui.py | py | 2,681 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "tkinter.Frame",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tkinter.NSEW",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "tkinter.ttk.Button",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
... |
21934463311 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Text Emotion Detection."""
from dataclasses import dataclass
from transformers import AutoTokenizer, AutoModelWithLMHead
from transformers import pipeline
__all__ = (
"Emotion",
"EmotionDetectorT5",
"EmotionDetectorRoberta",
)
@dataclass
class Emotion:
"""Emotion."""
tag: str
emoji: str
def get_emotion_emoji(tag: str) -> str:
# Define the emojis corresponding to each sentiment
emoji_mapping = {
"disappointment": "😞",
"sadness": "😢",
"annoyance": "😠",
"neutral": "😐",
"disapproval": "👎",
"realization": "😮",
"nervousness": "😬",
"approval": "👍",
"joy": "😄",
"anger": "😡",
"embarrassment": "😳",
"caring": "🤗",
"remorse": "😔",
"disgust": "🤢",
"grief": "😥",
"confusion": "😕",
"relief": "😌",
"desire": "😍",
"admiration": "😌",
"optimism": "😊",
"fear": "😨",
"love": "❤️",
"excitement": "🎉",
"curiosity": "🤔",
"amusement": "😄",
"surprise": "😲",
"gratitude": "🙏",
"pride": "🦁"
}
return emoji_mapping.get(tag, "")
class EmotionDetectorT5:
"""Emotion Detector from T5 model."""
# https://huggingface.co/mrm8488/t5-base-finetuned-emotion/
# emotions = ["joy", "sad", "dis", "sup", "fea", "ang"]
# emotions = ["sadness", "joy", "love", "anger", "fear", "surprise"]
def __init__(self) -> None:
"""Init Sentiment Analysis."""
self.model_name = "mrm8488/t5-base-finetuned-emotion"
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
self.model = AutoModelWithLMHead.from_pretrained(self.model_name)
def get(self, text: str) -> Emotion:
"""Check emotion from text string."""
input_ids = self.tokenizer.encode(text + '</s>', return_tensors='pt')
output = self.model.generate(input_ids=input_ids,
max_length=2)
dec = [self.tokenizer.decode(ids) for ids in output]
emo = dec[0].replace("<pad>", "").strip()
return Emotion(tag=emo, emoji=get_emotion_emoji(emo))
class EmotionDetectorRoberta:
"""Emotion Detector from Roberta."""
# https://huggingface.co/SamLowe/roberta-base-go_emotions
# emotions = [
# "admiration",
# "amusement",
# "anger",
# "annoyance",
# "approval",
# "caring",
# "confusion",
# "curiosity",
# "desire",
# "disappointment",
# "disapproval",
# "disgust",
# "embarrassment",
# "excitement",
# "fear",
# "gratitude",
# "grief",
# "joy",
# "love",
# "nervousness",
# "optimism",
# "pride",
# "realization",
# "relief",
# "remorse",
# "sadness",
# "surprise",
# "neutral",
# ]
def __init__(self) -> None:
"""Init."""
self.model_name = "SamLowe/roberta-base-go_emotions"
self.nlp = pipeline("sentiment-analysis", framework="pt", model=self.model_name)
def get(self, text: str) -> Emotion:
"""Get Emotion from text str."""
try:
results = self.nlp(text)
except RuntimeError as err:
print(f"len(text) = {len(text)}")
print(f"text: {text}")
raise(err)
data = {result['label']: result['score'] for result in results}
tag, score = "", 0
for key, value in data.items():
if value > score:
tag = key
score = value
return Emotion(
tag=tag,
emoji=get_emotion_emoji(tag=tag),
) | br8km/pynlp | core/emotion.py | emotion.py | py | 3,869 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dataclasses.dataclass",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "transformers.AutoTokenizer.from_pretrained",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "transformers.AutoTokenizer",
"line_number": 73,
"usage_type": "name"
}... |
40128872754 | #!/usr/bin/env python3
import sys
sys.setrecursionlimit(10**6)
INF = 10 ** 9 + 1 # sys.maxsize # float("inf")
MOD = 10 ** 9 + 7
def debug(*x):
print(*x, file=sys.stderr)
def solve(N, AS):
sum = 0
sumSq = 0
for i in range(N):
sum += AS[i]
sum %= MOD
sumSq += AS[i] * AS[i]
sumSq %= MOD
ret = (sum * sum - sumSq) % MOD
if ret % 2 == 0:
return ret // 2
else:
return (ret + MOD) // 2
def main():
# parse input
N = int(input())
AS = list(map(int, input().split()))
print(solve(N, AS))
# tests
T1 = """
3
1 2 3
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
11
"""
T2 = """
4
141421356 17320508 22360679 244949
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
437235829
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
"use in test, use given string as input file"
import io
f = io.StringIO(s.strip())
g = globals()
g["input"] = lambda: bytes(f.readline(), "ascii")
g["read"] = lambda: bytes(f.read(), "ascii")
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
if sys.argv[-1] == "-t":
print("testing")
_test()
sys.exit()
main()
| nishio/atcoder | abc177/c.py | c.py | py | 1,341 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "sys.setrecursionlimit",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "doctest.testmod",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "doctest.run_docst... |
34197476486 | #!/bin/python3
import sys
import os
import mysql.connector
import datetime
from sys import argv
import requests
import json
from requests.exceptions import HTTPError
from slack import WebClient
from slack.errors import SlackApiError
import logging
logging.basicConfig(level=logging.DEBUG)
database_conf = "/var/lib/jenkins/engine.cnf"
operator_name_list = argv[1].split(",")
start_payment_date = argv[2]
finish_payment_date = argv[3]
game_cycle_file = "rounds.txt"
default_round_log = "round-close.log"
operator_id_list = []
search_list = ["| closed", "| not closed", "| game cycle is already in completed game cycle table"]
slack_channel = "#customer_support"
def collect_operator_id(operator_name: str) -> int:
sql_operator_id = ("select operator_id from core_operator where operator_name='{}'".format(operator_name))
cursor.execute(sql_operator_id)
operator_results = cursor.fetchall()
for op_id in operator_results:
operator_id = op_id[0]
return operator_id
def collect_game_cycle(operator_data: str):
sql_game_cycle = """
SELECT distinct(game_cycle_id) FROM tx_payment_journal a
left join tx_completed_game_cycle b on a.game_cycle_id=b.payment_reference
left join tx_player c on a.from_player_id=c.player_id
where a.transaction_id>=(SELECT transaction_id FROM tx_payment_journal where payment_date >= '{0}' limit 1)
and a.transaction_id<(SELECT transaction_id FROM tx_payment_journal where payment_date >= '{1}' limit 1)
and a.to_player_id=1 and a.complete=1 and a.cancelled=0 and a.current_balance>0 and b.completed_tx_id is null
and c.operator_id={2};""".format(start_payment_date, finish_payment_date, operator_data)
print(sql_game_cycle)
cleanup(game_cycle_file)
cursor.execute(sql_game_cycle)
result_table = cursor.fetchall()
for collumn in result_table:
game_cycle = collumn[0]
with open(game_cycle_file, "a") as rounds_list:
rounds_list.write("{}\n".format(game_cycle))
def close_rounds(operator_id_close: int):
try:
if os.path.exists(game_cycle_file):
print("*** Closing game rounds")
cleanup(default_round_log)
os.system("cp /var/lib/jenkins/devops-prod/scripts/close_rounds/application.properties .")
os.system("java -jar /var/lib/jenkins/devops-prod/scripts/close_rounds/close-round.jar {0} {1}".format(game_cycle_file, operator_id_close))
else:
print("*** No rounds were collected from database, please check data.")
open(default_round_log, "a").close()
except OSError as e:
print("*** Error occurs: {}".format(sys.exc_info()[1]))
exit()
def notify_slack(operator_data: str, prev_date: str, now_date: str, pattern: str, pattert_count: str):
slack_token = "xoxp-229522615970"
client = WebClient(token=slack_token)
user="jenkins-bot"
try:
if pattern == "game cycle is already in completed game cycle table":
completed_pattern = "already closed"
response = client.chat_postMessage(
channel = slack_channel,
text = """Finished processing issued rounds for {0} operator:
Period: {1} - {2}
Rounds {3}: {4}
""".format(operator_data.replace(" ", ""), prev_date, now_date, completed_pattern, pattert_count)
)
else:
response = client.chat_postMessage(
channel = slack_channel,
text = """Finished processing issued rounds for {0} operator:
Period: {1} - {2}
Rounds {3}: {4}
""".format(operator_data.replace(" ", ""), prev_date, now_date, pattern, pattert_count)
)
if os.path.exists(filename):
response = client.files_upload(
channels = slack_channel,
file = filename,
title = custom_pattern
)
except SlackApiError as e:
# You will get a SlackApiError if "ok" is False
assert e.response["error"] # str like 'invalid_auth', 'channel_not_found'
except FileNotFoundError as e:
print("*** Pattern for search was not found: {}".format(sys.exc_info()[1]))
def parse_log(message: str, operatorname: str):
global total_pattert_count
global custom_pattern
global filename
custom_pattern = message.replace("| ", "")
if message == "| closed":
filename = "Rounds_closed.log"
elif message == "| not closed":
filename = "Rounds_not_closed.log"
elif message == "| game cycle is already in completed game cycle table":
filename = "Rounds_already_closed.log"
total_pattert_count = 0
with open(default_round_log, "r") as log_file:
for line in log_file:
if message in line:
total_pattert_count += 1
with open(filename, "a") as closed_rounds:
closed_rounds.write(line)
print("File was created: {}".format(filename))
notify_slack(operatorname, start_payment_date, finish_payment_date, custom_pattern, total_pattert_count)
cleanup(filename)
def cleanup(item: str):
try:
if os.path.exists(item):
os.system("rm -rf {}".format(item))
print("*** {} was successfully removed from workspace".format(item))
except OSError as e:
print("*** Error occurs: {}".format(sys.exc_info()[1]))
exit()
def main():
try:
db_connection = mysql.connector.connect(option_files=database_conf, option_groups="client")
cursor = db_connection.cursor()
for operator in operator_name_list:
print("Processing {} operator:".format(operator))
collect_game_cycle(collect_operator_id(operator))
close_rounds(collect_operator_id(operator))
for search_pattern in search_list:
parse_log(search_pattern, operator)
except mysql.connector.Error as e:
print("*** ERROR: {}".format(e.msg))
exit()
finally:
if (db_connection.is_connected()):
db_connection.close()
cursor.close()
cleanup(game_cycle_file)
cleanup(default_round_log)
if __name__ == '__main__':
main()
| vlad-solomai/viam_automation | automation_gambling/game_round_management/close_rounds_slack.py | close_rounds_slack.py | py | 6,354 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_... |
18917415762 | from typing import Any, Callable, TypeVar, cast
import pluggy
F = TypeVar("F", bound=Callable[..., Any])
hookimpl = cast(Callable[[F], F], pluggy.HookimplMarker("ape"))
hookspec = pluggy.HookspecMarker("ape")
plugin_manager = pluggy.PluginManager("ape")
"""A manager responsible for registering and accessing plugins (singleton)."""
class PluginType:
"""
The base plugin class in ape. There are several types of plugins available in ape, such
as the :class:`~ape.plugins.config.Config` or :class:`~ape.plugins.network.EcosystemPlugin`.
Each one of them subclass this class. It is used to namespace the plugin hooks for the
registration process, and to ensure overall conformance to type interfaces as much as possible.
"""
| ApeWorX/ape | src/ape/plugins/pluggy_patch.py | pluggy_patch.py | py | 752 | python | en | code | 736 | github-code | 6 | [
{
"api_name": "typing.TypeVar",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.cast",
"line_number... |
26058478061 | from django.contrib.auth.views import LogoutView
from django.urls import path
from .views import *
urlpatterns = [
path('login/', CustomLoginView.as_view(), name='login'),
path('logout/', LogoutView.as_view(next_page='login'), name='logout'), # тут ми вказуємо через next_page, що якщо ми виходимо з акаунту то переходимо на сторінку "login"
path('', TaskList.as_view(), name='tasks'),
path('register/', RegisterPage.as_view(), name='register'),
path('task/<int:pk>/', TaskDetail.as_view(), name='task'),
path('task-create/', TaskCreate.as_view(), name='task-create'),
path('task-update/<int:pk>', TaskUpdate.as_view(), name='task-update'),
path('task-delete/<int:pk>', DeleteView.as_view(), name='task-delete'),
] | ianvv/todo-app-django | todo_list/base/urls.py | urls.py | py | 812 | python | uk | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.LogoutView.as_view",
"line_number": 7,
"usage_type": "call"
},
{
"api_n... |
70732574587 | import os
from importlib.machinery import SourceFileLoader
from setuptools import find_packages, setup
from typing import List
module_name = 'juldate'
module = SourceFileLoader(
module_name,
os.path.join(module_name, '__init__.py'),
).load_module()
def parse_requirements(filename: str) -> List[str]:
requirements = list()
with open(filename) as file:
for line in file:
requirements.append(line.rstrip())
return requirements
setup(
name=module_name,
version=module.__version__,
author=module.__author__,
author_email=module.__email__,
url='https://github.com/churilov-ns/juldate.git',
license=module.__license__,
description=module.__doc__,
long_description=open('README.md').read(),
classifiers=[
'Intended Audience :: Science/Research',
'Natural Language :: Russian',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Astronomy',
],
platforms='all',
python_requires='>=3.8',
packages=find_packages(exclude=['tests']),
install_requires=parse_requirements('requirements.txt'),
extras_require={'dev': parse_requirements('requirements.dev.txt')},
include_package_data=True,
)
| churilov-ns/juldate | setup.py | setup.py | py | 1,333 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "importlib.machinery.SourceFileLoader",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "typing.... |
30084867415 | from .models import AdminUser
from django.shortcuts import render
from django.http import JsonResponse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
import json
from datetime import date, datetime
# Create your views here.
def jsons(data = None, errorCode = 0, cookies = '', days = 0):
if data is None:
data = []
return JsonResponse({'errorCode': errorCode, 'data': data, 'cookies': cookies, 'days': days})
def adminLogin(request):
data = json.loads(request.body)
username = data['username']
password = data['password']
try:
admin = AdminUser.objects.get(username = username)
except AdminUser.DoesNotExist:
return jsons([], 404)
admin = authenticate(request, username=username, password=password)
if admin is not None:
# authenticated
login(request, admin)
admin = AdminUser.objects.get(username = username)
return jsons([dict(admin.body())], 0, {'user_id': admin.id, 'username': username})
else:
# not authenticated
return jsons([], 403)
# Logout
def adminLogout(request):
if request.user.is_authenticated:
logout(request)
return jsons([], 0)
return jsons([], 403)
@login_required
def adminEdit(request, pk):
try:
admin = AdminUser.objects.get(id = pk)
except AdminUser.DoesNotExist:
return jsons([], 404)
# change password
if request.method == 'PUT':
if request.user.id != admin.id:
return jsons([], 403)
data = json.loads(request.body)
admin.username = admin.username
admin.set_password(data['newpass'])
admin.save()
login(request, admin)
return jsons([dict(admin.body())])
def adminGetByUsername(request, username):
try:
admin = AdminUser.objects.get(username = username)
year = int(admin.joinDate.strftime("%Y"))
month = int(admin.joinDate.strftime("%m"))
day = int(admin.joinDate.strftime("%d"))
nowYear = int(datetime.now().strftime("%Y"))
nowMonth = int(datetime.now().strftime("%m"))
nowDay = int(datetime.now().strftime("%d"))
date1 = date(year, month, day)
date2 = date(nowYear, nowMonth, nowDay)
days = (date2 - date1).days
except AdminUser.DoesNotExist:
return jsons([], 404)
return jsons([dict(admin.body())], 0, '', days) | jeremyytann/BUAA-SE-LetStudy | Code/backend/admin_user/views.py | views.py | py | 2,482 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.http.JsonResponse",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "models.AdminUser.objects.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "mode... |
7798026425 | import os.path
import xml.dom.minidom
import xml.xpath
import logging
import edef
from edef.dev import Config
import fnmatch
from Tools import getModuleName
class Model:
def __init__(self):
self._logger = logging.getLogger("edef.dev")
self._base_path = Config().getBasePath()
self._module_list = dict()
imp = edef.Importer()
xmlfile_list = list()
mod_list = imp.listModules()
for mod in mod_list:
xmlfile_list.append(imp._find_module_meta(mod))
for path in xmlfile_list:
self._logger.debug("Found xml file %s"%path)
#path = os.path.abspath( os.path.join(self._base_path, filename) )
try:
module = eDevModelModule(path)
except:
self._logger.exception("Exception while load xml %s"%path)
continue
self._module_list[module.GetURI()] = module
def openURI(self, uri):
if uri == "mod://":
return self._module_list.keys()
try: mod = self._module_list[uri]
except: raise Exception("Unknown module %s"%uri)
return mod.getText()
def saveURI(self, uri, txt=None):
if not uri in self._module_list.keys():
# create module...
mod_name = getModuleName(uri)+".xml"
mod_path = os.path.join(self._base_path, mod_name)
if os.path.isfile(mod_path): raise Exception("File %s allready exists!"%mod_path)
f = open(mod_path,"w")
f.write(txt)
f.close()
mod = eDevModelModule(mod_path)
self._module_list[uri] = mod
return
# save module
mod = self._module_list[uri]
mod.setText(txt)
def checkURI(self, uri):
return uri in self._module_list.keys()
def deleteURI(self, uri):
if not uri in self._module_list.keys():
raise Exception("Module %s not known"%uri)
os.unlink(self._module_list[uri].getPath())
del self._module_list[uri]
def isURIWriteable(self, uri):
if uri == "mod://": return False
if not uri in self._module_list.keys():
raise Exception("Module %s not known"%uri)
return self._module_list[uri].isWriteable()
def isURIEditable(self, uri):
if uri == "mod://": return False
if not uri in self._module_list.keys():
raise Exception("Module %s not known"%uri)
return self._module_list[uri].isEditable()
class eDevModelModule:
_d_name = None
def __init__(self, path):
self._d_full_path = path
if not os.path.isfile(path):
raise Exception("%s doesn't point to a file!"%path)
(tmp, name) = os.path.split(path)
(name, tmp) = os.path.splitext(name)
(tmp, self._d_name) = os.path.splitext(name)
if self._d_name == "": self._d_name = tmp
self._d_uri = "mod://"+"/".join(name.split("."))
self._editable = False
self._writeable = False
# FIXME replace by TREX
dom = xml.dom.minidom.parse(path)
# if module:
if len(xml.xpath.Evaluate("/Module", dom))==1:
self._editable = True
if os.access(path, os.W_OK): self._writeable = True
# if assembly
elif len(xml.xpath.Evaluate("/Assembly", dom))==1:
self._editable = False
self._writeable = False
else:
raise Exception("Invalid module description: %s"%path)
def GetURI(self): return self._d_uri
def getName(self): return self._d_name
def getPath(self): return self._d_full_path
def getText(self):
f = open(self._d_full_path,"r")
txt = f.read()
f.close()
return txt
def setText(self, xml_txt):
# FIXME check xml_txt
f = open(self._d_full_path, "w")
f.write(xml_txt)
f.close()
def isEditable(self): return self._editable
def isWriteable(self): return self._writeable
| BackupTheBerlios/pplt-svn | trunk/edef/edef-dev/modeditor/ModelModule.py | ModelModule.py | py | 4,039 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "edef.dev.Config",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "edef.Importer",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "Tools.getModuleName",... |
38586042024 | from flask import Flask,render_template,json,flash,request,session,redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
with open('config.json', 'r') as c:
parameter = json.load(c)["parameter"]
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = parameter['local_uri']
app.secret_key = 'super-secret-key'
db = SQLAlchemy(app)
class Contact(db.Model):
sno = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), nullable=False)
email = db.Column(db.String(20), nullable=False)
phone = db.Column(db.String(12), nullable=False)
message = db.Column(db.String(120), nullable=False)
date = db.Column(db.String(12), nullable=True)
@app.route('/')
def home():
return render_template('index.html',parameter=parameter)
@app.route("/contact", methods = ['GET', 'POST'])
def contact():
if(request.method=='POST'):
name = request.form.get('name')
email = request.form.get('email')
phone = request.form.get('phone')
message = request.form.get('message')
entry = Contact(name=name, email = email, phone = phone, message = message, date= datetime.now())
db.session.add(entry)
db.session.commit()
flash("Thank You We will get back to you soon...","success")
return render_template('index.html',parameter=parameter) | 199-cmd/FlaskDemo | FlaskDemo/main.py | main.py | py | 1,402 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.json.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.json",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
... |
696671101 |
import tkinter as tk
import atten
import face_recognition
import cv2
import numpy as np
import csv
import os
from datetime import datetime
# Create the root window
root = tk.Tk()
root.overrideredirect(True)
# Set the window size and position
width = 700
height = root.winfo_screenheight()-100 # Get the screen height
# Calculate the x- and y-coordinates to center the window
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
x = int((screen_width/2) - (width/2))
y = int((screen_height/2) - (height/2))
root.geometry(f"{width}x{height}+{x}+{y}")
x_cord = 75;
y_cord = 20;
checker=0;
video_capture = cv2.VideoCapture(0)
Abhinav_image = face_recognition.load_image_file("Abhinav.jpg")
Abhinav_encoding = face_recognition.face_encodings(Abhinav_image)[0]
Khushi_image = face_recognition.load_image_file("Khushi.jpeg")
Khushi_encoding = face_recognition.face_encodings(Khushi_image)[0]
Yashika_image = face_recognition.load_image_file("Yashika.jpeg")
Yashika_encoding = face_recognition.face_encodings(Yashika_image)[0]
Jyotiraditya_image = face_recognition.load_image_file("Jyotiraditya.jpeg")
Jyotiraditya_encoding = face_recognition.face_encodings(Jyotiraditya_image)[0]
Alok_image = face_recognition.load_image_file("Alok.jpeg")
Alok_encoding = face_recognition.face_encodings(Alok_image)[0]
Shrey_image = face_recognition.load_image_file("Shrey.jpeg")
Shrey_encoding = face_recognition.face_encodings(Shrey_image)[0]
known_face_encoding = [
Abhinav_encoding,
Khushi_encoding,
Yashika_encoding,
Jyotiraditya_encoding,
Alok_encoding,
Shrey_encoding
]
known_faces_names = [
"Abhinav Maheshwari",
"Khushi Arora",
"Yashika",
"Jyotiraditya",
"Alok Raj",
"Shrey"
]
students = known_faces_names.copy()
face_locations = []
face_encodings = []
face_names = []
s=True
now = datetime.now()
current_date = now.strftime("%Y-%m-%d")
def mark_attendance():
atten.run(video_capture, s, known_face_encoding, known_faces_names, students,message2)
# Open the CSV file in read mode
# Set the background color to white
root.configure(bg="white")
# Add logo to the top left corner
logo_img = tk.PhotoImage(file="logo.png")
logo_img = logo_img.subsample(1)
# def run_jjcopy():
# root.destroy()
# os.system('python jjcopy.py')
# Create a label widget for the logo and pack it in the top left corner
logo_label = tk.Label(root, image=logo_img, bd=0)
logo_label.pack(side="left", anchor="nw", padx=10, pady=10)
# Add text to the right of the logo
text_label= tk.Label(root, text="ATTENDANCE RECOGNITION SYSTEM" ,bg="white" ,fg="blue" ,width=35 ,height=1,font=('Sitka Text Semibold', 18, 'bold underline'))
text_label.pack(pady=30, anchor="n")
line_canvas = tk.Canvas(root, height=1, width = 700,bg="black", highlightthickness=0)
line_canvas.create_line(0, 0, width, 0, fill="black")
line_canvas.place(x=75-x_cord,y=130-y_cord)
button = tk.Button(root, text="MARK ATTENDANCE", command=mark_attendance, width=40 ,height=1 ,fg="white" ,bg="black" ,font=('Sitka Text Semibold', 18, ' bold ') )
button.place(x=120-x_cord, y=150-y_cord)
lbl = tk.Label(root, text="Attendance list:", width=12 ,height=1 ,fg="green" ,bg="white" ,font=('Sitka Text Semibold', 18, ' bold ') )
lbl.place(x=120-x_cord, y=250-y_cord)
# # Add a line below the "Attendance list:" line
# line2_canvas = tk.Canvas(root, height=1, bg="black", highlightthickness=0)
# line2_canvas.create_line(0, 0, width, 0, fill="black")
# line2_canvas.place(x=120-x_cord, y=150-y_cord)
# message2 = tk.Label(root, height=screen_height*0.025, width=67, bg="#f0f4f9", fg="black", font=("Helvetica", 12), wrap="word", state="disabled")
# message2.place(x=120-x_cord, y=290-y_cord)
message2 = tk.Label(root, height=20, width=67, font=("Helvetica", 12))
message2.place(x=120-x_cord, y=290-y_cord)
# Add an exit button in the bottom left corner
exit_button = tk.Button(root, text="EXIT", width=10, height=1, bg="black", fg="white", font=('Sitka Text Semibold', 15, 'bold'), command=root.destroy)
exit_button.place(x=20, y=height-70)
root.mainloop()
| khushiarora1793/attendancemanagement | temp.py | temp.py | py | 4,274 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tkinter.Tk",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "face_recognition.load_image_file",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "face_rec... |
36007020776 | from bs4 import BeautifulSoup
import requests
import pandas as pd
# Downloading IMDB feature film and MyAnimeList popularity data
headers = {'Accept-Language': 'en-US,en;q=0.8'}
url1 = 'https://www.imdb.com/search/title/?title_type=feature&sort=num_votes,desc'
url2 = 'https://myanimelist.net/topanime.php?type=bypopularity'
response1 = requests.get(url1,headers=headers)
response2 = requests.get(url2,headers=headers)
soup1 = BeautifulSoup(response1.text, "html.parser")
soup2 = BeautifulSoup(response2.text, "html.parser")
movie_title = []
link = []
year = []
certificate = []
movie_runtime = []
genre = []
anime_title = []
anime_link = []
type = []
anime_runtime = []
members = []
for t in soup1.select('h3.lister-item-header a'):
movie_title.append(t.get_text())
link.append("https://www.imdb.com" + t.attrs.get('href') + "?ref_=adv_li_tt")
for t in soup1.select('h3.lister-item-header span.lister-item-year'):
year.append(t.get_text().replace("(","").replace(")",""))
for t in soup1.select('p.text-muted span.certificate'):
certificate.append(t.get_text())
for t in soup1.select('p.text-muted span.runtime'):
movie_runtime.append(t.get_text())
for t in soup1.select('p.text-muted span.genre'):
genre.append(t.get_text().replace("\n","").replace(" ",""))
for t in soup2.select('h3.anime_ranking_h3 a.hoverinfo_trigger'):
anime_title.append(t.get_text())
anime_link.append(t.attrs.get('href'))
for t in soup2.select('div.information'):
info = t.get_text().strip().split('\n')
type.append(info[0].strip())
anime_runtime.append(info[1].strip())
members.append(info[2].strip())
df1 = pd.DataFrame(
{'movie title': movie_title,
'link': link,
'year': year,
'certificate': certificate,
'runtime': movie_runtime,
'genre': genre}
)
df2 = pd.DataFrame(
{'anime title': anime_title,
'anime link': anime_link,
'type': type,
'anime runtime': anime_runtime,
'members': members}
)
print(df1.head())
print(df2.head())
df1.to_csv('moviesrating.csv', index=False)
df2.to_csv('animerating.csv', index=False)
| ilovegaming42069/DataScienceExercise | datascience.py | datascience.py | py | 2,188 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"l... |
38650731253 | from ehrqc.standardise import Config
from ehrqc.standardise import Utils
import logging
log = logging.getLogger("EHR-QC")
def importPatients(con, sourceSchemaName, filePath, fileSeparator, overwrite=True):
if overwrite:
log.info("Creating table: " + sourceSchemaName + ".patients")
dropQuery = """DROP TABLE IF EXISTS """ + sourceSchemaName + """.patients CASCADE"""
createQuery = """CREATE TABLE """ + sourceSchemaName + """.patients
(
patient_id VARCHAR(50) NOT NULL,
gender VARCHAR(50),
age VARCHAR(10),
dob TIMESTAMP(0),
dod TIMESTAMP(0)
)
;
"""
with con:
with con.cursor() as cursor:
cursor.execute(dropQuery)
cursor.execute(createQuery)
import pandas as pd
import numpy as np
df = pd.read_csv(filePath, sep=fileSeparator)
dfColumns = []
columns = []
if(Config.patients['column_mapping']['patient_id']):
dfColumns.append(Config.patients['column_mapping']['patient_id'])
columns.append('patient_id')
if(Config.patients['column_mapping']['gender']):
dfColumns.append(Config.patients['column_mapping']['gender'])
columns.append('gender')
if(Config.patients['column_mapping']['age']):
dfColumns.append(Config.patients['column_mapping']['age'])
columns.append('age')
if(Config.patients['column_mapping']['dod']):
df[Config.patients['column_mapping']['dod']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.patients['column_mapping']['dod'])
columns.append('dod')
if(Config.patients['column_mapping']['dob']):
df[Config.patients['column_mapping']['dob']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.patients['column_mapping']['dob'])
columns.append('dob')
Utils.saveDataframe(con=con, destinationSchemaName=sourceSchemaName, destinationTableName='patients', columns=columns, df=df, dfColumns=dfColumns)
def importAdmissions(con, sourceSchemaName, filePath, fileSeparator, overwrite=True):
if overwrite:
log.info("Creating table: " + sourceSchemaName + ".admissions")
dropQuery = """DROP TABLE IF EXISTS """ + sourceSchemaName + """.admissions CASCADE"""
createQuery = """CREATE TABLE """ + sourceSchemaName + """.admissions
(
patient_id VARCHAR(50),
episode_id VARCHAR(50),
admittime VARCHAR(50),
dischtime VARCHAR(50),
deathtime VARCHAR(50),
admission_type VARCHAR(50),
admission_location VARCHAR(50),
discharge_location VARCHAR(50),
insurance VARCHAR(255),
language VARCHAR(10),
marital_status VARCHAR(50),
ethnicity VARCHAR(200),
edregtime VARCHAR(50),
edouttime VARCHAR(50),
hospital_expire_flag VARCHAR(50)
)
;
"""
with con:
with con.cursor() as cursor:
cursor.execute(dropQuery)
cursor.execute(createQuery)
import pandas as pd
import numpy as np
df = pd.read_csv(filePath, sep=fileSeparator)
dfColumns = []
columns = []
if(Config.admissions['column_mapping']['patient_id']):
dfColumns.append(Config.admissions['column_mapping']['patient_id'])
columns.append('patient_id')
if(Config.admissions['column_mapping']['episode_id']):
dfColumns.append(Config.admissions['column_mapping']['episode_id'])
columns.append('episode_id')
if(Config.admissions['column_mapping']['admittime']):
df[Config.admissions['column_mapping']['admittime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.admissions['column_mapping']['admittime'])
columns.append('admittime')
if(Config.admissions['column_mapping']['dischtime']):
df[Config.admissions['column_mapping']['dischtime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.admissions['column_mapping']['dischtime'])
columns.append('dischtime')
if(Config.admissions['column_mapping']['deathtime']):
df[Config.admissions['column_mapping']['deathtime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.admissions['column_mapping']['deathtime'])
columns.append('deathtime')
if(Config.admissions['column_mapping']['admission_type']):
dfColumns.append(Config.admissions['column_mapping']['admission_type'])
columns.append('admission_type')
if(Config.admissions['column_mapping']['admission_location']):
dfColumns.append(Config.admissions['column_mapping']['admission_location'])
columns.append('admission_location')
if(Config.admissions['column_mapping']['discharge_location']):
dfColumns.append(Config.admissions['column_mapping']['discharge_location'])
columns.append('discharge_location')
if(Config.admissions['column_mapping']['insurance']):
dfColumns.append(Config.admissions['column_mapping']['insurance'])
columns.append('insurance')
if(Config.admissions['column_mapping']['language']):
dfColumns.append(Config.admissions['column_mapping']['language'])
columns.append('language')
if(Config.admissions['column_mapping']['marital_status']):
dfColumns.append(Config.admissions['column_mapping']['marital_status'])
columns.append('marital_status')
if(Config.admissions['column_mapping']['ethnicity']):
dfColumns.append(Config.admissions['column_mapping']['ethnicity'])
columns.append('ethnicity')
if(Config.admissions['column_mapping']['edregtime']):
dfColumns.append(Config.admissions['column_mapping']['edregtime'])
columns.append('edregtime')
if(Config.admissions['column_mapping']['edouttime']):
dfColumns.append(Config.admissions['column_mapping']['edouttime'])
columns.append('edouttime')
if(Config.admissions['column_mapping']['hospital_expire_flag']):
dfColumns.append(Config.admissions['column_mapping']['hospital_expire_flag'])
columns.append('hospital_expire_flag')
Utils.saveDataframe(con=con, destinationSchemaName=sourceSchemaName, destinationTableName='admissions', columns=columns, df=df, dfColumns=dfColumns)
def importChartEvents(con, sourceSchemaName, filePath, fileSeparator, overwrite=True):
if overwrite:
log.info("Creating table: " + sourceSchemaName + ".chartevents")
dropQuery = """DROP TABLE IF EXISTS """ + sourceSchemaName + """.chartevents CASCADE"""
createQuery = """CREATE TABLE """ + sourceSchemaName + """.chartevents
(
patient_id VARCHAR(50),
episode_id VARCHAR(50),
vital_id VARCHAR(50),
charttime VARCHAR(50),
storetime VARCHAR(50),
itemid VARCHAR(160),
value VARCHAR(160),
valuenum VARCHAR(160),
valueuom VARCHAR(20),
warning VARCHAR(10)
)
;
"""
with con:
with con.cursor() as cursor:
cursor.execute(dropQuery)
cursor.execute(createQuery)
import pandas as pd
import numpy as np
log.info("Reading file: " + str(filePath))
df = pd.read_csv(filePath, sep=fileSeparator)
dfColumns = []
columns = []
if(Config.chartevents['column_mapping']['patient_id']):
dfColumns.append(Config.chartevents['column_mapping']['patient_id'])
columns.append('patient_id')
if(Config.chartevents['column_mapping']['episode_id']):
dfColumns.append(Config.chartevents['column_mapping']['episode_id'])
columns.append('episode_id')
if(Config.chartevents['column_mapping']['vital_id']):
dfColumns.append(Config.chartevents['column_mapping']['vital_id'])
columns.append('vital_id')
if(Config.chartevents['column_mapping']['charttime']):
df[Config.chartevents['column_mapping']['charttime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.chartevents['column_mapping']['charttime'])
columns.append('charttime')
if(Config.chartevents['column_mapping']['storetime']):
df[Config.chartevents['column_mapping']['storetime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.chartevents['column_mapping']['storetime'])
columns.append('storetime')
if(Config.chartevents['column_mapping']['itemid']):
dfColumns.append(Config.chartevents['column_mapping']['itemid'])
columns.append('itemid')
if(Config.chartevents['column_mapping']['value']):
# df = df[df[Config.chartevents['column_mapping']['value']].str.strip() != '']
dfColumns.append(Config.chartevents['column_mapping']['value'])
columns.append('value')
if(Config.chartevents['column_mapping']['valuenum']):
dfColumns.append(Config.chartevents['column_mapping']['valuenum'])
columns.append('valuenum')
if(Config.chartevents['column_mapping']['valueuom']):
dfColumns.append(Config.chartevents['column_mapping']['valueuom'])
columns.append('valueuom')
if(Config.chartevents['column_mapping']['warning']):
dfColumns.append(Config.chartevents['column_mapping']['warning'])
columns.append('warning')
Utils.saveDataframe(con=con, destinationSchemaName=sourceSchemaName, destinationTableName='chartevents', columns=columns, df=df, dfColumns=dfColumns)
def importLabEvents(con, sourceSchemaName, filePath, fileSeparator, overwrite=True):
if overwrite:
log.info("Creating table: " + sourceSchemaName + ".labevents")
dropQuery = """DROP TABLE IF EXISTS """ + sourceSchemaName + """.labevents CASCADE"""
createQuery = """CREATE TABLE """ + sourceSchemaName + """.labevents
(
labevent_id VARCHAR(50),
patient_id VARCHAR(50),
episode_id VARCHAR(50),
specimen_id VARCHAR(20),
itemid VARCHAR(200),
charttime VARCHAR(50),
storetime VARCHAR(50),
value VARCHAR(200),
valuenum VARCHAR(200),
valueuom VARCHAR(20),
ref_range_lower VARCHAR(200),
ref_range_upper VARCHAR(200),
flag VARCHAR(10),
priority VARCHAR(7),
comments VARCHAR(620)
)
;
"""
with con:
with con.cursor() as cursor:
cursor.execute(dropQuery)
cursor.execute(createQuery)
import pandas as pd
import numpy as np
df = pd.read_csv(filePath, sep=fileSeparator)
dfColumns = []
columns = []
if(Config.labevents['column_mapping']['labevent_id']):
dfColumns.append(Config.labevents['column_mapping']['labevent_id'])
columns.append('labevent_id')
if(Config.labevents['column_mapping']['patient_id']):
dfColumns.append(Config.labevents['column_mapping']['patient_id'])
columns.append('patient_id')
if(Config.labevents['column_mapping']['episode_id']):
dfColumns.append(Config.labevents['column_mapping']['episode_id'])
columns.append('episode_id')
if(Config.labevents['column_mapping']['specimen_id']):
dfColumns.append(Config.labevents['column_mapping']['specimen_id'])
columns.append('specimen_id')
if(Config.labevents['column_mapping']['itemid']):
dfColumns.append(Config.labevents['column_mapping']['itemid'])
columns.append('itemid')
if(Config.labevents['column_mapping']['charttime']):
df[Config.labevents['column_mapping']['charttime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.labevents['column_mapping']['charttime'])
columns.append('charttime')
if(Config.labevents['column_mapping']['storetime']):
df[Config.labevents['column_mapping']['storetime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.labevents['column_mapping']['storetime'])
columns.append('storetime')
if(Config.labevents['column_mapping']['value']):
# df = df[df[Config.labevents['column_mapping']['value']].str.strip() != '']
dfColumns.append(Config.labevents['column_mapping']['value'])
columns.append('value')
if(Config.labevents['column_mapping']['valuenum']):
dfColumns.append(Config.labevents['column_mapping']['valuenum'])
columns.append('valuenum')
if(Config.labevents['column_mapping']['valueuom']):
dfColumns.append(Config.labevents['column_mapping']['valueuom'])
columns.append('valueuom')
if(Config.labevents['column_mapping']['ref_range_lower']):
dfColumns.append(Config.labevents['column_mapping']['ref_range_lower'])
columns.append('ref_range_lower')
if(Config.labevents['column_mapping']['ref_range_upper']):
dfColumns.append(Config.labevents['column_mapping']['ref_range_upper'])
columns.append('ref_range_upper')
if(Config.labevents['column_mapping']['flag']):
dfColumns.append(Config.labevents['column_mapping']['flag'])
columns.append('flag')
if(Config.labevents['column_mapping']['priority']):
dfColumns.append(Config.labevents['column_mapping']['priority'])
columns.append('priority')
if(Config.labevents['column_mapping']['comments']):
dfColumns.append(Config.labevents['column_mapping']['comments'])
columns.append('comments')
Utils.saveDataframe(con=con, destinationSchemaName=sourceSchemaName, destinationTableName='labevents', columns=columns, df=df, dfColumns=dfColumns)
def importDiagnosis(con, sourceSchemaName, filePath, fileSeparator, overwrite=True):
log.info("Creating table: " + sourceSchemaName + ".diagnosis")
if overwrite:
dropQuery = """DROP TABLE IF EXISTS """ + sourceSchemaName + """.diagnosis CASCADE"""
createQuery = """CREATE TABLE """ + sourceSchemaName + """.diagnosis
(
diagnosis_id VARCHAR(50),
episode_id VARCHAR(50),
patient_id VARCHAR(50),
charttime VARCHAR(50),
diagnosis VARCHAR(50),
diagnosis_description VARCHAR(250)
)
;
"""
with con:
with con.cursor() as cursor:
cursor.execute(dropQuery)
cursor.execute(createQuery)
import pandas as pd
import numpy as np
df = pd.read_csv(filePath, sep=fileSeparator)
dfColumns = []
columns = []
if(Config.diagnosis['column_mapping']['diagnosis_id']):
dfColumns.append(Config.diagnosis['column_mapping']['diagnosis_id'])
columns.append('diagnosis_id')
if(Config.diagnosis['column_mapping']['patient_id']):
dfColumns.append(Config.diagnosis['column_mapping']['patient_id'])
columns.append('patient_id')
if(Config.diagnosis['column_mapping']['episode_id']):
dfColumns.append(Config.diagnosis['column_mapping']['episode_id'])
columns.append('episode_id')
if(Config.diagnosis['column_mapping']['charttime']):
df[Config.diagnosis['column_mapping']['charttime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.diagnosis['column_mapping']['charttime'])
columns.append('charttime')
if(Config.diagnosis['column_mapping']['diagnosis']):
dfColumns.append(Config.diagnosis['column_mapping']['diagnosis'])
columns.append('diagnosis')
if(Config.diagnosis['column_mapping']['diagnosis_description']):
dfColumns.append(Config.diagnosis['column_mapping']['diagnosis_description'])
columns.append('diagnosis_description')
Utils.saveDataframe(con=con, destinationSchemaName=sourceSchemaName, destinationTableName='diagnosis', columns=columns, df=df, dfColumns=dfColumns)
def importDataCsv(con, sourceSchemaName):
if(hasattr(Config, 'patients') and 'file_name' in Config.patients and Config.patients['file_name']):
importPatients(
con=con,
sourceSchemaName=sourceSchemaName,
filePath = Config.patients['file_name'],
fileSeparator=Config.patients['file_separator'],
overwrite=Config.patients['overwrite'],
)
if(hasattr(Config, 'admissions') and 'file_name' in Config.admissions and Config.admissions['file_name']):
importAdmissions(
con=con,
sourceSchemaName=sourceSchemaName,
filePath = Config.admissions['file_name'],
fileSeparator=Config.admissions['file_separator'],
overwrite=Config.admissions['overwrite'],
)
if(hasattr(Config, 'chartevents') and 'file_name' in Config.chartevents and Config.chartevents['file_name']):
importChartEvents(
con=con,
sourceSchemaName=sourceSchemaName,
filePath = Config.chartevents['file_name'],
fileSeparator=Config.chartevents['file_separator'],
overwrite=Config.chartevents['overwrite'],
)
if(hasattr(Config, 'labevents') and 'file_name' in Config.labevents and Config.labevents['file_name']):
importLabEvents(
con=con,
sourceSchemaName=sourceSchemaName,
filePath = Config.labevents['file_name'],
fileSeparator=Config.labevents['file_separator'],
overwrite=Config.labevents['overwrite'],
)
if(hasattr(Config, 'diagnosis') and 'file_name' in Config.diagnosis and Config.diagnosis['file_name']):
importDiagnosis(
con=con,
sourceSchemaName=sourceSchemaName,
filePath = Config.diagnosis['file_name'],
fileSeparator=Config.diagnosis['file_separator'],
overwrite=Config.diagnosis['overwrite'],
)
| ryashpal/EHR-QC-Standardise | ehrqc/standardise/Import.py | Import.py | py | 18,197 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "ehrqc.standardise.Config.patients",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name"... |
36258745480 | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
# Author: JiaChen
import traceback
from src.plugins.base import BasePlugin
from lib.response import BaseResponse
from config import settings
class CpuPlugin(BasePlugin):
    """Collect CPU model and core counts from a Dell server via SNMP (iDRAC OIDs)."""

    def run(self):
        """Query the managed host over SNMP and return a BaseResponse whose
        ``data`` dict holds ``cpu_model``, ``cpu_physical_count`` and ``cpu_count``.

        On any failure ``response.status`` is set to False and the formatted
        traceback is both logged and stored in ``response.error``.
        """
        response = BaseResponse()
        try:
            response.data = {'cpu_model': None, 'cpu_physical_count': 0, 'cpu_count': 0}
            # CPU brand/model string; snmpwalk wraps the value in double quotes.
            temp = self.exec_shell_cmd('snmpwalk -v 2c -c %s %s .1.3.6.1.4.1.674.10892.5.4.1100.30.1.23.1' % (settings.community_name, self.manager_ip))
            cpu_model = temp.split('"')[1]
            response.data['cpu_model'] = cpu_model
            # One output line per physical socket, so `wc -l` yields the socket count.
            temp = self.exec_shell_cmd('snmpwalk -v 2c -c %s %s .1.3.6.1.4.1.674.10892.5.4.1100.30.1.23.1|wc -l' % (settings.community_name, self.manager_ip))
            cpu_physical_count = int(temp)
            response.data['cpu_physical_count'] = cpu_physical_count
            # Per-socket core counts; sum the trailing integer of each line.
            temp = self.exec_shell_cmd('snmpwalk -v 2c -c %s %s .1.3.6.1.4.1.674.10892.5.4.1100.30.1.18.1' % (settings.community_name, self.manager_ip))
            cpu_count = 0
            for line in temp.split('\n'):
                # assumes exec_shell_cmd strips the trailing newline -- TODO confirm;
                # otherwise int('') on a final empty line would raise here
                cpu_count += int(line.split(':')[-1])
            response.data['cpu_count'] = cpu_count
        except Exception:
            # BUGFIX: traceback.format_exc() was previously called twice (once for the
            # log, once for response.error); capture the message once so both records
            # are guaranteed identical and the traceback is only formatted once.
            msg = "%s dell cpu plugin error: %s" % (self.hostname, traceback.format_exc())
            self.logger.log(msg, False)
            response.status = False
            response.error = msg
        return response
| jcdiy0601/EasyCmdbClient | src/plugins/snmp/dell/server/cpu.py | cpu.py | py | 1,514 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "src.plugins.base.BasePlugin",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "lib.response.BaseResponse",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "config.settings.community_name",
"line_number": 16,
"usage_type": "attribute"
},
... |
26922931124 | """
Calls the entos executable.
"""
import string
from typing import Any, Dict, List, Optional, Tuple
from qcelemental.models import Result
from qcelemental.util import parse_version, safe_version, which
from ..exceptions import UnknownError
from ..util import execute, popen
from .model import ProgramHarness
class EntosHarness(ProgramHarness):
    """QCEngine harness that builds input for, runs, and parses output of the
    ``entos`` quantum-chemistry executable."""

    # Capabilities advertised to the ProgramHarness machinery.
    _defaults = {
        "name": "entos",
        "scratch": True,
        "thread_safe": False,
        "thread_parallel": True,
        "node_parallel": False,
        "managed_memory": True,
    }
    # Class-level cache of version strings keyed by resolved executable path,
    # so `entos --version` is only spawned once per interpreter session.
    version_cache: Dict[str, str] = {}

    class Config(ProgramHarness.Config):
        pass

    def found(self, raise_error: bool = False) -> bool:
        """Return True if the ``entos`` executable is on PATH (optionally raising)."""
        return which('entos', return_bool=True, raise_error=raise_error, raise_msg='Please install via XXX')

    def get_version(self) -> str:
        """Return the entos version string, caching the result per executable path."""
        self.found(raise_error=True)
        which_prog = which('entos')
        if which_prog not in self.version_cache:
            # `entos --version` prints the version as the third whitespace token.
            with popen([which_prog, '--version']) as exc:
                exc["proc"].wait(timeout=15)
                self.version_cache[which_prog] = safe_version(exc["stdout"].split()[2])
        return self.version_cache[which_prog]

    def compute(self, input_data: 'ResultInput', config: 'JobConfig') -> 'Result':
        """
        Run entos on *input_data* and return the parsed Result.
        """
        # Check if entos executable is found
        self.found(raise_error=True)
        # Check entos version
        if parse_version(self.get_version()) < parse_version("0.5"):
            raise TypeError("entos version '{}' not supported".format(self.get_version()))
        # Setup the job
        job_inputs = self.build_input(input_data, config)
        # Run entos
        exe_success, proc = self.execute(job_inputs)
        # Determine whether the calculation succeeded
        if exe_success:
            # If execution succeeded, collect results
            result = self.parse_output(proc["outfiles"], input_data)
            return result
        else:
            # NOTE(review): an UnknownError instance is *returned*, not raised --
            # presumably the caller inspects the return type; confirm upstream.
            return UnknownError(proc["stderr"])

    def execute(self,
                inputs: Dict[str, Any],
                extra_infiles: Optional[Dict[str, str]] = None,
                extra_outfiles: Optional[List[str]] = None,
                extra_commands: Optional[List[str]] = None,
                scratch_name: Optional[str] = None,
                scratch_messy: bool = False,
                timeout: Optional[int] = None) -> Tuple[bool, Dict[str, Any]]:
        """
        For option documentation go look at qcengine/util.execute
        """
        # Collect all input files and update with extra_infiles
        infiles = inputs["infiles"]
        if extra_infiles is not None:
            infiles.update(extra_infiles)
        # Collect all output files and extend with extra_outfiles
        outfiles = ["dispatch.out"]
        if extra_outfiles is not None:
            outfiles.extend(extra_outfiles)
        # Replace commands with extra_commands if present
        commands = inputs["commands"]
        if extra_commands is not None:
            commands = extra_commands
        # Run the entos program
        exe_success, proc = execute(commands,
                                    infiles=infiles,
                                    outfiles=outfiles,
                                    scratch_name=scratch_name,
                                    scratch_directory=inputs["scratch_directory"],
                                    scratch_messy=scratch_messy,
                                    timeout=timeout)
        # Entos does not create an output file and only prints to stdout
        proc["outfiles"]["dispatch.out"] = proc["stdout"]
        return exe_success, proc

    def build_input(self, input_model: 'ResultInput', config: 'JobConfig',
                    template: Optional[str] = None) -> Dict[str, Any]:
        """Build the entos input deck for *input_model*.

        Returns a dict with the command line, in-memory input files, scratch
        directory and a deep copy of the input model for later result assembly.
        """
        # Write the geometry xyz file (Angstrom units)
        xyz_file = input_model.molecule.to_string(dtype='xyz', units='Angstrom')
        # Create input dictionary
        if template is None:
            structure = {'structure': {'file': 'geometry.xyz'}}
            dft_info = {
                'xc': input_model.model.method,
                'ao': input_model.model.basis.upper(),
                'df_basis': input_model.keywords["df_basis"].upper(),
                'charge': input_model.molecule.molecular_charge
            }
            print_results = {'print': {'results': True}}
            if input_model.driver == 'energy':
                input_dict = {'dft': {**structure, **dft_info}, **print_results}
            # Write gradient call if asked for
            elif input_model.driver == 'gradient':
                input_dict = {'gradient': {**structure, 'dft': {**dft_info}}, **print_results}
            else:
                raise NotImplementedError('Driver {} not implemented for entos.'.format(input_model.driver))
            # Render the nested dict into entos' block syntax
            input_file = self.write_input_recursive(input_dict)
            input_file = "\n".join(input_file)
        else:
            # Some of the potential different template options
            # (A) ordinary build_input (need to define a base template)
            # (B) user wants to add stuff after normal template (A)
            # (C) user knows their domain language (doesn't use any QCSchema quantities)
            # # Build dictionary for substitute
            # sub_dict = {
            #     "method": input_model.model.method,
            #     "basis": input_model.model.basis,
            #     "df_basis": input_model.keywords["df_basis"].upper(),
            #     "charge": input_model.molecule.molecular_charge
            # }
            # Perform substitution to create input file
            str_template = string.Template(template)
            input_file = str_template.substitute()
        return {
            "commands": ["entos", "-n", str(config.ncores), "dispatch.in"],
            "infiles": {
                "dispatch.in": input_file,
                "geometry.xyz": xyz_file
            },
            "scratch_directory": config.scratch_directory,
            "input_result": input_model.copy(deep=True)
        }

    def write_input_recursive(self, d: Dict[str, Any]) -> List:
        """Recursively render a nested dict as indented entos input-file lines."""
        input_file = []
        for key, value in d.items():
            if isinstance(value, dict):
                # Nested dicts become `key( ... )` blocks with indented contents.
                input_file.append(key + '(')
                rec_input = self.write_input_recursive(value)
                indented_line = map(lambda x: " " + x, rec_input)
                input_file.extend(indented_line)
                input_file.append(')')
            else:
                if isinstance(value, str):
                    input_file.append("{0} = '{1}'".format(key, value))
                elif isinstance(value, bool):
                    # entos expects lowercase true/false
                    input_file.append("{0} = {1}".format(key, str(value).lower()))
                else:
                    input_file.append("{0} = {1}".format(key, value))
        return input_file

    def parse_output(self, outfiles: Dict[str, str], input_model: 'ResultInput') -> 'Result':
        """Parse entos stdout (captured as dispatch.out) into a QCSchema Result."""
        output_data = {}
        properties = {}
        # Parse the output file, collect properties and gradient
        output_lines = outfiles["dispatch.out"].split('\n')
        gradients = []
        natom = len(input_model.molecule.symbols)
        for idx, line in enumerate(output_lines):
            fields = line.split()
            if fields[:1] == ["energy:"]:
                properties["scf_total_energy"] = float(fields[-1])
            elif fields[:2] == ["Molecular", "Dipole:"]:
                properties["scf_dipole_moment"] = [float(x) for x in fields[2:5]]
            elif fields[:3] == ["SCF", "converged", "in"]:
                properties["scf_iterations"] = int(fields[3])
            elif fields == ["Gradient", "(hartree/bohr):"]:
                # Gradient is stored as (dE/dx1,dE/dy1,dE/dz1,dE/dx2,dE/dy2,...)
                # and starts two lines below the header, one line per atom.
                for i in range(idx + 2, idx + 2 + natom):
                    grad = output_lines[i].strip('\n').split()[1:]
                    gradients.extend([float(x) for x in grad])
        if input_model.driver == 'gradient':
            if len(gradients) == 0:
                raise ValueError('Gradient not found.')
            else:
                output_data["return_result"] = gradients
        # Replace return_result with final_energy if gradient wasn't called
        if "return_result" not in output_data:
            if "scf_total_energy" in properties:
                output_data["return_result"] = properties["scf_total_energy"]
            else:
                raise KeyError("Could not find SCF total energy")
        output_data["properties"] = properties
        output_data['schema_name'] = 'qcschema_output'
        output_data['success'] = True
        return Result(**{**input_model.dict(), **output_data})
| ChemRacer/QCEngine | qcengine/programs/entos.py | entos.py | py | 8,943 | python | en | code | null | github-code | 6 | [
{
"api_name": "model.ProgramHarness",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "model.ProgramHarness.Config",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "mod... |
24673967273 | # Jumpy! - Platform game
# KidsCanCode - Game Development with python
# Art from Kenney.nl
import pygame as pg
import random
from settings import *
from sprites import *
from os import path
class Game:
    """Top-level game object: owns the window, assets, sprite groups and main loop."""

    def __init__(self):
        """Initialize pygame, the window, the clock, and load persistent data."""
        pg.init()
        pg.mixer.init()
        self.screen = pg.display.set_mode((WIDTH, HEIGHT))
        pg.display.set_caption(TITLE)
        self.clock = pg.time.Clock()
        self.running = True
        # BUGFIX: `playing` was only assigned inside run(); pressing ESC on the
        # start screen reached wait_for_key's `if self.playing:` before the
        # attribute existed and raised AttributeError.
        self.playing = False
        self.font_name = pg.font.match_font(FONT_NAME)
        self.load_data()

    def load_data(self):
        """Load the saved high score and all image/sound assets."""
        self.dir = path.dirname(__file__)
        # BUGFIX: the high-score file was opened with mode 'w', which truncated
        # it on every launch and made f.read() fail on a write-only handle (the
        # bare except then silently reset the score to 0). Open read-only and
        # treat a missing or corrupt file as "no high score yet".
        try:
            with open(path.join(self.dir, HS_FILE), 'r') as f:
                self.highscore = int(f.read())
        except (OSError, ValueError):
            self.highscore = 0
        # load spritesheet image
        img_dir = path.join(self.dir, 'img')
        self.spritesheet = Spritesheet(path.join(img_dir, SPRITESHEET))
        self.cloud_images = []
        for i in range(1, 4):
            self.cloud_images.append(pg.image.load(path.join(img_dir, 'cloud{}.png'.format(i))).convert())
        # load sounds
        self.snd_dir = path.join(self.dir, 'snd')
        self.jump_sound = pg.mixer.Sound(path.join(self.snd_dir, 'Jump33.wav'))
        self.boost_sound = pg.mixer.Sound(path.join(self.snd_dir, 'powerup16.wav'))

    def new(self):
        """Reset per-round state, spawn the starting sprites, and run a round."""
        self.score = 0
        # sprite groups (LayeredUpdates keeps a stable draw order)
        self.all_sprites = pg.sprite.LayeredUpdates()
        self.platforms = pg.sprite.Group()
        self.powerups = pg.sprite.Group()
        self.mobs = pg.sprite.Group()
        self.clouds = pg.sprite.Group()
        self.mob_timer = pg.time.get_ticks()
        # Add a player
        self.player = Player(self)
        # Create the starting platforms
        for plat in PLATFORM_LIST:
            Platform(self, *plat)
        # Pre-spawn some clouds below the initial view
        for i in range(8):
            c = Cloud(self)
            c.rect.y += 500
        pg.mixer.music.load(path.join(self.snd_dir, 'Happy Tune.ogg'))
        pg.mixer.music.set_volume(VOLUME)
        self.run()

    def run(self):
        """Round loop: events -> update -> draw, at FPS, until the round ends."""
        pg.mixer.music.play(loops=-1)
        self.playing = True
        while self.playing:
            self.clock.tick(FPS)
            self.envents()
            self.update()
            self.draw()
        pg.mixer.music.fadeout(500)

    def update(self):
        """Advance the simulation one frame: spawns, collisions, scrolling, death."""
        self.all_sprites.update()
        # Spawn a mob at MOB_FREQ with some jitter
        now = pg.time.get_ticks()
        if now - self.mob_timer > MOB_FREQ + random.choice([1000, -500, 250, -1000]):
            self.mob_timer = now
            Mob(self)
        # Check if the player hits any platform - only while falling
        if self.player.vel.y > 0:
            hits = pg.sprite.spritecollide(self.player, self.platforms, False)
            if hits:
                # land on the lowest platform under the player
                lowest = hits[0]
                for hit in hits:
                    if hit.rect.bottom > lowest.rect.bottom:
                        lowest = hit
                if self.player.pos.x > lowest.rect.left and self.player.pos.x < lowest.rect.right:
                    if self.player.pos.y < lowest.rect.centery:
                        self.player.pos.y = lowest.rect.top  # puts the player on top of the platform
                        self.player.vel.y = 0  # stop the fall
                        self.player.jumping = False
        # Power-up pickups (the powerup sprite is removed on contact)
        hits_pow = pg.sprite.spritecollide(self.player, self.powerups, True)
        if hits_pow:
            for hit in hits_pow:
                if hit.type == 'boost':
                    self.player.vel.y = -BOOST_POWER
                    self.player.jumping = False
        # Mob contact (pixel-perfect via collide_mask) ends the round
        hits_mob = pg.sprite.spritecollide(self.player, self.mobs, False, pg.sprite.collide_mask)
        if hits_mob:
            for hit in hits_mob:
                self.playing = False
        # if player reaches top 1/4 of screen, scroll the world down
        if self.player.rect.top < HEIGHT / 4:
            # spawn a cloud - 5% chance per frame
            if random.randrange(100) < 5:
                Cloud(self)
            self.player.pos.y += max(abs(self.player.vel.y), 2)
            for plat in self.platforms:
                plat.rect.y += max(abs(self.player.vel.y), 2)
                if plat.rect.top > HEIGHT:
                    plat.kill()
                    self.score += 10
            for mob in self.mobs:
                mob.rect.y += max(abs(self.player.vel.y), 2)
                if mob.rect.top > HEIGHT:
                    mob.kill()
            # clouds scroll at varying speed for a parallax effect
            for cloud in self.clouds:
                cloud.rect.y += max(abs(self.player.vel.y / random.randrange(1, 4)), 1)
                if cloud.rect.top > HEIGHT:
                    cloud.kill()
        # if we fall off the bottom, sweep everything up and end the round
        if self.player.rect.top > HEIGHT:
            for sprite in self.all_sprites:
                sprite.rect.y -= max(self.player.vel.y, 10)
                if sprite.rect.bottom < 0:
                    sprite.kill()
        if len(self.platforms) == 0:
            self.playing = False
        # spawn new platforms to keep average number
        while len(self.platforms) < 6:
            width = random.randrange(50, 100)
            Platform(self, random.randrange(0, WIDTH - width),
                     random.randrange(-70, -35))

    def envents(self):
        """Process the pygame event queue.

        NOTE: the (misspelled) name is kept for backward compatibility.
        """
        for event in pg.event.get():
            # window close
            if event.type == pg.QUIT:
                if self.playing:
                    self.playing = False
                self.running = False
            if event.type == pg.KEYDOWN:
                if event.key == pg.K_SPACE:
                    self.player.jump()
                if event.key == pg.K_ESCAPE:
                    if self.playing:
                        self.playing = False
                    self.running = False
            if event.type == pg.KEYUP:
                # releasing space early cuts the jump short
                if event.key == pg.K_SPACE:
                    self.player.jump_cut()

    def draw(self):
        """Render the current frame and flip the display."""
        self.screen.fill(BGCOLOR)
        self.all_sprites.draw(self.screen)
        self.draw_text('Your score: ' + str(self.score), 22, WHITE, WIDTH / 2, 15)
        # *After* drawing everything, flip the display
        pg.display.flip()

    def show_start_screen(self):
        """Show the splash screen until a key is released."""
        pg.mixer.music.load(path.join(self.snd_dir, 'Yippee.ogg'))
        pg.mixer.music.set_volume(VOLUME)
        pg.mixer.music.play(loops=-1)
        self.screen.fill(BGCOLOR)
        self.draw_text(TITLE, 48, WHITE, WIDTH / 2, HEIGHT / 4)
        self.draw_text('Arrows to move, Space to jump', 22, WHITE, WIDTH / 2, HEIGHT / 2)
        self.draw_text('Press a key to play', 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4)
        self.draw_text('High score: ' + str(self.highscore), 22, WHITE, WIDTH / 2, 15)
        pg.display.flip()
        self.wait_for_key()
        pg.mixer.music.fadeout(500)

    def show_go_screen(self):
        """Show the game-over screen and persist a new high score if earned."""
        pg.mixer.music.load(path.join(self.snd_dir, 'Yippee.ogg'))
        pg.mixer.music.set_volume(VOLUME)
        pg.mixer.music.play(loops=-1)
        if self.running:
            self.screen.fill(BGCOLOR)
            self.draw_text("GAME OVER", 48, WHITE, WIDTH / 2, HEIGHT / 4)
            self.draw_text('Score: ' + str(self.score), 22, WHITE, WIDTH / 2, HEIGHT / 2)
            self.draw_text('Press a key to play again', 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4)
            if self.score > self.highscore:
                self.draw_text('NEW HIGH SCORE!', 22, WHITE, WIDTH / 2, HEIGHT / 2 + 40)
                self.highscore = self.score
                # mode 'w' is correct here: overwrite with the new record
                with open(path.join(self.dir, HS_FILE), 'w') as f:
                    f.write(str(self.score))
            else:
                self.draw_text('High score: ' + str(self.highscore), 22, WHITE, WIDTH / 2, HEIGHT / 2 + 40)
            pg.display.flip()
            self.wait_for_key()
        pg.mixer.music.fadeout(500)

    def wait_for_key(self):
        """Block (while pumping events) until any key is released or the app quits."""
        waiting = True
        while waiting:
            self.clock.tick(FPS)
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    waiting = False
                    self.running = False
                if event.type == pg.KEYDOWN:
                    if event.key == pg.K_ESCAPE:
                        if self.playing:
                            self.playing = False
                        self.running = False
                if event.type == pg.KEYUP:
                    waiting = False

    def draw_text(self, text, size, color, x, y):
        """Render *text* centered horizontally on (x, y) with the game font."""
        font = pg.font.Font(self.font_name, size)
        text_surface = font.render(text, True, color)
        text_rect = text_surface.get_rect()
        text_rect.midtop = (x, y)
        self.screen.blit(text_surface, text_rect)
def main():
    """Entry point: run rounds of the game until the player quits, then shut down."""
    game = Game()
    game.show_start_screen()
    while game.running:
        game.new()
        game.show_go_screen()
    pg.quit()
# Script entry point: construct the game and loop rounds until the player quits.
if __name__ == '__main__':
    main()
| guychaimy/jumpy | main.py | main.py | py | 9,548 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.init",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.init",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mo... |
11121080147 | import typing as tp
from datetime import datetime, date
from uuid import uuid4
import pytest
from sqlalchemy import text
from librarius.domain.models import Publication
from librarius.service.uow.implementation import GenericUnitOfWork
if tp.TYPE_CHECKING:
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import TextClause
from librarius.types import Reference
# Apply the ORM mapper-setup fixture to every test in this module.
pytestmark = pytest.mark.usefixtures("mappers")
def insert_publications(
    session: "Session",
    uuid: "Reference",
    title: str,
    date_added: datetime,
    date_modified: datetime,
    date_published: date,
):
    """Insert one row into ``publications`` via a bound raw-SQL statement."""
    statement: "TextClause" = text(
        "INSERT INTO publications (uuid, title, date_added, date_modified, date_published) VALUES (:uuid, :title, :date_added, :date_modified, :date_published)"
    ).bindparams(
        uuid=uuid,
        title=title,
        date_added=date_added,
        date_modified=date_modified,
        date_published=date_published,
    )
    session.execute(statement)
def retrieve(query, uow):
    """Return every Publication through the unit of work (``query`` is unused)."""
    with uow:
        all_publications = uow.session.query(Publication).all()
        return all_publications
def test_uow_can_retrieve_a_publication(sqlite_session_factory):
    """Insert a publication with raw SQL, then read it back through the UoW.

    BUGFIX: the test previously performed setup but asserted nothing (the
    verification code was commented out), so it could never fail.
    """
    session: "Session" = sqlite_session_factory()
    pub_uuid = str(uuid4())
    insert_publications(
        session, pub_uuid, "Cerbulan Book", datetime.now(), datetime.now(), date.today()
    )
    session.commit()
    uow = GenericUnitOfWork(sqlite_session_factory)
    with uow:
        [publication] = uow.session.query(Publication).all()
        assert publication.uuid == pub_uuid
        assert publication.title == "Cerbulan Book"
# def test_1(sqlite_session_factory):
# session: Session = sqlite_session_factory()
# uu = str(uuid.uuid4())
# title = "Cerbulan"
# date_added = datetime.now()
# date_modified = datetime.now()
# date_published = datetime.now()
# #session.execute("INSERT INTO publications (uuid, title, date_added, date_modified, date_published VALUES (:uuid, :title, :date_added, :date_modified, :date_published)),",
# # dict(uuid=uu, title=title, date_added=date_added, date_modified=date_modified, date_published=date_published))
# #insert_publications(session, uu, title, date_added, date_modified, date_published)
# expression: TextClause = text(
# "INSERT INTO publications (uuid, title, date_added, date_modified, date_published) VALUES (:uuid, :title, :date_added, :date_modified, :date_published)"
# )
# expression: TextClause = expression.bindparams(
# uuid=uu, title=title, date_added=date_added, date_modified=date_modified, date_published=date_published
# )
# session.execute(expression)
# from sqlalchemy.engine.cursor import CursorResult
# session.commit()
# result: CursorResult = session.execute("SELECT * FROM publications")
# [berba] = result
# #print(berba)
# p1: Publication = session.query(Publication).filter_by(uuid=uu).first()
# assert p1.uuid == uu
#
| adriangabura/vega | tests/integration/test_uow.py | test_uow.py | py | 3,023 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.usefixtures",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "da... |
34670410766 | #!/usr/bin/python3
import mysql.connector
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.stem import WordNetLemmatizer
from lib.constants import brand_name_list, device_type_list, cwe_to_exp_type
from vul_scanner import query_iot_cve_from_cvetable
from lib.query_mysql import write_to_vul_analysis_table, query_cve_from_cvetable_given_cveid
def parse_description(desc):
    """
    Convert a vulnerability description into lemmatized word lists.
    :param desc: a string of one or more sentences describing the vulnerability
    :return: (lemma_list, lemma_list_raw, desc_lower) where lemma_list is built
        from the lower-cased text, lemma_list_raw from the original casing, and
        desc_lower is the lower-cased description string
    """
    lemmatizer = WordNetLemmatizer()
    desc_lower = desc.lower()
    # lemmatize every token of every sentence (verb POS), lower-cased variant
    lemma_list = [
        lemmatizer.lemmatize(word, pos='v')
        for sent in sent_tokenize(desc_lower)
        for word in word_tokenize(sent)
    ]
    # same tokenization over the original casing (keeps acronyms like MITM/XSS)
    lemma_list_raw = [
        lemmatizer.lemmatize(word, pos='v')
        for sent in sent_tokenize(desc)
        for word in word_tokenize(sent)
    ]
    return lemma_list, lemma_list_raw, desc_lower
def get_protocol(lemma_list):
    """
    Infer the wireless protocol from the lemmatized description words.
    Wifi-style markers are checked first, so a description mentioning both
    e.g. 'http' and 'bluetooth' classifies as 'wifi'.
    :param lemma_list: list of lemmatized words from the description
    :return: 'wifi', 'bluetooth', 'zigbee', 'zwave' or 'undecided'
    """
    words = set(lemma_list)
    if words & {'wifi', 'wi-fi', 'tcp', 'udp', 'http', 'dns', 'telnet', 'mqtt'}:
        return 'wifi'
    if words & {'bluetooth', 'ble'}:
        return 'bluetooth'
    if 'zigbee' in words:
        return 'zigbee'
    if words & {'zwave', 'z-wave'}:
        return 'zwave'
    return 'undecided'
def full_fledged(lemma_list, device_type):
    """
    Decide if the device is full-fledged (a capable device such as a camera,
    router, hub, TV, printer, base station or thermostat).

    The original duplicated the seven device types across two hand-written
    boolean chains; they are deduplicated into a single tuple here.
    :param lemma_list: a list of lemmatized words from vulnerability description
    :param device_type: a string of device type
    :return: True if either the description or the device type names a
        full-fledged device category
    """
    types = ('camera', 'router', 'hub', 'tv', 'printer', 'basestation', 'thermostat')
    return device_type in types or any(t in lemma_list for t in types)
def is_dos(lemma_list, desc_lower, C, I, A):
    """
    Decide if the exploit type is DoS, from keywords or the CVSS CIA pattern.
    :return: a boolean value
    """
    if 'dos' in lemma_list or 'crash' in lemma_list:
        return True
    if 'denial of service' in desc_lower or 'denial-of-service' in desc_lower:
        return True
    # CVSS pattern: no C/I impact but complete availability loss
    return C == 0 and I == 0 and A == 2
def is_buffer_overflow(desc_lower):
    """
    Decide if the exploit type is buffer overflow.
    :return: a boolean value
    """
    markers = ('buffer overflow', 'buffer overrun', 'stack overflow')
    return any(marker in desc_lower for marker in markers)
def is_man_in_the_middle(lemma_list, lemma_list_raw, desc_lower):
    """
    Decide if the exploit type is man in the middle.
    :return: a boolean value
    """
    if 'MITM' in lemma_list_raw:
        return True
    return 'man-in-the-middle' in lemma_list or 'man in the middle' in desc_lower
def is_xss(lemma_list_raw, desc_lower):
    """
    Decide if the exploit type is XSS (cross-site scripting).
    :return: a boolean value
    """
    phrases = ('cross-site scripting', 'cross site scripting')
    return 'XSS' in lemma_list_raw or any(p in desc_lower for p in phrases)
def is_csrf(lemma_list_raw, desc_lower):
    """
    Decide if the exploit type is CSRF (cross-site request forgery).
    :return: a boolean value
    """
    if 'CSRF' in lemma_list_raw or 'XSRF' in lemma_list_raw:
        return True
    phrases = ('cross-site request forgery', 'cross site request forgery')
    return any(p in desc_lower for p in phrases)
def decide_exploit_precondition(exploit_range, desc, device_type):
    """
    Map a CVE's CVSS attack vector plus its description onto a precondition label.

    NVD's original `Network` vector can be misleading (CVSS lacks the context to
    decide the real range) and `Adjacent` does not distinguish physical from
    logical adjacency, so the description text is used to refine both cases.
    :param exploit_range: CVSS attack vector: NETWORK, ADJACENT_NETWORK, LOCAL, PHYSICAL
    :param desc: a string of one or multiple sentences for vulnerability description
    :param device_type: a string of device_type
    :return: a string indicating the exploit precondition
    """
    lemma_list, lemma_list_raw, desc_lower = parse_description(desc)
    direct = {'PHYSICAL': 'physical', 'LOCAL': 'local'}
    if exploit_range in direct:
        return direct[exploit_range]
    # Decide the protocol based on vulnerability descriptions
    protocol = get_protocol(lemma_list)
    if exploit_range == 'ADJACENT_NETWORK':
        # refine into physically vs. logically adjacent
        return decide_precondition_for_original_adjacent(protocol, lemma_list)
    # `NETWORK` may be an over-claim; double-check against the description
    return decide_precondition_for_original_network(device_type, protocol, lemma_list, lemma_list_raw, desc_lower)
def decide_precondition_for_original_adjacent(protocol, lemma_list):
    """Refine an `ADJACENT_NETWORK` vector into a protocol-tagged precondition."""
    low_power = ('bluetooth', 'zigbee', 'zwave')
    if protocol in low_power:
        return protocol + ':' + decide_precondition_low_power_protocol(lemma_list)
    # wifi exploits -- and undecided adjacent ones -- require joining the wifi
    # network first, so both map to the same label
    return 'wifi:adjacent_logically'
def decide_precondition_for_original_network(device_type, protocol, lemma_list, lemma_list_raw, desc_lower):
    """Sanity-check a `NETWORK` vector and downgrade it to an adjacent label
    when the device/description suggests the exploit is not truly remote."""
    if 'remote' in lemma_list:
        return 'network'
    fledged = full_fledged(lemma_list, device_type)
    # browser-mediated attacks on full-fledged devices keep the network range
    web_attack = is_xss(lemma_list_raw, desc_lower) or is_csrf(lemma_list_raw, desc_lower) or 'dns rebinding' in desc_lower
    if web_attack and fledged:
        return 'network'
    # a constrained device with no `remote` keyword is only adjacently reachable
    if not fledged:
        if protocol in ('bluetooth', 'zigbee', 'zwave'):
            return protocol + ':' + decide_precondition_low_power_protocol(lemma_list)
        return 'wifi:adjacent_logically'
    return 'network'
def decide_precondition_low_power_protocol(lemma_list):
    """Classify a low-power-protocol exploit as physically or logically adjacent."""
    passive_markers = ('sniff', 'decrypt', 'eavesdrop', 'intercept')
    if any(marker in lemma_list for marker in passive_markers):
        return 'adjacent_physically'
    return 'adjacent_logically'
def decide_exploit_effect(desc, device_type, C, I, A):
    """
    Decide the effect of an exploit based on its natural language description.
    :param desc: a string of one or multiple sentences for vulnerability description
    :param device_type: a string of device_type
    :param C: confidentiality, 2: COMPLETE, 1: PARTIAL, 0: NONE
    :param I: integrity, 2: COMPLETE, 1: PARTIAL, 0: NONE
    :param A: availability, 2: COMPLETE, 1: PARTIAL, 0: NONE
    :return: a string indicating the exploit effect
    """
    lemma_list, lemma_list_raw, desc_lower = parse_description(desc)
    # Here are some rules based on keywords in the descriptions
    if 'root' in lemma_list or 'arbitrary' in lemma_list:
        if full_fledged(lemma_list, device_type):
            return 'rootPrivilege'
        else:
            return 'commandInjection'
    if 'control' in lemma_list or 'take over' in desc_lower:
        return 'deviceControl'
    if (('inject' in lemma_list or 'insert' in lemma_list or 'execute' in lemma_list) and 'command' in lemma_list) or (
            'hijack' in lemma_list and 'request' in lemma_list):
        return 'commandInjection'
    # BUGFIX: the original tested the bare string 'obtain' (always truthy), so
    # this branch matched *every* description containing 'data' or 'event';
    # it must test membership in lemma_list like its siblings.
    if ('inject' in lemma_list or 'insert' in lemma_list or 'obtain' in lemma_list) and (
            'data' in lemma_list or 'event' in lemma_list):
        return 'eventAccess'
    if ('steal' in lemma_list or 'obtain' in lemma_list or 'retrieve' in lemma_list) and (
            'wifi' in lemma_list or 'wi-fi' in lemma_list):
        return 'wifiAccess'
    if is_dos(lemma_list, desc_lower, C, I, A):
        return 'DoS'
    # Here are some customized rules based on CIA triad
    # CIA all high: root privilege on a full-fledged device, otherwise control
    if C == 2 and I == 2 and A == 2:
        if full_fledged(lemma_list, device_type):
            return 'rootPrivilege'
        return 'deviceControl'
    # Now we need to construct more complicated rules
    # rule for door lock
    if 'unlock' in lemma_list and 'lock' in lemma_list:
        return 'commandInjection'
    # rule for light bulb
    if 'turn on' in desc_lower and ('light' in lemma_list or 'bulb' in lemma_list):
        return 'commandInjection'
    # rule for buffer overflow
    if is_buffer_overflow(desc_lower):
        if 'inject' in lemma_list or 'hijack' in lemma_list or 'hijacking' in lemma_list:
            if full_fledged(lemma_list, device_type):
                return 'rootPrivilege'
            else:
                return 'commandInjection'
        else:
            return 'DoS'
    return 'unknown_exploit_effect'
def decide_exploit_type(cwe, cwe_to_exp_type, desc, C, I, A):
    """
    Decide the type of an exploit from its description first, falling back to
    the CWE-to-type mapping.
    :param cwe: a string of the CWE-ID of the NVD-CVE entry
    :param cwe_to_exp_type: a dictionary mapping CWE-ID to exploit types
    :param desc: a string of one or multiple sentences for vulnerability description
    :param C: confidentiality, 2: COMPLETE, 1: PARTIAL, 0: NONE
    :param I: integrity, 2: COMPLETE, 1: PARTIAL, 0: NONE
    :param A: availability, 2: COMPLETE, 1: PARTIAL, 0: NONE
    :return: a string of exploit types
    """
    lemma_list, lemma_list_raw, desc_lower = parse_description(desc)
    checks = (
        (is_dos(lemma_list, desc_lower, C, I, A), 'Denial of Service'),
        (is_buffer_overflow(desc_lower), 'Buffer Overflow'),
        (is_man_in_the_middle(lemma_list, lemma_list_raw, desc_lower), 'Man in the Middle'),
    )
    for matched, label in checks:
        if matched:
            return label
    return cwe_to_exp_type.get(cwe, 'unknown_exploit_type')
def vul_analyzer(cve_id, device_type):
    """
    Analyze the given CVE ID and return the exploit model.
    :param cve_id: a string of CVE ID
    :param device_type: device type can help to decide exploit precondition and effect
    :return: a tuple of (cve_id, precondition, effect, probability, impact_score)
    """
    # Create a MySQL connect object and cursor object.
    db = mysql.connector.connect(host='localhost', user='YOUR_USERNAME_HERE', password='YOUR_PASSWORD_HERE', database='cve')
    cursor = db.cursor()
    try:
        # Query MySQL database to get the cve_tuple
        cve_id, cwe, probability, impact_score, exploit_range, desc, C, I, A = query_cve_from_cvetable_given_cveid(cursor, cve_id)
    finally:
        # BUGFIX: the cursor/connection were never closed, leaking one MySQL
        # connection per call.
        cursor.close()
        db.close()
    precondition = decide_exploit_precondition(exploit_range, desc, device_type)
    effect = decide_exploit_effect(desc, device_type, C, I, A)
    # exploit_type = decide_exploit_type(cwe, cwe_to_exp_type, desc, C, I, A)
    return cve_id, precondition, effect, probability, impact_score
def main():
    """Batch mode: classify every queried IoT CVE and persist its exploit model
    into the vul_analysis table."""
    # Create a MySQL connect object and cursor object.
    db = mysql.connector.connect(host='localhost', user='YOUR_USERNAME_HERE', password='YOUR_PASSWORD_HERE', database='cve')
    cursor = db.cursor()
    try:
        # Create the dictionary to store queried CVEs for IoT devices
        iot_cve_dict = query_iot_cve_from_cvetable(cursor, brand_name_list, device_type_list)
        # Parse CVE descriptions to decide the effect type of each exploit
        for (brand_name, device_type) in iot_cve_dict:
            cve_tuple_list = iot_cve_dict[(brand_name, device_type)]
            for (cveid, cwe, probability, impact_score, exploit_range, desc, C, I, A) in cve_tuple_list:
                precondition = decide_exploit_precondition(exploit_range, desc, device_type)
                effect = decide_exploit_effect(desc, device_type, C, I, A)
                exploit_type = decide_exploit_type(cwe, cwe_to_exp_type, desc, C, I, A)
                cve_exploit_model = (cveid, exploit_type, precondition, effect, probability, impact_score, desc)
                write_to_vul_analysis_table(db, cursor, cve_exploit_model)
    finally:
        # BUGFIX: ensure the cursor and connection are released even if a
        # query or write raises part-way through the batch.
        cursor.close()
        db.close()
def test_vul_analyzer():
    """Smoke test: analyze a known CVE (requires a populated local MySQL `cve` DB)."""
    return vul_analyzer('CVE-2019-3949', 'base station')
# should return: ('CVE-2019-3949', 'network', 'rootPrivilege', 0.98)
# Script entry point: print the exploit model for the smoke-test CVE.
if __name__ == '__main__':
    print(test_vul_analyzer())
| pmlab-ucd/IOTA | python/vul_analyzer.py | vul_analyzer.py | py | 13,241 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "nltk.stem.WordNetLemmatizer",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.sent_tokenize",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.sent_tokenize",
"line_number": 25,
"usage_type": "call"
},
{
... |
18781100050 | from pathlib import Path
from environs import Env
env = Env()
env.read_env()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
PROJECT_DIR = BASE_DIR / "project"
SECRET_KEY = env.str("SECRET_KEY", default="something-very-secret")
DEBUG = env.bool("DEBUG", default=False)
ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", default=["*"])
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"debug_toolbar",
"allauth",
"allauth.account",
"utils",
"accounting",
"membership",
]
DATABASES = {"default": env.dj_db_url("DATABASE_URL")}
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
ROOT_URLCONF = "project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [PROJECT_DIR / "templates"],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
WSGI_APPLICATION = "project.wsgi.application"
AUTH_PASSWORD_VALIDATORS = []
LANGUAGE_CODE = "da-dk"
TIME_ZONE = "Europe/Copenhagen"
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = "/static/"
STATICFILES_DIRS = [PROJECT_DIR / "static"]
STATIC_ROOT = BASE_DIR / "static"
SITE_ID = 1
LOGIN_REDIRECT_URL = "/"
EMAIL_BACKEND = env.str(
"EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
DEFAULT_FROM_EMAIL = env.str("DEFAULT_FROM_EMAIL", default="")
# Parse email URLs, e.g. "smtp://"
email = env.dj_email_url("EMAIL_URL", default="smtp://")
EMAIL_HOST = email["EMAIL_HOST"]
EMAIL_PORT = email["EMAIL_PORT"]
EMAIL_HOST_PASSWORD = email["EMAIL_HOST_PASSWORD"]
EMAIL_HOST_USER = email["EMAIL_HOST_USER"]
EMAIL_USE_TLS = email["EMAIL_USE_TLS"]
# Always show DDT in development for any IP, not just 127.0.0.1 or
# settings.INTERNAL_IPS. This is useful in a docker setup where the
# requesting IP isn't static.
DEBUG_TOOLBAR_CONFIG = {
"SHOW_TOOLBAR_CALLBACK": lambda _x: DEBUG,
}
# We store all translations in one location
LOCALE_PATHS = [PROJECT_DIR / "locale"]
# Allauth configuration
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_USERNAME_REQUIRED = False | valberg/django_project_template | src/config/settings.py | settings.py | py | 3,358 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "environs.Env",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
}
] |
42572778156 | from distutils.core import setup
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='django-view-extractor',
version='0.1.0',
packages=setuptools.find_packages(),
url='https://www.quickrelease.co.uk',
license='GNU GPLv3',
author='Nick Solly',
author_email='nick.solly@quickrelease.co.uk',
description='Extract Django views, urls and permissions',
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=[
'tabulate==0.8.6',
],
)
| QuickRelease/django-view-extractor | setup.py | setup.py | py | 577 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "distutils.core.setup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 10,
"usage_type": "call"
}
] |
71174596349 | from __future__ import unicode_literals
import re
import os
import io
import sys
PY3 = sys.version_info.major > 2
try:
from urllib.parse import quote # py3
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
except ImportError: # py2
from urllib import quote
from urllib2 import urlopen, HTTPError, URLError
import logging
from collections import namedtuple
from wx import GetTranslation as _
try:
from html import escape # py3
except ImportError:
from cgi import escape # py2
from abc_character_encoding import abc_text_to_unicode
if PY3:
unichr = chr
# this file contains many regular expression patterns
# for understanding these regular expressions:
# https://regex101.com/#python
# http://abcnotation.com/wiki/abc:standard:v2.1#information_field_definition
# keyword | name |file header | tune header | tune body | inline | type
abc_keywords = """\
A:|area |yes |yes |no |no |string
B:|book |yes |yes |no |no |string
C:|composer |yes |yes |no |no |string
D:|discography |yes |yes |no |no |string
F:|file url |yes |yes |no |no |string
G:|group |yes |yes |no |no |string
H:|history |yes |yes |no |no |string
I:|instruction |yes |yes |yes |yes |instruction
K:|key |no |last |yes |yes |instruction
L:|unit note length |yes |yes |yes |yes |instruction
M:|meter |yes |yes |yes |yes |instruction
m:|macro |yes |yes |yes |yes |instruction
N:|notes |yes |yes |yes |yes |string
O:|origin |yes |yes |no |no |string
P:|parts |no |yes |yes |yes |instruction
Q:|tempo |no |yes |yes |yes |instruction
R:|rhythm |yes |yes |yes |yes |string
r:|remark |yes |yes |yes |yes |string
S:|source |yes |yes |no |no |string
s:|symbol line |no |no |yes |no |instruction
T:|tune title |no |second |yes |no |string
U:|user defined |yes |yes |yes |yes |instruction
V:|voice |no |yes |yes |yes |instruction
W:|words (at the end) |no |yes |yes |no |string
w:|words (note aligned) |no |no |yes |no |string
X:|reference number |no |first |no |no |instruction
Z:|transcription |yes |yes |no |no |string
"""
clef_name_pattern = 'treble|bass3|bass|tenor|auto|baritone|soprano|mezzosoprano|alto2|alto1|alto|perc|none|C[1-5]|F[1-5]|G[1-5]'
simple_note_pattern = "[a-gA-G][',]*"
clef_pattern = ' *?(?P<clef>(?: (?P<clefprefix>(?:clef=)?)(?P<clefname>{1})(?P<stafftranspose>(?:[+^_-]8)?))?) *?(?P<octave>(?: octave=-?\d+)?) *?(?P<stafflines>(?: stafflines=\d+)?) *?(?P<playtranspose>(?: transpose=-?\d+)?) *?(?P<score>(?: score={0}{0})?) *?(?P<sound>(?: sound={0}{0})?) *?(?P<shift>(?: shift={0}{0})?) *?(?P<instrument>(?: instrument={0}(?:/{0})?)?)'.format(simple_note_pattern, clef_name_pattern)
key_ladder = 'Fb Cb Gb Db Ab Eb Bb F C G D A E B F# C# G# D# A# E# B#'.split(' ')
whitespace_chars = u' \r\n\t'
abc_inner_pattern = {
'K:': r' ?(?:(?P<tonic>(?:[A-G][b#]?|none)) ??(?P<mode>(?:[MmDdPpLl][A-Za-z]*)?)(?P<accidentals>(?: +(?P<accidental>_{1,2}|=|\^{1,2})(?P<note>[a-g]))*)'+clef_pattern+')?',
'Q:': r'(?P<pre_text>(?: ?"(?P<pre_name>(?:\\"|[^"])*)")?)(?P<metronome>(?: ?(?P<note1>\d+/\d+) ?(?P<note2>\d+/\d+)? ?(?P<note3>\d+/\d+)? ?(?P<note4>\d+/\d+)?=(?P<bpm>\d+))?)(?P<post_text>(?: ?"(?P<post_name>\w*)")?)',
'V:': r' ?(?P<name>\w+)' + clef_pattern
}
name_to_display_text = {
'staves' : _('Staff layout' ),
'area' : _('Area' ),
'book' : _('Book' ),
'composer' : _('Composer' ),
'discography' : _('Discography' ),
'file url' : _('File url' ),
'group' : _('Group' ),
'history' : _('History' ),
'instruction' : _('Instruction' ),
'key' : _('Key' ),
'unit note length' : _('Unit note length' ),
'meter' : _('Meter' ),
'macro' : _('Macro' ),
'notes' : _('Notes' ),
'origin' : _('Origin' ),
'parts' : _('Parts' ),
'tempo' : _('Tempo' ),
'rhythm' : _('Rhythm' ),
'remark' : _('Remark' ),
'source' : _('Source' ),
'symbol line' : _('Symbol line' ),
'tune title' : _('Tune title' ),
'user defined' : _('User defined' ),
'voice' : _('Voice' ),
'words (note aligned)' : _('Words (note aligned)'),
'words (at the end)' : _('Words (at the end)'),
'reference number' : _('Reference number' ),
'transcription' : _('Transcription' ),
}
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
if PY3:
return type('Enum', (), enums)
else:
return type(b'Enum', (), enums)
TuneScope = enum('FullText', 'SelectedText', 'SelectedLines', 'TuneHeader', 'TuneBody', 'Tune', 'TuneUpToSelection', 'BodyUpToSelection', 'BodyAfterSelection', 'LineUpToSelection', 'FileHeader', 'PreviousLine', 'MatchText', 'InnerText', 'PreviousCharacter', 'NextCharacter')
TuneScopeInfo = namedtuple('TuneScopeInfo', 'text start stop encoded_text')
InnerMatch = namedtuple('InnerMatch', 'match offset')
class ValueDescription(object):
def __init__(self, value, description, common=True, show_value=False, alternate_values=None):
super(ValueDescription, self).__init__()
self.value = value
self.description = description
self.show_value = show_value
self.common = common
self.alternate_values = alternate_values or []
class CodeDescription(ValueDescription):
def __init__(self, value, description, common=True, alternate_values=None):
super(CodeDescription, self).__init__(value, description, common=common, show_value=True, alternate_values=alternate_values)
class ValueImageDescription(ValueDescription):
def __init__(self, value, image_name, description, common=True, show_value=False):
super(ValueImageDescription, self).__init__(value, description, common=common, show_value=show_value)
self.image_name = image_name
class CodeImageDescription(ValueImageDescription):
def __init__(self, value, image_name, description, common=True):
super(CodeImageDescription, self).__init__(value, image_name, description, common=common, show_value=True)
decoration_aliases = {
'!>!' : '!accent!',
'!^!' : '!marcato!',
'!emphasis!': '!accent!',
'!<(!' : '!crescendo(!',
'!<)!' : '!crescendo)!',
'!>(!' : '!diminuendo(!',
'!>)!' : '!diminuendo)!',
'!+!' : '!plus!',
}
decoration_to_description = {
'.' : _('staccato mark'),
'~' : _('Irish roll'),
'H' : _('fermata'),
'L' : _('accent or emphasis'),
'M' : _('lowermordent'),
'O' : _('coda'),
'P' : _('uppermordent'),
'S' : _('segno'),
'T' : _('trill'),
'u' : _('down-bow'),
'v' : _('up-bow'),
'!trill!' : _('trill'),
'!trill(!' : _('start of an extended trill'),
'!trill)!' : _('end of an extended trill'),
'!lowermordent!' : _('lower mordent'),
'!uppermordent!' : _('upper mordent'),
'!mordent!' : _('mordent'),
'!pralltriller!' : _('pralltriller'),
'!roll!' : _('Irish roll'),
'!turn!' : _('turn or gruppetto'),
'!turnx!' : _('a turn mark with a line through it'),
'!invertedturn!' : _('an inverted turn mark'),
'!invertedturnx!' : _('an inverted turn mark with a line through it'),
'!arpeggio!' : _('arpeggio'),
'!>!' : _('accent or emphasis'),
'!accent!' : _('accent or emphasis'),
'!emphasis!' : _('accent or emphasis'),
'!^!' : _('marcato'),
'!marcato!' : _('marcato'),
'!fermata!' : _('fermata or hold'),
'!invertedfermata!': _('upside down fermata'),
'!tenuto!' : _('tenuto'),
'!0!' : _('no finger'),
'!1!' : _('thumb'),
'!2!' : _('index finger'),
'!3!' : _('middle finger'),
'!4!' : _('ring finger'),
'!5!' : _('little finger'),
'!+!' : _('left-hand pizzicato'),
'!plus!' : _('left-hand pizzicato'),
'!snap!' : _('snap-pizzicato'),
'!slide!' : _('slide up to a note'),
'!wedge!' : _('staccatissimo or spiccato'),
'!upbow!' : _('up-bow'),
'!downbow!' : _('down-bow'),
'!open!' : _('open string or harmonic'),
'!thumb!' : _('cello thumb symbol'),
'!breath!' : _('breath mark'),
'!pppp!' : _('pianissimo possibile'),
'!ppp!' : _('pianississimo'),
'!pp!' : _('pianissimo'),
'!p!' : _('piano'),
'!mp!' : _('mezzopiano'),
'!mf!' : _('mezzoforte'),
'!f!' : _('forte'),
'!ff!' : _('fortissimo'),
'!fff!' : _('fortississimo'),
'!ffff!' : _('fortissimo possibile'),
'!sfz!' : _('sforzando'),
'!crescendo(!' : _('start of a < crescendo mark'),
'!<(!' : _('start of a < crescendo mark'),
'!crescendo)!' : _('end of a < crescendo mark'),
'!<)!' : _('end of a < crescendo mark'),
'!diminuendo(!' : _('start of a > diminuendo mark'),
'!>(!' : _('start of a > diminuendo mark'),
'!diminuendo)!' : _('end of a > diminuendo mark'),
'!>)!' : _('end of a > diminuendo mark'),
'!segno!' : _('segno'),
'!coda!' : _('coda'),
'!D.S.!' : _('the letters D.S. (=Da Segno)'),
'!D.C.!' : _('the letters D.C. (=either Da Coda or Da Capo)'),
'!dacoda!' : _('the word "Da" followed by a Coda sign'),
'!dacapo!' : _('the words "Da Capo"'),
'!D.C.alcoda!' : _('the words "D.C. al Coda"'),
'!D.C.alfine!' : _('the words "D.C. al Fine"'),
'!D.S.alcoda!' : _('the words "D.S. al Coda"'),
'!D.S.alfine!' : _('the words "D.S. al Fine"'),
'!fine!' : _('the word "fine"'),
'!shortphrase!' : _('vertical line on the upper part of the staff'),
'!mediumphrase!' : _('vertical line on the upper part of the staff, extending down to the centre line'),
'!longphrase!' : _('vertical line on the upper part of the staff, extending 3/4 of the way down'),
'!ped!' : _('sustain pedal down'),
'!ped-up!' : _('sustain pedal up'),
'!editorial!' : _('editorial accidental above note'),
'!courtesy!' : _('courtesy accidental between parentheses'),
}
ABC_TUNE_HEADER_NO = 0
ABC_TUNE_HEADER_FIRST = 1
ABC_TUNE_HEADER_SECOND = 2
ABC_TUNE_HEADER_YES = 3
ABC_TUNE_HEADER_LAST = 4
tune_header_lookup = {'no': ABC_TUNE_HEADER_NO, 'first': ABC_TUNE_HEADER_FIRST, 'second': ABC_TUNE_HEADER_SECOND, 'yes': ABC_TUNE_HEADER_YES, 'last': ABC_TUNE_HEADER_LAST}
AbcSection = enum('FileHeader', 'TuneHeader', 'TuneBody', 'OutsideTune')
ABC_SECTIONS = [
AbcSection.FileHeader,
AbcSection.TuneHeader,
AbcSection.TuneBody,
AbcSection.OutsideTune
]
chord_notes = {
'' : ( 0, 4, 7 ), # 'Major'
'm' : ( 0, 3, 7 ), # 'Minor'
'dim' : ( 0, 3, 6 ), # 'Diminished'
'+' : ( 0, 4, 8 ), # 'Augmented'
'sus' : ( 0, 5, 7 ), # 'Suspended'
'sus2' : ( 0, 2, 7 ), # 'Suspended (2nd)
'7' : ( 0, 4, 7, 10 ), # 'Seventh'
'M7' : ( 0, 4, 7, 11 ), # 'Major seventh'
'mM7' : ( 0, 3, 7, 11 ), # 'Minor-major seventh'
'm7' : ( 0, 3, 7, 10 ), # 'Minor seventh'
'augM7' : ( 0, 4, 8, 11 ), # 'Augmented-major seventh'
'aug7' : ( 0, 4, 8, 10 ), # 'Augmented seventh'
'6' : ( 0, 4, 7, 9 ), # 'Major sixth'
'm6' : ( 0, 3, 7, 9 ), # 'Minor sixth'
'm7b5' : ( 0, 3, 6, 10 ), # 'Half-diminished seventh'
'dim7' : ( 0, 3, 6, 9 ), # 'Diminished seventh'
'7b5' : ( 0, 4, 6, 10 ), # 'Seventh flat five'
'5' : ( 0, 7 ), # 'Power-chord (no third
'7sus' : ( 0, 5, 7, 10 ), # 'Seventh suspended'
'7sus2' : ( 0, 2, 7, 10 ), # 'Seventh suspended (2nd
'M9' : ( 0, 4, 7, 11, 14 ), # 'Major 9th'
'9' : ( 0, 4, 7, 10, 14 ), # 'Dominant 9th'
'mM9' : ( 0, 3, 7, 11, 14 ), # 'Minor Major 9th'
'm9' : ( 0, 3, 7, 10, 14 ), # 'Minor Dominant 9th'
'+M9' : ( 0, 4, 8, 11, 14 ), # 'Augmented Major 9th'
'+9' : ( 0, 4, 8, 10, 14 ), # 'Augmented Dominant 9th'
'o/9' : ( 0, 3, 6, 10, 14 ), # 'Half-Diminished 9th'
'o/9b' : ( 0, 3, 6, 10, 13 ), # 'Half-Diminished Minor 9th'
'dim9' : ( 0, 3, 6, 9, 14 ), # 'Diminished 9th'
'dim9b' : ( 0, 3, 6, 9, 13 ), # 'Diminished Minor 9th'
'11' : ( 0, 4, 7, 10, 14, 17 ), # 'Dominant 11th'
}
def replace_text(text, replacements):
"""
:param text: text that requires replacements
:param replacements: A sequence of tuples in the form (compiled regular expression object, replacement value)
:return: the original text with all replacements applied
"""
for regex, replace_value in replacements:
text = regex.sub(replace_value, text)
return text
def remove_named_groups(pattern):
"""
:param pattern: regular expression pattern
:return: regular expression pattern where named groups are removed
"""
return re.sub(r'(?<=\(\?)P<[^>]+>', ':', pattern)
def replace_named_group(pattern, old_group, new_group=None):
"""
:param pattern: regular expression pattern (containing named groups)
:param old_group: original groupname
:param new_group: desired groupname
:return: regular expression pattern where named group old_group is replaced by new_group
"""
if new_group is None:
replace_value = ':'
else:
replace_value = 'P<{0}>'.format(new_group)
return re.sub(r'(?<=\(\?)P<{0}>'.format(old_group), replace_value, pattern)
def get_html_from_url(url):
result = u''
try:
result = urlopen(url).read()
except HTTPError as ex:
pass
except URLError as ex:
pass
return result
class AbcElement(object):
"""
Base class for each element in abc-code where element is a piece of structured abc-code
"""
rest_of_line_pattern = r'(?P<inner>.*?)(?:(?<!\\)%.*)?$'
def __init__(self, name, keyword=None, display_name=None, description=None, validation_pattern=None):
self.name = name
self.keyword = keyword
if display_name is None:
self.__display_name = name_to_display_text.get(name, name[:1].upper() + name[1:])
else:
self.__display_name = display_name
self.description = description
self.mandatory = False
self.default = None
self.rest_of_line_pattern = AbcElement.rest_of_line_pattern
self._search_pattern = {}
self._search_re = {} # compiled regex
self.params = []
self.validation_pattern = validation_pattern
self.__validation_re = None
self.supported_values = None
self.tune_scope = TuneScope.SelectedLines
self.visible_match_group = None
self.removable_match_groups = {}
@staticmethod
def get_inline_pattern(keyword):
return r'\[' + re.escape(keyword) + r'([^\]\n\r]*)\]'
def freeze(self):
for section in ABC_SECTIONS:
pattern = self._search_pattern.get(section, None)
if pattern is not None:
self._search_re[section] = re.compile(pattern)
if self.validation_pattern is not None:
self.__validation_re = re.compile(self.validation_pattern)
@property
def valid_sections(self):
return [section for section in ABC_SECTIONS if self._search_pattern.get(section) is not None]
def matches(self, context):
regex = self._search_re.get(context.abc_section, None)
if regex is None:
return None
result = None
scope_info = context.get_scope_info(self.tune_scope)
encoded_text = scope_info.encoded_text
text = scope_info.text
p1, p2 = context.get_selection_within_scope(self.tune_scope)
if len(text) != len(encoded_text):
p1 = len(encoded_text[:p1].decode('utf-8'))
p2 = len(encoded_text[:p2].decode('utf-8'))
if p1 == p2 and 0 < p1 <= len(text) and text[p1 - 1] not in whitespace_chars:
p1 -= 1
for m in regex.finditer(text):
if m.start() <= p1 < m.end():
result = m
break
else:
# if p1 > len(text):
# print(u'Selection ({0}) past length ({1})'.format(p1, len(text)))
for m in regex.finditer(text):
if m.start() <= p1 <= p2 <= m.end():
result = m
break
return result
def get_regex_for_section(self, section):
return self._search_re.get(section, None)
def matches_text(self, context, text):
regex = self._search_re.get(context.abc_section, None)
if regex is not None:
return regex.search(text)
return None
def replace_text(self, context, text, replace_value):
return self._search_re[context.abc_section].sub(replace_value, text)
@property
def display_name(self):
return self.__display_name
def get_description_url(self, context):
return None
def get_header_text(self, context):
return self.__display_name
def get_description_text(self, context):
return self.description
def get_description_html(self, context):
result = None
url = self.get_description_url(context)
if url:
result = get_html_from_url(url)
if not result:
result = u'<h1>%s</h1>' % escape(self.get_header_text(context))
description = self.get_description_text(context)
if description:
result += u'{0}<br>'.format(escape(description))
if self.visible_match_group is not None:
# groups = context.current_match.groups()
# element_text = context.match_text
# if len(groups) == 1 and groups[0]:
# element_text = groups[0]
element_text = context.get_matchgroup(self.visible_match_group)
if element_text:
element_text = abc_text_to_unicode(element_text).strip()
if element_text:
result += u'<code>{0}</code><br>'.format(escape(element_text))
#for matchtext in context.current_match.groups():
# if matchtext:
# result += '<code>%s</code><br>' % escape(matchtext)
return result
def get_inner_element(self, context):
return self
class CompositeElement(AbcElement):
def __init__(self, name, keyword=None, display_name=None, description=None):
super(CompositeElement, self).__init__(name, keyword, display_name=display_name, description=description)
self._elements = {}
def add_element(self, element):
if element.keyword:
self._elements[element.keyword] = element
else:
raise Exception('Element has no keyword')
def get_element(self, keyword):
return self._elements.get(keyword)
def get_element_from_context(self, context):
inner_text = context.current_match.group(1)
if inner_text is None:
inner_text = context.current_match.group(2)
return self.get_element_from_inner_text(inner_text)
def get_element_from_inner_text(self, inner_text):
parts = inner_text.split(' ', 1)
keyword = parts[0]
result = self._elements.get(keyword)
if isinstance(result, CompositeElement) and len(parts) > 1:
subelement = result.get_element_from_inner_text(parts[1])
if subelement is not None:
result = subelement
return result
def get_header_text(self, context):
element = self.get_element_from_context(context)
if element:
return element.get_header_text(context)
return super(CompositeElement, self).get_header_text(context)
def get_description_text(self, context):
element = self.get_element_from_context(context)
if element:
return element.get_description_text(context)
return super(CompositeElement, self).get_description_text(context)
def get_inner_element(self, context):
return self.get_element_from_context(context) or self
class AbcUnknown(AbcElement):
pattern = ''
def __init__(self):
super(AbcUnknown, self).__init__('Unknown', display_name=_('Unknown'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcUnknown.pattern
class AbcInformationField(AbcElement):
def __init__(self, keyword, name, file_header, tune_header, tune_body, inline, inner_pattern=None):
super(AbcInformationField, self).__init__(name, keyword)
self.file_header = file_header
self.tune_header = tune_header
self.tune_body = tune_body
self.inline = inline
self.inner_pattern = inner_pattern
self.inner_re = None
self.visible_match_group = 1
if inner_pattern:
self.visible_match_group = 0
line_pattern = r'(?m)^' + re.escape(self.keyword) + self.rest_of_line_pattern
if file_header:
self._search_pattern[AbcSection.FileHeader] = line_pattern
if tune_header in [ABC_TUNE_HEADER_YES, ABC_TUNE_HEADER_FIRST, ABC_TUNE_HEADER_SECOND, ABC_TUNE_HEADER_LAST]:
self._search_pattern[AbcSection.TuneHeader] = line_pattern
if tune_body or inline:
pattern = line_pattern
if inline:
pattern += '|' + self.get_inline_pattern(keyword)
self._search_pattern[AbcSection.TuneBody] = pattern
def freeze(self):
super(AbcInformationField, self).freeze()
if self.inner_pattern:
self.inner_re = re.compile(self.inner_pattern)
def matches(self, context):
match = super(AbcInformationField, self).matches(context)
result = match
if self.inner_re and match is not None:
i = 1
inner_text = match.group(i)
if inner_text is None:
i += 1
inner_text = match.group(i)
m = self.inner_re.search(inner_text)
if m:
result = (match, InnerMatch(m, match.start(i)))
return result
class AbcDirective(CompositeElement):
def __init__(self):
super(AbcDirective, self).__init__('Stylesheet directive', display_name=_('Stylesheet directive'), description=_('A stylesheet directive is a line that starts with %%, followed by a directive that gives instructions to typesetting or player programs.'))
pattern = r'(?m)^(?:%%|I:)(?!%)' + self.rest_of_line_pattern + '|' + self.get_inline_pattern('I:')
for section in ABC_SECTIONS:
self._search_pattern[section] = pattern
class AbcStringField(AbcInformationField):
def __init__(self, keyword, name, file_header, tune_header, tune_body, inline):
super(AbcStringField, self).__init__(name, keyword, file_header, tune_header, tune_body, inline)
class AbcInstructionField(AbcInformationField):
def __init__(self, keyword, name, file_header, tune_header, tune_body, inline, inner_pattern=None):
super(AbcInstructionField, self).__init__(name, keyword, file_header, tune_header, tune_body, inline, inner_pattern)
class AbcMidiDirective(CompositeElement):
def __init__(self):
super(AbcMidiDirective, self).__init__('MIDI directive', 'MIDI', display_name=_('MIDI directive'), description=_('A directive that gives instructions to player programs.'))
class AbcMidiProgramDirective(AbcElement):
pattern = r'(?m)^(?:%%|I:)MIDI program(?P<channel>(?:\s+\d+(?=\s+\d))?)(?:(?P<instrument>\s*\d*))?' + AbcElement.rest_of_line_pattern
def __init__(self):
super(AbcMidiProgramDirective, self).__init__('MIDI_program', display_name=_('Instrument'), description=_('Sets the instrument for a MIDI channel.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcMidiProgramDirective.pattern
class AbcMidiChordProgramDirective(AbcElement):
pattern = r'(?m)^(?:%%|I:)MIDI chordprog(?:(?P<instrument>\s*\d*))?' + AbcElement.rest_of_line_pattern
def __init__(self):
super(AbcMidiChordProgramDirective, self).__init__('MIDI_chordprog', display_name=_('Chord instrument'), description=_('Sets the instrument for playing chords.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcMidiChordProgramDirective.pattern
class AbcMidiBaseProgramDirective(AbcElement):
pattern = r'(?m)^(?:%%|I:)MIDI bassprog(?:(?P<instrument>\s*\d*))?' + AbcElement.rest_of_line_pattern
def __init__(self):
super(AbcMidiBaseProgramDirective, self).__init__('MIDI_bassprog', display_name=_('Bass instrument'), description=_('Sets the instrument for the base.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcMidiBaseProgramDirective.pattern
class AbcMidiChannelDirective(AbcElement):
pattern = r'(?m)^(?:%%|I:)MIDI channel(?P<channel>\s*\d*)' + AbcElement.rest_of_line_pattern
def __init__(self):
super(AbcMidiChannelDirective, self).__init__('MIDI_channel', display_name=_('Channel'), description=_('Sets the MIDI channel for the current voice.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcMidiChannelDirective.pattern
class AbcMidiDrumMapDirective(AbcElement):
pattern = r"(?m)^(?:%%|I:)(?:MIDI drummap|percmap)\s+(?P<note>[_^]*\w[,']*)\s+(?P<druminstrument>\d+)" + AbcElement.rest_of_line_pattern
def __init__(self):
super(AbcMidiDrumMapDirective, self).__init__('MIDI_drummap', display_name=_('Drum mapping'), description=_('Maps a note to an instrument.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcMidiDrumMapDirective.pattern
class AbcMidiVolumeDirective(AbcElement):
pattern = r"(?m)^(?:%%|I:)MIDI (?:control 7|chordvol|bassvol)\s+(?P<volume>\d*)" + AbcElement.rest_of_line_pattern
def __init__(self):
super(AbcMidiVolumeDirective, self).__init__('MIDI_volume', display_name=_('Volume'), description=_('Volume for current voice.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcMidiVolumeDirective.pattern
class AbcMidiGuitarChordDirective(AbcElement):
pattern = r"(?m)^(?:%%|I:)MIDI gchord (?P<pattern>\w*)" + AbcElement.rest_of_line_pattern
def __init__(self):
super(AbcMidiGuitarChordDirective, self).__init__('MIDI_gchord', display_name=_('Guitar chords'), description=_('Play guitar chords'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcMidiGuitarChordDirective.pattern
class ScoreDirective(AbcElement):
pattern = r"(?m)^(?:%%|I:)(?:score|staves)\b"+ AbcElement.rest_of_line_pattern
def __init__(self):
super(ScoreDirective, self).__init__('score', display_name=_('Score layout'), description=_('Defines which staves are displayed.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = ScoreDirective.pattern
class MeasureNumberDirective(AbcElement):
pattern = r"(?m)^(?:%%|I:)(?:measurenb|barnumbers) (?P<interval>-?\d*)"+ AbcElement.rest_of_line_pattern
def __init__(self):
super(MeasureNumberDirective, self).__init__('measurenb', display_name=_('Measure numbering'), description=_('Defines if and how measures are numbered.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = MeasureNumberDirective.pattern
class HideFieldsDirective(AbcElement):
pattern = r"(?m)^(?:%%|I:)writefields\s+(?P<fields>[A-Za-z_]+)\s+(?:0|false)"+ AbcElement.rest_of_line_pattern
def __init__(self):
super(HideFieldsDirective, self).__init__('hide_fields', display_name=_('Hide fields'), description=_('Defines which fields should be hidden.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = HideFieldsDirective.pattern
class ShowFieldsDirective(AbcElement):
pattern = r"(?m)^(?:%%|I:)writefields\s+(?P<fields>[A-Za-z]+)"+ AbcElement.rest_of_line_pattern
def __init__(self):
super(ShowFieldsDirective, self).__init__('show_fields', display_name=_('Show fields'), description=_('Defines which fields should be shown.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = ShowFieldsDirective.pattern
class Abcm2psDirective(AbcElement):
""" Elements defined by abcm2ps """
anchor_replacement = (re.compile('<a (?:href|name)="[^"]*">|</a>', re.IGNORECASE), '')
table_replacement = (re.compile('<table>.*?</table>', re.IGNORECASE | re.DOTALL), '')
def __init__(self, keyword, name, description=None):
super(Abcm2psDirective, self).__init__(keyword, name, description=description)
self.html_replacements = [
Abcm2psDirective.anchor_replacement,
Abcm2psDirective.table_replacement
]
def get_description_url(self, context):
return 'http://moinejf.free.fr/abcm2ps-doc/%s.xhtml' % quote(self.name)
def get_html_from_url(self, url):
result = get_html_from_url(url)
result = replace_text(result, self.html_replacements)
return result
class AbcVersionDirective(AbcElement):
pattern = r'^%abc-(?P<version>[\d\.]+)'
def __init__(self):
super(AbcVersionDirective, self).__init__('abcversion', display_name=_('ABC version'), description=_('It starts with the version of the ABC specification this file conforms to.'))
self._search_pattern[AbcSection.FileHeader] = AbcVersionDirective.pattern
class AbcComment(AbcElement):
#pattern = r'(?<!\\|^)%\s*(.*)|^%(?!%)\s*(.*)$'
pattern = r'(?<!\\)%\s*(.*)$'
def __init__(self):
super(AbcComment, self).__init__('Comment', '%', display_name=_('Comment'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcComment.pattern
self.visible_match_group = 1
def get_header_text(self, context):
if context.match_text and context.match_text.startswith('%%'):
return _('Stylesheet directive')
else:
return super(AbcComment, self).get_header_text(context)
def get_description_text(self, context):
if context.match_text and context.match_text.startswith('%%'):
return _('A stylesheet directive is a line that starts with %%, followed by a directive that gives instructions to typesetting or player programs.')
else:
return super(AbcComment, self).get_description_text(context)
def remove_comments(self, abc):
return self._search_re[AbcSection.TuneBody].sub('', abc)
class AbcBeam(AbcElement):
pattern = r'`+'
def __init__(self):
super(AbcBeam, self).__init__('Beam', '`', display_name=_('Beam'), description=_('Back quotes ` may be used freely between notes to be beamed, to increase legibility.'))
self._search_pattern[AbcSection.TuneBody] = AbcBeam.pattern
class AbcEmptyDocument(AbcElement):
pattern = r'^$'
def __init__(self):
super(AbcEmptyDocument, self).__init__('empty_document', display_name=_('Welcome to EasyABC'),
description=_('Creating an abc-file from scratch can be difficult. This assist panel tries to help by providing hints and actions. But remember, typing is usually faster.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcEmptyLine.pattern
self.tune_scope = TuneScope.FullText
def matches(self, context):
if context.contains_text:
return None
else:
regex = self._search_re.get(context.abc_section, None)
return regex.match('')
class AbcEmptyLine(AbcElement):
pattern = r'^\s*$'
def __init__(self):
super(AbcEmptyLine, self).__init__('empty_line', display_name=_('Empty line'), description=_('An empty line separates tunes.'))
for section in ABC_SECTIONS:
self._search_pattern[section] = AbcEmptyLine.pattern
class AbcEmptyLineWithinTuneHeader(AbcElement):
def __init__(self):
super(AbcEmptyLineWithinTuneHeader, self).__init__('empty_line_header', display_name=_('Empty line in header'), description=_('More directives can be added here in the tune header. After K: the music code begins.'))
self._search_pattern[AbcSection.TuneHeader] = AbcEmptyLine.pattern
class AbcEmptyLineWithinTuneBody(AbcElement):
    """A blank line inside the tune body."""
    def __init__(self):
        super(AbcEmptyLineWithinTuneBody, self).__init__('empty_line_tune', display_name=_('Empty line'), description=_('Notes, rests, or directives can be added.'))
        self._search_pattern[AbcSection.TuneBody] = AbcEmptyLine.pattern
class AbcEmptyLineWithinFileHeader(AbcElement):
    """A blank line in the file header (everything above the first X:)."""
    def __init__(self):
        super(AbcEmptyLineWithinFileHeader, self).__init__('empty_line_file_header', display_name=_('File header'), description=_('Everything above the first X: is the file header. The directives here apply to all the tunes that follow.'))
        self._search_pattern[AbcSection.FileHeader] = AbcEmptyLine.pattern
class AbcBodyElement(AbcElement):
    """Base class for elements that only occur inside the tune body."""
    def __init__(self, name, pattern, display_name=None, description=None):
        super(AbcBodyElement, self).__init__(name, display_name=display_name, description=description)
        # Body elements search only the tune body section.
        self._search_pattern[AbcSection.TuneBody] = pattern
        self.pattern = pattern
class AbcSpace(AbcBodyElement):
    """Whitespace between notes; also breaks beams."""
    pattern = r'\s+'
    def __init__(self):
        super(AbcSpace, self).__init__('Whitespace', AbcSpace.pattern, display_name=_('Whitespace'), description=_('Space is used to improve legibility and to prevent notes from sharing the same beam.'))
class AbcAnnotation(AbcBodyElement):
    """A quoted annotation with a placement mark (^ _ < > @) before the text."""
    pattern = r'(?P<annotation>"(?P<pos>[\^_<>@])(?P<text>(?:\\"|[^"])*)")'
    def __init__(self):
        super(AbcAnnotation, self).__init__('Annotation', AbcAnnotation.pattern, display_name=_('Annotation'))
        self.visible_match_group = 'text'
class AbcChordOrAnnotation(AbcBodyElement):
    """A quoted string that may be either a chord symbol or an annotation."""
    pattern = r'"(?P<pos>[\^_<>@])?(?P<text>(?:\\"|[^"])*)"'
    def __init__(self):
        super(AbcChordOrAnnotation, self).__init__('Chord or annotation', AbcChordOrAnnotation.pattern, display_name=_('Chord symbol or annotation'))
class AbcSlur(AbcBodyElement):
    """Slur parentheses; '(' followed by a digit is a tuplet, not a slur."""
    pattern = r'(?P<dash>\.?)\((?!\d)|\)'
    def __init__(self):
        super(AbcSlur, self).__init__('Slur', AbcSlur.pattern, display_name=_('Slur'))
class TypesettingSpace(AbcBodyElement):
    """The 'y' symbol: extra typeset space between surrounding notes."""
    pattern = 'y'
    def __init__(self):
        super(TypesettingSpace, self).__init__('Typesetting extra space', TypesettingSpace.pattern, display_name=_('Typesetting extra space'), description=_('y can be used to add extra space between the surrounding notes; moreover, chord symbols and decorations can be attached to it, to separate them from notes.'))
class RedefinableSymbol(AbcBodyElement):
    """Single-letter shortcuts (H-W, h-w, ~) assignable via the U: field."""
    pattern = '[H-Wh-w~]'
    def __init__(self):
        super(RedefinableSymbol, self).__init__('Redefinable symbol', RedefinableSymbol.pattern, display_name=_('Redefinable symbol'), description=_('The letters H-W and h-w and the symbol ~ can be assigned with the U: field to provide a shortcut for the !symbol! syntax. For example, to assign the letter T to represent the trill, you can write: U: T = !trill!'))
class AbcDecoration(AbcBodyElement):
    """A !decoration! (or legacy +decoration+, or '.'), optionally restricted to a subset."""
    pattern = r"!([^!]+)!|\+([^!]+)\+|\."
    values = decoration_to_description
    def __init__(self, name=None, subset=None, display_name=None):
        if name is None:
            name = 'Decoration'
        if subset is None:
            pattern = AbcDecoration.pattern
        else:
            # Build an alternation that matches only the given subset of symbols;
            # bare symbols (like '.') are appended outside the !...!/+...+ wrapper.
            with_exclamation = '|'.join(re.escape(value[1:-1]) for value in subset if value[0] == '!')
            without_exclamation = '|'.join(re.escape(value) for value in subset if value[0] != '!')
            if without_exclamation:
                without_exclamation = '|' + without_exclamation
            pattern = r'(?P<decoration>(?P<decomark>\+|!)(?P<deconame>{0})(?P=decomark){1})'.format(with_exclamation, without_exclamation)
        super(AbcDecoration, self).__init__(name, pattern, display_name=display_name)
    def get_description_html(self, context):
        """Append the symbol's human-readable description to the base HTML."""
        html = super(AbcDecoration, self).get_description_html(context)
        html += '<br>'
        symbol = context.match_text
        if symbol and symbol[0] == symbol[-1] == '+': # convert old notation to new
            symbol = '!%s!' % symbol[1:-1]
        html += escape(decoration_to_description.get(symbol, _('Unknown symbol')))
        html += '<br>'
        return html
class AbcDynamicsDecoration(AbcDecoration):
    """Dynamics marks (pppp..ffff, sfz, crescendo/diminuendo brackets)."""
    values = [
        '!ffff!', '!fff!', '!ff!', '!f!', '!mf!', '!mp!', '!p!', '!pp!', '!ppp!', '!pppp!', '!sfz!',
        '!crescendo(!', '!<(!',
        '!crescendo)!', '!<)!',
        '!diminuendo(!', '!>(!',
        '!diminuendo)!', '!>)!'
    ]
    def __init__(self):
        super(AbcDynamicsDecoration, self).__init__('Dynamics', AbcDynamicsDecoration.values, display_name=_('Dynamics'))
class AbcFingeringDecoration(AbcDecoration):
    """Fingering numbers 0-5."""
    values = ['!0!', '!1!', '!2!', '!3!', '!4!', '!5!']
    def __init__(self):
        super(AbcFingeringDecoration, self).__init__('Fingering', AbcFingeringDecoration.values, display_name=_('Fingering'))
class AbcOrnamentDecoration(AbcDecoration):
    """Ornaments: trills, mordents, turns, rolls, arpeggio."""
    values = [
        '!trill!',
        '!trill(!',
        '!trill)!',
        '!mordent!', #'!lowermordent!',
        '!pralltriller!', #'!uppermordent!',
        '!roll!',
        '!turn!',
        '!turnx!',
        '!invertedturn!',
        '!invertedturnx!',
        '!arpeggio!'
    ]
    def __init__(self):
        super(AbcOrnamentDecoration, self).__init__('Ornament', AbcOrnamentDecoration.values, display_name=_('Ornament'))
class AbcDirectionDecoration(AbcDecoration):
    """Navigation marks: segno, coda, D.S./D.C. variants, fine."""
    values = [
        '!segno!',
        '!coda!',
        '!D.S.!',
        '!D.C.!',
        '!dacoda!',
        '!dacapo!',
        '!D.C.alcoda!',
        '!D.C.alfine!',
        '!D.S.alcoda!',
        '!D.S.alfine!',
        '!fine!'
    ]
    def __init__(self):
        super(AbcDirectionDecoration, self).__init__('Direction', AbcDirectionDecoration.values, display_name=_('Direction'))
class AbcArticulationDecoration(AbcDecoration):
    """Articulation marks: staccato, accents, bowing, fermata, pedal, etc."""
    values = [
        '.',
        '!tenuto!',
        '!accent!', '!>!', '!emphasis!',
        '!marcato!', '!^!',
        '!wedge!',
        '!invertedfermata!',
        '!fermata!',
        '!plus!', '!+!',
        '!snap!',
        '!slide!',
        '!upbow!',
        '!downbow!',
        '!open!',
        '!thumb!',
        '!breath!',
        '!ped!',
        '!ped-up!',
    ]
    def __init__(self):
        super(AbcArticulationDecoration, self).__init__('Articulation', AbcArticulationDecoration.values, display_name=_('Articulation'))
class AbcBrokenRhythm(AbcBodyElement):
    """Broken-rhythm markers '>' / '<' (dotted/halved note pairs)."""
    pattern = r'\<+|\>+'
    def __init__(self):
        super(AbcBrokenRhythm, self).__init__('Broken rhythm', AbcBrokenRhythm.pattern)
    def get_description_html(self, context):
        """Append an explanation of which neighbour is dotted/halved."""
        html = super(AbcBrokenRhythm, self).get_description_html(context)
        if '>' in context.match_text:
            html += 'The previous note is dotted, the next note halved'
        else: # if '<' in context.match_text:
            html += 'The previous note is halved, the next dotted'
        return html
class AbcTuplet(AbcBodyElement):
    """Tuplet marker: (n or (n:q or (n:q:r."""
    pattern = r"\([1-9](?:\:[1-9]?)?(?:\:[1-9]?)?"
    def __init__(self):
        super(AbcTuplet, self).__init__('Tuplet', AbcTuplet.pattern, display_name=_('Tuplet'), description=_('Duplets, triplets, quadruplets, etc.'))
class AbcBar(AbcBodyElement):
    """Bar lines including repeat and thick/thin variants, optionally numbered."""
    pattern = r"(?:\.?\|\||:*\|\]|\[\|:*|::|:+\||\|:+|\.?\||\[\|\])[1-9]?"
    def __init__(self):
        super(AbcBar, self).__init__('Bar', AbcBar.pattern, display_name=_('Bar'), description=_('Separates measures.'))
class AbcVariantEnding(AbcBodyElement):
    """First/second (etc.) ending markers like [1 , [1,3 or |2."""
    pattern = r'\[[1-9](?:[,-][1-9])*|\|[1-9]'
    def __init__(self):
        super(AbcVariantEnding, self).__init__('Variant ending', AbcVariantEnding.pattern, display_name=_('Variant ending'), description=_('To play a different ending each time'))
class AbcVoiceOverlay(AbcBodyElement):
    """The '&' voice-overlay operator."""
    pattern = '&'
    def __init__(self):
        super(AbcVoiceOverlay, self).__init__('Voice overlay', AbcVoiceOverlay.pattern, display_name=_('Voice overlay'), description=_("The & operator may be used to temporarily overlay several voices within one measure. Each & operator sets the time point of the music back by one bar line, and the notes which follow it form a temporary voice in parallel with the preceding one. This may only be used to add one complete bar's worth of music for each &. "))
class AbcInvalidCharacter(AbcBodyElement):
    """Any character that is not legal inside an abc tune body."""
    pattern = r'[^\d\w\s%s]' % re.escape('!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~')
    def __init__(self):
        super(AbcInvalidCharacter, self).__init__('Invalid character', AbcInvalidCharacter.pattern, display_name=_('Invalid character'), description=_("This character is not allowed within the body of an abc tune."))
class AbcChordSymbol(AbcBodyElement):
    """A quoted chord symbol, e.g. "Gm7" or "C/E" (root, name, optional bass)."""
    basic_pattern = r'(?P<chordsymbol>"(?P<chordname>[^\^_<>@"\\](?:[^"\\]|\\.)*)")'
    #pattern = ur'(?P<chordsymbol>"(?P<chordnote>[A-G][b#\u266D\u266E\u266F]?)(?P<quality>[^/\d]*)(?P<th>2|4|5|6|7|9|11|13)?(?P<sus>sus[2|4|9]?)?(?P<additional>.*?)(?P<bassnote>(?:/[A-Ga-g][b#\u266D\u266E\u266F]?)?)")'
    pattern = r'"(?P<chordsymbol>(?P<chordnote>(?:[A-G][b#\u266D\u266E\u266F]?)?)(?P<chordname>.*?)(?P<bassnote>(?:/[A-Ga-g][b#\u266D\u266E\u266F]?)?))"'
    def __init__(self):
        super(AbcChordSymbol, self).__init__('Chord symbol', AbcChordSymbol.pattern, display_name=_('Chord symbol'))
class AbcBaseNote(AbcBodyElement):
    """Base class holding the shared regex fragments for notes, rests and chords."""
    accidental_pattern = r'(?P<accidental>(?:[_^](?:3/2?|1?/2?)|\^{1,2}|_{1,2}|=)?)?'
    length_pattern = r'(?P<length>\d{0,3}(?:/\d{0,3})*)'
    octave_pattern = r"(?P<octave>[',]*)"
    pair_pattern = r'(?P<pair>(?:\s*>+|\s*<+)?)'
    tie_pattern = r'(?P<tie>-?)'
    basic_note_pattern_without_len = r'{0}(?P<note>[A-Ga-g]){1}'.format(accidental_pattern, octave_pattern)
    basic_note_pattern = basic_note_pattern_without_len + length_pattern
    basic_rest_pattern_without_len = '(?P<rest>[zx])'
    basic_rest_pattern = basic_rest_pattern_without_len + length_pattern
    basic_note_or_rest_pattern = '(?:{0}|{1})'.format(basic_note_pattern_without_len, basic_rest_pattern_without_len) + length_pattern
    # Z/X rests span whole measures, so length is a plain positive integer.
    basic_measure_rest_pattern = '(?P<rest>[ZX])(?P<length>(?:[1-9][0-9]*)?)'
    def __init__(self, name, pattern, display_name=None, description=None):
        super(AbcBaseNote, self).__init__(name, pattern, display_name=display_name, description=description)
class AbcGraceNotes(AbcBaseNote):
    """A group of grace notes enclosed in braces, e.g. {/ab}."""
    pattern = r'(?P<grace>{(?P<acciaccatura>/?)(?P<gracenote>[^}]*)})'
    def __init__(self):
        # Bugfix: was super(AbcBaseNote, self), which named the wrong class and
        # skipped AbcBaseNote.__init__ in the MRO; name this class instead.
        super(AbcGraceNotes, self).__init__('Grace notes', AbcGraceNotes.pattern, display_name=_('Grace notes'))
        self.visible_match_group = 'gracenote'
class AbcNoteGroup(AbcBaseNote):
    """Composite patterns combining grace notes, chord symbols, decorations and
    annotations with a note, rest or chord plus pair/tie suffixes."""
    note_group_pattern_prefix = r'(?P<gracenotes>{0}?)(?P<chordsymbols>{1}?)(?P<decoanno>(?P<decorations>{2})|(?P<annotations>{3})*)'.format(
        AbcGraceNotes.pattern, AbcChordSymbol.basic_pattern, AbcDecoration.pattern, AbcAnnotation.pattern)
    note_group_pattern_postfix = AbcBaseNote.pair_pattern + AbcBaseNote.tie_pattern
    note_pattern = note_group_pattern_prefix + AbcBaseNote.basic_note_pattern + note_group_pattern_postfix
    normal_rest_pattern = note_group_pattern_prefix + AbcBaseNote.basic_rest_pattern + AbcBaseNote.pair_pattern
    note_or_rest_pattern = note_group_pattern_prefix + AbcBaseNote.basic_note_or_rest_pattern
    # Named groups must be stripped before nesting a pattern inside another one.
    chord_pattern = r'(?P<chord>{0}\[(?:{1}\s*)*\])'.format(note_group_pattern_prefix, remove_named_groups(note_or_rest_pattern)) + AbcBaseNote.length_pattern + note_group_pattern_postfix
    note_or_chord_pattern = r'({0}|{1})'.format(remove_named_groups(note_or_rest_pattern), remove_named_groups(chord_pattern)) + note_group_pattern_postfix
    def __init__(self):
        super(AbcNoteGroup, self).__init__('Note group', AbcNoteGroup.note_or_chord_pattern, display_name=_('Note group')) # '^{0}$'.format(AbcNoteGroup.pattern))
        #self.exact_match_required = True
        self.visible_match_group = 1
class AbcNoteOrChord(AbcBaseNote):
    """Matches either a single note or a chord, with prefixes and suffixes."""
    pattern = AbcNoteGroup.note_or_chord_pattern
    def __init__(self):
        # Bugfix: was super(AbcBaseNote, self); use this class so the full MRO runs.
        super(AbcNoteOrChord, self).__init__('Note or chord', AbcNoteOrChord.pattern, display_name=_('Note or chord'))
class AbcChord(AbcBaseNote):
    """Multiple notes sounded together, written between square brackets."""
    pattern = AbcNoteGroup.chord_pattern
    def __init__(self):
        # Bugfix: was super(AbcBaseNote, self); use this class so the full MRO runs.
        super(AbcChord, self).__init__('Chord', AbcChord.pattern, display_name=_('Chord'))
        self.visible_match_group = 'chord'
class AbcNote(AbcBaseNote):
    """A single note with optional grace notes, chord symbol and annotations."""
    pattern = AbcNoteGroup.note_pattern
    def __init__(self):
        super(AbcNote, self).__init__('Note', '({0})'.format(AbcNote.pattern), display_name=_('Note'))
        # Match groups the assist panel offers to remove individually.
        self.removable_match_groups = {
            'grace': _('Grace notes'),
            'chordsymbol': _('Chord symbol'),
            'annotations': _('Annotation')
        }
        self.visible_match_group = 1
class AbcNormalRest(AbcBaseNote):
    """A z/x rest of normal (note-length) duration."""
    pattern = AbcNoteGroup.normal_rest_pattern
    def __init__(self):
        super(AbcNormalRest, self).__init__('Rest', AbcNormalRest.pattern, display_name=_('Rest'))
        self.visible_match_group = 0
class AbcMeasureRest(AbcBaseNote):
    """A Z/X rest spanning one or more whole measures."""
    pattern = AbcBaseNote.basic_measure_rest_pattern
    def __init__(self):
        super(AbcMeasureRest, self).__init__('Measure rest', AbcMeasureRest.pattern, display_name=_('Measure rest')) # _('This rest spans one or more measures.')
        self.visible_match_group = 0
class AbcMultipleNotesAndChords(AbcBaseNote):
    """Two or more consecutive notes/chords; only matched against a selection."""
    pattern = '(?:' + AbcNoteGroup.note_or_chord_pattern + '[ `]*){2,}'
    def __init__(self):
        super(AbcMultipleNotesAndChords, self).__init__('Multiple notes/chords', '^{0}$'.format(AbcMultipleNotesAndChords.pattern), display_name=_('Multiple notes/chords'))
        self.tune_scope = TuneScope.SelectedText # a line always contains multiple notes so limit to selected text
class AbcMultipleNotes(AbcBaseNote):
    """Two or more consecutive notes/rests; only matched against a selection."""
    pattern = '(?:' + AbcNoteGroup.note_or_rest_pattern + '[ `]*){2,}'
    def __init__(self):
        super(AbcMultipleNotes, self).__init__('Multiple notes', '^{0}$'.format(AbcMultipleNotes.pattern), display_name=_('Multiple notes'))
        self.tune_scope = TuneScope.SelectedText # a line always contains multiple notes so limit to selected text
class AbcBackslash(AbcBodyElement):
    """A trailing backslash that suppresses the typeset line break."""
    pattern = r'\\[ \t]*$'
    def __init__(self):
        super(AbcBackslash, self).__init__('Backslash', AbcBackslash.pattern, display_name=_('Backslash'), description=_('In abc music code, by default, line-breaks in the code generate line-breaks in the typeset score and these can be suppressed by using a backslash.'))
class AbcStructure(object):
    """Parses reference.txt into (section, fields) data and builds the ordered
    list of all AbcElement instances used by the assist panel."""
    # static variables, lazily initialized in get_sections
    replace_regexes = None
    valid_directive_re = None
    from_to_directive_re = None
    abc_field_re = None
    @staticmethod
    def get_sections(cwd):
        """Read reference.txt from *cwd* and return a list of
        (section_title, [(field_name, description), ...]) tuples."""
        # [1.3.6.2 [JWDJ] bugfix This fixes 'str>ng' in Fields and Command Reference
        reference_content = io.open(os.path.join(cwd, 'reference.txt'), 'rU', encoding='latin-1').read()
        if AbcStructure.replace_regexes is None:
            AbcStructure.replace_regexes = [
                (re.compile(r'\bh((?:bass/chord|length|logical|string|int|fl-?\n?oat\s?|command|str|text|vol|h|n|char|clef|bass|chord)\d*\s?(?: (?:string|int|float)\d*?)*)i\b'), r'<\1>'), # enclose types with < and >
                (re.compile(r'\[((?:bass/chord|length|logical|string|int|float|command|str|text|vol)\d*)\]'), r'<\1>'), # replace types enclosed [ and ] with < and >
                (re.compile(r'(?m)\b(?<![- ])1\d\d[\s\n]+[A-Z]+[A-Z\s\.&]+$'), ''), # strip left page header
                (re.compile(r'\bA\.\d+\.[\s\n]+[A-Z &]*1\d\d\b'), ''), # strip right page header
                (re.compile(r'[\.,;]\s[\w\n\s]+Section\s(\d\.|[\d\w\s&:])*\.'), '.'), # removes references to sections
                (re.compile(r' as was seen in Section \d+(\.\d+)*\.'), '.'), # removes references to sections
                (re.compile(r'(?m)^(\w:)\s+((?:[a-z]+\s(?:in|of)\s)?(?:header(?:,\s?body)?|body))\s+(.*)$'), r'\1 \3 (\2)'), # places where-field at the end of description
                # bugfix: replacement must be a raw string; '(\1)' inserted the
                # control character chr(1) instead of the backreference \1
                (re.compile(r'\bh(\d+-\d+)i\b'), r'(\1)') # fix midi numbers (0-127)
            ]
            AbcStructure.valid_directive_re = re.compile(r'^%%\w+(\s[^:\n]*|\.\.\.[^:\n]*)?:') # 1.3.6.2 [JWDJ] 2015-03 fixes false positives
            AbcStructure.from_to_directive_re = re.compile(r'(%%\w+)\.\.\.(%%\w+)')
            AbcStructure.abc_field_re = re.compile(r'[A-Za-z]:')
        reference_content = reference_content.replace(unichr(150), '-')
        reference_content = replace_text(reference_content, AbcStructure.replace_regexes)
        lines = reference_content.splitlines()
        for i in range(len(lines)):
            lines[i] = lines[i].replace('hinti', '<int>')
            lines[i] = lines[i].replace('%%MIDI drumoff turns', '%%MIDI drumoff: turns')
            lines[i] = lines[i].replace('%%MIDI drumon turns', '%%MIDI drumon: turns')
        sections = []
        cur_section = []
        abc_fields_done = False
        # First pass: group lines into sections, joining continuation lines.
        for line in lines:
            line = line.rstrip()
            if line.startswith('A.'):
                title = line.split(' ', 1)[1]
                cur_section = []
                sections.append((title, cur_section))
            elif AbcStructure.valid_directive_re.search(line): # 1.3.6.2 [JWDJ] 2015-03 fixes false positives
                abc_fields_done = True
                cur_section.append(line)
            elif not abc_fields_done and AbcStructure.abc_field_re.match(line):
                cur_section.append(line)
            elif cur_section: # join two lines
                if cur_section[-1].endswith('-'):
                    cur_section[-1] = cur_section[-1][:-1] + line
                else:
                    cur_section[-1] = cur_section[-1] + ' ' + line
        # Second pass: split each collected line into (name, description) tuples.
        for i in range(len(sections)):
            section_name, lines = sections[i]
            tuples = []
            for line in lines:
                if AbcStructure.abc_field_re.match(line):
                    name, desc = line.split(' ', 1)
                    tuples.append((name, desc))
                elif len(line.split(': ', 1)) == 2:
                    name, desc = tuple(line.split(': ', 1))
                    m = AbcStructure.from_to_directive_re.match(name)
                    if m:
                        # A '%%a...%%b' range yields one entry per endpoint.
                        tuples.append((m.group(1), desc))
                        tuples.append((m.group(2), desc))
                    else:
                        tuples.append((name, desc))
            sections[i] = section_name, tuples
        return sections
    @staticmethod
    def generate_abc_elements(cwd):
        """Create and return the full, ordered list of AbcElement instances.
        Order matters: elements are evaluated first to last when matching."""
        directive = AbcDirective()
        midi_directive = AbcMidiDirective()
        directive.add_element(midi_directive)
        # [JWDJ] the order of elements in result is very important, because they get evaluated first to last
        result = [
            AbcEmptyDocument(),
            AbcEmptyLineWithinTuneHeader(),
            AbcEmptyLineWithinTuneBody(),
            AbcEmptyLineWithinFileHeader(),
            AbcEmptyLine(),
            AbcVersionDirective(),
            AbcMidiProgramDirective(),
            AbcMidiChordProgramDirective(),
            AbcMidiBaseProgramDirective(),
            AbcMidiChannelDirective(),
            AbcMidiDrumMapDirective(),
            AbcMidiVolumeDirective(),
            AbcMidiGuitarChordDirective(),
            ScoreDirective(),
            MeasureNumberDirective(),
            HideFieldsDirective(),
            ShowFieldsDirective(),
            directive,
            AbcComment(),
            AbcBeam(),
            AbcBackslash(),
        ]
        # Build the field elements from the abc_keywords table ('|'-separated).
        elements_by_keyword = {}
        lines = abc_keywords.splitlines()
        for line in lines:
            parts = line.split('|')
            keyword = parts[0].strip()
            name = parts[1].strip()
            file_header = parts[2].strip() == 'yes'
            tune_header = tune_header_lookup[parts[3].strip()]
            tune_body = parts[4].strip() == 'yes'
            inline = parts[5].strip() == 'yes'
            abc_type = parts[6].strip()
            if abc_type == 'instruction':
                element = AbcInstructionField(name, keyword, file_header, tune_header, tune_body, inline, abc_inner_pattern.get(keyword, '.*'))
            elif abc_type == 'string':
                element = AbcStringField(name, keyword, file_header, tune_header, tune_body, inline)
            else:
                raise Exception('Unknown abc-type')
            result.append(element)
            elements_by_keyword[element.keyword] = element
        # Merge in the descriptions (and new directives) from reference.txt.
        for (title, fields) in AbcStructure.get_sections(cwd):
            for (field_name, description) in fields:
                parts = field_name.split('<', 1)
                keyword = parts[0].rstrip()
                name = keyword
                element_holder = None
                if name.startswith('%%'):
                    name = name[2:]
                    if name[0:4] == 'MIDI':
                        element_holder = midi_directive
                        name = name[5:]
                        keyword = name
                    else:
                        element_holder = directive
                if element_holder:
                    existing_element = element_holder.get_element(keyword)
                else:
                    existing_element = elements_by_keyword.get(keyword)
                if existing_element is not None:
                    # bugfix: update the element that was actually found; the old
                    # code wrote to the stale 'element' from a previous iteration
                    element = existing_element
                    element.description = description
                else:
                    if element_holder:
                        if element_holder == midi_directive:
                            element = AbcElement(field_name, name, description=description)
                            midi_directive.add_element(element)
                        else:
                            element = Abcm2psDirective(field_name, name, description=description)
                            directive.add_element(element)
                    else:
                        if len(name) == 2 and name[-1] == ':':
                            element = AbcElement(field_name, name, description=description)
                            elements_by_keyword[keyword] = element
                            result.append(element)
                # Remaining '<...>' parts of the field name describe parameters.
                for part in parts[1:]:
                    param = part.strip()
                    if param[-1] == '>':
                        param = param[:-1]
                    element.params.append(param)
        # elements = sorted(elements, key=lambda element: -len(element.keyword)) # longest match first
        # The symbol line element (s:) must be evaluated among the body elements.
        symbol_line = [element for element in result if element.keyword == 's:'][0]
        result = [element for element in result if element.keyword != 's:']
        # [JWDJ] the order of elements in result is very important, because they get evaluated first to last
        result += [
            AbcAnnotation(),
            AbcChordSymbol(),
            AbcChordOrAnnotation(),
            AbcTuplet(),
            AbcVariantEnding(),
            AbcBar(),
            AbcDynamicsDecoration(),
            AbcFingeringDecoration(),
            AbcOrnamentDecoration(),
            AbcDirectionDecoration(),
            AbcArticulationDecoration(),
            AbcDecoration(),
            symbol_line,
            AbcGraceNotes(),
            AbcSlur(),
            AbcMultipleNotes(),
            AbcMultipleNotesAndChords(),
            AbcChord(),
            AbcNote(),
            AbcNormalRest(),
            AbcMeasureRest(),
            AbcVoiceOverlay(),
            AbcBrokenRhythm(),
            AbcInvalidCharacter(),
            TypesettingSpace(),
            RedefinableSymbol(),
            AbcSpace(),
            AbcUnknown()
        ]
        elements_by_keyword['V:'].visible_match_group = 'name'
        for element in result:
            try:
                element.freeze()
            except Exception as ex:
                print('Exception in element {0}: {1}'.format(element.name, ex))
                logging.exception(ex)
        return result
| jwdj/EasyABC | tune_elements.py | tune_elements.py | py | 58,593 | python | en | code | 67 | github-code | 6 | [
{
"api_name": "sys.version_info",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "wx.GetTranslation",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "wx.GetTranslation",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "wx.GetTransl... |
import spacy

# Load the small English pipeline once and reuse it for every text.
nlp = spacy.load('en_core_web_sm')


def _print_lemmas(text):
    """Print the lemma of every token in *text*, one per line."""
    for token in nlp(text):
        print(token.lemma_)


_print_lemmas("Animals")
print()
_print_lemmas("I am god")
| 39xdgy/Interactive_chatbots | 03_lemmatization.py | 03_lemmatization.py | py | 207 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "spacy.load",
"line_number": 3,
"usage_type": "call"
}
] |
19606128436 | import json
from typing import Dict, Generic, TypeVar, cast
import attr
import yaml
from psqlgml.types import GmlData
__all__ = [
"load_resource",
"load_by_resource",
"ResourceFile",
]
T = TypeVar("T")
def load_by_resource(resource_dir: str, resource_name: str) -> Dict[str, GmlData]:
    """Load the named resource plus every resource it (transitively) extends.

    Returns a mapping with one entry per resource file in the ``extends``
    chain, keyed by resource name.
    """
    loaded: Dict[str, GmlData] = {}
    pending = {resource_name}
    while pending:
        current = pending.pop()
        data: GmlData = ResourceFile[GmlData](f"{resource_dir}/{current}").read()
        loaded[current] = data
        parent = data.get("extends")
        if parent:
            pending.add(parent)
    return loaded
def load_resource(resource_folder: str, resource_name: str) -> GmlData:
    """Loads all data resource files into a single Gml Data instance"""
    file_name = f"{resource_folder}/{resource_name}"
    f = ResourceFile[GmlData](file_name)
    rss: GmlData = f.read()
    # "extends" names a parent resource whose data gets merged into this one.
    extended_resource = rss.pop("extends", None)
    if not extended_resource:
        return rss
    # Recursively resolve the parent chain first.
    extended = load_resource(resource_folder, extended_resource)
    # merge
    rss["nodes"] += extended["nodes"]
    rss["edges"] += extended["edges"]
    if "summary" not in rss:
        # No local summary: adopt the parent's summary wholesale.
        rss["summary"] = extended.get("summary", {})
        return rss
    # Merge summaries key by key, adding counts for keys present in both.
    for summary in extended.get("summary", {}):
        if summary in rss["summary"]:
            rss["summary"][summary] += extended["summary"][summary]
        else:
            rss["summary"][summary] = extended["summary"][summary]
    return rss
@attr.s(frozen=True, auto_attribs=True)
class ResourceFile(Generic[T]):
    """A typed handle to a JSON or YAML resource file on disk."""

    absolute_name: str

    @property
    def extension(self) -> str:
        """File extension (text after the final dot), used to pick a parser."""
        return self.absolute_name.split(".")[-1]

    def read(self) -> T:
        """Parse the file and return its contents.

        Raises:
            ValueError: if the extension is not one of json/yml/yaml (the old
                code fell through and raised a confusing UnboundLocalError).
        """
        with open(self.absolute_name, "r") as r:
            if self.extension == "json":
                return cast(T, json.loads(r.read()))
            if self.extension in ["yml", "yaml"]:
                return cast(T, yaml.safe_load(r))
        raise ValueError(f"Unsupported resource file extension: {self.extension!r}")
| kulgan/psqlgml | src/psqlgml/resources.py | resources.py | py | 2,372 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.TypeVar",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "psqlgml.types.GmlData",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "psqlgml.types.GmlDat... |
4758294095 | from typing import Dict, Any
import click
from .root import cli
#: Modules to import in interactive shell.
SHELL_MODULES = dict(
metric='gambit.metric',
kmers='gambit.kmers',
)
@cli.group(
    name='debug',
    hidden=True,
)
# Hidden from the top-level --help listing; houses debug/testing subcommands.
def debug_group():
    """Tools for debugging and testing."""
    pass
def make_shell_ns(ctx) -> Dict[str, Any]:
    """Build the user namespace for the interactive shell command.

    Exposes the click context, the application context object, and the
    modules listed in SHELL_MODULES under their aliases.
    """
    from importlib import import_module

    namespace: Dict[str, Any] = {
        'click_ctx': ctx,
        'ctx': ctx.obj,
    }
    namespace.update(
        (alias, import_module(module_name))
        for alias, module_name in SHELL_MODULES.items()
    )
    return namespace
@debug_group.command()
@click.option(
    '--ipython/--no-ipython', 'use_ipython',
    default=None,
    help='Use IPython instead of built-in Python REPL.',
)
@click.pass_context
def shell(ctx, use_ipython):
    """Start an interactive shell with application data and modules imported.
    Attempts to launch an IPython interactive interpreter if it is installed,
    otherwise falls back on standard Python REPL.
    """
    from gambit.util.misc import is_importable
    # Flag unset: auto-detect IPython availability.
    if use_ipython is None:
        if is_importable('IPython'):
            use_ipython = True
        else:
            click.echo('IPython not available, defaulting to built-in Python REPL.', err=True)
            use_ipython = False
    ns = make_shell_ns(ctx)
    if use_ipython:
        from IPython import start_ipython
        # Empty argv so IPython does not try to parse our own CLI arguments.
        start_ipython(argv=[], user_ns=ns)
    else:
        from code import interact
        interact(local=ns)
| jlumpe/gambit | gambit/cli/debug.py | debug.py | py | 1,417 | python | en | code | 16 | github-code | 6 | [
{
"api_name": "root.cli.group",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "root.cli",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "importlib.import_module",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"li... |
10383640973 | import unittest
import torch
from executorch.backends.xnnpack.test.tester import Tester
class TestSub(unittest.TestCase):
    """XNNPACK delegation test for the aten.sub.Tensor operator."""
    class Sub(torch.nn.Module):
        # Minimal module computing an elementwise subtraction.
        def __init__(self):
            super().__init__()

        def forward(self, x, y):
            z = x - y
            return z

    def test_fp32_sub(self):
        # Broadcasting inputs: (1, 3) - (4, 3).
        inputs = (torch.randn((1, 3)), torch.randn((4, 3)))
        (
            Tester(self.Sub(), inputs)
            .export()
            # One aten sub node after export / edge lowering ...
            .check_count({"torch.ops.aten.sub.Tensor": 1})
            .to_edge()
            .check_count({"executorch_exir_dialects_edge__ops_aten_sub_Tensor": 1})
            .partition()
            # ... which is fully consumed by the XNNPACK delegate.
            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
            .check_not(["executorch_exir_dialects_edge__ops_aten_sub_Tensor"])
            .to_executorch()
            .serialize()
            .run_method()
            .compare_outputs()
        )
| pytorch/executorch | backends/xnnpack/test/ops/sub.py | sub.py | py | 926 | python | en | code | 479 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.randn",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "executorch.backends.xn... |
24618666253 | from django.core.mail import send_mail
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Count
from django.shortcuts import render, get_object_or_404, redirect
from django.views.decorators.http import require_POST
from django.views.generic import ListView
from taggit.models import Tag
from django.contrib.postgres.search import SearchVector
from django.contrib import messages
import djangoProject.settings
from .forms import EmailPostForm, CommentForm, SearchForm
from .models import Post
class PostListView(ListView):
    """
    Alternative class-based list view for published posts.
    """
    # Instead of defining the queryset attribute we could have specified
    # model = Post, and Django would have built the generic
    # Post.objects.all() queryset for us.
    queryset = Post.published.all()
    context_object_name = 'posts'
    paginate_by = 3
    template_name = 'blog/post/list.html'
def post_list(request, tag_slug=None):
    """Display the paginated list of published posts, optionally filtered by tag."""
    post_list = Post.published.all()
    tag = None
    if tag_slug:
        tag = get_object_or_404(Tag, slug=tag_slug)
        post_list = post_list.filter(tags__in=[tag])
    # Paginate with 3 posts per page.
    paginator = Paginator(post_list, 3)
    # Take the requested page from the query string, defaulting to page 1.
    page_number = request.GET.get('page', 1)
    try:
        posts = paginator.page(page_number)
    except PageNotAnInteger:
        # Non-integer page value: show the first page (handlers were previously
        # swapped, sending garbage input to the last page).
        posts = paginator.page(1)
    except EmptyPage:
        # Page out of range: show the last page of results.
        posts = paginator.page(paginator.num_pages)
    # Removed leftover debug print of the page object's __dict__.
    return render(request, 'blog/post/list.html', {'posts': posts,
                                                   'tag': tag})
def post_detail(request, post, year, month, day):
    """Display a single published post with its comments and similar posts."""
    # Removed leftover debug print of request.user.
    post = get_object_or_404(Post,
                             status=Post.Status.PUBLISHED,
                             slug=post,
                             publish__year=year,
                             publish__month=month,
                             publish__day=day)
    # Active comments attached to this post.
    comments = post.comments.filter(active=True)
    # Empty form for visitors to add a comment.
    form = CommentForm()
    # Similar posts: published posts sharing tags with this one,
    # ranked by number of shared tags, then recency; top 4.
    post_tags_ids = post.tags.values_list('id', flat=True)
    similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id)
    similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4]
    return render(request, 'blog/post/details.html', {'post': post,
                                                      'comments': comments,
                                                      'form': form,
                                                      'similar_posts': similar_posts})
# View that 1) renders the initial share form and 2) validates the submitted
# data and sends the recommendation e-mail.
def post_share(request, post_id):
    """Share a published post by e-mail using EmailPostForm."""
    # Shortcut: fetch the published post with id == post_id or raise 404.
    post = get_object_or_404(Post,
                             id=post_id,
                             status=Post.Status.PUBLISHED)
    sent = False
    # 2) Form was submitted.
    if request.method == "POST":
        form = EmailPostForm(request.POST)
        if form.is_valid():
            # On validation errors the form is re-rendered in the template
            # with the submitted data and the errors shown.
            # cleaned_data maps validated field names to their values
            # (form.data would hold the raw, unvalidated strings).
            cd = form.cleaned_data
            # Build an absolute link to the post and send the e-mail.
            post_url = request.build_absolute_uri(
                post.get_absolute_url()
            )
            subject = f"{cd['name']} recommends you read {post}"
            message = f"Mail send by {cd['email']}\n\n" \
                      f"Read {post.title} at {post_url}\n\n" \
                      f"{cd['name']}\'s comments: {cd['comments']}"
            send_mail(subject, message, djangoProject.settings.EMAIL_HOST_USER,
                      [cd['to']])
            sent = True
    # 1) Initial GET request.
    else:
        # This empty form instance is rendered in the template.
        form = EmailPostForm()
    return render(request, 'blog/post/share.html', {'post': post,
                                                    'form': form,
                                                    'sent': sent})
@require_POST
def post_comment(request, post_id):
    """Handle a submitted comment form for the given published post."""
    post = get_object_or_404(Post,
                             id=post_id,
                             status=Post.Status.PUBLISHED)
    comment = None
    # Bind the form to the submitted POST data.
    form = CommentForm(data=request.POST)
    if form.is_valid():
        # commit=False builds the Comment instance without writing it to the
        # database, so the post relation can be attached before saving.
        comment = form.save(commit=False)
        comment.post = post
        comment.save()
    # Removed leftover debug print of the comment's __dict__.
    return render(request, 'blog/post/comment.html',
                  {'post': post,
                   'form': form,
                   'comment': comment})
def post_search(request):
    """Full-text search over post title and body using Postgres SearchVector."""
    form = SearchForm()
    query = None
    results = []
    if 'query' in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            # cleaned_data holds the validated search term.
            query = form.cleaned_data['query']
            results = Post.published.annotate(search=SearchVector('title', 'body')
                                              ).filter(search=query)
    return render(request,
                  'blog/post/search.html',
                  {'form': form,
                   'query': query,
                   'results': results})
# Fallback view: redirect to the main page when an invalid URL was entered.
def redir_to_main_page(request, id):
    """Notify the user and redirect to the post list."""
    messages.add_message(request, messages.INFO, 'you were redirected on main page')
    return redirect('blog:post_list')
{
"api_name": "django.views.generic.ListView",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "models.Post.published.all",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "models.Post.published",
"line_number": 24,
"usage_type": "attribute"
},
{
... |
13276332357 | from pathlib import Path
import os
import pandas as pd
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
# Extract scalar metrics from one run's TensorBoard event file and print them
# (the CSV export below is currently commented out).
# Hard-coded path to a single split/fold/model of the lohi experiment.
tb_path = Path("experiments") / "MPP" / "lohi" / "cdata" / "freesolv" / "lohi" / f"split_0" / "fold_0" / \
          "model_0"
# Pick the lexicographically last "events*" file, i.e. the latest event log.
tb_file = tb_path / list(sorted(filter(
    lambda x: str(x).startswith("events"), os.listdir(tb_path)
)))[-1]
print("File:", tb_file)
ea = EventAccumulator(str(tb_file))
ea.Reload()  # load all scalar events from disk before querying tags
# Dump every validation_* and test_* scalar series recorded for this run.
for long, short in [("validation_", "val"), ("test_", "test")]:
    print([m for m in filter(lambda x: x.startswith(long), ea.Tags()["scalars"])])
    for metric in filter(lambda x: x.startswith(long), ea.Tags()["scalars"]):
        print("metric", [e.value for e in ea.Scalars(metric)])
        # dfs[short][f"{tech}_{metric}_split_{run}"] = [e.value for e in ea.Scalars(metric)]
| kalininalab/DataSAIL | experiments/MPP/check.py | check.py | py | 891 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tensorboard.backend.event_processing.event_accumulator.EventAccumulator",
"line_number": 14,
"usage_type": "call"
... |
23255212962 | from Config import Calculator
from Config import Condition
from Utils import Utils
import json
import insertUser
import os
# # file_path = os.path.join(BASE_DIR, 'Test_Data')
# elements = BASE_DIR.split("/")
# # elements.pop()
# path = "/".join(elements)
# print(path)
if __name__ == '__main__':
    # Only run the calculation when the user/licence check passes.
    verify = insertUser.verify()
    if verify:
        BASE_DIR = './File/'
        # Use open() in a context manager: the Py2-only file() builtin was
        # removed in Python 3 and the original handle was never closed.
        with open(BASE_DIR + "config.json") as json_file:
            conf = json.load(json_file)
        # Combine the test values from both configured input workbooks.
        allTestValue = Utils.get_test_data(BASE_DIR + conf["file1"])
        allTestValue.extend(Utils.get_test_data(BASE_DIR + conf["file2"]))
        standardList = Utils.get_standard_data(BASE_DIR + conf["standard"])
        # One Condition per configured entry, plus the amount-scaled "M" condition.
        conditions = []
        for key, value in conf["con"].items():
            conditions.append(Condition(key, value))
        condition3 = Condition("M", conf["M"] / conf["amount"])
        condition3.set_amount(conf["M"])
        conditions.append(condition3)
        nm = conf["nm"]
        config = Calculator(allTestValue, standardList, conf["amount"], nm, 1000, conditions)
        config.calculate()
        Utils.save(config.result_list)
| LJJ/py_parseExcel | ParseExcel.py | ParseExcel.py | py | 1,219 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "insertUser.verify",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "Utils.Utils.get_test_data",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "Utils.Utils",
... |
37683950634 | import math
#import numbertheory
from numbertheory import *
#import multiprocessing
from multiprocessing import Pool
import gc
#import numpy as np
#from numpy.polynomial import polynomial as poly
####### HELPER FUNCTIONS #######
def egcd(a, b):
    """Extended Euclidean algorithm.

    Returns (g, x, y) where g = gcd(a, b) and a*x + b*y == g.
    """
    old_x, old_y = 0, 1
    cur_x, cur_y = 1, 0
    while a != 0:
        quotient, remainder = divmod(b, a)
        b, a = a, remainder
        old_x, cur_x = cur_x, old_x - cur_x * quotient
        old_y, cur_y = cur_y, old_y - cur_y * quotient
    return b, old_x, old_y
def modinv(a, m):
    """Modular inverse helper.

    Returns (True, a**-1 mod m) when a is coprime to m, otherwise
    (False, gcd(a, m)) so callers can harvest the common factor.
    """
    gcd, x, _ = egcd(a % m, m)
    if gcd == 1:
        return True, x % m
    return False, gcd
def isQR(a, mod):
    """True iff a literally equals some i**2 % mod for i in range(mod).

    Note: a is NOT reduced mod `mod` first, so a >= mod is always False.
    """
    return a in {i * i % mod for i in range(mod)}
def listQRs(mod):
    """Return [i**2 % mod for i in range(mod)] (in order, with repeats)."""
    return [i * i % mod for i in range(mod)]
def sqrtMod(a, mod):
    """Return every root i in range(mod) with i**2 % mod == a.

    Empty list when a is not a quadratic residue (or when a >= mod,
    since raw residues are always below mod).
    """
    return [root for root in range(mod) if root * root % mod == a]
# credit to martin-thoma.com
def legendre_symbol(a, p):
    """Legendre/Jacobi-style symbol (a/p) computed recursively.

    Uses: reduction mod p, the supplementary laws for 2 and p-1,
    multiplicativity over a's prime factors, and quadratic reciprocity.
    Relies on the probabilistic isPrime() and on primeFactors() below.
    """
    # normalize a into [0, p)
    if a >= p or a < 0:
        return legendre_symbol(a % p, p)
    elif a == 0 or a == 1:
        return a
    elif a == 2:
        # second supplementary law: (2/p) = 1 iff p ≡ ±1 (mod 8)
        if p%8 == 1 or p%8 == 7:
            return 1
        else:
            return -1
    elif a == p-1:
        # first supplementary law: (-1/p) = 1 iff p ≡ 1 (mod 4)
        if p%4 == 1:
            return 1
        else:
            return -1
    elif not isPrime(a):
        # multiplicativity: (a/p) = product of (q/p) over prime factors q of a
        factors = primeFactors(a)
        product = 1
        for pi in factors:
            product *= legendre_symbol(pi, p)
        return product
    else:
        # quadratic reciprocity for odd primes a, p.
        # NOTE(review): (p-1)/2 is float division on Python 3; the %2 parity
        # test still works for exactly-representable values — confirm for
        # very large p where float precision would break it.
        if ((p-1)/2)%2==0 or ((a-1)/2)%2==0:
            return legendre_symbol(p, a)
        else:
            return (-1)*legendre_symbol(p, a)
def primeFactors(n):
    """Prime factorization of n, sorted, with multiplicity.

    Trial division up to sqrt(n); any leftover > 1 is itself prime.
    (credit to stackoverflow.com/questions/16996217/prime-factorization-list)
    """
    factors = []
    divisor = 2
    while divisor * divisor <= n:
        while n % divisor == 0:
            factors.append(divisor)
            n //= divisor
        divisor += 1
    if n > 1:
        factors.append(n)
    return factors
def groupPrimes(n):
    """Prime factorization of n as sorted (prime, exponent) pairs.

    e.g. groupPrimes(12) == [(2, 2), (3, 1)].
    """
    remaining = n
    pairs = []
    divisor = 2
    while divisor * divisor <= remaining:
        if remaining % divisor == 0:
            power = 0
            while remaining % divisor == 0:
                remaining //= divisor
                power += 1
            pairs.append((divisor, power))
        divisor += 1
    if remaining > 1:
        # whatever survives trial division is a single prime factor
        pairs.append((remaining, 1))
    return pairs
return groups
# to solve systems of congruences - credit to rosetta code
def chinese_remainder(mods, exes, lena):
    """Chinese Remainder Theorem: solve x ≡ exes[i] (mod mods[i]).

    Uses the first `lena` entries of each list; the moduli are assumed
    pairwise coprime (modinv must succeed). Returns the solution mod
    the product of the moduli.
    """
    prod = 1
    for i in range(lena):
        prod *= mods[i]
    total = 0
    for i in range(lena):
        # fixed: was `prod / mods[i]` — true division yields a float on
        # Python 3, which breaks modinv and big-integer arithmetic
        partial = prod // mods[i]
        total += exes[i] * modinv(partial, mods[i])[1] * partial
    return total % prod
# Fermat primality test - credit to codeproject.com
def isPrime(number):
    """Probabilistic Fermat primality test with 3 random rounds.

    Returns False for numbers <= 1. A random base b in [1, number-1] is
    drawn each round and number is rejected unless b**(number-1) ≡ 1.
    Composites can occasionally slip through (base 1 always passes).
    """
    import random
    if number <= 1:
        return False
    for _ in range(3):
        # random base in [1, number-1] (randint is inclusive on both ends)
        base = random.randint(2, number) - 1
        if pow(base, number - 1, number) != 1:
            return False
    return True
homework = 495960937377360604920383605744987602701101399399359259262820733407167
def multE_Factor(n):
    """Attempt to run E_Factor over several curves via a process pool.

    NOTE(review): pool.apply() blocks, so the 5 calls run sequentially, and
    list mutations made to `outcomes[i]` inside worker processes do not
    propagate back to this process — `outcomes` will likely stay empty.
    E_Factor also returns None, so `results` is a list of Nones. Confirm
    intent before relying on this helper.
    """
    # point = (1, 2)
    # jobs = []
    # for i in range(15):
    # factors = list()
    # print("Curve", i, "\n")
    # p = multiprocessing.Process(target = E_Factor(factors, i, n))
    # jobs.append(p)
    # p.start()
    # del factors[:]
    outcomes = list()
    for i in range(15): outcomes.append([])
    pool = Pool(processes = 15)
    results = [pool.apply(E_Factor, args = (outcomes[i], i, n)) for i in range(5)]
    print(results)
    print(outcomes)
# Executes multiple factoring processes simultaneously
def Mult_E_Factor(n):
    """Fan e_factorize out over a worker pool and print the results.

    NOTE(review): relies on a module-level e_factorize() that is not
    defined in this file as shown — confirm it is in scope before use.
    """
    pool = Pool(processes=20)
    result = pool.map(e_factorize, (range(0, 20), n))
    # fixed: was print(results) — an undefined name (NameError)
    print(result)
# checks the list generated by E_Factor and reruns it as necessary
def E_Factor_Manager(a, n):
    """Run E_Factor on n, then re-factor any composite entries it left.

    Composite factors > 100 are retried with the next curve parameter;
    small composites fall back to trial division. Returns the sorted list.

    NOTE(review): calls e_factorize(), which is not defined in this file
    as shown — confirm it exists (perhaps in numbertheory) before use.
    """
    factors = []
    E_Factor(factors, a, n)
    #print(factors)
    finalFactors = []
    for i in range(len(factors)):
        if not isPrime(factors[i]):
            if factors[i] > 100:
                finalFactors.extend(e_factorize(a+1, factors[i]))
            else: finalFactors.extend(primeFactors(factors[i]))
        else: finalFactors.append(factors[i])
    finalFactors.sort()
    #print(finalFactors)
    return finalFactors
# creates a list of elliptic curve generated factors of an number
def E_Factor(factors, a, n):
    """Lenstra-style elliptic-curve factoring step, appending into `factors`.

    Builds a curve through (1, 3) with coefficient a mod n and looks for a
    factor using factorial multiples up to ceil(log n). On success, recurses
    on the cofactor; on failure, appends n itself (possibly composite —
    E_Factor_Manager cleans that up).
    """
    gc.collect()
    print("Factor of", n)
    if isPrime(n):
        factors.append(n)
        return
    point = (1,3,1)
    curve = findB(point, a, n)
    # smoothness bound grows with the size of n
    factor = curve.factor(point, math.ceil(math.log(n)))
    if factor != False:
        factors.append(factor)
        E_Factor(factors, a, n//factor)
    if factor == False:
        factors.append(n)
    #print(n)
# finds value b and creates a curve, given a point, a mod, and an a
def findB(point, a, mod):
    """Return the first curve y^2 = x^3 + a*x + b (mod mod) containing `point`.

    Increments b from 0 until EllipticCurve.onCurve accepts the point; the
    chosen curve is printed as a side effect. Loops forever if no b works.
    """
    b = 0
    while True:
        testCurve = EllipticCurve(a, b, mod)
        if testCurve.onCurve(point):
            testCurve.printme()
            return testCurve
        b += 1
####### ELLIPTIC CURVE CLASS #######
class EllipticCurve:
    """Elliptic curve y^2 = x^3 + a*x + b over Z/mod (mod need not be prime).

    Points are triples (x, y, z): z == 0 is the identity (point at
    infinity), z == 1 a normal affine point, and z == 2 marks a failed
    modular inverse during addition — in that case the y slot carries the
    gcd found, which is exactly what Lenstra factoring (factor()) exploits.
    """
    def __init__(self, a, b, mod):
        self.a = a
        self.b = b
        self.mod = mod
    def printme(self):
        """Print the curve equation."""
        print("E: y^2 = x^3 +", self.a, "x +", self.b, "( mod", self.mod, ")")
    def neg(self, point):
        """Return -P (the identity is its own negative)."""
        if point == (0, 1, 0): return (0, 1, 0)
        return point[0], (-1 * point[1]) % self.mod, 1
    def onCurve(self, point):
        """True iff the point satisfies the curve equation (identity counts).

        NOTE(review): relies on sqrt_mod_m from the star-import of
        numbertheory — confirm its semantics match listQRs/sqrtMod here.
        """
        if len(point) < 3:
            print("Point must be a triple.")
            return
        if point[2] == 0: return True
        x, y = point[0], point[1]
        if y in sqrt_mod_m(x**3 + self.a*x + self.b, self.mod):
            return True
        return False
    def add(self, point1, point2):
        """Group law P1 + P2; returns a z == 2 triple when an inverse fails."""
        if len(point1) < 3 or len(point2) < 3:
            print("Point must be a triple.")
            return
        # anything times the identity is itself
        if point1[2] == 0: return point2
        if point2[2] == 0: return point1
        # the identity times the identity is itself
        # (unreachable after the two early returns above)
        if point1[2] == 0 and point2[2] == 0: return (0, 1, 0)
        if point1 != point2:
            # distinct points: chord slope needs (x1 - x2) invertible;
            # otherwise surface the gcd in the y slot with z == 2
            if modinv(point1[0] - point2[0], self.mod)[0] == False:
                return (0, modinv(point2[0] - point1[0], self.mod)[1], 2)
            if point1[0] != point2[0]:
                slope = (point2[1] - point1[1]) * modinv(point2[0] - point1[0], self.mod)[1]
            else: return (0, 1, 0)
        if point1 == point2:
            # doubling: tangent slope needs 2*y1 invertible
            if modinv((2*point1[1])%self.mod, self.mod)[0] == False:
                return (0, modinv(2*point1[1], self.mod)[1], 2)
            slope = (3*(point1[0]**2) + self.a) * modinv(2*point1[1], self.mod)[1]
        x3 = (slope**2 - point1[0] - point2[0]) % self.mod
        y3 = (slope * (point1[0] - x3) - point1[1]) % self.mod
        return (x3, y3, 1)
    def mult(self, point, k):
        """Naive k*P by repeated addition — O(k), use multP for large k."""
        if k == 1: return point
        sum = (0, 1, 0)
        for i in range(k):
            sum = self.add(sum, point)
        return sum
    # recursive repeated addition via doubling
    # doubles until next doubling would exceed k
    # then calls itself on the difference until 1 left
    def multP(self, point, k):
        """k*P via double-and-add; propagates z == 2 failure triples upward."""
        if k == 0: return (0, 1, 0)
        if k == 1: return point
        else:
            temp = point
            doubles = 0
            # find the largest power of two strictly usable below k
            while True:
                doubles += 1
                if 2**doubles >= k:
                    doubles -= 1
                    break
                temp = self.add(temp, temp)
                if temp[2] == 2: return temp
            leftovers = k - 2**doubles
            temp = self.add(temp, self.multP(point, leftovers))
            if temp[2] == 2: return temp
            return temp
    # this works, slowly
    def pointOrder(self, point):
        """Order of `point`: add P to itself until the identity reappears."""
        answer = (0, 1, 0)
        count = 0
        while True:
            answer = self.add(answer, point)
            #print(count, answer, test.onCurve(answer))
            count += 1
            if answer == (0, 1, 0): break
        return count
    def bsgsGroupOrder(self, point):
        """Baby-step/giant-step candidates for the group order near mod + 1.

        Searches the Hasse interval; prints the baby/giant lists and any
        matches, and returns the list of candidate orders.
        """
        p = self.mod # set the constants
        m = p + 1 - math.ceil(2*(p**(1/2)))
        z = math.ceil(2*(p**(1/4)))
        m, z = int(m), int(z)
        mP = self.multP(point,m)
        babyList = list()
        giantList = list()
        answerList = list()
        matchList = list()
        for i in range(z): # create the lists
            babyList.append(self.multP(point,i))
            giantList.append(self.neg(self.add(mP, self.multP(point,i*z))))
        for i in babyList: # find the matches
            for j in giantList:
                if i == j:
                    answerList.append(m + babyList.index(i) + giantList.index(j)*z)
                    matchList.append((i, j))
        for i in range(len(babyList)): print(babyList[i], "\t", giantList[i])
        print("ANSWER:")
        for i in matchList: print(i) # print results
        return answerList
    def pohlig_hellman(self, P, Q):
        """Pohlig–Hellman discrete log: find k with Q = k*P, printed via CRT.

        NOTE(review): N/q[0] and N/(q[0]**i) use true division, producing
        floats on Python 3 — works only while the values stay exactly
        representable; confirm or switch to // for large orders.
        """
        originalQ = Q
        N = self.pointOrder(P)
        factors = groupPrimes(N) # groupPrimes() returns a list of doubles where
        # the first element of each double is the base
        mods = list() # and the second is the exponent, so we can
        exes = list() # refer to each as necessary
        for n in factors:
            mods.append(n[0]**n[1])
        for q in factors: # for each component of the modulus N
            print("\n***********************")
            T = list()
            Ks = list()
            Q = originalQ # reset Q
            e = q[1] # the power of the prime factor
            for j in range(q[0]):
                T.append(self.multP(P, j*(N/q[0]))) # create T list
            print("T:", T)
            for i in range(1, e+1): # for all elements of the base-k
                # expansion of current q
                candidate = self.multP(Q, N/(q[0]**i))
                K = T.index(candidate) # find the candidate in T
                Ks.append(K) # add to the list of ks ()
                # then update Q
                Q = self.add(Q, self.neg(self.multP(P, K*q[0]**(i-1))))
                print("Q", i, " is", Q, "-", K, "*",q[0], "^", i-1, "*", P)
            sum = 0
            for k in Ks: # evaluate the expansion
                sum += k*q[0]**Ks.index(k)
            sum %= q[0]**q[1]
            print(sum, "mod ", q[0]**q[1], "=", sum)
            exes.append(sum) # add it to the list
        print("\n***********************")
        print("SYSTEM:")
        print("X VALUES:\t", exes)
        print("MOD VALUES:\t", mods)
        print("ANSWER:\t\t", chinese_remainder(mods, exes, len(exes)))
    def factor(self, point, b):
        """Lenstra ECM stage 1: multiply by i! until an inverse fails.

        A z == 2 result carries a gcd with mod in its y slot; return it
        when it is a nontrivial factor, False otherwise.
        """
        for i in range(2, b):
            #print(i)
            #print(math.factorial(i))
            temp = self.multP(point, math.factorial(i))
            #print(temp)
            if temp[2] == 2:
                if temp[1] != self.mod:
                    return temp[1]
                break
                # return(temp[1])
                #new = EllipticCurve(self.a, self.b, self.mod / temp[1])
                if isPrime(temp[1]):
                    if temp[1] == self.mod:
                        print(temp[1], "is a trivial factor.")
                        return False
                    else: return temp[1]
        return False
#print("Nothing broken")
def bitwise_xor(a, b):
    """Element-wise XOR of two bit lists; length is taken from a."""
    return [(a[i] + b[i]) % 2 for i in range(len(a))]
def bitwise_and(a, b):
    """Element-wise AND of two bit lists; length is taken from a."""
    return [a[i] & b[i] for i in range(len(a))]
def bitwise_or(a, b):
    """Element-wise OR of two bit lists; length is taken from a."""
    return [a[i] | b[i] for i in range(len(a))]
def linear_complexity(sequence, polynomial, debug):
    """Berlekamp–Massey over GF(2): print the linear complexity of `sequence`.

    sequence   -- list of 0/1 bits
    polynomial -- when truthy, also print the connection (tap) polynomial C(x)
    debug      -- when truthy, trace every discrepancy computation

    Depends on make_bin_poly/addpoly/mulpoly (np.poly1d based); indexing
    Cx[i] reads the coefficient of x**i. Prints rather than returns.
    """
    Bx = make_bin_poly([0]) # make these into polynomials
    Cx = make_bin_poly([0])
    Tx = make_bin_poly([0])
    L, N = 0, 0 # complexity and test length start at 0
    s = sequence
    m = -1
    n = len(sequence)
    d = 0 # discrepancy
    while N < n:
        if debug: print("----------------\nN =", N, "\n")
        if N==0:
            d = s[0]
            if debug: print("s 0 (", s[0], ")")
        else:
            d = 0
            for i in range(0, L+1):
                if debug: print("s", N-i,"(", s[N-i], ") * ", "c", (i), "(", Cx[i], ") = ", s[N-i] * Cx[i])
                d += s[N-i] * Cx[i] # calculate the discrepancy
            d%=2
        if debug: print('\nd = ', d)
        if d==1:
            # nonzero discrepancy: correct C(x) by B(x) * x**(N-m)
            x = make_bin_poly([N-m]) # create x**(N-m)
            Tx = Cx
            Cx = addpoly(Cx, mulpoly(Bx,x))
            if debug: print('\nC(x) = \n', Cx, '\n')
            if L <= N/2:
                L = N + 1 - L
                m = N
                Bx = Tx
        N += 1
    print("\nCOMPLEXITY = ", L)
    if polynomial: print("TAP POLYNOMIAL = \n", Cx)
def make_bin_poly(terms):
    """Build an np.poly1d with coefficient 1 at each exponent in `terms`.

    terms[0] must be the largest exponent — it fixes the polynomial degree.
    e.g. make_bin_poly([2, 0]) -> x**2 + 1.
    """
    # local import: the module-level `import numpy as np` is commented out
    # at the top of this file, so guarantee np is available here
    import numpy as np
    coeffs = [0] * (terms[0] + 1)
    for exponent in terms:
        # poly1d coefficients are highest-degree first
        coeffs[len(coeffs) - (exponent + 1)] = 1
    return np.poly1d(coeffs)
def mulpoly(a, b):
    """Multiply two np.poly1d polynomials over GF(2) (coefficients mod 2)."""
    # local import: the module-level `import numpy as np` is commented out
    import numpy as np
    product = np.polymul(a, b)
    return np.poly1d(product.coeffs % 2)
def addpoly(a, b):
    """Add two np.poly1d polynomials over GF(2) (coefficients mod 2)."""
    # local import: the module-level `import numpy as np` is commented out
    import numpy as np
    total = a + b
    return np.poly1d(total.coeffs % 2)
def xorStreams(a, b, debug):
    """XOR two bit streams element-wise (length taken from a).

    When debug is truthy, print both inputs and the combined stream.
    """
    C = [(a[i] + b[i]) % 2 for i in range(len(a))]
    if debug:
        print("*********************")
        print('A:\t', a)
        print('B:\t', b)
        print('C:\t', C)
    return C
def testC(A, B, length, printSequences):
    """XOR `length` bits from LFSRs A and B and report the linear complexity.

    Fixed call arities: LFSR.putout(bits, reset, printRegisters) takes three
    arguments and linear_complexity(sequence, polynomial, debug) takes three
    — the original calls were missing one argument each (TypeError).
    """
    stream_a = A.putout(length, False, False)
    stream_b = B.putout(length, False, False)
    combined = xorStreams(stream_a, stream_b, printSequences)
    linear_complexity(combined, False, False)
class LFSR:
    """Fibonacci linear-feedback shift register over GF(2).

    `fill` is the initial register contents (list of 0/1 bits); `taps` are
    given as 1-based positions and converted to 0-based indices in __init__.
    """
    def __init__(self, fill, taps):
        self.fill = list(fill)       # pristine copy used by reset()
        self.register = list(fill)   # working state
        self.taps = list(taps)
        # convert 1-based tap positions to 0-based indices
        for i in range(0, len(self.taps)):
            self.taps[i] -= 1
    def printtaps(self):
        """Print the (0-based) tap indices."""
        print(self.taps)
    def printfill(self):
        # NOTE(review): prints the current register, not the initial fill
        print(self.register)
    def printregister(self):
        """Print the current register state."""
        print(self.register)
    def newregister(self, sequence):
        """Replace the working register contents (no copy is taken)."""
        self.register = sequence
    def newtaps(self, taps):
        """Replace the tap indices; expects 0-based values (no -1 applied)."""
        self.taps = taps
        #print(self.taps)
    def reset(self):
        """Restore the register to the original fill."""
        self.register = list(self.fill)
    def tick(self):
        """Clock the register once and return the emitted bit."""
        return (self.putout(1, False, False))[0]
    def putout(self, bits, reset, printRegisters):
        """Clock the register `bits` times and return the emitted bit list.

        Optionally resets first and/or prints the register each step; the
        stream is also stored on self.output as a side effect.
        """
        if reset: self.reset()
        self.output = []
        next = 0
        for i in range(bits):
            #print(i)
            if printRegisters: print(self.register)
            next = self.xor(self.register, self.taps)
            self.output.append(self.register[0])
            self.register.pop(0)
            self.register.append(next)
        return self.output
    def xor(self, fill, taps):
        """Feedback bit: XOR of the register cells selected by the taps."""
        sum = 0
        for i in taps:
            sum += fill[len(fill)-i-1]
        sum %= 2
        return sum
| CSAlexWhite/Cryptography | crypto.py | crypto.py | py | 16,518 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "random.randint",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "gc.collec... |
70384673149 |
from collections import deque
from dataclasses import dataclass, field, replace
from typing import Type
import copy
import numpy as np
import pandas as pd
import re
# little helper class
class ldf_dict(dict):
    """A dict subclass with an explicit add(key, value) helper.

    The original defined __init__ as `self = dict()`, which only rebinds
    the local name and has no effect — removed; dict.__init__ suffices.
    """

    def add(self, key, value):
        """Store `value` under `key` (plain dict assignment)."""
        self[key] = value
@dataclass
class Nodes:
    """Master/slave description parsed from the LDF "Nodes" section."""
    master: str            # name of the LIN master node
    timer_base_ms: float   # master time base in milliseconds
    jitter_ms: float       # master jitter in milliseconds
    slaves: list = field(default_factory=list)  # slave node names
@dataclass(init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False)
class Signal:
    """One entry of the LDF "Signals" section."""
    size: int        # signal size in bits
    init_val: int    # initial value
    publisher: str   # publishing node name
    subscriber: str  # subscribing node name
@dataclass(init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False)
class Frame:
    """One unconditional frame: header data plus the signals it carries."""
    identifier: int        # frame ID from the LDF
    publisher: str         # publishing node name
    response_length: int   # frame length in bytes
    # mapping signal name -> {"Offset": ...}; annotation fixed from the
    # original `ldf_dict()` (an instance expression) to the class itself
    signals: ldf_dict
@dataclass(init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False)
class Diagnostic_signal:
    """One entry of the LDF "Diagnostic_signals" section."""
    size: int      # signal size in bits
    init_val: int  # initial value
@dataclass
class Node_attribute:
    """Per-slave data from the LDF "Node_attributes" section."""
    lin_protocol: float    # LIN protocol version
    configure_NAD: str     # configured node address
    product_id: list       # supplier / function / variant triple
    response_error: str    # response-error signal name
    P2_min_ms: int         # P2_min timing in milliseconds
    ST_min_ms: int         # ST_min timing in milliseconds
    # mapping configurable frame name -> message id; annotation fixed from
    # the original `ldf_dict()` (an instance expression) to the class itself
    configure_frames: ldf_dict
class LDFParser:
    """
    Wording: every element of the ldf e.g. Nodes {} or Signals {} is
    called attribute.

    The LDF is read line by line (via pandas) into a numpy array; boolean
    masks for attribute starts, frame starts and curly braces are computed
    once, and each get_* method walks the relevant line range.
    """
    __closed_curly: np.ndarray
    __opened_curly: np.ndarray
    __ldf_data: np.ndarray
    __ldf_header: np.ndarray
    __start_of_attribute: np.ndarray
    __start_of_frames: np.ndarray
    # frames: key=frame_name, value=frame data
    # NOTE(review): these containers are class attributes and therefore
    # shared across LDFParser instances — confirm only one parser is
    # constructed per process, or move them into __init__.
    frames = ldf_dict()
    node_attributes = ldf_dict()
    schedule_tables = ldf_dict()
    signals = ldf_dict()
    diagnostic_signals = ldf_dict()
    signal_encoding_types = ldf_dict()
    signal_representation = ldf_dict()
    nodes = Nodes
    bus_name = ""
    def __init__(self, ldf_path):
        """Load the LDF at ldf_path and pre-compute the structural masks."""
        # sep="\n" keeps every LDF line as a single column value
        self.__ldf_data = pd.read_csv(ldf_path, sep="\n", encoding='latin-1')
        self.__ldf_data = self.__ldf_data.values
        self.__remove_header_info()
        self.__get_bus_name()
        self.__analyse_ldf_elements()
    def parse_all(self):
        """Dispatch every recognized top-level attribute to its get_* parser."""
        for (line_number, axis), value in np.ndenumerate(self.__start_of_attribute):
            if value and self.__ldf_data[line_number] == "Nodes {":
                self.get_nodes(line_number)
            elif value and self.__ldf_data[line_number] == "Signals {":
                self.get_signals(line_number)
            elif value and self.__ldf_data[line_number] == "Diagnostic_signals {":
                self.get_dignostic_signals(line_number)
            elif value and self.__ldf_data[line_number] == "Frames {":
                self.get_frames()
            elif value and self.__ldf_data[line_number] == "Node_attributes {":
                self.get_node_attributes(line_number)
            elif value and self.__ldf_data[line_number] == "Schedule_tables {":
                self.get_schedule_table(line_number)
            elif value and self.__ldf_data[line_number] == "Signal_encoding_types {":
                self.get_signal_encoding_types(line_number)
            elif value and self.__ldf_data[line_number] == "Signal_representation {":
                self.get_signal_representation(line_number)
        # free the raw line data once everything is parsed
        del self.__ldf_data, self.__closed_curly, self.__start_of_frames, self.__start_of_attribute
    def get_nodes(self, line_number=-1):
        """Parse the Nodes section into self.nodes.

        NOTE(review): `nodes = Nodes` binds the class, not an instance, so
        master/slaves end up as class attributes on Nodes — confirm intent.
        """
        nodes = Nodes
        if line_number == -1:
            line_number = int(np.where(self.__ldf_data == "Nodes {")[0]) + 1
        end_of_nodes = self.__get_index_of_next_closed_curly(line_number)
        while line_number < end_of_nodes:
            line_number = line_number + 1
            current_line_value = self.__ldf_data[line_number][0]
            current_line_value = self.__remove_unwanted(current_line_value).split(':')
            if current_line_value[0] == "Master":
                # "Master: <name>, <time_base> ms, <jitter> ms;"
                master_values = current_line_value[1].split(',')
                nodes.master = master_values[0]
                nodes.timer_base_ms = float(self.__remove_all_but_num(master_values[1]))
                nodes.jitter_ms = float(self.__remove_all_but_num(master_values[2]))
            elif current_line_value[0] == "Slaves":
                nodes.slaves = current_line_value[1].split(',')
        self.nodes = nodes
    def get_frames(self):
        """Parse the Frames section into self.frames (name -> Frame)."""
        # self.start_of_frame contains all starting positions of the frame elements
        start_frame_indizes = np.where(self.__start_of_frames[:, 0])[0]
        end_frame_indizes = np.where(self.__closed_curly[:, 0])[0]
        end_frame_indizes = deque(end_frame_indizes)
        # remove not needed closing curly braces
        while end_frame_indizes[0] < start_frame_indizes[0]:
            end_frame_indizes.popleft()
        end_frames_index = self.__get_end_of_attribute(start_frame_indizes[0])
        start_frame_indizes = deque(start_frame_indizes)
        current_line_number = start_frame_indizes.popleft()
        while current_line_number < end_frames_index:
            # first parse the frame header ..
            frame = Frame(identifier=0, publisher="", response_length=0, signals=ldf_dict())
            frame_header = self.__raw_line_to_list(self.__ldf_data[current_line_number][0])
            frame.identifier = frame_header[1]
            frame.publisher = frame_header[2]
            frame.response_length = int(frame_header[3])
            current_line_number = current_line_number + 1
            # .. and then the signals
            end_of_frame_signals = self.__get_end_of_attribute(current_line_number, 1)
            signals = ldf_dict()
            while current_line_number < end_of_frame_signals:
                signal = ldf_dict()
                # "<signal_name>, <offset>;"
                signal_line = self.__remove_unwanted(self.__ldf_data[current_line_number][0]).split(",")
                signal_name = signal_line[0]
                signal_offset = signal_line[1]
                signal.add("Offset", signal_offset)
                signals.add(signal_name, signal)
                current_line_number = current_line_number + 1
            frame.signals = signals
            self.frames.add(frame_header[0], frame)
            current_line_number = current_line_number + 1
    def get_node_attributes(self, line_number):
        """Parse Node_attributes into self.node_attributes (name -> Node_attribute).

        The node named "DS" gets a shortened record (no product id /
        timings / configurable frames).
        """
        end_of_node_attr = self.__get_end_of_attribute(line_number, 3)
        line_number = line_number + 1
        while line_number < end_of_node_attr:
            node_attribute = Node_attribute(lin_protocol=0.0, configure_NAD="", product_id=[], response_error="",
                                            P2_min_ms=0, ST_min_ms=0, configure_frames=ldf_dict())
            node_attribute_name = self.__remove_unwanted(self.__ldf_data[line_number][0])
            line_number = line_number + 1
            node_attribute.lin_protocol = float(self.__remove_unwanted(self.__ldf_data[line_number][0]).split("=")[1])
            line_number = line_number + 1
            node_attribute.configure_NAD = self.__remove_unwanted(self.__ldf_data[line_number][0]).split("=")[1]
            line_number = line_number + 1
            if node_attribute_name == "DS":
                self.node_attributes.add(node_attribute_name, node_attribute)
                line_number = self.__get_end_of_attribute(line_number, 1) + 1
            else:
                node_attribute.product_id = self.__remove_unwanted(self.__ldf_data[line_number][0]).split("=")[1].split(",")
                line_number = line_number + 1
                node_attribute.response_error = self.__remove_unwanted(self.__ldf_data[line_number][0]).split("=")[1]
                line_number = line_number + 1
                # timings carry units ("ms"), so strip everything non-numeric
                node_attribute.P2_min_ms = int(re.sub(r'[^0-9]', '', self.__remove_unwanted(self.__ldf_data[line_number][0]).split("=")[1]))
                line_number = line_number + 1
                node_attribute.ST_min_ms = int(re.sub(r'[^0-9]', '', self.__remove_unwanted(self.__ldf_data[line_number][0]).split("=")[1]))
                line_number = line_number + 2
                end_of_configurable_frames = self.__get_end_of_attribute(line_number, 1)
                conf_frame_dict = ldf_dict()
                while line_number < end_of_configurable_frames:
                    conf_frame = self.__remove_unwanted(self.__ldf_data[line_number][0]).split("=")
                    conf_frame_dict.add(conf_frame[0], conf_frame[1])
                    line_number = line_number + 1
                node_attribute.configure_frames = conf_frame_dict
                self.node_attributes.add(node_attribute_name, node_attribute)
                line_number = self.__get_end_of_attribute(line_number, 2) + 2
    def get_signal_representation(self, current_line_number):
        """Parse Signal_representation (encoding name -> list of signal names)."""
        current_line_number = current_line_number + 1
        end_of_signal_representation = self.__get_index_of_next_closed_curly(current_line_number)
        while current_line_number < end_of_signal_representation:
            signal_representation_list = self.__remove_unwanted(self.__ldf_data[current_line_number][0]).split(":")
            signal_repre_key = signal_representation_list[0]
            signal_repre_val = signal_representation_list[1].split(",")
            current_line_number = current_line_number + 1
            self.signal_representation.add(signal_repre_key, signal_repre_val)
    def get_signal_encoding_types(self, current_line_number):
        """Parse Signal_encoding_types (encoding name -> list of value rows)."""
        current_line_number = current_line_number + 1
        end_of_signal_enc_types = self.__get_end_of_attribute(current_line_number, 2)
        while current_line_number < end_of_signal_enc_types:
            signal_encoding_name = self.__remove_unwanted(self.__ldf_data[current_line_number][0])
            current_line_number = current_line_number + 1
            end_of_current_sign_enc_type = self.__get_index_of_next_closed_curly(current_line_number)
            encoding_list = []
            while current_line_number < end_of_current_sign_enc_type:
                val_list = self.__ldf_data[current_line_number][0].split(",")
                for i in range(0, len(val_list)):
                    # strip leading whitespace plus quotes/semicolons per field
                    val_list[i] = re.sub(r"^[\s]*|[\";]", "", val_list[i])
                encoding_list.append(val_list)
                current_line_number = current_line_number + 1
            self.signal_encoding_types.add(signal_encoding_name, encoding_list)
            current_line_number = current_line_number + 1
    def get_schedule_table(self, current_line_number):
        """Parse Schedule_tables (table name -> {frame slot -> delay ms})."""
        current_line_number = current_line_number + 1
        end_of_schedule_tables = self.__get_end_of_attribute(current_line_number, 2)
        while current_line_number < end_of_schedule_tables:
            schedule_table_name = self.__remove_unwanted(self.__ldf_data[current_line_number][0])
            current_line_number = current_line_number + 1
            end_of_current_schedule_table = self.__get_index_of_next_closed_curly(current_line_number)
            frame_slots = ldf_dict()
            while current_line_number < end_of_current_schedule_table:
                #schedule_table = Schedule_table(frame_slot_name="", frame_slot_duration_ms=0)
                # "<frame> delay <n> ms;" -> split on spaces after dropping tabs
                current_line_list = re.sub(r"[\t]", "", self.__ldf_data[current_line_number][0]).split(" ")
                frame_slot_name = current_line_list[0]
                frame_slot_duration_ms = current_line_list[2]
                frame_slots.add(frame_slot_name, int(frame_slot_duration_ms))
                current_line_number = current_line_number + 1
            self.schedule_tables.add(schedule_table_name, frame_slots)
            current_line_number = current_line_number + 1
    def get_signals(self, current_line_number):
        """Parse the Signals section into self.signals (name -> Signal)."""
        current_line_number = current_line_number + 1
        end_of_signals = self.__get_index_of_next_closed_curly(current_line_number)
        while current_line_number < end_of_signals:
            signal = Signal(size=0, init_val=0, publisher="", subscriber="")
            raw_line = self.__ldf_data[current_line_number][0]
            # "<name>: <size>, <init>, <publisher>, <subscriber>;"
            line_as_list = self.__raw_line_to_list(raw_line)
            signal.size = line_as_list[1]
            signal.init_val = line_as_list[2]
            signal.publisher = line_as_list[3]
            signal.subscriber = line_as_list[4]
            current_line_number = current_line_number + 1
            self.signals.add(line_as_list[0], signal)
    def get_dignostic_signals(self, current_line_number):
        """Parse Diagnostic_signals into self.diagnostic_signals."""
        current_line_number = current_line_number + 1
        end_of_diagnostic_signals = self.__get_index_of_next_closed_curly(current_line_number)
        while current_line_number < end_of_diagnostic_signals:
            diagnostic_signal = Diagnostic_signal(size=0, init_val=0)
            raw_line = self.__ldf_data[current_line_number][0]
            line_as_list = self.__raw_line_to_list(raw_line)
            diagnostic_signal.size = line_as_list[1]
            diagnostic_signal.init_val = line_as_list[2]
            self.diagnostic_signals.add(line_as_list[0], diagnostic_signal)
            current_line_number = current_line_number + 1
    def __get_bus_name(self):
        """Pull the bus name from the "Network" line of the header comment."""
        for (line_number, axis), value in np.ndenumerate(self.__ldf_header):
            if value.find("Network") != -1:
                self.bus_name = self.__remove_unwanted(value).split(":")[1]
    def __remove_unwanted(self, string: str) -> str:
        """
        :param string: string that contains commas, semicols, whitespace, tabspace or closed curly
        :return: cleaned string
        """
        string = re.sub(r'[\s\t;{}"*/]*', '', string, flags=re.M)
        return string
    def __analyse_ldf_elements(self):
        """Vector-match every structural pattern once over all LDF lines."""
        # TODO: optimzable since it runs three times over the file
        start_pattern = re.compile(r'\b\w+\s{$')
        start_vmatch = np.vectorize(lambda x: bool(start_pattern.match(x)))
        self.__start_of_attribute = start_vmatch(self.__ldf_data)
        # find all closed curlys
        close_curly_pattern = re.compile(r'\s*}$')
        end_vmatch = np.vectorize(lambda x: bool(close_curly_pattern.match(x)))
        self.__closed_curly = end_vmatch(self.__ldf_data)
        open_curly_pattern = re.compile(r'.*{$')
        open_curly_vmatch = np.vectorize(lambda x: bool(open_curly_pattern.match(x)))
        self.__opened_curly = open_curly_vmatch(self.__ldf_data)
        frames_pattern = re.compile(r'\s*[A-Za-z0-9_]+:[\d\sA-Za-z,_]+{$')
        # example: AQSe_01: 10, Klima_LIN1, 6 {
        frames_vmatch = np.vectorize(lambda x: bool(frames_pattern.match(x)))
        self.__start_of_frames = frames_vmatch(self.__ldf_data)
    def __remove_all_but_num(self, string: str) -> str:
        """Strip everything except digits and the decimal point."""
        return re.sub(r'[^0-9.]', '', string, flags=re.M)
    def __raw_line_to_list(self, line):
        """Split "<name>: a, b, c" into [name, a, b, c] after cleaning."""
        line = self.__remove_unwanted(line).split(":")
        line = line[:1] + line[1].split(",")
        return line
    def __remove_header_info(self):
        """Separate the leading comment header (lines containing "/*") from the data."""
        counter = 0
        for line in self.__ldf_data:
            if "/*" in line[0]:
                counter = counter + 1
        if counter != 0:
            self.__ldf_header = copy.deepcopy(self.__ldf_data[:counter])
            self.__ldf_data = self.__ldf_data[counter:]
    def __get_index_of_next_closed_curly(self, index):
        """Index of the first closing brace strictly after `index`."""
        index_ = index + 1
        while not self.__closed_curly[index_]:
            index_ = index_ + 1
        return index_
    def __write_to_arr_till_closed_curly(self, index, np_arr):
        """Append lines after `index` to np_arr until a closing brace is hit."""
        index_ = index + 1
        while not self.__closed_curly[index_]:
            np_arr = np.append(np_arr, self.__ldf_data[index_][0])
            index_ = index_ + 1
        return np_arr
    def __get_end_of_attribute(self, index, successive_closed_curly=2):
        """Find the end of a block marked by 1, 2 or 3 consecutive closing braces."""
        # find end of block by double or tripple closed curly braces
        i = index
        if successive_closed_curly == 1:
            while not self.__closed_curly[i]:
                i = i + 1
        elif successive_closed_curly == 2:
            while not self.__closed_curly[i] or not self.__closed_curly[i + 1]:
                i = i + 1
        elif successive_closed_curly == 3:
            while not self.__closed_curly[i] or not self.__closed_curly[i + 1] or not self.__closed_curly[i + 2]:
                i = i + 1
        else:
            print("Number of curly not supported")
        return i
| makreft/lin_ldf_parser | lin_ldf_parser/lin_ldf_parser.py | lin_ldf_parser.py | py | 16,228 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "dataclasses.field",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datacla... |
37946580665 | from sklearn import tree
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
import numpy as np
# Data and labels
# [Height, Weight ,Shoe Size]
X = [[181, 80, 44], [177, 70, 43], [160, 60, 38], [154, 54, 37], [166, 65, 40], [190, 90, 47], [175, 64, 39],
     [177, 70, 40], [159, 55, 37], [171, 75, 42], [181, 85, 43]]
Y = ['male', 'male', 'female', 'female', 'male', 'male', 'female', 'female', 'female', 'male', 'male']
# Classifiers (all with default hyperparameters)
clf_tree = tree.DecisionTreeClassifier()
clf_svm = SVC()
clf_KNN = KNeighborsClassifier()
clf_gaussian = GaussianNB()
# Train the models
clf_tree.fit(X, Y)
clf_svm.fit(X, Y)
clf_KNN.fit(X, Y)
clf_gaussian.fit(X,Y)
# Testing using the same data
# NOTE(review): accuracy is measured on the training set, so it
# overestimates real performance (a decision tree will score ~100%).
pred_tree = clf_tree.predict(X)
acc_tree = accuracy_score(Y, pred_tree) * 100
print('Accuracy for DecisionTree: {}'.format(acc_tree))
pred_svm = clf_svm.predict(X)
acc_svm = accuracy_score(Y, pred_svm) * 100
print('Accuracy for SVM: {}'.format(acc_svm))
pred_KNN = clf_KNN.predict(X)
acc_KNN = accuracy_score(Y, pred_KNN) * 100
print('Accuracy for KNN: {}'.format(acc_KNN))
pred_gauss = clf_gaussian.predict(X)
acc_gauss = accuracy_score(Y, pred_gauss) * 100
print('Accuracy for GaussianNB: {}'.format(acc_gauss))
# The best classifier from svm, per, KNN
# (argmax picks the first maximum on ties)
index = np.argmax([acc_tree,acc_svm, acc_KNN, acc_gauss])
classifiers = {0: 'Tree',1: 'SVM', 2: 'KNN', 3: 'GaussianNB'}
print('Best gender classifier is {}'.format(classifiers[index]))
| vjgpt/gender_classification | gender_classify.py | gender_classify.py | py | 1,542 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "sklearn.tree.DecisionTreeClassifier",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.tree",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "skle... |
13321449104 | from airflow.models import Variable
from airflow.hooks.postgres_hook import PostgresHook
from rock.utilities import safeget, get_delta_offset, find_supported_fields
import requests
class ContentItemCategory:
    def __init__(self, kwargs):
        """Store DAG kwargs and set up Rock auth headers plus the Postgres hook.

        kwargs must contain at least "client" (used to look up Airflow
        Variables and the connection id) and "execution_date".
        """
        self.kwargs = kwargs
        self.headers = {
            "Authorization-Token": Variable.get(kwargs["client"] + "_rock_token")
        }
        self.pg_connection = kwargs["client"] + "_apollos_postgres"
        # keepalive settings guard long-running syncs against dropped connections
        self.pg_hook = PostgresHook(
            postgres_conn_id=self.pg_connection,
            keepalives=1,
            keepalives_idle=30,
            keepalives_interval=10,
            keepalives_count=5,
        )
def map_content_channel_to_category(self, obj):
return {
"created_at": self.kwargs["execution_date"],
"updated_at": self.kwargs["execution_date"],
"origin_id": obj["Id"],
"origin_type": "rock",
"apollos_type": "ContentChannel",
"title": obj["Name"],
}
def set_content_item_category_query(self, obj):
return """
UPDATE content_item
SET content_item_category_id = (SELECT id FROM content_item_category WHERE origin_id = '{}')
WHERE origin_id = '{}';
""".format(
str(safeget(obj, "ContentChannel", "Id")), str(obj["Id"])
)
def run_attach_content_item_categories(self):
fetched_all = False
skip = 0
top = 10000
while not fetched_all:
# Fetch people records from Rock.
params = {
"$top": top,
"$skip": skip,
"$expand": "ContentChannel",
"$select": "Id,ContentChannel/Id",
"$orderby": "ModifiedDateTime desc",
}
if not self.kwargs["do_backfill"]:
params["$filter"] = get_delta_offset(self.kwargs)
print(params)
r = requests.get(
f"{Variable.get(self.kwargs['client'] + '_rock_api')}/ContentChannelItems",
params=params,
headers=self.headers,
)
rock_objects = r.json()
if not isinstance(rock_objects, list):
print(rock_objects)
print("oh uh, we might have made a bad request")
print("top: {top}")
print("skip: {skip}")
skip += top
continue
skip += top
fetched_all = len(rock_objects) < top
self.pg_hook.run(
list(map(self.set_content_item_category_query, rock_objects))
)
def run_fetch_and_save_content_item_categories(self):
fetched_all = False
skip = 0
top = 10000
while not fetched_all:
# Fetch people records from Rock.
params = {
"$top": top,
"$skip": skip,
# "$expand": "Photo",
"$select": "Id,Name",
"$orderby": "ModifiedDateTime desc",
}
if not self.kwargs["do_backfill"]:
params[
"$filter"
] = f"ModifiedDateTime ge datetime'{self.kwargs['execution_date'].strftime('%Y-%m-%dT00:00')}' or ModifiedDateTime eq null"
print(params)
r = requests.get(
f"{Variable.get(self.kwargs['client'] + '_rock_api')}/ContentChannels",
params=params,
headers=self.headers,
)
rock_objects = r.json()
if not isinstance(rock_objects, list):
print(rock_objects)
print("oh uh, we might have made a bad request")
print("top: {top}")
print("skip: {skip}")
skip += top
continue
skip += top
fetched_all = len(rock_objects) < top
insert_data = list(map(self.map_content_channel_to_category, rock_objects))
content_to_insert, columns, constraint = find_supported_fields(
pg_hook=self.pg_hook,
table_name="content_item_category",
insert_data=insert_data,
)
self.pg_hook.insert_rows(
"content_item_category",
content_to_insert,
columns,
0,
True,
replace_index=constraint,
)
add_apollos_ids = """
UPDATE content_item_category
SET apollos_id = apollos_type || ':' || id::varchar
WHERE origin_type = 'rock' and apollos_id IS NULL
"""
self.pg_hook.run(add_apollos_ids)
def fetch_and_save_content_item_categories(ds, *args, **kwargs):
if "client" not in kwargs or kwargs["client"] is None:
raise Exception("You must configure a client for this operator")
Klass = ( # noqa N806
ContentItemCategory if "klass" not in kwargs else kwargs["klass"]
)
category_task = Klass(kwargs)
category_task.run_fetch_and_save_content_item_categories()
def attach_content_item_categories(ds, *args, **kwargs):
if "client" not in kwargs or kwargs["client"] is None:
raise Exception("You must configure a client for this operator")
Klass = ( # noqa N806
ContentItemCategory if "klass" not in kwargs else kwargs["klass"]
)
category_task = Klass(kwargs)
category_task.run_attach_content_item_categories()
| CrossingsCommunityChurch/apollos-shovel | dags/rock/rock_content_item_categories.py | rock_content_item_categories.py | py | 5,576 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "airflow.models.Variable.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "airflow.models.Variable",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "airflow.hooks.postgres_hook.PostgresHook",
"line_number": 15,
"usage_type": "call"
... |
36060705055 | from config.log import log
from central.servidor_central import servidor_central
# Entry point: initialise logging, then start the central server with the
# two room configuration files.
if __name__ == "__main__":
    log()
    # Interactive prompts, currently disabled (translated from Portuguese:
    # "Enter the path of the room 01/02 configuration file"):
    # print('Enter the path of the room 01 configuration file:')
    # sala_01 = input()
    # print('Enter the path of the room 02 configuration file:')
    # sala_02 = input()
    # Hard-coded room configuration paths used instead of the prompts above.
    sala_01 = 'src/json/sala_1.json'
    sala_02 = 'src/json/sala_2.json'
    servidor_central(sala_01, sala_02)
{
"api_name": "config.log.log",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "central.servidor_central.servidor_central",
"line_number": 13,
"usage_type": "call"
}
] |
7437698122 | """
Script that trains an NFC bounding interval annotator.
To use tensorboard during or after model training, open a terminal and say:
conda activate vesper-dev-tf2
tensorboard --logdir "/Users/Harold/Desktop/NFC/Data/Vesper ML/
NFC Bounding Interval Annotator 1.0/Logs/<training log dir path>"
and then visit:
127.0.0.1:6006
in Chrome.
"""
from collections import defaultdict
import math
import time
from matplotlib.backends.backend_pdf import PdfPages
from tensorflow.keras.layers import (
BatchNormalization, Conv2D, Dense, Flatten, MaxPooling2D)
# from tensorflow.keras.layers import Dropout
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from vesper.mpg_ranch.nfc_bounding_interval_annotator_1_0.inferrer \
import Inferrer
from vesper.util.settings import Settings
import vesper.mpg_ranch.nfc_bounding_interval_annotator_1_0.annotator_utils \
as annotator_utils
import vesper.mpg_ranch.nfc_bounding_interval_annotator_1_0.dataset_utils \
as dataset_utils
import vesper.util.yaml_utils as yaml_utils
TSEEP_SETTINGS = Settings(
clip_type='Tseep',
bound_type='Start',
waveform_sample_rate=24000,
positive_example_probability=.5,
positive_example_call_start_offset=.025,
waveform_slice_duration=.080,
# `True` if and only if the waveform amplitude scaling data
# augmentation is enabled. This augmentation scales each waveform
# randomly to distribute the waveform log RMS amplitudes uniformly
# within a roughly 48 dB window.
waveform_amplitude_scaling_data_augmentation_enabled=False,
# spectrogram settings
spectrogram_window_size=.005,
spectrogram_hop_size=20,
spectrogram_log_epsilon=1e-10,
# spectrogram frequency axis slicing settings
spectrogram_start_freq=4000,
spectrogram_end_freq=10500,
# The maximum spectrogram frequency shift for data augmentation,
# in bins. Set this to zero to disable this augmentation.
max_spectrogram_frequency_shift=2,
spectrogram_background_normalization_percentile_rank=30,
# training settings
training_batch_size=128,
training_epoch_step_count=100, # epoch size is batch size times step count
training_epoch_count=30,
model_save_period=5, # epochs
dropout_rate=.25,
# validation settings
validation_batch_size=1,
validation_step_count=1000,
# evaluation plot settings
max_evaluation_inlier_diff=20,
# offsets for converting inference value to spectrogram index
call_start_index_offset=23,
call_end_index_offset=22,
)
def main():
    """Script entry point: train an annotator with the Tseep settings.

    The commented-out calls below are the other utilities in this module,
    kept handy for ad-hoc runs.
    """
    settings = TSEEP_SETTINGS
    train_annotator(settings)
    # evaluate_annotator('2020-07-06_09.33.54')
    # show_model_summary('start_2020-06-10_12.13.39', 20)
    # test_get_spectrogram_percentiles()
    # test_create_waveform_dataset_from_tensors()
    # test_create_waveform_dataset_from_tfrecord_files('Training', settings)
    # test_create_training_dataset('Training', settings)
    # test_create_inference_dataset(settings)
    # show_dataset_sizes(settings)
def train_annotator(settings):
    """
    Train an NFC bounding interval annotator with the given settings.

    Builds a small Conv2D binary classifier over spectrogram slices,
    trains on the 'Training' dataset with validation on 'Validation',
    logs to TensorBoard, and saves/evaluates the model periodically via
    `ModelSaveCallback`.
    """
    s = settings
    training_name = annotator_utils.create_training_name(s)
    training_dataset = get_dataset('Training', s).batch(s.training_batch_size)
    validation_dataset = \
        get_dataset('Validation', s).batch(s.validation_batch_size)
    input_shape = dataset_utils.get_spectrogram_slice_shape(settings)
    # Commented-out layers below record alternative configurations tried
    # during development.
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
        # Dropout(s.dropout_rate),
        BatchNormalization(),
        MaxPooling2D((1, 2)),
        # Conv2D(16, (1, 1), activation='relu'),
        # BatchNormalization(),
        Conv2D(32, (3, 3), activation='relu'),
        # Dropout(s.dropout_rate),
        BatchNormalization(),
        MaxPooling2D((1, 2)),
        # Conv2D(16, (1, 1), activation='relu'),
        # BatchNormalization(),
        Flatten(),
        # Dense(32, activation='relu'),
        # BatchNormalization(),
        Dense(32, activation='relu'),
        # Dropout(s.dropout_rate),
        BatchNormalization(),
        Dense(1, activation='sigmoid')
    ])
    # Binary cross-entropy: the model predicts call presence at a bound.
    model.compile(
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy'])
    model.summary()
    log_dir_path = annotator_utils.get_training_log_dir_path(training_name)
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=log_dir_path, histogram_freq=1)
    model_save_callback = ModelSaveCallback(training_name, settings)
    model.fit(
        training_dataset, epochs=s.training_epoch_count,
        steps_per_epoch=s.training_epoch_step_count, verbose=2,
        validation_data=validation_dataset,
        validation_steps=s.validation_step_count,
        callbacks=[tensorboard_callback, model_save_callback])
class ModelSaveCallback(tf.keras.callbacks.Callback):
    """Keras callback that saves and evaluates the model every
    `settings.model_save_period` epochs."""

    def __init__(self, training_name, settings):
        # Fix: Keras requires Callback subclasses to invoke the base
        # initializer so internal callback state is set up.
        super().__init__()
        self._training_name = training_name
        self._settings = settings

    def on_epoch_end(self, epoch, logs=None):
        """Save and evaluate the model at the end of every
        `model_save_period`-th epoch (`epoch` is zero-based)."""
        epoch_num = epoch + 1
        if epoch_num % self._settings.model_save_period == 0:
            model_dir_path = \
                annotator_utils.get_tensorflow_saved_model_dir_path(
                    self._training_name, epoch_num)
            self.model.save(model_dir_path)
            save_training_settings(self._settings, self._training_name)
            print(f'Saved model at end of epoch {epoch_num}.')
            print('Evaluating model...')
            evaluate_annotator(self._training_name, epoch_num)
def get_dataset(name, settings):
    """Build the training-format dataset named *name* ('Training' or
    'Validation') for the clip type given by *settings*."""
    dataset_dir = annotator_utils.get_dataset_dir_path(
        settings.clip_type, name)
    return dataset_utils.create_training_dataset(dataset_dir, settings)
def save_training_settings(settings, training_name):
    """Serialize *settings* as YAML alongside the other artifacts of the
    training run named *training_name*."""
    yaml_text = yaml_utils.dump(settings.__dict__, default_flow_style=False)
    settings_path = annotator_utils.get_training_settings_file_path(
        training_name)
    settings_path.write_text(yaml_text)
def evaluate_annotator(training_name, epoch_num):
    """
    Evaluate the model saved at `epoch_num` of the given training run.

    Runs inference over the validation dataset, accumulates histograms of
    call start/end bound errors (in milliseconds), prints summary
    statistics, and writes an evaluation plot PDF.
    """
    _, settings = annotator_utils.load_model_and_settings(
        training_name, epoch_num)
    dir_path = annotator_utils.get_dataset_dir_path(
        settings.clip_type, 'Validation')
    dataset = dataset_utils.create_validation_dataset(dir_path, settings)
    dataset = dataset.take(settings.validation_step_count)
    inferrer = Inferrer((training_name, epoch_num))
    bounds = inferrer.get_call_bounds(dataset)
    # Histograms: error in ms -> occurrence count.
    start_diff_counts = defaultdict(int)
    end_diff_counts = defaultdict(int)
    for (inferred_start_index, inferred_end_index, dataset_start_index,
            dataset_end_index) in bounds:
        dataset_start_index = dataset_start_index.numpy()
        dataset_end_index = dataset_end_index.numpy()
        sample_rate = settings.waveform_sample_rate
        start_diff = _get_diff(
            inferred_start_index, dataset_start_index, sample_rate)
        end_diff = _get_diff(
            inferred_end_index, dataset_end_index, sample_rate)
        # NOTE(review): end_diff is only counted when start_diff is not
        # None — confirm that this coupling is intended.
        if start_diff is not None:
            start_diff_counts[start_diff] += 1
            end_diff_counts[end_diff] += 1
        # print(
        #     start_diff, end_diff,
        #     inferred_start_index, inferred_end_index,
        #     dataset_start_index, dataset_end_index)
    _show_diff_counts('Start', start_diff_counts, settings)
    _show_diff_counts('End', end_diff_counts, settings)
    _plot_diff_counts(
        training_name, epoch_num, start_diff_counts, end_diff_counts, settings)
def _get_diff(inferred_index, dataset_index, sample_rate):
if inferred_index is None:
return None
else:
sample_count = inferred_index - dataset_index
return int(round(1000 * sample_count / sample_rate))
def _show_diff_counts(name, counts, settings):
diffs = sorted(counts.keys())
# Calculate error mean and standard deviation, excluding outliers.
diff_sum = 0
diff_sum_2 = 0
inlier_count = 0
outlier_count = 0
for diff in diffs:
count = counts[diff]
if diff <= settings.max_evaluation_inlier_diff:
diff_sum += count * diff
diff_sum_2 += count * diff * diff
inlier_count += count
else:
outlier_count += count
diff_mean = diff_sum / inlier_count
diff_std = math.sqrt(diff_sum_2 / inlier_count - diff_mean * diff_mean)
print(f'{name} {inlier_count} {diff_mean} {diff_std} {outlier_count}')
def _plot_diff_counts(
        training_name, epoch_num, start_diff_counts, end_diff_counts,
        settings):
    """
    Write a one-page PDF with bar charts of call start and call end
    error histograms for the given training epoch.
    """
    file_path = annotator_utils.get_evaluation_plot_file_path(
        training_name, epoch_num)
    with PdfPages(file_path) as pdf:
        # Two stacked axes: start errors on top, end errors below.
        _, (start_axes, end_axes) = plt.subplots(2)
        title = f'{training_name} Epoch {epoch_num} Call Start Errors'
        _plot_diff_counts_aux(start_axes, title, start_diff_counts, settings)
        title = f'{training_name} Epoch {epoch_num} Call End Errors'
        _plot_diff_counts_aux(end_axes, title, end_diff_counts, settings)
        plt.tight_layout()
        pdf.savefig()
        plt.close()
def _plot_diff_counts_aux(axes, title, counts, settings):
    """
    Bar-plot the fraction of examples at each error value (ms) onto
    *axes*, over the inlier range [-limit, limit].

    NOTE(review): indexing `counts[d]` for every d in the range assumes
    *counts* is a defaultdict(int) (as built by evaluate_annotator);
    a plain dict would raise KeyError for missing diffs.
    """
    limit = settings.max_evaluation_inlier_diff
    x = np.arange(-limit, limit + 1)
    total_count = sum(counts.values())
    y = np.array([counts[d] for d in x]) / total_count
    axes.bar(x, y)
    axes.set_title(title)
    axes.set_xlabel('diff (ms)')
    axes.set_ylabel('fraction')
def show_model_summary(training_name, epoch_num):
    """Load the model saved at *epoch_num* of training run
    *training_name* and print its Keras layer summary."""
    saved_model_path = annotator_utils.get_tensorflow_saved_model_dir_path(
        training_name, epoch_num)
    tf.keras.models.load_model(saved_model_path).summary()
def test_get_spectrogram_percentiles():
    """Manual check of dataset_utils._get_spectrogram_percentiles:
    prints the percentiles of a small hand-built gram (expected values
    are in the inline comments)."""
    # For convenience of specification, here first dimension is frequency,
    # second is time. This tensor is transposed below, though, preceding
    # the call to `_get_spectrogram_percentiles`.
    gram = tf.constant([
        [1.1, 0, 0, 89.9], # 0, 0, 1, 90
        [80, 60, 40, 20], # 20, 40, 60, 80
        [40, 80, 130, -10] # 0, 40, 80, 120
    ])
    print('gram:')
    print(gram)
    # Transpose gram so it's a sequence of spectra (i.e. so that first
    # dimension is time and second is frequency), as expected by
    # `_get_spectrogram_percentiles`.
    gram = tf.transpose(gram)
    ranks = tf.constant([25, 50, 75, 100])
    percentiles = dataset_utils._get_spectrogram_percentiles(gram, ranks)
    print('gram percentiles:')
    print(percentiles)
def test_create_waveform_dataset_from_tensors():
    """Manual check: build a waveform dataset from two in-memory arrays
    and print the elements it yields."""
    waveforms = [
        np.array([0, 16384]),
        np.array([0, 16384, 32768])]
    dataset = dataset_utils.create_waveform_dataset_from_tensors(waveforms)
    for waveform in dataset:
        print(waveform)
def test_create_waveform_dataset_from_tfrecord_files(dataset_name, settings):
    """Manual check: load the named tfrecord waveform dataset and print
    timing plus call start/end/duration statistics."""
    dir_path = annotator_utils.get_dataset_dir_path(
        settings.clip_type, dataset_name)
    dataset = dataset_utils.create_waveform_dataset_from_tfrecord_files(
        dir_path)
    show_waveform_dataset_stats(dataset, settings.waveform_sample_rate)
def show_waveform_dataset_stats(dataset, sample_rate):
    """
    Iterate up to 10000 dataset examples and print the generation rate
    and the observed ranges (ms) of call start time, end time and
    duration.
    """
    example_count = 10000
    dataset = dataset.take(example_count)
    # Sentinel extremes; assumes all times are below 1,000,000 ms.
    min_start_time = 1000000
    max_start_time = 0
    min_end_time = 1000000
    max_end_time = 0
    min_duration = 1000000
    max_duration = 0
    start_time = time.time()
    for _, clip_start_index, clip_end_index, call_start_index, \
            call_end_index, clip_id in dataset:
        clip_start_index = clip_start_index.numpy()
        clip_end_index = clip_end_index.numpy()
        call_start_index = call_start_index.numpy()
        call_end_index = call_end_index.numpy()
        clip_id = clip_id.numpy()
        # Convert sample indices to integer milliseconds.
        call_start_time = int(round(1000 * call_start_index / sample_rate))
        min_start_time = min(min_start_time, call_start_time)
        max_start_time = max(max_start_time, call_start_time)
        call_end_time = int(round(1000 * call_end_index / sample_rate))
        min_end_time = min(min_end_time, call_end_time)
        max_end_time = max(max_end_time, call_end_time)
        call_duration = call_end_time - call_start_time
        min_duration = min(min_duration, call_duration)
        max_duration = max(max_duration, call_duration)
        # print(
        #     clip_id, len(waveform), clip_start_index, clip_end_index,
        #     call_start_index, call_end_index, call_start_time, call_end_time,
        #     call_duration)
    end_time = time.time()
    delta_time = end_time - start_time
    rate = example_count / delta_time
    print(
        f'Generated {example_count} examples in {delta_time} seconds, '
        f'a rate of {rate} examples per second.')
    print(f'call start time range ({min_start_time}, {max_start_time})')
    print(f'call end time range ({min_end_time}, {max_end_time})')
    print(f'call duration range ({min_duration}, {max_duration})')
def test_create_training_dataset(dataset_name, settings):
    """Manual check: build the named training dataset and print its
    generation rate and positive-example fraction."""
    dir_path = annotator_utils.get_dataset_dir_path(
        settings.clip_type, dataset_name)
    dataset = dataset_utils.create_training_dataset(dir_path, settings)
    show_training_dataset_stats(dataset)
def show_training_dataset_stats(dataset):
    """
    Iterate up to 10000 (gram, label) training examples and print the
    generation rate and the percentage of positive (label == 1)
    examples.
    """
    example_count = 10000
    dataset = dataset.take(example_count)
    start_time = time.time()
    positive_count = 0
    for _, label in dataset:
        # print(f'gram {gram.shape} {label}')
        if label == 1:
            positive_count += 1
    end_time = time.time()
    delta_time = end_time - start_time
    rate = example_count / delta_time
    print(
        f'Generated {example_count} examples in {delta_time} seconds, '
        f'a rate of {rate} examples per second.')
    percent = 100 * positive_count / example_count
    print(f'{positive_count} examples, or {percent} percent, were positives.')
def test_create_inference_dataset(settings):
    """Manual check: for random waveforms, verify that each batch of
    backward slices is the reversal of the forward slices."""
    waveform_durations = [.5, .6]
    sample_rate = settings.waveform_sample_rate
    waveforms = [
        _create_random_waveform(d, sample_rate)
        for d in waveform_durations
    ]
    dataset = dataset_utils.create_waveform_dataset_from_tensors(waveforms)
    dataset = dataset_utils.create_inference_dataset(dataset, settings)
    for forward_slices, backward_slices in dataset:
        slice_count = forward_slices.shape[0]
        assert(backward_slices.shape[0] == slice_count)
        # Slice i of the forward batch should mirror slice
        # slice_count - 1 - i of the backward batch.
        for i in range(slice_count):
            forward_slice = forward_slices[i]
            backward_slice = backward_slices[slice_count - 1 - i]
            _compare_tensors(forward_slice, backward_slice)
def _compare_tensors(x, y):
    """
    Checks that tensor x is the same as tensor y but with the first axis
    reversed.

    Raises AssertionError if the tensors differ.
    """
    assert(tf.reduce_all(x == tf.reverse(y, (0,))))
def _create_random_waveform(duration, sample_rate):
length = int(round(duration * sample_rate))
return np.random.randint(-32768, 32768, length)
def show_dataset_sizes(settings):
    """
    Print the number of records in every *.tfrecords file of the
    'Training' and 'Validation' datasets, plus per-dataset totals.
    """
    # Imported locally since this utility is only run on demand.
    from tensorflow.data import TFRecordDataset
    for dataset_name in ('Training', 'Validation'):
        total_size = 0
        print(f'Sizes of files in dataset "{dataset_name}":')
        dir_path = annotator_utils.get_dataset_dir_path(
            settings.clip_type, dataset_name)
        file_paths = sorted(dir_path.glob('*.tfrecords'))
        for file_path in file_paths:
            dataset = TFRecordDataset([str(file_path)])
            # Counting requires a full pass; tfrecord files store no
            # record count.
            size = 0
            for _ in dataset:
                size += 1
            print(f'  {file_path.name}: {size}')
            total_size += size
        print(f'Total size of dataset "{dataset_name}": {total_size}')
if __name__ == '__main__':
main()
| HaroldMills/Vesper | vesper/mpg_ranch/nfc_bounding_interval_annotator_1_0/train_bounding_interval_annotator.py | train_bounding_interval_annotator.py | py | 16,756 | python | en | code | 47 | github-code | 6 | [
{
"api_name": "vesper.util.settings.Settings",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "vesper.mpg_ranch.nfc_bounding_interval_annotator_1_0.annotator_utils.create_training_name",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "vesper.mpg_ranch.nfc_bou... |
16205876862 | from typing import Any
from xdsl.dialects import scf
from xdsl.interpreter import (
Interpreter,
InterpreterFunctions,
PythonValues,
ReturnedValues,
impl,
impl_terminator,
register_impls,
)
@register_impls
class ScfFunctions(InterpreterFunctions):
    """Interpreter implementations for the scf (structured control flow)
    dialect: scf.if, scf.for, and the scf.yield terminator."""

    @impl(scf.If)
    def run_if(self, interpreter: Interpreter, op: scf.If, args: tuple[Any, ...]):
        # The single operand is the branch condition.
        (cond,) = args
        region = op.true_region if cond else op.false_region
        results = interpreter.run_ssacfg_region(region, ())
        return results

    @impl(scf.For)
    def run_for(
        self, interpreter: Interpreter, op: scf.For, args: PythonValues
    ) -> PythonValues:
        # Operands are (lower bound, upper bound, step, *iteration args);
        # each loop iteration feeds its results back in as the next
        # iteration's args.
        lb, ub, step, *loop_args = args
        loop_args = tuple(loop_args)
        for i in range(lb, ub, step):
            loop_args = interpreter.run_ssacfg_region(
                op.body, (i, *loop_args), "for_loop"
            )
        return loop_args

    # NOTE(review): named run_br but registered for scf.Yield — the name
    # looks copied from a branch handler; consider renaming.
    @impl_terminator(scf.Yield)
    def run_br(self, interpreter: Interpreter, op: scf.Yield, args: tuple[Any, ...]):
        return ReturnedValues(args), ()
| xdslproject/xdsl | xdsl/interpreters/scf.py | scf.py | py | 1,102 | python | en | code | 133 | github-code | 6 | [
{
"api_name": "xdsl.interpreter.InterpreterFunctions",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "xdsl.interpreter.Interpreter",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "xdsl.dialects.scf.If",
"line_number": 18,
"usage_type": "attribute"
... |
19638669363 | import matplotlib.pylab as plt
#import cv2
import numpy as np
import scipy as sp
from scipy.fftpack import fft, fftfreq, ifft, fft2, ifft2, fftshift, ifftshift
arbol=plt.imread("arbol.png")
#plt.imshow(arbol)
#transformada
base,altura=np.shape(arbol)
trans = fft2(arbol)
shi=fftshift(trans)
grashi=np.abs(shi)
fgraf=np.log(grashi)
#grafica de la transformada se uso logaritmo para que se note mas
plt.figure()
plt.imshow(abs(fgraf), cmap='gray')
plt.title("Transformada de Fourier")
plt.savefig("quijanoSantiago_FT2D.pdf")
#filtrarla, informacion sale de aprenderpython.net/transformada-de-fourier/
trans2 = fft2(arbol)
shi2=fftshift(trans2)
def borrar(shi2, abj, arr, izq, der):
    """Zero, in place, the entries of the 2-D array *shi2* with row index
    strictly between abj and arr and column index strictly between izq
    and der, and return the array."""
    shi2[abj + 1:arr, izq + 1:der] = 0
    return shi2
def salvar(shi2, abj, arr, izq, der):
    """Keep only the entries of the 2-D array *shi2* with row index
    strictly between abj and arr and column index strictly between izq
    and der; zero everything else in place and return the array."""
    rows = np.arange(np.shape(shi2)[0])
    cols = np.arange(np.shape(shi2)[1])
    keep = (((rows > abj) & (rows < arr))[:, None]
            & ((cols > izq) & (cols < der))[None, :])
    shi2[~keep] = 0
    return shi2
#shi3=salvar(shi2,0,256,120,136)
shi4=borrar(shi2,117,120,103,106)
shi5=borrar(shi4,136,139,151,154)
shi6=borrar(shi5,62,65,62,65)
shi7=borrar(shi6,191,194,191,194)
filGra=np.abs(shi7)
graficarFil=np.log(filGra)
filtra=ifftshift(shi7)
invX2=ifft2(filtra)
#
#f2=fftshift(filtr[0])
#graf2=np.log(np.abs(f2))
plt.figure()
plt.title("Transformada filtrada")
plt.imshow(graficarFil, cmap='gray')
plt.ylabel("frecuencia")
plt.xlabel(" ")
plt.colorbar()
plt.savefig("quijanoSantiago_FT2D_filtrada.pdf")
plt.figure()
plt.title("Imagen despues de filtro")
plt.imshow(abs(invX2))
plt.savefig("quijanoSantiago_Imagen_Filtrada.pdf")
#plt.show()
#######
| saquijano/quijanoSantiagoHW3 | Fourier2D.py | Fourier2D.py | py | 1,691 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pylab.imread",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pylab",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "numpy.shape",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "scipy.fftpack.fft2... |
72078430587 | # %%
from datetime import date
import requests
from json import dump, load
# %%
class Search:
    """Client for a SearXNG-style /search endpoint.

    Fetches results for *keyword* and keeps both the raw JSON response
    (``fullResult``) and a trimmed summary (``result``).
    """

    def __init__(self, keyword, url="http://localhost:8080/search", getResult=True):
        self.keyword = keyword
        self.url = url
        # Caps applied by extractRelevant().
        self.resultMax = 2
        self.infoBoxMax = 1
        # Fix: initialize so attribute access is safe when getResult=False.
        self.fullResult = None
        self.result = None
        if getResult:
            self.fullResult = self._search()
            self.result = self.extractRelevant()

    def html(self):
        """POST the query and return the raw (HTML) response object."""
        params = {'q': self.keyword}
        res = requests.post(self.url, params)
        return res

    def test_search(self):
        """POST the query with format=json and return the raw response."""
        params = {'q': self.keyword, 'format': 'json'}
        res = requests.post(self.url, params)
        return res

    def _search(self):
        """POST the query with format=json and return the parsed JSON."""
        params = {'q': self.keyword, 'format': 'json'}
        res = requests.post(self.url, params)
        return res.json()

    def refresh(self):
        """Re-run the search and replace the cached full result."""
        self.fullResult = self._search()

    def extractRelevant(self):
        """Return extract() trimmed to at most resultMax results and
        infoBoxMax infoboxes, fetching the search first if needed."""
        if not self.fullResult:
            self.refresh()
        res = self.extract()
        # Fix: honor the configured caps (previously hard-coded to 2 and
        # 1, silently ignoring resultMax/infoBoxMax).
        if len(res['results']) > self.resultMax:
            res['results'] = res['results'][:self.resultMax]
        if len(res['infoboxes']) > self.infoBoxMax:
            res['infoboxes'] = res['infoboxes'][:self.infoBoxMax]
        return res

    def extractResult(self, res):
        """Keep only the interesting keys of one search result."""
        keys = ['url', 'title', 'content', 'category']
        return {k: res[k] for k in keys if k in res.keys()}

    def extactInfoBox(self, infoBox):
        """Keep only the interesting keys of one infobox.

        (Name keeps the original's typo for backward compatibility.)
        """
        keys = ['infoBox', 'id', 'content']
        return {k: infoBox[k] for k in keys if k in infoBox.keys()}

    def extract(self):
        """Return the trimmed-field view of the cached full result."""
        results = [self.extractResult(result)
                   for result in self.fullResult['results']]
        answers = self.fullResult['answers']
        infoboxes = [self.extactInfoBox(info)
                     for info in self.fullResult['infoboxes']]
        suggestions = self.fullResult['suggestions']
        return {'results': results, 'answers': answers, 'infoboxes': infoboxes, 'suggestions': suggestions}

    def log(self, result=True, fullResult=False, fileName="searchLog.json"):
        """Append the trimmed and/or full results to *fileName*.

        Note: %H:%M:%S always renders 00:00:00 because date.today()
        carries no time component.
        """
        with open(fileName, 'a') as f:
            if result or fullResult:
                f.write(date.today().strftime("%d/%m/%Y %H:%M:%S") + "\n \n")
            if result:
                f.write("\n \nExtracted Results\n")
                dump(self.result, f, indent=4)
            if fullResult:
                f.write("\n \nFull Results\n")
                # Bug fix: previously dumped self.result here too.
                dump(self.fullResult, f, indent=4)
def writeToFile(content, fileName="sampleSearch.json"):
    """Pretty-print *content* as JSON into *fileName*, overwriting it."""
    with open(fileName, 'w') as outfile:
        dump(content, outfile, indent=4)
def read(fileName="sampleSearch.json"):
    """Parse and return the JSON document stored in *fileName*."""
    with open(fileName, "r") as infile:
        return load(infile)
| arromaljj/FinalProjectArromal | backend/backend_core/search.py | search.py | py | 2,723 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.post",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"... |
24680896779 | from fastapi import APIRouter, Depends, HTTPException, status, Request
from typing import Union
import requests
from db.models import ENV, Session, get_db
from db import schemas, crud
from dependencies import utils
import json
router = APIRouter(prefix="/auth")
@router.post("/login", response_model=schemas.TokenBase)
async def login_user(user_data: schemas.UserLogin, db: Session = Depends(get_db)):
    """Proxy the login credentials to the auth service and return its
    token payload; any failure maps to HTTP 401."""
    try:
        data = {
            "username": user_data.username,
            "password": user_data.password
        }
        data = json.dumps(data, indent=4)
        headers = {"Content-Type": "application/json"}
        proxies = {"http": ENV['URL_AUTH']}
        request_session = requests.post(ENV['URL_AUTH']+"/auth/login", data=data, headers=headers, proxies=proxies)
        response_token = request_session.json()
        return response_token
        # return schemas.TokenBase(
        #     access_token = response["access_token"],
        #     token_type = response["token_type"]
        # )
    except Exception:
        # Fix: narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect username or password",
            headers={"WWW-Authenticate": "Bearer"},
        )
@router.get("/verify")
async def verify_auth(request: Request):
    """Forward the bearer token to the auth service and return its
    payload; raises HTTP 401 when the token is rejected or the auth
    service is unreachable."""
    print("ENTRANDO A VERIFY BACKEND")
    token = request.headers.get('Authorization')
    headers = {"Content-Type": "application/json", "Authorization": token}
    proxies = {"http": ENV['URL_AUTH']}
    try:
        request_session = requests.get(ENV['URL_AUTH']+"/auth/verify", headers=headers, proxies=proxies)
        response = request_session.json()
    except Exception:
        # Fix: narrowed from a bare `except:`; transport/parse failures
        # map to 401.
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect token",
            headers={"WWW-Authenticate": "Bearer"},
        )
    print(response)
    if response != {"detail": "Could not validate credentials"}:
        return response
    # Bug fix: previously this exception was caught by a bare `except:`
    # and *returned*, producing an HTTP 200 response for a bad token.
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Incorrect token",
        headers={"WWW-Authenticate": "Bearer"},
    )
| CosasU-Edipizarro/iic2173-2022-1 | backend/routers/auth.py | auth.py | py | 2,205 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "db.schemas.UserLogin",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "db.schemas",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "db.models.Sessi... |
71879426428 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 4 13:55:34 2020
@author: Kangqi Fu
"""
from numpy import loadtxt, reshape
from pylab import ioff
import matplotlib.pyplot as plt
from glob import glob
import os
# Batch-plot every ./output/Solution*.dat file as a filled contour PNG.
# Each .dat file starts with a 5-line "key: value" header (grid sizes,
# ghost cell count, time, CFL) followed by x, y, u columns.
ioff()
fileNames = glob("./output/Solution*.dat")
fileNames.sort()
for fileName in fileNames:
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Parse the 5 header lines; each is "name: value".
    f = open(fileName, "r")
    xCells = int(f.readline().split(":")[1])
    yCells = int(f.readline().split(":")[1])
    numGhostCells = int(f.readline().split(":")[1])
    time = float(f.readline().split(":")[1])
    cfl = float(f.readline().split(":")[1])
    f.close()
    # Load the data columns and reshape onto the (cells + ghost) grid.
    x, y, u = loadtxt(fileName, skiprows = 5, unpack=True)
    x = reshape(x, (xCells + 2 * numGhostCells, yCells + 2 * numGhostCells))
    y = reshape(y, (xCells + 2 * numGhostCells, yCells + 2 * numGhostCells))
    u = reshape(u, (xCells + 2 * numGhostCells, yCells + 2 * numGhostCells))
    #ax.set_xlim(-1.5, 1.5)
    #ax.set_ylim(-0.1, 1.1)
    plt.contourf(x, y, u, 100, cmap='jet')
    #plt.contourf(x, y, u,100, cmap='ocean_r')
    plt.colorbar()
    ax.set_title("CFL = %5.2f"%cfl + ", Times = %5.3f"%time)
    fig.savefig(fileName.replace(".dat", ".png"))
# NOTE(review): raises IndexError when no solution files exist, and
# builds a shell command by concatenation — confirm inputs are trusted.
os.system("eog " + fileNames[0].replace(".dat",".png"))
| KennyKangMPC/CS-759 | final_project/scalarAdvection2D/plotAdv.py | plotAdv.py | py | 1,290 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "pylab.ioff",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
70283377149 | import streamlit as st
from collections import Counter
import nltk
from nltk.corpus import stopwords
import torch
from datasets import load_dataset
import time
import sys, os
import logging
from transformers import AutoTokenizer, AutoModel
#custom packages
sys.path.insert(1, os.getcwd())
from src import constant as my_constant
from src import my_utils as my_utils
from src import searcher as my_searcher
st.set_page_config(layout="wide")
st.title('Demo of Semantic Search on United Nations Administrative Instructions (AIs)')
st.markdown(f"{my_constant.open_i}- Data: web scraped from UN Policy portal: https://policy.un.org/browse-by-source/30776{my_constant.close_i}")
st.markdown(f"{my_constant.open_i}- Technology used: Sentence transformer model, FAISS (Facebook AI Similarity Search), YAKE (unsupervised model), Huggingface arrow dataset, and Selenium (dynamic web page scraping){my_constant.close_i}")
#get configuration
cfg = my_utils.get_configuration()
search_cfg=cfg[my_constant.search_setting]
log_dir = cfg.get('log_dir')
#search config
search_cfg = cfg[my_constant.search_setting]
max_len = search_cfg.get(my_constant.max_doc_len) if search_cfg.get(my_constant.max_doc_len) else 800
#config device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@st.cache_resource
def load_data_model():
    """Load and cache (via Streamlit) everything the search needs: the
    FAISS-indexed dataset, stop words, and the sentence transformer
    model/tokenizer.

    NOTE(review): on any failure this logs the error and implicitly
    returns None, which callers must handle — confirm that is intended.
    """
    try:
        # Load the pre-embedded dataset from disk.
        search_ds_path = os.path.join(os.getcwd(), "data")
        search_dataset = load_dataset('parquet', data_files=os.path.join(search_ds_path, 'embed_dataset.parquet'), split="train")
        if search_dataset is None:
            st.write("Ops sorry! failed to load data")
            raise Exception("Failed to load dataset!!")
        # Add a FAISS similarity index over the embedding column.
        search_dataset.add_faiss_index(column=my_constant.embeddings)
        nltk.download('stopwords')
        time.sleep(.1)
        # English stop words, extended with any configured extras.
        stop_words = stopwords.words('english')
        st_wd = search_cfg.get(my_constant.stop_words)
        if st_wd:
            stop_words = stop_words + [str(s).strip().lower() for s in st_wd.split(my_constant.comma) if s]
        # Load the sentence transformer tokenizer and model.
        model_ckpt = "sentence-transformers/multi-qa-mpnet-base-dot-v1"
        sentence_tokenizer = AutoTokenizer.from_pretrained(model_ckpt, force_download=True )
        sentence_model = AutoModel.from_pretrained(model_ckpt)
        if sentence_model is None:
            st.write(my_constant.abort_msg )
            raise Exception(f'failed to load model')
        return {
            'search_dataset': search_dataset,
            'stop_words': Counter(stop_words),
            'sentence_tokenizer': sentence_tokenizer,
            'sentence_model': sentence_model,
            'device': device
        }
    except Exception as e:
        logging.error(f'Home.load_data_model: {str(e)}')
searcher_dict = load_data_model()
try:
with st.form('Search'):
search_for = st.text_input('Search for:')
num_recs = st.slider('Show only Top: ', min_value=1, max_value=50, value=20)
submit = st.form_submit_button('Search')
if submit:#run the search
results, time_tkn = my_searcher.search_for_documents(search_for,
searcher_dict,
k=num_recs)
st.markdown(f"{my_constant.open_i}Search took:{time_tkn}.{my_constant.close_i}")
if len(results) > 0:
my_searcher.print_streamlit_results(results)
else:
st.markdown(f'{my_constant.opening_tag}No documents found with specified critera.{my_constant.closing_tag}')
st.markdown(f"{my_constant.open_i}{my_constant.score_defn}{my_constant.close_i}")
except Exception as e:
logging.error(f'{str(e)}') | geraldlab/semantic_search | Search.py | Search.py | py | 3,971 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.insert",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "streamlit.set_page_config",
... |
20331901079 | """
Assignment 2
Csoport: 524/2
Név: Velican László
Azonosító: vlim2099
Segéd függvények amelyek meghívódnak a szerverben/kliensben vagy máashol
"""
import sympy
import random
#Generál egy kártya paklit két jokerrel amik még egyáltalán nincsenek összekeverve
def generateDeck ():
deck = [];
for i in range(1,55):
deck.append(i);
return deck
#összekever egy paraméterként kapott kártya paklit
def shuffleDeck (deck):
n = len(deck)
for i in range(n-1,0,-1):
j = random.randint(0,i+1)
deck[i],deck[j] = deck[j],deck[i]
return deck
#generál egy seed-et a Blum-Blum-Shub függvényhez
def generateSeed ():
p=-1;
q=-1;
start = 999999999;
while (p==-1):
x = sympy.nextprime(start);
if(x % 4 == 3):
p=x;
start = x+1
while (q==-1):
x = sympy.nextprime(start);
if(x % 4 == 3):
q=x;
start = x+1
n=p*q
s = random.randint(1, n-1)
return s;
#beolvas a condig fileból nevet és kulcsot
def beolvasEncryptalas():
configFile = open("config", "r")
dataFromFile = configFile.read().splitlines()
if (dataFromFile[0]=="BlumBlumShub"):
dataFromFile[1] = int(dataFromFile[1])
else:
listaString = dataFromFile[1]
if (listaString[0]=='[' and listaString[len(listaString)-1]==']'):
listaString = listaString[1:-1]
deckLista = listaString.split(", ");
deckLista = [int(i) for i in deckLista]
dataFromFile[1] = deckLista;
return dataFromFile;
| Laccer01/Kriptografia | assign3/auxiliaryFunctions.py | auxiliaryFunctions.py | py | 1,590 | python | hu | code | 0 | github-code | 6 | [
{
"api_name": "random.randint",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sympy.nextprime",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sympy.nextprime",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "random.randint",
"... |
39007586537 | import os
import shutil
from rich.prompt import Prompt
from rich.table import Table
from create_folder import create_folder
def delete_user(console):
path = "./user-docs"
user_to_del = str()
user_path = str()
while True:
os.system('clear')
console.print(f"[red]So you want to delete a user? Does that make you feel powerful?[/red]\n")
# https://www.youtube.com/watch?v=m6xukx6hloE
users = sorted(os.listdir(path))
print_users_table(console, users, path)
selected_row = Prompt.ask(f"\nWhich unforunate soul do you wish to delete?\nEnter the [cyan]row #[/cyan] to "
f"be deleted")
if selected_row.isnumeric():
selected_row = int(selected_row)
if 0 < selected_row <= len(users):
user_to_del = users[selected_row - 1]
if "-deleted" not in user_to_del:
break
else:
prompt = Prompt.ask(f"\n[yellow]That user has already been deleted[/yellow]. Enter [cyan]any["
f"/cyan] key to try again, or [cyan]Q[/cyan] to quit to menu")
if prompt.lower() == "q":
return
else:
Prompt.ask(f"\n[yellow]{selected_row}[/yellow], is not a valid entry. Enter [cyan]any[/cyan] key to try "
f"again")
# create deleted_users folder if it doesn't already exist
deleted_path = "deleted-users"
name_test = os.path.exists(deleted_path)
if not name_test:
create_folder(console, deleted_path)
# create the user folder for the deleted user in the deleted_users folder
deleted_path = "deleted-users"
name_test = os.path.exists(deleted_path + "/" + user_to_del)
if not name_test:
create_folder(console, deleted_path + "/" + user_to_del)
# move the user files from user-docs to deleted-users
user_path = path + "/" + user_to_del
deleted_user_path = deleted_path + "/" + user_to_del
user_files = os.listdir(user_path)
for file in user_files:
shutil.move(user_path + "/" + file, deleted_user_path + "/" + file)
# rename deleted user folder in user-docs
shutil.move(user_path, user_path + "-deleted")
# print updated table
users = sorted(os.listdir(path))
os.system('clear')
print_users_table(console, users, path)
Prompt.ask(f"\nUser [yellow]{user_to_del}[/yellow] has been deleted. Enter [cyan]any[/cyan] key to return to menu")
def print_users_table(console, users, path):
table = Table(title=f"[cyan]All Users[/cyan]")
table.add_column("Row")
table.add_column("User Name")
table.add_column("# of Files")
for user in users:
user_path = path + "/" + user
table.add_row(str(users.index(user) + 1), user, str(len(os.listdir(user_path))))
console.print(table)
| mcsadri/automation | automation/delete_user.py | delete_user.py | py | 2,920 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.system",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "rich.prompt.Prompt.ask",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "rich.prompt.Prompt",
... |
26115632417 | import re
from collections import Counter
import configuration
def count_requests_with_server_error():
regex = re.compile(r'\d+\.\d+\.\d+\..+[A-Z]{3,4} .+HTTP.+" 5.. \d+.+$', re.MULTILINE)
with open(configuration.repo_root() + '/access.log', 'r') as file:
ip = [match.split()[0] for match in re.findall(regex, file.read())]
output = list(
zip(
Counter(ip).keys(),
Counter(ip).values()
)
)
output.sort(
key=lambda elem: elem[1],
reverse=True
)
output = [
{
'ip_address': ip_address,
'count': count
} for ip_address, count in output[:5]
]
configuration.report_result(
header='Clients with the highest amount of failed requests (code 5xx)',
output=output,
file_to_write='count_server_based_errors'
)
count_requests_with_server_error()
| sh4rkizz/2022-1-QAPYTHON-VK-A-Mahonin | homework5/py_scripts/clients_with_most_server_based_errors.py | clients_with_most_server_based_errors.py | py | 993 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.compile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.MULTILINE",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "configuration.repo_root",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.findall",
"l... |
70267230269 |
import os
import sys
import time
import config
import traceback
cur_dir = os.path.dirname(os.path.abspath(__file__))
#sys.path.append(os.path.join(cur_dir, "..", "epyk-ui"))
from epyk.core.js import Imports
from epyk.core.py import PyRest
PyRest.TMP_PATH = config.OUTPUT_TEMPS
Imports.STATIC_PATH = "./../../static"
# To reduce the scope of filters to generate
filter = None # 'sprint' #
category = None # 'slides' # 'angular, vue'
SUCCESS = 0
FAILURE = 0
def process_folder(folder, results, main_folder=None, out_path=config.OUTPUT_PATHS_LOCALS_HTML):
"""
:param folder:
:param main_folder:
:return:
"""
global SUCCESS, FAILURE
start, count_scripts, count_run_scripts = time.time(), 0, 0
if main_folder is not None:
if isinstance(main_folder, list):
script_path = os.path.join(cur_dir, os.path.join(*main_folder), folder)
main_folder = ".".join(main_folder)
else:
script_path = os.path.join(cur_dir, main_folder, folder)
else:
script_path = os.path.join(cur_dir, folder)
for file in os.listdir(script_path):
if file.endswith(".py") and file != "__init__.py":
count_scripts += 1
if filter is not None and not filter in file:
if main_folder is None:
continue
if main_folder is not None and not filter in folder:
continue
script_name = file[:-3]
try:
if main_folder is not None:
if main_folder == 'interactives':
config.OUT_FILENAME = script_name
else:
config.OUT_FILENAME = "%s_%s_%s" % (main_folder, folder, script_name)
mod = __import__("%s.%s.%s" % (main_folder, folder, script_name), fromlist=['object'])
else:
config.OUT_FILENAME = "%s_%s" % (folder, script_name)
mod = __import__("%s.%s" % (folder, script_name), fromlist=['object'])
output = mod.page.outs.html_file(path=out_path, name=config.OUT_FILENAME)
results.append(output)
#results.append("%s.html" % os.path.join(config.OUTPUT_PATHS_LOCALS_HTML, config.OUT_FILENAME))
count_run_scripts += 1
SUCCESS += 1
except Exception as err:
traceback.print_exception(*sys.exc_info())
print("Error with: %s" % file)
FAILURE =+ 1
if filter is None:
print("Processing %s (%s / %s reports) in %s seconds" % (folder, count_run_scripts, count_scripts, time.time() - start))
results = []
if category is None or category == 'locals':
for folder in os.listdir(os.path.join(cur_dir, 'locals')):
if folder == "webscrapping" and filter is None:
continue
if os.path.isdir(os.path.join(cur_dir, 'locals', folder)) and folder != '__pycache__':
process_folder(folder, results, main_folder='locals')
# Run other type of reports
for cat in ['dashboards', 'slides']:
if category is None or category == cat:
if filter is None:
print("")
print("processing - %s" % cat)
process_folder(cat, results, out_path=config.OUTPUT_PATHS_LOCALS_SLIDES if cat == 'slides' else config.OUTPUT_PATHS_LOCALS_HTML)
# Run other type of reports
for cat in ['websites']:
if category is None or category == cat:
if filter is None:
print("")
print("processing - %s" % cat)
for folder in os.listdir(os.path.join(cur_dir, 'websites', 'templates')):
if os.path.isdir(os.path.join(cur_dir, 'websites', 'templates', folder)) and folder != '__pycache__':
process_folder(folder, results, main_folder=['websites', 'templates'])
for cat in ['interactives']:
if category is None or category == cat:
if filter is None:
print("")
print("processing - %s" % cat)
process_folder("reports", results, main_folder=cat, out_path=config.OUTPUT_PATHS_LOCALS_INTERACTIVE)
if category in ['angular', 'vue']:
web_frameworks = {
'angular': {
'out_path': config.ANGULAR_APP_PATH,
'folder': 'src/app/apps',
'auto_route': True},
'vue': {
'out_path': config.VUE_APP_PATH,
'folder': 'src/views',
'auto_route': True},
'local': {
'out_path': config.OUTPUT_PATHS_LOCALS_TS,
'folder': category,
'auto_route': False},
}
for cat in ['angular']:
script_path = os.path.join("web", cat)
mod = __import__("web.%s.exports" % cat, fromlist=['object'])
#
if web_frameworks[category]['out_path'] is not None:
paths = web_frameworks[category]
else:
paths = web_frameworks['local']
for script in mod.REPORTS:
script_name = script[-1][:-3]
py_script = __import__("%s.%s" % (".".join(script[:-1]), script_name), fromlist=['object'])
py_script.page.outs.publish(server=category, app_path=paths['out_path'], selector=script_name,
target_folder=paths['folder'], auto_route=paths['auto_route'])
# if category is None or category == 'locals':
# process_folder('websites', results)
# process_folder('interactives', results)
# process_folder('dashboards', results)
# process_folder('web', results)
if filter is not None or category is not None:
if filter is None:
print("")
print("Reports location:")
for report in results:
print(report)
print("")
print("Success: %s" % SUCCESS)
print("failure: %s" % FAILURE) | epykure/epyk-templates | PacthRunner.py | PacthRunner.py | py | 5,254 | python | en | code | 17 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "epyk.core.py.PyRest.TMP_PAT... |
15056144032 | """Production settings and globals."""
import yaml
from os import environ
from os.path import dirname, join
from common import *
########## JSON CONFIGURATION
SERVICE_NAME = 'djangoapp'
CONFIG_ROOT = environ.get('CONFIG_ROOT', dirname(SITE_ROOT))
with open(join(CONFIG_ROOT, SERVICE_NAME) + ".auth.yaml") as auth_file:
AUTH_TOKENS = yaml.load(auth_file)
with open(join(CONFIG_ROOT, SERVICE_NAME) + ".env.yaml") as env_file:
ENV_TOKENS = yaml.load(env_file)
########## END JSON CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = ENV_TOKENS.get('EMAIL_HOST', None)
EMAIL_PORT = ENV_TOKENS.get('EMAIL_PORT', 587)
EMAIL_HOST_PASSWORD = AUTH_TOKENS.get('EMAIL_HOST_PASSWORD', None)
EMAIL_HOST_USER = AUTH_TOKENS.get('EMAIL_HOST_USER', None)
EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME
EMAIL_USE_TLS = True
SERVER_EMAIL = ENV_TOKENS.get('SERVER_EMAIL', 'counter@edunext.co')
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
DATABASES = AUTH_TOKENS['DATABASES']
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
CACHES = AUTH_TOKENS['CACHES']
########## END CACHE CONFIGURATION
########## CELERY CONFIGURATION
# See: http://docs.celeryproject.org/en/latest/configuration.html#broker-transport
# BROKER_TRANSPORT = 'amqplib'
# Set this number to the amount of allowed concurrent connections on your AMQP
# provider, divided by the amount of active workers you have.
# For example, if you have the 'Little Lemur' CloudAMQP plan (their free tier),
# they allow 3 concurrent connections. So if you run a single worker, you'd
# want this number to be 3. If you had 3 workers running, you'd lower this
# number to 1, since 3 workers each maintaining one open connection = 3
# connections total.
# See: http://docs.celeryproject.org/en/latest/configuration.html#broker-pool-limit
# BROKER_POOL_LIMIT = 3
# See: http://docs.celeryproject.org/en/latest/configuration.html#broker-connection-max-retries
# BROKER_CONNECTION_MAX_RETRIES = 0
# See: http://docs.celeryproject.org/en/latest/configuration.html#broker-url
# BROKER_URL = environ.get('RABBITMQ_URL') or environ.get('CLOUDAMQP_URL') # this should come from the auth.json
# See: http://docs.celeryproject.org/en/latest/configuration.html#celery-result-backend
# CELERY_RESULT_BACKEND = 'amqp'
########## END CELERY CONFIGURATION
########## STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
DEFAULT_FILE_STORAGE = AUTH_TOKENS.get('STATICFILES_STORAGE', 'storages.backends.s3boto.S3BotoStorage')
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
# AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = AUTH_TOKENS.get('AWS_ACCESS_KEY_ID', 'something')
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS.get('AWS_SECRET_ACCESS_KEY', 'secret')
AWS_STORAGE_BUCKET_NAME = AUTH_TOKENS.get('AWS_STORAGE_BUCKET_NAME', 'your_bucket')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIREY = 60 * 60 * 24 * 7
AWS_HEADERS = {
'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIREY,
AWS_EXPIREY
)
}
# Serving the files from S3 causes a No 'Access-Control-Allow-Origin' or problems with require and the /static/ path
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_ROOT = ENV_TOKENS.get('STATIC_ROOT', STATIC_ROOT)
########## END STORAGE CONFIGURATION
########## COMPRESSION CONFIGURATION
COMPRESS_ENABLED = False
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_OFFLINE
COMPRESS_OFFLINE = True
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_STORAGE
COMPRESS_STORAGE = DEFAULT_FILE_STORAGE
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_CSS_FILTERS
COMPRESS_CSS_FILTERS += [
'compressor.filters.cssmin.CSSMinFilter',
]
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_JS_FILTERS
COMPRESS_JS_FILTERS += [
'compressor.filters.jsmin.JSMinFilter',
]
########## END COMPRESSION CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = AUTH_TOKENS.get('SECRET_KEY', SECRET_KEY)
########## END SECRET CONFIGURATION
########## DOMAIN CONFIGURATION
ALLOWED_HOSTS = ENV_TOKENS.get('ALLOWED_HOSTS', ['*'])
########## END DOMAIN CONFIGURATION
| eduNEXT/django-example-app | app/settings/prod.py | prod.py | py | 4,929 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.environ.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_nu... |
34352982765 | import cv2
#load pre trained data
trained_face_data = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#choose image to detect face in
#img = cv2.imread('52-05.jpg')
#img = cv2.imread('img2p.jpg')
webcam = cv2.VideoCapture(1) #detect face in video
#key = cv2.waitKey(1)
#iterate over frames
while True:
successful_frame_read, frame = webcam.read() #get current frame
#make it grayscale
gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#cv2.waitKey(1)
#detect faces
face_coordinates = trained_face_data.detectMultiScale(gray_img)
# print(face_coordinates)
#draw rectangle around face
for (x, y, w, h) in face_coordinates:
cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 0), 3)
#show image
cv2.imshow('face detector', frame)
key = cv2.waitKey(1)
#stop if q is pressed
if key==81 or key ==113:
break
#release the videocapture object
webcam.release()
print('code completed') | mirethy/cl-python-opencv-facedetect | face.py | face.py | py | 959 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.CascadeClassifier",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY"... |
22656015465 | from model import common
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import functional as F
import numpy as np
def make_model(args, parent=False):
return RCGB(args)
class CGConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(CGConv2d, self).__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
# for convolutional layers with a kernel size of 1, just use traditional convolution
if kernel_size == 1 or True:
self.ind = True
else:
self.ind = False
self.oc = out_channels
self.ks = kernel_size
# the target spatial size of the pooling layer
ws = kernel_size
self.avg_pool = nn.AdaptiveAvgPool2d((ws,ws))
# the dimension of the latent repsentation
self.num_lat = int((kernel_size * kernel_size) / 2 + 1)
# the context encoding module
self.ce = nn.Linear(ws*ws, num_lat, False)
self.ce_bn = nn.BatchNorm1d(in_channels)
self.ci_bn2 = nn.BatchNorm1d(in_channels)
# activation function is relu
self.act = nn.ReLU(inplace=True)
# the number of groups in the channel interacting module
if in_channels // 16:
self.g = 16
else:
self.g = in_channels
# the channel interacting module
self.ci = nn.Linear(self.g, out_channels // (in_channels // self.g), bias=False)
self.ci_bn = nn.BatchNorm1d(out_channels)
# the gate decoding module
self.gd = nn.Linear(num_lat, kernel_size * kernel_size, False)
self.gd2 = nn.Linear(num_lat, kernel_size * kernel_size, False)
# used to prrepare the input feature map to patches
self.unfold = nn.Unfold(kernel_size, dilation, padding, stride)
# sigmoid function
self.sig = nn.Sigmoid()
def forward(self, x):
# for convolutional layers with a kernel size of 1, just use traditional convolution
if self.ind:
return F.conv2d(x, self.weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
else:
b, c, h, w = x.size()
weight = self.weight
# allocate glbal information
gl = self.avg_pool(x).view(b,c,-1)
# context-encoding module
out = self.ce(gl)
# use different bn for the following two branches
ce2 = out
out = self.ce_bn(out)
out = self.act(out)
# gate decoding branch 1
out = self.gd(out)
# channel interacting module
if self.g >3:
# grouped linear
oc = self.ci(self.act(self.ci_bn2(ce2).\
view(b, c//self.g, self.g, -1).transpose(2,3))).transpose(2,3).contiguous()
else:
# linear layer for resnet.conv1
oc = self.ci(self.act(self.ci_bn2(ce2).transpose(2,1))).transpose(2,1).contiguous()
oc = oc.view(b,self.oc,-1)
oc = self.ci_bn(oc)
oc = self.act(oc)
# gate decoding branch 2
oc = self.gd2(oc)
# produce gate
out = self.sig(out.view(b, 1, c, self.ks, self.ks) + oc.view(b, self.oc, 1, self.ks, self.ks))
# unfolding input feature map to patches
x_un = self.unfold(x)
b, _, l = x_un.size()
# gating
out = (out * weight.unsqueeze(0)).view(b, self.oc, -1)
# currently only handle square input and output
return torch.matmul(out,x_un).view(b, self.oc, int(np.sqrt(l)), int(np.sqrt(l)))
def gated_conv(in_channels, out_channels, kernel_size, bias=True):
return CGConv2d(in_channels, out_channels, kernel_size=kernel_size,
padding=(kernel_size//2), stride=1, bias=bias)
## Residual Channel Attention Block (RCAB)
class RCAB(nn.Module):
def __init__(
self, conv, n_feat, kernel_size, reduction,
bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
super(RCAB, self).__init__()
modules_body = []
for i in range(2):
modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))
if bn: modules_body.append(nn.BatchNorm2d(n_feat))
if i == 0: modules_body.append(act)
# Adding Context Gated Convolution instead of Channel Attention layer from RCAN
modules_body.append(gated_conv(n_feat, n_feat, kernel_size, bias))
self.body = nn.Sequential(*modules_body)
self.res_scale = res_scale
def forward(self, x):
res = self.body(x)
#res = self.body(x).mul(self.res_scale)
res += x
return res
## Residual Group (RG)
class ResidualGroup(nn.Module):
def __init__(self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks):
super(ResidualGroup, self).__init__()
modules_body = []
modules_body = [
RCAB(
conv, n_feat, kernel_size, reduction, bias=True, bn=False, act=nn.ReLU(True), res_scale=1) \
for _ in range(n_resblocks)]
modules_body.append(conv(n_feat, n_feat, kernel_size))
self.body = nn.Sequential(*modules_body)
def forward(self, x):
res = self.body(x)
res += x
return res
## Residual Channel Attention Network (RCAN)
class RCGB(nn.Module):
def __init__(self, args, conv=common.default_conv):
super(RCGB, self).__init__()
n_resgroups = args.n_resgroups
n_resblocks = args.n_resblocks
n_feats = args.n_feats
kernel_size = 3
reduction = args.reduction
scale = args.scale[0]
act = nn.ReLU(True)
# RGB mean for DIV2K
self.sub_mean = common.MeanShift(args.rgb_range)
# define head module
modules_head = [conv(args.n_colors, n_feats, kernel_size)]
# define body module
modules_body = [
ResidualGroup(
conv, n_feats, kernel_size, reduction, act=act, res_scale=args.res_scale, n_resblocks=n_resblocks) \
for _ in range(n_resgroups)]
modules_body.append(conv(n_feats, n_feats, kernel_size))
# define tail module
modules_tail = [
common.Upsampler(conv, scale, n_feats, act=False),
conv(n_feats, args.n_colors, kernel_size)]
self.add_mean = common.MeanShift(args.rgb_range, sign=1)
self.head = nn.Sequential(*modules_head)
self.body = nn.Sequential(*modules_body)
self.tail = nn.Sequential(*modules_tail)
def forward(self, x):
x = self.sub_mean(x)
x = self.head(x)
res = self.body(x)
res += x
x = self.tail(res)
x = self.add_mean(x)
return x
def load_state_dict(self, state_dict, strict=False):
own_state = self.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, nn.Parameter):
param = param.data
try:
own_state[name].copy_(param)
except Exception:
if name.find('tail') >= 0:
print('Replace pre-trained upsampler to new one...')
else:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
elif strict:
if name.find('tail') == -1:
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
if strict:
missing = set(own_state.keys()) - set(state_dict.keys())
if len(missing) > 0:
raise KeyError('missing keys in state_dict: "{}"'.format(missing))
| akashpalrecha/superres-deformable | src/model/cgc_rcan.py | cgc_rcan.py | py | 8,589 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torch.nn.Conv2d",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.AdaptiveAvgPool2d",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn",
... |
17962113365 | import sqlite3 as sql
from datetime import date
from model.classes import User, Country, Tasting
def initCon() -> sql:
"""
Initialize connection
:return connection:
"""
return sql.connect('../coffeeDB.db')
def createCursor(con: sql.Connection) -> sql.Cursor:
"""
Creates cursor
:param con:
:return cursor:
"""
return con.cursor()
class Insert:
"""Insert data into DB"""
def __init__(self):
self.__con = initCon()
self.__cursor = createCursor(self.__con)
def getCon(self) -> sql.Connection:
return self.__con
def getCursor(self) -> sql.Cursor:
return self.__cursor
def insertCountry(self, countryName) -> bool:
pass
def addUser(self, email: str, password: str, firstName: str, lastName: str, countryID: int) -> bool:
"""
Adds user to DB
Checks if inputed data is valid through User class
Checks if email has already been registered
:param email:
:param password:
:param firstName:
:param lastName:
:param countryID:
:return:
"""
ret = Retrieve()
email = email.lower() # Email should be lowercase
if ret.registeredEmail(email):
raise ValueError("A user with this email has already been registered")
User(0, email, password, firstName, lastName, countryID)
cursor = self.getCursor()
try:
cursor.execute(
"""
INSERT INTO User (email, password, firstName, surname, countryID)
VALUES (?, ?, ?, ?, ?)
""", (email, password, firstName, lastName, countryID)
)
self.getCon().commit()
return True
except Exception as e:
return False
def addTasting(self, tasteNotes: str, points: int, userID: int, roastedCoffeeID: int) -> bool:
"""
Adds a tasting created by the user
:param tasteNotes:
:param points:
:param tastingDate:
:param userID:
:param roastedCoffeeID:
:return:
"""
tastingDate = date.today()
Tasting(0, tasteNotes, points, tastingDate, userID, roastedCoffeeID) # Checks if inputed data is valid
cursor = self.getCursor()
try:
cursor.execute(
"""
INSERT INTO Tasting (tasteNotes, points, tastingDate, userID, roastedCoffeeID)
VALUES (?, ?, ?, ?, ?)
""", (tasteNotes, points, tastingDate, userID, roastedCoffeeID)
)
self.getCon().commit()
return True
except Exception as e:
return False
class Retrieve:
"""Retrieve data from DB"""
def __init__(self):
self.__con = initCon()
self.__cursor = createCursor(self.__con)
def getCon(self) -> sql.Connection:
return self.__con
def getCursor(self) -> sql.Cursor:
return self.__cursor
def getUsers(self) -> list[User]:
"""
Retrieve all data from DB
:return userList:
"""
userList = []
cursor = self.getCursor()
for row in cursor.execute("SELECT * FROM User"):
userID, email, password, firstName, surname, countryID = row
userList.append(User(userID, email, password, firstName, surname, countryID))
self.getCon().commit()
return userList
def getCountries(self) -> list[Country]:
"""
Gets all countries
:return countryList:
"""
countryList = []
cursor = self.getCursor()
for row in cursor.execute("SELECT * FROM Country"):
countryID, name = row
countryList.append(Country(countryID, name))
self.getCon().commit()
return countryList
def getRoastedCoffees(self) -> list[dict]:
"""
Gets all roasted coffees added to the database
:return:
"""
roastedCoffeeList = []
cursor = self.getCursor()
query = """
SELECT RoastedCoffee.roastedCoffeeID, RoastedCoffee.name, CoffeeRoastery.name FROM RoastedCoffee
INNER JOIN CoffeeRoastery on RoastedCoffee.roastaryID
WHERE RoastedCoffee.roastaryID == CoffeeRoastery.roastaryID
"""
for row in cursor.execute(query):
roastedCoffeeID, coffeeName, roasteryName = row
result = {
"roastedCoffeeID": roastedCoffeeID,
"coffeeName": coffeeName,
"roasteryName": roasteryName
}
roastedCoffeeList.append(result)
self.getCon().commit()
return roastedCoffeeList
def getCoffeeByDescription(self, search: str) -> list[dict]:
"""
Returns all rows have a description or tastenote with the searchword that matches
:param search:
:return:
"""
cursor = self.getCursor()
result = []
for row in cursor.execute(
"""
select distinct CoffeeRoastery.name, RoastedCoffee.name from Tasting
inner join RoastedCoffee on Tasting.roastedCoffeeID
inner join CoffeeRoastery on RoastedCoffee.roastaryID
where Tasting.roastedCoffeeID == RoastedCoffee.roastedCoffeeID
and RoastedCoffee.roastaryID == CoffeeRoastery.roastaryID
and (Tasting.tasteNotes like ? or RoastedCoffee.description like ?)
""", ("%" + search + "%", "%" + search + "%")
):
roasteryName, coffeeName = row
data = {
"roasteryName": roasteryName,
"coffeeName": coffeeName
}
result.append(data)
self.getCon().commit()
return result
def getCoffeeByCountryAndProcessingMethod(self) -> list[dict]:
"""
Returns coffees from Rwanda or Colombia and are unwashed
:return:
"""
cursor = self.getCursor()
result = []
query = """
select CoffeeRoastery.name, RoastedCoffee.name from RoastedCoffee
inner join CoffeeRoastery on RoastedCoffee.roastaryID == CoffeeRoastery.roastaryID
inner join CoffeeParty on RoastedCoffee.coffeePartyID == CoffeeParty.coffeePartyID
inner join Farm on CoffeeParty.producedFarmID == Farm.farmID
inner join Region on Farm.regionID == Region.regionID
inner join Country on Region.countryID == Country.countryID
inner join ProcessingMethod on CoffeeParty.processingMethodID == ProcessingMethod.processingMethodID
where (Country.name == "Rwanda" or Country.name == "Colombia")
and ProcessingMethod.name != "Vasket"
"""
for row in cursor.execute(query):
roasteryName, coffeeName = row
data = {
"roasteryName": roasteryName,
"coffeeName": coffeeName
}
result.append(data)
self.getCon().commit()
return result
def registeredEmail(self, email: str) -> bool:
"""
Checks if there are any equal emails in the DB
:param email:
:return bool:
"""
email = email.lower()
cursor = self.getCursor()
result = cursor.execute(
"""
SELECT * FROM User
WHERE User.email = ?
""", (email,)
).fetchall()
self.getCon().commit()
return len(result) > 0
def getCoffeeByValue(self) -> list[dict]:
"""
Gets a list off all coffees based on average score per 100 kroners
:return:
"""
cursor = self.getCursor()
result = []
query = """
select CoffeeRoastery.name, RoastedCoffee.name, RoastedCoffee.kiloPrice, (avg(distinct Tasting.points) / RoastedCoffee.kiloPrice) * 100 from Tasting
inner join RoastedCoffee on Tasting.roastedCoffeeID
inner join CoffeeRoastery on RoastedCoffee.roastaryID
where Tasting.roastedCoffeeID == RoastedCoffee.roastedCoffeeID
and CoffeeRoastery.roastaryID == RoastedCoffee.roastaryID
group by Tasting.roastedCoffeeID
order by (avg(distinct Tasting.points) / RoastedCoffee.kiloPrice) desc
"""
for row in cursor.execute(query):
roasteryName, coffeeName, kiloPrice, score = row
data = {
"roasteryName": roasteryName,
"coffeeName": coffeeName,
"kiloPrice": kiloPrice,
"score": score
}
result.append(data)
self.getCon().commit()
return result
def getUniqueTastings(self) -> list[dict]:
"""
Returns a list off the number of unique coffees each user has tasted
:return:
"""
cursor = self.getCursor()
result = []
query = """
select User.firstName, User.surname, count(distinct Tasting.roastedCoffeeID) from Tasting
inner join User on Tasting.userID
where User.userID == Tasting.userID
and date(Tasting.tastingDate) >= date("2022-01-01")
and date(Tasting.tastingDate) < date("2023-01-01")
group by Tasting.userID
order by count(Tasting.roastedCoffeeID) desc
"""
for row in cursor.execute(query):
firstName, surname, count = row
data = {
"firstName": firstName,
"surname": surname,
"count": count
}
result.append(data)
self.getCon().commit()
return result
class Main():
    """Interactive CLI driver: one method per user story, plus login/registration."""
    def loginAndRegister(self):
        """
        Allows user to login or register.

        Returns the logged-in user object on success, or None when a new
        account was just registered (the caller must log in afterwards).
        :return:
        """
        userInput = str(input("Enter your email: "))
        userInput = userInput.lower()
        ret = Retrieve()
        ins = Insert()
        if ret.registeredEmail(userInput):
            # If email is already in use
            users = ret.getUsers()
            password = str(input("Enter password: "))
            user = list(filter(lambda row: row.getEmail() == userInput and row.getPassword() == password, users))
            # Keep prompting until an email/password pair matches a known user.
            while len(user) == 0:
                print("Incorrect email and password. Try again!")
                email = str(input("Enter email: "))
                password = str(input("Enter password: "))
                user = list(
                    filter(lambda row: row.getEmail() == email.lower() and row.getPassword() == password, users))
            print("Logged in\n")
            return user[0]
        else:
            # If email is not in use
            email = userInput
            password = str(input("Enter a password: "))
            firstName = str(input("Enter your first name: "))
            surname = str(input("Enter your surname: "))
            print("\nSelect a country from the list of countries")
            for row in ret.getCountries():
                print(row.getName())
            countryInput = str(input("\nEnter country: "))
            # Country name must match exactly (case-sensitive).
            country = list(filter(lambda row: row.getName() == countryInput, ret.getCountries()))
            while len(country) == 0:
                # This does not work properly
                countryInput = str(input("Could not find any matches. Enter a country: "))
                country = list(filter(lambda row: row.getName() == countryInput, ret.getCountries()))
            country = country[0]
            ins.addUser(email, password, firstName, surname, country.getCountryID())
            print("\nUser registered")
            return None
    def bh1(self):
        """
        Userstory 1: log in and register a tasting for a chosen roasted coffee.
        :return:
        """
        user = self.loginAndRegister()
        if not user:
            # First call registered a new account; log in with it now.
            # NOTE(review): if this second call also returns None,
            # user.getUserID() below will fail -- confirm intended flow.
            user = self.loginAndRegister()
        ret = Retrieve()
        ins = Insert()
        result = ret.getRoastedCoffees()
        roasteries = []
        # Collect the distinct roastery names, preserving first-seen order.
        for row in result:
            if row["roasteryName"] not in roasteries:
                roasteries.append(row["roasteryName"])
        print("Select a roastery from the list")
        for roastery in roasteries:
            print(f"\t=> {roastery}")
        userInput = str(input("\nEnter desired roastery: "))
        roasteryMatches = list(filter(lambda row: row['roasteryName'] == userInput, result))
        if len(roasteryMatches) == 0:
            print("No matches")
            return
        print(f"\nSelect a coffee from the roastery {userInput}")
        for row in roasteryMatches:
            print(f"\t=> {row['coffeeName']}")
        userInput = str(input("\nEnter desired coffee: "))
        roastedCoffee = list(filter(lambda row: row['coffeeName'] == userInput, roasteryMatches))
        if len(roastedCoffee) == 0:
            print("No matches")
            return
        roastedCoffee = roastedCoffee[0]
        userID = user.getUserID()
        roastedCoffeeID = roastedCoffee['roastedCoffeeID']
        points = int(input("Enter points: "))
        # Scores are clamped to the 0-10 range by re-prompting.
        while not (0 <= points <= 10):
            points = int(input("Points has to be between 0 and 10. Enter points: "))
        tasteNote = str(input("Enter taste note: "))
        try:
            if ins.addTasting(tasteNote, points, userID, roastedCoffeeID):
                print("\nAdded tasting")
            else:
                print("\nFailed to add tasting")
        except Exception as e:
            print("Error:", e)
    def bh2(self):
        """
        Userstory 2: list users by number of unique coffees tasted in 2022.
        :return:
        """
        ret = Retrieve()
        result = ret.getUniqueTastings()
        for row in result:
            print(f"\t=> {row['firstName']} {row['surname']} has tasted {row['count']} unique coffees")
    def bh3(self):
        """
        Userstory 3: list coffees by average score per 100 NOK of kilo price.
        :return:
        """
        ret = Retrieve()
        result = ret.getCoffeeByValue()
        print("Here are the coffees that got the highest score compared to price\n")
        for row in result:
            print("\tRoastery Name:", row["roasteryName"])
            print("\tCoffee name:", row["coffeeName"])
            print("\tKilo price:", row["kiloPrice"])
            print("\tScore (per 100 NOK):", round(row["score"], 2), "\n")
    def bh4(self):
        """
        Userstory 4: free-text search over coffee descriptions.
        :return:
        """
        userInput = str(input("Enter searchword: "))
        ret = Retrieve()
        result = ret.getCoffeeByDescription(userInput)
        if not userInput or len(result) == 0:
            print("\nNo matches")
            return
        else:
            print("\nReturned the following result(s):")
            for row in result:
                print(f"\t=> Roastery: {row['roasteryName']}\n\t=> Coffee: {row['coffeeName']}\n")
    def bh5(self):
        """
        Userstory 5: list unwashed coffees from Rwanda and Colombia.
        :return:
        """
        ret = Retrieve()
        result = ret.getCoffeeByCountryAndProcessingMethod()
        print("Showing unwashed coffees from Rwanda and Colombia: ")
        if len(result) == 0:
            print("No matches")
        else:
            for row in result:
                print("\t=> Roastery name:", row["roasteryName"])
                print("\t=> Coffeename:", row["coffeeName"], "\n")
# Demo driver: run every userstory once, in order.
main = Main()
print("Userstory 1")
main.bh1()
print("\nUserstory 2")
main.bh2()
print("\nUserstory 3")
main.bh3()
print("\nUserstory 4")
main.bh4()
print("\nUserstory 5")
main.bh5()
| jathavaan/CoffeeDB | model/DBMS.py | DBMS.py | py | 15,711 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sqlite3.Connection",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.Cursor",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.C... |
6187356527 | #!/usr/bin/env python3
"""
A function that uses the requests module to obtain the HTML content
of a particular URL and return it
"""
import redis
import requests
from functools import wraps
r = redis.Redis()
def url_access_count(method):
    """
    Decorator that counts accesses to a URL and caches the fetched HTML.

    Every call increments ``count:<url>``; the page body is cached under
    ``cached:<url>`` with a 10-second TTL so repeat calls inside that
    window skip the network round trip.
    """
    @wraps(method)
    def wrapper(url):
        """Count the access, then serve from cache or fetch."""
        # Count every access -- cache hit or miss -- and do it *before*
        # calling the wrapped function, so the wrapped function can safely
        # read the counter (previously hits were not counted and the key
        # did not exist yet on the first call).
        r.incr("count:" + url)
        key = "cached:" + url
        cached_value = r.get(key)
        if cached_value:
            return cached_value.decode("utf-8")
        html_content = method(url)
        # ex=10 already sets the TTL; a separate expire() call is redundant.
        r.set(key, html_content, ex=10)
        return html_content
    return wrapper
@url_access_count
def get_page(url: str) -> str:
    """
    Fetch the HTML content of ``url`` and print the current access count.

    Caching and access counting are handled by the ``url_access_count``
    decorator; this body only performs the actual HTTP request.
    """
    results = requests.get(url)
    key_count = "count:" + url
    count = r.get(key_count)
    # The counter key may not exist yet on the very first access, in which
    # case r.get() returns None -- guard before decoding to avoid an
    # AttributeError.
    print(count.decode("utf-8") if count is not None else "0")
    return results.text
if __name__ == "__main__":
get_page('http://google.com')
print("OK")
| Cyril-777/alx-backend-storage | 0x02-redis_basic/web.py | web.py | py | 1,141 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "redis.Redis",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 44,
"usage_type": "call"
}
] |
70374444349 | import os
import shutil
import sys
import pytest
import torch
from ivory.core.client import create_client
@pytest.fixture(scope="module")
def runs():
sys.path.insert(0, os.path.abspath("examples"))
client = create_client(directory="examples")
runs = []
for name in ["tensorflow", "nnabla", "torch2"]:
run = client.create_run(name, epochs=5, batch_size=10, shuffle=False)
runs.append(run)
run_tf, run_nn, run_torch = runs
run_nn.model.build(
run_nn.trainer.loss, run_nn.datasets.train, run_nn.trainer.batch_size
)
run_nn.optimizer.set_parameters(run_nn.model.parameters())
ws_tf = run_tf.model.weights
ws_nn = run_nn.model.parameters().values()
ws_torch = run_torch.model.parameters()
for w_tf, w_nn, w_torch in zip(ws_tf, ws_nn, ws_torch):
w_nn.data.data = w_tf.numpy()
w_torch.data = torch.tensor(w_tf.numpy().T)
yield dict(zip(["tf", "nn", "torch"], runs))
del sys.path[0]
if os.path.exists("examples/mlruns"):
shutil.rmtree("examples/mlruns")
| daizutabi/ivory | tests/libs/conftest.py | conftest.py | py | 1,061 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.insert",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
12791274360 | # -*- coding: utf-8 -*-
import requests
import time
import datetime
import sys
import boto3
from boto3.dynamodb.conditions import Key, Attr
from botocore.exceptions import ClientError
import json
import telegram
from PIL import Image
from io import BytesIO
import asyncio
import re
import os
import top_holding
bot_id = os.environ['BOT_ID']  # Telegram bot token
chat_id = os.environ['CHAT_ID']  # chat/user id that owns the sticker sets
img_url = os.environ['IMG_URL']  # endpoint that renders a ticker chart image
bot = telegram.Bot(token=bot_id)
def new_sticker_set(sticker_id):
    """Create a fresh Telegram sticker set named after *sticker_id*.

    Downloads the ARK logo, scales it so its longest side is 512 px
    (Telegram's sticker size limit), saves it to /tmp and uses it as the
    set's first sticker. Returns True on success, False on any failure.
    """
    logo_url = 'http://seekvectorlogo.com/wp-content/uploads/2019/10/ark-invest-etfs-vector-logo.png'
    logo = Image.open(BytesIO(requests.get(logo_url).content))
    width = int(logo.size[0])
    height = int(logo.size[1])
    # Scale the longest side to exactly 512 px, preserving aspect ratio.
    if width >= height:
        scaled = logo.resize((512, int(512 / width * height)))
    else:
        scaled = logo.resize((int(512 / height * width), 512))
    filename = f"/tmp/{sticker_id}.png"
    scaled.save(filename)
    try:
        bot.create_new_sticker_set(
            chat_id,
            f'{sticker_id}_by_Anson_bot',
            f'{sticker_id} Trading Desk',
            open(filename, 'rb'),
            '📈',
            timeout=20,
        )
    except Exception:
        return False
    return True
async def reSize(ticker,sticker_id,act):
    """Fetch a chart image for *ticker*, scale it to Telegram's 512 px
    sticker limit and append it to the sticker set *sticker_id*.

    *act* picks the emoji: 'Buy' gets chart-up, anything else chart-down.
    NOTE(review): requests.get() is blocking, so this coroutine gains no
    real concurrency from asyncio -- confirm whether that is acceptable.
    """
    # Change Foreign Ticker
    # e.g. "7203JP" becomes "7203.T" (Tokyo exchange); any other
    # "<digits><country>" code becomes "<digits>.<country>".
    regex = r"([0-9]{4,})([A-Z]{2,})"
    matches = re.findall(regex, ticker, re.MULTILINE)
    if matches:
        if matches[0][1] == 'JP':
            # Japan to Tokyo
            ticker = matches[0][0] + '.T'
        else:
            ticker = matches[0][0] + '.'+ matches[0][1]
    # Timestamp query param busts any caching between us and the image host.
    url = f'{img_url}?ticker={ticker}&t='+str(time.time())
    web_im = requests.get(url).content
    im = Image.open( BytesIO(web_im) )
    width = int(im.size[0])
    height = int(im.size[1])
    # Scale the longest side to exactly 512 px, preserving aspect ratio.
    if width >= height:
        adjustHeight = int(512 / width * height)
        im_resize = im.resize((512, adjustHeight))
    else:
        adjustWidth = int(512 / height * width)
        im_resize = im.resize((adjustWidth, 512))
    filename = f"/tmp/{sticker_id}{ticker}.png"
    im_resize.save(filename)
    emoji = '📈'
    if act == 'Buy':
        emoji = '📈'
    else:
        emoji = '📉'
    bot.add_sticker_to_set(
        chat_id
        , f'{sticker_id}_by_Anson_bot'
        , open(filename,'rb')
        , emoji
        , timeout=20
    )
    # print('done')
    return True
def main(sticker_id,ticker_list):
    """Populate the sticker set: one sticker for the fund itself, one per
    traded ticker, then a top-holdings chart appended last.

    *ticker_list* maps an action ('Buy'/'Sell') to the tickers traded
    under that action.
    """
    # https://github.com/Sea-n/LINE-stickers/blob/master/index.js
    # Fresh event loop per invocation (Lambda may reuse the process).
    asyncio.set_event_loop(asyncio.new_event_loop())
    tasks = []
    loop = asyncio.get_event_loop()
    # First sticker: the fund's own chart (sticker_id doubles as a ticker).
    task = loop.create_task(reSize(sticker_id,sticker_id,'Buy'))
    tasks.append(task)
    for act in ticker_list:
        # ticker = SQ
        # sticker_id = ARKF
        # act = sell or buy
        for ticker in ticker_list[act]:
            task = loop.create_task(reSize(ticker,sticker_id,act))
            tasks.append(task)
    if tasks:
        loop.run_until_complete(asyncio.wait(tasks))
    loop.close()
    # NOTE(review): sticker_line is computed but never used.
    sticker_line = f"https://t.me/addstickers/{sticker_id}_by_Anson_bot"
    top_holding.holding_graph(sticker_id)
    bot.add_sticker_to_set(
        chat_id
        , f'{sticker_id}_by_Anson_bot'
        , open(f'/tmp/{sticker_id}_chart.png','rb')
        , '📈'
        , timeout=20
    )
def get_old(sticker_id):
    """Return the existing sticker set for *sticker_id*, or False if the
    set does not exist (or the lookup fails for any reason)."""
    try:
        return bot.get_sticker_set(name=f'{sticker_id}_by_Anson_bot')
    except Exception:
        return False
def clear_old(sticker_list):
    """Delete every sticker in the set except the first one (the logo)."""
    for sticker in sticker_list['stickers'][1:]:
        bot.delete_sticker_from_set(sticker['file_id'])
def lambda_handler(event, context):
    """AWS Lambda entry point: (re)build the sticker set described by *event*.

    Expects event['sticker_id'] (the fund, e.g. 'ARKF') and
    event['sticker_list'] (action -> tickers). Returns 400 when no
    sticker_id is supplied.
    """
    sticker_id = event['sticker_id']
    sticker_list = event['sticker_list']
    if not sticker_id:
        return {'statusCode': 400}
    existing = get_old(sticker_id)
    if existing:
        # Reuse the set but strip everything except the logo sticker.
        clear_old(existing)
    else:
        new_sticker_set(sticker_id)
    main(sticker_id, sticker_list)
    return {
        'statusCode': 200,
        'body': json.dumps('Hello from Lambda!')
    }
| EddieKuo723/ARK-Invest-Trading-Desk | ARK_Sticker_Set/lambda_function.py | lambda_function.py | py | 4,404 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "telegram.Bot",
"l... |
26344548844 | from unittest import TestCase
import glob
from lxml import etree
class ValidationError(Exception):
    """Raised when a sample XML file fails schema validation."""
    pass
class TestSampleFileValidation(TestCase):
def test_ukrdc_sample_files(self):
# For each sample file
for sample_path in glob.glob("sample_files/ukrdc/*.xml"):
# Run as a subtest
with self.subTest(msg=sample_path):
# Open the schema and sample files for reading
with open(
"schema/ukrdc/UKRDC.xsd", "r", encoding="utf-8"
) as schema_file, open(
sample_path, "r", encoding="utf-8"
) as sample_file:
# Create a new schema object to track errors for this file
xml_schema = etree.XMLSchema(
etree.parse(
schema_file,
parser=None,
)
)
# Try validating the sample file against the schema
try:
xml_doc = etree.parse(sample_file, None)
xml_schema.assertValid(xml_doc)
# Initially catch errors to allow reporting multiple issues in one file
except etree.DocumentInvalid as e:
tree = etree.ElementTree(xml_doc.getroot())
# Print all errors
print("Validation error(s):")
for error in xml_schema.error_log:
print(" Line {}: {}".format(error.line, error.message))
for e in tree.xpath(".//*"):
if error.line == e.sourceline:
xml_path = tree.getpath(e)
print(xml_path)
break
# Raise an exception to fail the test and report the full error list
raise ValidationError(
f"{len(xml_schema.error_log)} validation error(s) in {sample_path}. See full output above for details."
) from e
| renalreg/resources | tests/test_sample_files.py | test_sample_files.py | py | 2,213 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "glob.glob",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "lxml.etree.XMLSchema",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"li... |
71877606587 | from urllib.parse import quote_plus
from bs4 import BeautifulSoup
# selenium: a framework for web testing; through the webdriver API we can
# access the DOM after rendering has finished (requires driving a browser).
# pip install selenium
# Because it controls a real browser, crawling works without header values.
# Chrome is used here; webdriver install: https://chromedriver.chromium.org/downloads
from selenium import webdriver
baseUrl = 'https://www.google.com/search?q='
# Prompt text is Korean for "Enter search term: ".
plusUrl = input('검색어 입력 : ')
resultUrl = baseUrl + quote_plus(plusUrl)
# If the Chrome webdriver binary is not in the current working directory,
# pass its path like Chrome({path}).
driver = webdriver.Chrome()
# The browser opens and navigates to the given url.
driver.get(resultUrl)
html = driver.page_source
soup = BeautifulSoup(html)
# select() returns its matches as a list.
r = soup.select('.r')
for i in r:
    # A list object has no .text attribute; use select_one to read one node.
    print(i.select_one('.LC20lb.DKV0Md').text)
    #print(i.select_one('.iUh30.bc').text)
    print(i.a.attrs['href'], '\n')
driver.close()
driver.close() | BrokenMental/Python-Study | googleCrawl.py | googleCrawl.py | py | 1,197 | python | ko | code | 0 | github-code | 6 | [
{
"api_name": "urllib.parse.quote_plus",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "... |
27925991770 | """
-*- coding: utf-8 -*-
@author: socratio
@inspiration: drew original inspiration from cleartonic twitchtriviabot. Almost nothing left in this code from that project.
"""
import json
from twitchio import websocket
from twitchio.ext import commands
import yaml
import asyncio
import os
import random
class ChatBot(commands.Bot):
    """Twitch trivia bot.

    Reads auth and trivia config from YAML, questions from triviaset.json and
    persistent scores from scoreboard.json. Admin commands drive the game
    (!start, !next, !end, !bonus, !rescore, ...); any non-command chat message
    sent while a question is open is recorded as that user's answer.
    """
    def __init__(self):
        #load the auth and connect to twitch
        with open(os.path.join(os.getcwd(),'config','auth_config.yml')) as auth:
            self.auth = yaml.safe_load(auth)
        super().__init__(irc_token=f"{self.auth['pass']}", client_id='...', nick=f"{self.auth['nick']}", prefix='!',initial_channels=[f"{self.auth['chan']}"])
        #load the trivia configuration
        with open(os.path.join(os.getcwd(),'config','trivia_config.yml')) as config:
            self.trivia_config = yaml.safe_load(config)
        #create admins array, empty players and questions arrays, boolean variables, and empty answer messages object
        self.admins = [i.strip() for i in self.trivia_config['admins'].split(",")]
        self.players = []
        self.questionlist = []
        self.active_game = False
        self.questionisactive = False
        self.active_question = False
        self.scoringopen = False
        self.answermessages = {}
        #load the scoreboard, set the list of past winners, increment the game number
        self.refresh_scores()
        try:
            self.pastwinners = self.scores[f'Season {self.trivia_config["season"]}']['shirtwinners']
        # NOTE(review): bare except -- first run of a new season creates the
        # season entry, but this also swallows unrelated errors (e.g. typos).
        except:
            self.scores[f'Season {self.trivia_config["season"]}'] = {"gamesplayed":0, "shirtwinners":[], "scoreboard":{}}
            self.pastwinners = self.scores[f'Season {self.trivia_config["season"]}']['shirtwinners']
        self.game_number = self.scores[f'Season {self.trivia_config["season"]}']['gamesplayed']+1
        #load the questions and populate the questions array
        with open(os.path.join(os.getcwd(),'config','triviaset.json')) as self.questions:
            self.questions = json.load(self.questions)
        for question in self.questions.items():
            self.questionlist.append(Question(question))
        #populate the players array
        for player in self.scores[f'Season {self.trivia_config["season"]}']['scoreboard'].items():
            self.players.append(Player(player))
    #updates the scoreboard dict object
    def refresh_scores(self):
        with open(os.path.join(os.getcwd(),'config','scores',"scoreboard.json")) as scores:
            self.scores = json.load(scores)
    #clears json of scores for this game, sorts and adds scores back to json, resulting in sorted scores every time. Also saves scores to scoreboard file
    def commit_scores(self):
        self.scores[f'Season {self.trivia_config["season"]}'][f'Game {self.game_number}'] = {}
        self.scores[f'Season {self.trivia_config["season"]}']['scoreboard'] = {}
        for player in sorted(self.players, key=lambda player:player.seasonpoints, reverse=True):
            self.scores[f'Season {self.trivia_config["season"]}']['scoreboard'][player.name] = player.seasonpoints
        for player in sorted(self.players, key=lambda player:player.gamepoints, reverse=True):
            self.scores[f'Season {self.trivia_config["season"]}'][f'Game {self.game_number}'][player.name] = player.gamepoints
        with open(os.path.join(os.getcwd(),'config','scores',"scoreboard.json"),'w') as outfile:
            json.dump(self.scores, outfile, indent=4)
    #Broadcast ready state to twitch channel
    async def event_ready(self):
        print(f'Ready | {self.nick}')
        ws = bot._ws
        await ws.send_privmsg(self.initial_channels[0],"I have indeed been uploaded, sir.")
    #major message reading function
    async def event_message(self, message):
        if message.author != self.nick:
            print(f'{message.author.name}: {message.content}')
            await self.handle_commands(message)
        # While scoring is open, a user's latest non-command message is
        # recorded as their answer (re-answering overwrites the old one and
        # moves them to the back of the insertion order).
        if self.scoringopen == True and not message.content.startswith('!'):
            if message.author.name in self.answermessages:
                del self.answermessages[message.author.name]
            self.answermessages[message.author.name] = message.content
    @commands.command(name='test')
    async def test(self, ctx):
        await ctx.send(f'Hello {ctx.author.name}!')
    #TRIVIA COMMANDS AND PROCEDURES
    @commands.command(name='start')
    #!Start command starts the trivia game
    async def start(self, ctx):
        if ctx.author.name in self.admins and not self.active_game:
            self.active_game = True
            print('Starting Game.')
            await ctx.send("Game starts in 15 seconds. Watch the chat for the question. Good luck!")
            await asyncio.sleep(15)
            if self.active_game:
                await self.callquestion()
    @commands.command(name='next')
    #!next starts the process of asking the next question after 10 seconds and scoring after 20 seconds
    async def nextq(self, ctx):
        if ctx.author.name in self.admins and not self.questionisactive:
            self.questionisactive = True
            print('Received call for next question.')
            await ctx.send("Next question coming in 10 seconds. Keep an eye on the chat!")
            await asyncio.sleep(10)
            if self.active_game:
                await self.callquestion()
        else:
            print('Received call for next question, but an active question exists or it is not an admin. Ignoring call.')
    @commands.command(name='end')
    #!end ends this game of trivia, commits scores to json, and refreshes the scores
    async def endtrivia(self, ctx):
        if ctx.author.name in self.admins and self.active_game:
            print("Ending game.")
            self.scoringopen = False
            self.active_game = False
            self.active_question = False
            # The highest-scoring player who has not yet won a shirt this
            # season wins one.
            if any(i.gamepoints > 0 for i in self.players):
                for player in sorted(self.players, key=lambda x:x.gamepoints, reverse=True):
                    if player.name not in self.scores[f'Season {self.trivia_config["season"]}']['shirtwinners']:
                        self.scores[f'Season {self.trivia_config["season"]}']['shirtwinners'].append(player.name)
                        self.pastwinners.append(player.name)
                        break
            self.scores[f'Season {self.trivia_config["season"]}']['gamesplayed'] = self.game_number
            await ctx.send(f"Ending this game of trivia. Congratulations to {self.pastwinners[-1]} on the new shirt! I hope everyone had fun!")
            self.commit_scores()
            self.refresh_scores()
    @commands.command(name='bonus')
    #!bonus reads the message, finds the user targeted for bonus points, finds the point value of the bonus, assigns the extra points if the player exists or creates them if not, and refreshes the scores
    async def bonus(self, ctx):
        if ctx.author.name in self.admins:
            print(f"Received call for bonus points from {ctx.author.name}.")
            bonustarget = ctx.message.content.split()[1].lower()
            bonuspoints = int(ctx.message.content.split()[2])
            if any(bonustarget == player.name for player in self.players):
                for player in self.players:
                    if player.name == bonustarget:
                        player.gamepoints += int(bonuspoints)
                        returnstr = player.gamepoints
            else:
                print(f'Player {bonustarget} does not exist. Creating.')
                user = Player(bonustarget,bonuspoints)
                self.players.append(user)
            self.commit_scores()
            self.refresh_scores()
            # NOTE(review): when the player was just created above, returnstr
            # is never assigned and this line raises NameError -- confirm.
            await ctx.send(f'Player {bonustarget} received {bonuspoints} bonus points. Their new total is {returnstr} points.')
    @commands.command(name='lasttop5')
    #!lasttop5 calls the top 5 scores from the last game played
    async def lasttop5(self, ctx):
        if ctx.author.name in self.admins:
            returnstr = "TOP 5 SCORES FOR THE LAST GAME:\t"
            lastgameno = self.scores[f'Season {self.trivia_config["season"]}']['gamesplayed']
            lastgamescores = self.scores[f'Season {self.trivia_config["season"]}'][f'Game {lastgameno}']
            for score in sorted(lastgamescores.items(), key=lambda x:x[1], reverse=True)[:5]:
                returnstr += f"{score[0]}: {score[1]} "
            await ctx.send(returnstr)
    # Ask the next question, collect answers for 20 seconds, then score.
    async def callquestion(self):
        self.active_question = self.questionlist.pop(0)
        self.scoringopen = True
        self.answermessages = {}
        ws = bot._ws
        await ws.send_privmsg(self.initial_channels[0],f"Question {self.active_question.questionno}: {self.active_question.question}")
        await asyncio.sleep(20)
        self.scoringopen = False
        await self.scorequestion()
        self.questionisactive = False
    # Score the collected answers for the active question and announce results.
    async def scorequestion(self):
        self.scoringopen = False
        ws = bot._ws
        self.point_dict = {}
        returnstr = f"The answer was **{self.active_question.answers[0]}**.\t"
        #check that all players that answered exist as Player objects
        for name in self.answermessages.keys():
            if not any(player.name == name for player in self.players):
                print(f'Player {name} does not exist. Creating.')
                user = Player(name)
                self.players.append(user)
        #find all the correct answers, building the list of points as it goes
        for answer in self.answermessages.items():
            for proof in self.active_question.answers:
                if answer[1].lower() == proof.lower():
                    self.point_dict[answer[0]] = 0
                    break
            else:
                # No exact match: fall back to the alias table for the
                # canonical answer.
                with open(os.path.join(os.getcwd(),"config","aliases.json")) as aliases:
                    aliases = json.load(aliases)
                for name in aliases.items():
                    if answer[1].lower() in name[1] and name[0] == self.active_question.answers[0]:
                        self.point_dict[answer[0]] = 0
            # Deep-cut answers are worth 3 extra points.
            for proof in self.active_question.deepcut:
                if answer[1].lower() == proof.lower():
                    self.point_dict[answer[0]] = 3
        #bonus for small fields: a lone correct answer earns +3, and +1 each when only 2-3 answered correctly
        for name,points in self.point_dict.items():
            if len(self.point_dict) == 1:
                self.point_dict[name] += 3
            if 1 < len(self.point_dict) < 4:
                self.point_dict[name] += 1
        #award 1 point for everyone, an extra point for the first 20, and another point for the first 6
        idx = 0
        for name,points in self.point_dict.items():
            if idx == 0:
                returnstr += f"{name} was the first to answer correctly."
            if idx < 6:
                self.point_dict[name] += 1
            if idx < 20:
                self.point_dict[name] += 1
            self.point_dict[name] += 1
            idx += 1
            #update the player object with the new points
            for player in self.players:
                if player.name == name:
                    player.gamepoints += self.point_dict[name]
                    player.seasonpoints += self.point_dict[name]
        self.commit_scores()
        await ws.send_privmsg(self.initial_channels[0],returnstr)
    #CHAT RESPONSES AND COMMAND FUNCTIONS
    @commands.command(name='score')
    #!score finds the score of the user sending the message and sends it in chat
    async def score(self, ctx):
        print(f'Received a score check for {ctx.author.name}')
        if any(player.name == ctx.author.name for player in self.players):
            for player in self.players:
                if player.name == ctx.author.name:
                    print(f'Found player {player.name} with {player.gamepoints} game points and {player.seasonpoints} season points.')
                    user = player
                    if self.active_game:
                        scorestr = f"User {player.name} has {player.gamepoints} points in this game and {player.seasonpoints} for the season."
                    else:
                        scorestr = f"User {player.name} has {player.seasonpoints} points in this season."
                    break
        else:
            print(f'Player {ctx.author.name} does not exist. Creating.')
            user = Player(ctx.author.name)
            self.players.append(user)
            scorestr = f"User {user.name} has 0 points. Welcome to trivia!"
        await ctx.send(scorestr)
    @commands.command(name='raffle')
    #!raffle finds the raffle ticket count of the user sending the message and sends it in chat
    async def raffle(self, ctx):
        print(f'Received a raffle check for {ctx.author.name}')
        if any(player.name == ctx.author.name for player in self.players):
            for player in self.players:
                if player.name == ctx.author.name:
                    # One raffle ticket per 30 season points.
                    rafflecount = int(player.seasonpoints/30)
                    print(f'Found player {player.name} with {player.gamepoints} game points, {player.seasonpoints} season points, and {rafflecount} raffle tickets.')
                    user = player
                    # NOTE(review): scorestr is only set when no game is
                    # active; a known player calling !raffle mid-game makes
                    # ctx.send below raise NameError -- confirm.
                    if not self.active_game:
                        scorestr = f"User {player.name} has {player.seasonpoints} for the season resulting in {rafflecount} raffle tickets."
                    break
        else:
            print(f'Player {ctx.author.name} does not exist. Creating.')
            user = Player(ctx.author.name)
            self.players.append(user)
            scorestr = f"User {user.name} has 0 points and no raffle tickets. Welcome to trivia!"
        await ctx.send(scorestr)
    @commands.command(name='top5')
    #!top5 returns the top5 scores for the game if a game is active or for the season if a game is not active
    async def top5(self, ctx):
        if ctx.author.name in self.admins:
            returnstr = 'TOP 5: '
            print(f'Received top 5 check from {ctx.author.name}.')
            if self.active_game:
                self.refresh_scores()
                for i in sorted(self.players, key=lambda player:player.gamepoints, reverse=True)[:5]:
                    returnstr += (f'{i.name}: {i.gamepoints}\t')
            else:
                returnstr = "THIS SEASON'S TOP 5: "
                for i in sorted(self.players, key=lambda player:player.seasonpoints, reverse=True)[:5]:
                    returnstr += (f'{i.name}: {i.seasonpoints}\t')
            await ctx.send(returnstr)
    @commands.command(name='topless')
    #!topless returns the top 5 player scores for players who have not yet won a shirt as defined in pastwinners
    async def topless(self, ctx):
        if ctx.author.name in self.admins:
            returnstr = 'TOP 5 SHIRTLESS THIS '
            # NOTE(review): this assignment shadows the `topless` method name
            # on the instance -- the command still works via the decorator
            # registration, but consider renaming the attribute.
            self.topless = []
            print(f'Received top 5 shirtless check from {ctx.author.name}.')
            self.refresh_scores()
            if self.active_game:
                returnstr += 'GAME: '
                for player in sorted(self.players, key=lambda x:x.gamepoints, reverse=True):
                    if player.name not in self.pastwinners and len(self.topless) < 5:
                        self.topless.append(player)
                        returnstr += f'{player.name}: {player.gamepoints} '
                    else:
                        continue
            else:
                returnstr += 'SEASON: '
                for player in self.scores[f'Season {self.trivia_config["season"]}']['scoreboard'].items():
                    if player[0] not in self.pastwinners and len(self.topless) < 5:
                        self.topless.append(player[0])
                        returnstr += f'{player[0]}: {player[1]} '
                    else:
                        continue
            await ctx.send(returnstr)
    @commands.command(name='stop')
    #!stop forces the chatbot to shut down
    async def stop(self, ctx):
        if ctx.author.name in self.admins:
            print(f'Received stop command from {ctx.author.name}.')
            if self.active_game:
                self.active_game = False
            await ctx.send('I have been commanded to stop. The Vision trivia bot is shutting down. See you next time!')
            await bot._ws.teardown()
    @commands.command(name='rafflewinner')
    #!rafflewinner generates a list of raffle tickets based on a person's total points/30 and selects a random winner
    async def rafflewinner(self, ctx):
        if ctx.author.name in self.admins:
            await ctx.send('This is the moment you have ALL been waiting for. The winner of the biggest prize in Stranded Panda Trivia history is...*shuffles raffle tickets for 10 seconds*')
            await asyncio.sleep(10)
            self.refresh_scores()
            with open(os.path.join(os.getcwd(),'config','scores',"scoreboard.json")) as scoreboard:
                scoreboard = json.load(scoreboard)
            scoreboard = scoreboard[f'Season {self.trivia_config["season"]}']['scoreboard']
            # One list entry per ticket so random.choice weights by tickets.
            rafflelist = []
            for player in scoreboard.items():
                ticketcount = int(player[1]/30)
                for count in range(0,ticketcount):
                    rafflelist.append(player[0])
            drawingwinner = random.choice(rafflelist)
            await ctx.send("The hosts now have the raffle winner in their debatably capable hands...")
            # Winner is printed to the console only, not announced in chat.
            print(f'The raffle winner is {drawingwinner}')
    @commands.command(name='seasonwinner')
    #!seasonwinner takes the top 14 scores for the season, adds them together, and produces the top 10
    async def seasonwinner(self, ctx):
        if ctx.author.name in self.admins:
            returnstr = "This season's top 10: "
            scorelists = {}
            sortedlists = {}
            finalscores = {}
            with open(os.path.join(os.getcwd(),'config','scores',"scoreboard.json")) as scoreboard:
                scoreboard = json.load(scoreboard)
            # Gather every per-game score list for each player.
            for game in scoreboard[f'Season {self.trivia_config["season"]}'].items():
                if (game[0].startswith("Game ")):
                    for player in game[1].items():
                        if player[0] not in scorelists:
                            scorelists[f'{player[0]}'] = []
                        scorelists[f'{player[0]}'].append(player[1])
            # Only each player's best 14 games count toward the season total.
            for scores in scorelists.items():
                sortedlists[f'{scores[0]}'] = sorted(scores[1],reverse=True)
            for player in sortedlists.items():
                finalscores[f'{player[0]}'] = sum(player[1][0:14])
            scoreboard = {}
            for player in sorted(finalscores.items(), key=lambda player:player[1], reverse=True):
                scoreboard[player[0]] = player[1]
            for score in sorted(scoreboard.items(), key=lambda x:x[1], reverse=True)[:10]:
                returnstr += f"{score[0]}: {score[1]} "
            overallwinner = sorted(scoreboard.items(), key=lambda x:x[1], reverse=True)[0]
            await ctx.send("Calculating the season's winner...removing the bottom 2 scores...swapping the bonus Halloween week...")
            await asyncio.sleep(5)
            await ctx.send(f'The winner of this season of Stranded Panda Twitch Trivia is... {overallwinner[0]} with {overallwinner[1]} points!!! Congratulations {overallwinner[0]}!!!')
            await asyncio.sleep(5)
            await ctx.send(returnstr)
    @commands.command(name='rescore')
    #!rescore removes the most recently awarded points and rescores using the most recently submitted answer list.
    async def rescore(self, ctx):
        if ctx.author.name in self.admins and not self.questionisactive and not self.scoringopen:
            print(f"Received call for rescore from {ctx.author.name}.")
            #update the player objects with the new points
            for name,points in self.point_dict.items():
                for player in self.players:
                    if player.name == name:
                        player.gamepoints -= points
                        player.seasonpoints -= points
            self.commit_scores()
            await self.scorequestion()
            await ctx.send("Rescoring complete.")
class Question(object):
    """One trivia question, built from a (number, details) pair as yielded
    by dict.items() over the loaded question set."""
    def __init__(self, question):
        number, details = question
        # Normalize curly apostrophes in the prompt to straight ones.
        self.question = str(details['Question'].replace('’', "'"))
        self.answers = details['Answers']
        self.deepcut = details['DeepCut']
        self.questionno = number
class Player(object):
    """A trivia participant with per-game and per-season point totals."""
    def __init__(self, playername, pointstart=0):
        # A plain string comes from a chat command (new player, zero season
        # points); anything else is a (name, seasonpoints) pair loaded from
        # the saved scoreboard during bot initialization.
        if isinstance(playername, str):
            self.name = playername
            self.seasonpoints = 0
        else:
            self.name = playername[0]
            self.seasonpoints = playername[1]
        self.gamepoints = pointstart
if __name__ == '__main__':
bot = ChatBot()
bot.run() | Socratia/StrandedPandaTrivia | strandedpandatriviabot.py | strandedpandatriviabot.py | py | 21,771 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "twitchio.ext.commands.Bot",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "twitchio.ext.commands",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.... |
11093803614 | from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
# URL routing for the computer-vision pipeline app; each route maps to a
# view of the same name.
urlpatterns = [
    url(r'^$', views.first_view, name='first_view'),
    url(r'^uimage/$', views.uimage, name='uimage'),
    url(r'^dface/$', views.dface, name='dface'),
    url(r'^crop/$', views.crop, name='crop'),
    url(r'^backgroundsubtract/$', views.backgroundsubtract, name='backgroundsubtract'),
    url(r'^binarize/$', views.binarize, name='binarize'),
    url(r'^webcam/$', views.webcam, name='webcam'),
    url(r'^stream/$', views.stream, name='stream'),
    url(r'^capture/$', views.capture, name='capture'),
]
# Serve user-uploaded media through Django (development-only pattern).
urlpatterns += static(settings.MEDIA_URL,
                      document_root=settings.MEDIA_ROOT)
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.c... |
34273049354 | from flask import Flask
from flask_cors import CORS
from flask_marshmallow import Marshmallow
from config import config
from .main import main as main_blueprint
'''
Application factory for application package. \
Delays creation of an app by moving it into a factory function that can be \
explicitly invoked from script and apply configuration changes.
'''
# CORS is bound to the blueprint here; only the local Angular dev server
# origins are allowed, with cookies/credentials enabled.
cors = CORS(
    main_blueprint,
    origins=['http://127.0.0.1:4200', 'http://localhost:4200'],
    supports_credentials=True
)
# Marshmallow is created unbound and attached to the app inside create_app().
ma = Marshmallow()
def create_app(config_name):
    """Application factory: build and configure a Flask app for *config_name*."""
    application = Flask(__name__)

    # Load the selected configuration object, then let it finish its own setup.
    selected = config[config_name]
    application.config.from_object(selected)
    selected.init_app(application)

    # Bind the lazily-created extensions to this app instance.
    cors.init_app(application)
    ma.init_app(application)

    # An explicit app context is needed to touch app-bound objects here.
    with application.app_context():
        application.register_blueprint(main_blueprint, url_prefix='/daron')
    return application
| daronphang/stock_app_backend | app/__init__.py | __init__.py | py | 997 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask_cors.CORS",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "main.main",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "flask_marshmallow.Marshmallow",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.Fla... |
37300509850 | import sqlite3, sys
from pathlib import Path
from . import Notes
from tqdm import tqdm
db_path = "database/xhs_tesla_notes.db"
def fill(db_path=db_path):
    """Fetch and store content for every note whose ``content`` column is blank.

    Opens the SQLite database at *db_path*, reports how many notes are blank,
    then for each blank row builds a ``Notes.Note`` and asks it for its
    content, writing the result back into the ``notes`` table.
    """
    count_blank_query = "SELECT COUNT(*) FROM notes WHERE content is ''"
    # Bug fix: the original iterated over the COUNT(*) result as if it were
    # note rows (then indexed note[0]..note[10], failing per-row). Select the
    # actual blank rows instead.
    select_blank_query = "SELECT * FROM notes WHERE content is ''"
    conn = None
    try:
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        amount = cursor.execute("SELECT COUNT(*) FROM notes").fetchone()[0]
        blank_amount = cursor.execute(count_blank_query).fetchone()[0]
        print(
            f"There are {amount} notes in the database, {blank_amount} of them have blank content, blank rate is {blank_amount/amount}"
        )
        notes_null_content = cursor.execute(select_blank_query).fetchall()
        for note in tqdm(notes_null_content):
            try:
                # NOTE(review): assumes the notes table's columns line up with
                # Notes.Note(id, ..., note[10]) — confirm against the schema.
                null_con = Notes.Note(
                    note[0], note[1], note[2], note[3], note[4], note[5], note[10]
                )
                print(f'Filling content for note with id {null_con.id}')
                content = null_con.get_content()
                update_query = "UPDATE notes SET content = ? WHERE id = ?"
                cursor.execute(update_query, (content, null_con.id))
                # Commit per note so progress survives a crash mid-run.
                conn.commit()
            except Exception as inner_exc:
                print(f"Error processing note with id {note[0]}: {inner_exc}")
        New_blank_amount = cursor.execute(count_blank_query).fetchone()[0]
        print(
            f"{blank_amount-New_blank_amount} of them have been filled, blank_rate now is {New_blank_amount/amount} as {New_blank_amount} of {amount}"
        )
    except Exception as outer_exc:
        print(f"Error connecting to the database: {outer_exc}")
    finally:
        # Bug fix: conn stayed unbound (NameError) in the original when
        # sqlite3.connect itself failed.
        if conn is not None:
            conn.commit()
            conn.close()
| Lucascuibu/xis_topic_py | ai_category/fill_blank.py | fill_blank.py | py | 1,927 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 26,
"usage_type": "call"
}
] |
33379210836 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: rename ``LinkPost.pub_date`` to ``scraped_pub_date``."""

    dependencies = [
        ('spurt', '0020_linkpost_scrape_token'),
    ]
    operations = [
        # Pure column rename; no data transformation is performed.
        migrations.RenameField(
            model_name='linkpost',
            old_name='pub_date',
            new_name='scraped_pub_date',
        ),
    ]
| steezey/spurt | spurt/migrations/0021_auto_20150122_0128.py | 0021_auto_20150122_0128.py | py | 402 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.RenameField",
"line_number": 14,
"usage_type": "call"
},
... |
5519173983 | import sys
import logging
import click
import os
sys.path.append('.')
from src.classes import Dataset
logger = logging.getLogger(__name__)
@click.command()
@click.option(
    '--n_images', default=10,
    help="Number of images per tissue"
)
@click.option(
    '--n_tissues', default=6,
    help="Number of tissues with most numbers of samples"
)
def main(n_images, n_tissues):
    """CLI entry point: generate patch-coordinate files for the dataset."""
    os.makedirs('data/patches', exist_ok=True)
    logger.info('Initializing patches script')
    # Dataset selects the n_tissues best-sampled tissues, n_images images each.
    dataset = Dataset(n_images=n_images, n_tissues=n_tissues)
    dataset.get_patchcoordfiles()
if __name__ == '__main__':
    # Log everything (DEBUG and up) to a file before handing control to click.
    logging.basicConfig(
        filename='logs/patches.log', level=logging.DEBUG,
        format=(
            "%(asctime)s | %(name)s | %(processName)s |"
            "%(levelname)s: %(message)s"
        )
    )
    main()
| willgdjones/HistoVAE | scripts/patches.py | patches.py | py | 825 | python | en | code | 10 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_... |
24013644968 | import warnings
from dataclasses import dataclass
from typing import List, Optional
import keopscore
import torch
from pykeops.torch import Genred
from falkon.mmv_ops.utils import _get_gpu_info, _start_wait_processes, create_output_mat
from falkon.options import BaseOptions, FalkonOptions
from falkon.utils import decide_cuda
from falkon.utils.helpers import calc_gpu_block_sizes, sizeof_dtype
from falkon.utils.stream_utils import sync_current_stream
@dataclass(frozen=True)
class ArgsFmmv:
    """Argument bundle shipped to one per-GPU kernel-vector-multiply worker."""

    X1: torch.Tensor                 # left data block (N x D, per worker slice)
    X2: torch.Tensor                 # right data block (M x D)
    v: torch.Tensor                  # vector block (M x T)
    other_vars: List[torch.Tensor]   # extra tensors forwarded to the KeOps formula
    out: torch.Tensor                # output buffer the worker accumulates into
    gpu_ram: float                   # usable memory budget for this GPU, in bytes
    backend: str                     # KeOps backend string ("CPU" / "GPU_1D")
    function: callable               # compiled KeOps Genred callable
def _decide_backend(opt: BaseOptions, num_dim: int) -> str:
    """Pick the KeOps backend: ``"GPU_1D"`` when CUDA is usable, else ``"CPU"``."""
    return "GPU_1D" if decide_cuda(opt) else "CPU"
def _estimate_split(N, M, D, T, R, ds):
"""Estimate the splits along dimensions N and M for a MVM to fit in memory
The operations consist of computing the product between a kernel
matrix (from a N*D and a M*D matrix) and a 'vector' of shape M*T
This typically requires storage of the input and output matrices,
which occupies (M + N)*(D + T) memory locations plus some intermediate
buffers to perform computations.
TODO: It is not clear how much intermediate memory KeOps requires;
the only thing that is certain is that it is quadratic in D.
For now we sidestep this issue by using a smaller R than what is
actually available in GPU memory.
This function calculates the split along N and M into blocks of size n*m
so that we can compute the kernel-vector product between such blocks
and still fit in GPU memory.
Parameters
-----------
- N : int
The first dimension of the kernel matrix
- M : int
The second dimension of the kernel matrix
- D : int
The data dimensionality
- T : int
The number of output columns
- R : float
The amount of memory available (in bytes)
- ds : int
The size in bytes of each element in the data matrices
(e.g. 4 if the data is in single precision).
Returns
--------
- n : int
The block size to be used along the first dimension
- m : int
The block size along the second dimension of the kernel
matrix
Raises
-------
RuntimeError
If the available memory `R` is insufficient to store even the smallest
possible input matrices. This may happen if `D` is very large since we
do not perform any splitting along `D`.
Notes
------
We find 'good' values of M, N such that
N*(D+T) + M*(D+T) <= R/ds
"""
R = R / ds
# We have a linear equation in two variables (N, M)
slope = -1
intercept = R / (D + T)
slack_points = 10
# We try to pick a point at the edges such that only one kind of split
# is necessary
if N < intercept - 1:
M = min(M, intercept + slope * N)
elif M < intercept - 1:
N = min(N, intercept + slope * M)
else:
# All points on the slope such that N, M > 0 are possible
N = intercept - slack_points - 1
M = intercept + slope * N
if N <= 0 or M <= 0:
raise RuntimeError("Insufficient available GPU memory (available %.2fGB)" % (R * ds / 2**30))
return int(N), int(M)
def _single_gpu_method(proc_idx, queue, device_id):
    """Worker body: run one GPU's share of the kernel-vector product.

    Pops an :class:`ArgsFmmv` bundle from *queue*, splits its rows/columns
    further so each piece fits in this GPU's memory budget, and accumulates
    results into the bundle's output tensor.
    """
    a: ArgsFmmv = queue.get()
    backend = a.backend
    X1 = a.X1
    X2 = a.X2
    v = a.v
    oout = a.out
    other_vars = a.other_vars
    fn = a.function
    R = a.gpu_ram
    N, D = X1.shape
    M = X2.shape[0]
    T = v.shape[1]
    device = torch.device(f"cuda:{device_id}")
    # Second round of subdivision (only if necessary due to RAM constraints)
    n, m = _estimate_split(N, M, D, T, R, sizeof_dtype(X1.dtype))
    other_vars_dev = [ov.to(device, copy=False) for ov in other_vars]
    # True when the output tensor already lives on this GPU.
    out_ic = oout.device.index == device_id
    # Process the two rounds of splitting with a nested loop.
    with torch.cuda.device(device_id), torch.autograd.inference_mode():
        for mi in range(0, M, m):
            ml = min(m, M - mi)
            if ml != M and mi > 0:  # Then we must create a temporary output array
                out = torch.empty_like(oout)
            else:
                out = oout
            cX2 = X2[mi : mi + ml, :].to(device, copy=False)
            cv = v[mi : mi + ml, :].to(device, copy=False)
            for ni in range(0, N, n):
                nl = min(n, N - ni)
                cX1 = X1[ni : ni + nl, :].to(device, copy=False)
                cout = out[ni : ni + nl, :].to(device, copy=False)
                variables = [cX1, cX2, cv] + other_vars_dev
                fn(*variables, out=cout, device_id=device_id, backend=backend)
                if not out_ic:
                    # Output lives off-device: copy this tile's result back.
                    out[ni : ni + nl, :].copy_(cout)
            if ml != M and mi > 0:
                # Partial column block was computed into a temporary: accumulate.
                oout.add_(out)
    return oout
def run_keops_mmv(
    X1: torch.Tensor,
    X2: torch.Tensor,
    v: torch.Tensor,
    other_vars: List[torch.Tensor],
    out: Optional[torch.Tensor],
    formula: str,
    aliases: List[str],
    axis: int,
    reduction: str = "Sum",
    opt: Optional[FalkonOptions] = None,
) -> torch.Tensor:
    """Evaluate a KeOps *formula* reduction over X1, X2, v (plus other_vars).

    Dispatches between four execution paths: a single differentiable call,
    in-core CPU, in-core CUDA, and out-of-core multi-GPU (CPU data with GPU
    computation, split across devices by :func:`_single_gpu_method`).
    Returns the (N x T) output tensor, allocating it if *out* is None.
    """
    if opt is None:
        opt = FalkonOptions()
    # Choose backend
    N, D = X1.shape
    T = v.shape[1]
    backend = _decide_backend(opt, D)
    data_devs = [X1.device, X2.device, v.device]
    if any(ddev.type == "cuda" for ddev in data_devs) and (not backend.startswith("GPU")):
        warnings.warn(
            "KeOps backend was chosen to be CPU, but GPU input tensors found. "
            "Defaulting to 'GPU_1D' backend. To force usage of the CPU backend, "
            "please pass CPU tensors; to avoid this warning if the GPU backend is "
            "desired, check your options (i.e. set 'use_cpu=False')."
        )
        backend = "GPU_1D"
    # Any differentiable input forces the single-call path further below.
    differentiable = any([X1.requires_grad, X2.requires_grad, v.requires_grad] + [o.requires_grad for o in other_vars])
    comp_dev_type = backend[:3].lower().replace("gpu", "cuda")  # 'cpu' or 'cuda'
    keopscore.config.config.use_cuda = comp_dev_type == "cuda"  # workaround for keops issue#248
    out = create_output_mat(
        out,
        data_devs,
        is_sparse=False,
        shape=(N, T),
        dtype=X1.dtype,
        comp_dev_type=comp_dev_type,
        other_mat=X1,
        output_stride="C",
    )
    # KeOps' high-dimension specialization kicks in above 100 features.
    rec_multVar_highdim = None
    if D > 100:
        rec_multVar_highdim = 1
    fn = Genred(
        formula,
        aliases,
        reduction_op=reduction,
        axis=axis,
        dtype_acc=opt.keops_acc_dtype,
        sum_scheme=opt.keops_sum_scheme,
        rec_multVar_highdim=rec_multVar_highdim,
    )
    if differentiable:
        # For differentiable inputs we don't split, since we don't know how to
        # split the backward pass.
        out = fn(X1, X2, v, *other_vars, out=out, backend=backend)
    elif comp_dev_type == "cpu" and all(ddev.type == "cpu" for ddev in data_devs):  # incore CPU
        out = fn(X1, X2, v, *other_vars, out=out, backend=backend)
    elif comp_dev_type == "cuda" and all(ddev.type == "cuda" for ddev in data_devs):  # incore CUDA
        device = data_devs[0]
        with torch.cuda.device(device):
            sync_current_stream(device)
            out = fn(X1, X2, v, *other_vars, out=out, backend=backend)
    else:  # cpu data, gpu computations: out-of-core
        # slack should be high due to imprecise memory usage estimates for keops
        gpu_info = _get_gpu_info(opt, slack=opt.keops_memory_slack)
        block_sizes = calc_gpu_block_sizes(gpu_info, N)
        args = []  # Arguments passed to each subprocess
        for i, g in enumerate(gpu_info):
            # First round of subdivision
            bwidth = block_sizes[i + 1] - block_sizes[i]
            if bwidth <= 0:
                continue
            args.append(
                (
                    ArgsFmmv(
                        X1=X1.narrow(0, block_sizes[i], bwidth),
                        X2=X2,
                        v=v,
                        out=out.narrow(0, block_sizes[i], bwidth),
                        other_vars=other_vars,
                        function=fn,
                        backend=backend,
                        gpu_ram=g.usable_memory,
                    ),
                    g.Id,
                )
            )
        _start_wait_processes(_single_gpu_method, args)
    return out
| FalkonML/falkon | falkon/mmv_ops/keops.py | keops.py | py | 8,589 | python | en | code | 157 | github-code | 6 | [
{
"api_name": "torch.Tensor",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
... |
6834567910 | from typing import Optional, Tuple, Union
import torch.nn as nn
from diffusers.models import UNet2DConditionModel
from diffusers.models.unet_2d_blocks import UNetMidBlock2DCrossAttn
from diffusers.models.embeddings import Timesteps, TimestepEmbedding
from diffusers.configuration_utils import register_to_config
from blocks import get_down_block, get_up_block
class VideoLDM(UNet2DConditionModel):
    """Video Latent Diffusion UNet.

    A UNet2DConditionModel whose cross-attention down/up blocks are swapped
    (via the local ``get_down_block`` / ``get_up_block`` factories) for
    VideoLDM variants; the constructor otherwise mirrors diffusers'
    UNet2DConditionModel.__init__.
    """

    # register_to_config records every __init__ argument into the model config.
    @register_to_config
    def __init__(
        self,
        sample_size: Optional[int] = None,
        in_channels: int = 4,
        out_channels: int = 4,
        center_input_sample: bool = False,
        flip_sin_to_cos: bool = True,
        freq_shift: int = 0,
        down_block_types: Tuple[str] = (
            "CrossAttnDownBlock2D",  # -> VideoLDMDownBlock
            "CrossAttnDownBlock2D",  # -> VideoLDMDownBlock
            "CrossAttnDownBlock2D",  # -> VideoLDMDownBlock
            "DownBlock2D",
        ),
        mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
        up_block_types: Tuple[str] = (
            "UpBlock2D",
            "CrossAttnUpBlock2D",  # -> VideoLDMUpBlock
            "CrossAttnUpBlock2D",  # -> VideoLDMUpBlock
            "CrossAttnUpBlock2D",  # -> VideoLDMUpBlock
        ),
        only_cross_attention: Union[bool, Tuple[bool]] = False,
        block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
        layers_per_block: Union[int, Tuple[int]] = 2,
        downsample_padding: int = 1,
        mid_block_scale_factor: float = 1,
        act_fn: str = "silu",
        norm_num_groups: Optional[int] = 32,
        norm_eps: float = 1e-5,
        cross_attention_dim: Union[int, Tuple[int]] = 1280,
        encoder_hid_dim: Optional[int] = None,
        attention_head_dim: Union[int, Tuple[int]] = 8,
        dual_cross_attention: bool = False,
        use_linear_projection: bool = False,
        class_embed_type: Optional[str] = None,
        addition_embed_type: Optional[str] = None,
        num_class_embeds: Optional[int] = None,
        upcast_attention: bool = False,
        resnet_time_scale_shift: str = "default",
        resnet_skip_time_act: bool = False,
        resnet_out_scale_factor: int = 1.0,
        time_embedding_type: str = "positional",
        time_embedding_dim: Optional[int] = None,
        time_embedding_act_fn: Optional[str] = None,
        timestep_post_act: Optional[str] = None,
        time_cond_proj_dim: Optional[int] = None,
        conv_in_kernel: int = 3,
        conv_out_kernel: int = 3,
        projection_class_embeddings_input_dim: Optional[int] = None,
        class_embeddings_concat: bool = False,
        mid_block_only_cross_attention: Optional[bool] = None,
        cross_attention_norm: Optional[str] = None,
        addition_embed_type_num_heads=64,
    ):
        super().__init__()

        self.sample_size = sample_size

        # Check inputs
        if len(down_block_types) != len(up_block_types):
            raise ValueError(
                f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
            )

        if len(block_out_channels) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
            )

        if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}."
            )

        # input
        conv_in_padding = (conv_in_kernel - 1) // 2
        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
        )

        # time
        if time_embedding_type == "fourier":
            time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
            if time_embed_dim % 2 != 0:
                raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
            # NOTE(review): GaussianFourierProjection is not imported in this
            # module — this branch raises NameError if taken; confirm the
            # intended import from diffusers.models.embeddings.
            self.time_proj = GaussianFourierProjection(
                time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = time_embed_dim
        elif time_embedding_type == "positional":
            time_embed_dim = time_embedding_dim or block_out_channels[0] * 4

            self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
            timestep_input_dim = block_out_channels[0]
        else:
            raise ValueError(
                f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
            )

        self.time_embedding = TimestepEmbedding(
            timestep_input_dim,
            time_embed_dim,
            act_fn=act_fn,
            post_act_fn=timestep_post_act,
            cond_proj_dim=time_cond_proj_dim,
        )

        if encoder_hid_dim is not None:
            self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
        else:
            self.encoder_hid_proj = None

        # class embedding
        if class_embed_type is None and num_class_embeds is not None:
            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
        elif class_embed_type == "timestep":
            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
        elif class_embed_type == "identity":
            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
        elif class_embed_type == "projection":
            if projection_class_embeddings_input_dim is None:
                raise ValueError(
                    "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
                )
            # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
            # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
            # 2. it projects from an arbitrary input dimension.
            #
            # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
            # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
            # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
            self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
        elif class_embed_type == "simple_projection":
            if projection_class_embeddings_input_dim is None:
                raise ValueError(
                    "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
                )
            self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
        else:
            self.class_embedding = None

        if addition_embed_type == "text":
            if encoder_hid_dim is not None:
                text_time_embedding_from_dim = encoder_hid_dim
            else:
                text_time_embedding_from_dim = cross_attention_dim

            # NOTE(review): TextTimeEmbedding is not imported in this module —
            # this branch raises NameError if taken; confirm the intended
            # import from diffusers.models.embeddings.
            self.add_embedding = TextTimeEmbedding(
                text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
            )
        elif addition_embed_type is not None:
            raise ValueError(f"addition_embed_type: {addition_embed_type} must be None or 'text'.")

        if time_embedding_act_fn is None:
            self.time_embed_act = None
        elif time_embedding_act_fn == "swish":
            # NOTE(review): `F` (torch.nn.functional) is not imported in this
            # module — calling this lambda would raise NameError; confirm.
            self.time_embed_act = lambda x: F.silu(x)
        elif time_embedding_act_fn == "mish":
            self.time_embed_act = nn.Mish()
        elif time_embedding_act_fn == "silu":
            self.time_embed_act = nn.SiLU()
        elif time_embedding_act_fn == "gelu":
            self.time_embed_act = nn.GELU()
        else:
            raise ValueError(f"Unsupported activation function: {time_embedding_act_fn}")

        self.down_blocks = nn.ModuleList([])
        self.up_blocks = nn.ModuleList([])

        if isinstance(only_cross_attention, bool):
            if mid_block_only_cross_attention is None:
                mid_block_only_cross_attention = only_cross_attention

            only_cross_attention = [only_cross_attention] * len(down_block_types)

        if mid_block_only_cross_attention is None:
            mid_block_only_cross_attention = False

        if isinstance(attention_head_dim, int):
            attention_head_dim = (attention_head_dim,) * len(down_block_types)

        if isinstance(cross_attention_dim, int):
            cross_attention_dim = (cross_attention_dim,) * len(down_block_types)

        if isinstance(layers_per_block, int):
            layers_per_block = [layers_per_block] * len(down_block_types)

        if class_embeddings_concat:
            # The time embeddings are concatenated with the class embeddings. The dimension of the
            # time embeddings passed to the down, middle, and up blocks is twice the dimension of the
            # regular time embeddings
            blocks_time_embed_dim = time_embed_dim * 2
        else:
            blocks_time_embed_dim = time_embed_dim

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block[i],
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=blocks_time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim[i],
                attn_num_head_channels=attention_head_dim[i],
                downsample_padding=downsample_padding,
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
                resnet_skip_time_act=resnet_skip_time_act,
                resnet_out_scale_factor=resnet_out_scale_factor,
                cross_attention_norm=cross_attention_norm,
            )
            self.down_blocks.append(down_block)

        # mid
        if mid_block_type == "UNetMidBlock2DCrossAttn":
            self.mid_block = UNetMidBlock2DCrossAttn(
                in_channels=block_out_channels[-1],
                temb_channels=blocks_time_embed_dim,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                output_scale_factor=mid_block_scale_factor,
                resnet_time_scale_shift=resnet_time_scale_shift,
                cross_attention_dim=cross_attention_dim[-1],
                attn_num_head_channels=attention_head_dim[-1],
                resnet_groups=norm_num_groups,
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                upcast_attention=upcast_attention,
            )
        elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn":
            # NOTE(review): UNetMidBlock2DSimpleCrossAttn is not imported in
            # this module — this branch raises NameError if taken; confirm.
            self.mid_block = UNetMidBlock2DSimpleCrossAttn(
                in_channels=block_out_channels[-1],
                temb_channels=blocks_time_embed_dim,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                output_scale_factor=mid_block_scale_factor,
                cross_attention_dim=cross_attention_dim[-1],
                attn_num_head_channels=attention_head_dim[-1],
                resnet_groups=norm_num_groups,
                resnet_time_scale_shift=resnet_time_scale_shift,
                skip_time_act=resnet_skip_time_act,
                only_cross_attention=mid_block_only_cross_attention,
                cross_attention_norm=cross_attention_norm,
            )
        elif mid_block_type is None:
            self.mid_block = None
        else:
            raise ValueError(f"unknown mid_block_type : {mid_block_type}")

        # count how many layers upsample the images
        self.num_upsamplers = 0

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_attention_head_dim = list(reversed(attention_head_dim))
        reversed_layers_per_block = list(reversed(layers_per_block))
        reversed_cross_attention_dim = list(reversed(cross_attention_dim))
        only_cross_attention = list(reversed(only_cross_attention))

        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            is_final_block = i == len(block_out_channels) - 1

            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            # add upsample block for all BUT final layer
            if not is_final_block:
                add_upsample = True
                self.num_upsamplers += 1
            else:
                add_upsample = False

            up_block = get_up_block(
                up_block_type,
                num_layers=reversed_layers_per_block[i] + 1,
                in_channels=input_channel,
                out_channels=output_channel,
                prev_output_channel=prev_output_channel,
                temb_channels=blocks_time_embed_dim,
                add_upsample=add_upsample,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=reversed_cross_attention_dim[i],
                attn_num_head_channels=reversed_attention_head_dim[i],
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
                resnet_skip_time_act=resnet_skip_time_act,
                resnet_out_scale_factor=resnet_out_scale_factor,
                cross_attention_norm=cross_attention_norm,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_num_groups is not None:
            self.conv_norm_out = nn.GroupNorm(
                num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
            )

            if act_fn == "swish":
                # NOTE(review): `F` is not imported here either — see above.
                self.conv_act = lambda x: F.silu(x)
            elif act_fn == "mish":
                self.conv_act = nn.Mish()
            elif act_fn == "silu":
                self.conv_act = nn.SiLU()
            elif act_fn == "gelu":
                self.conv_act = nn.GELU()
            else:
                raise ValueError(f"Unsupported activation function: {act_fn}")
        else:
            self.conv_norm_out = None
            self.conv_act = None

        conv_out_padding = (conv_out_kernel - 1) // 2
        self.conv_out = nn.Conv2d(
            block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
        )
| srpkdyy/VideoLDM | videoldm.py | videoldm.py | py | 16,886 | python | en | code | 76 | github-code | 6 | [
{
"api_name": "diffusers.models.UNet2DConditionModel",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "ty... |
37957130845 | from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaFileUpload
import subprocess
import os
from os.path import join
# Project root; the credential/token paths below are resolved against it.
path = os.getcwd()
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/drive']
def main():
    """Back up the tweet data directory and upload the tarball to Google Drive.

    Returns:
        True when Drive returned a file id for the upload, False otherwise.
    """
    # Back up the tweets. Fix: pass argv as a real list with shell=False; the
    # original used shell=True with the whole command as one string plus a
    # stray '-1' element (which the shell received as $0 and ignored), and it
    # wrote the archive to the CWD while the upload below reads an absolute
    # path — so create it at that absolute path explicitly.
    subprocess.call(['tar', '-czvf', '/usr/local/airflow/tweet.tar.gz',
                     '/usr/local/airflow/data/'])
    creds = None
    # token.pickle stores the user's access and refresh tokens; it is created
    # automatically when the authorization flow completes for the first time.
    print(join(path, 'dags/daglibs/token.pickle'))
    if os.path.exists(join(path, 'dags/daglibs/token.pickle')):
        with open(join(path, 'dags/daglibs/token.pickle'), 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                join(path, 'dags/daglibs/credentials.json'), SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run.
        with open(join(path, 'dags/daglibs/token.pickle'), 'wb') as token:
            pickle.dump(creds, token)

    service = build('drive', 'v3', credentials=creds)

    # Upload the archive through the Drive v3 API.
    file_metadata = {'name': 'tweet.tar.gz'}
    media = MediaFileUpload('/usr/local/airflow/tweet.tar.gz', mimetype='*/*')
    file = service.files().create(body=file_metadata,
                                  media_body=media,
                                  fields='id').execute()
    print("File ID: {}".format(file.get('id')))
    if file.get('id'):
        return True
    return False
if __name__ == '__main__':
    # Allow running the backup/upload directly, outside of Airflow.
    main()
| vjgpt/twitter-pipeline | dags/daglibs/upload.py | upload.py | py | 2,218 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "os.getcwd",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_num... |
4534932686 | #import mxnet.ndarray as nd
from mxnet import nd
from mxnet import autograd
# REF [site] >> https://gluon-crash-course.mxnet.io/ndarray.html
def ndarray_example():
    """Walk through basic NDArray creation, math, indexing and NumPy interop."""
    # Creation: explicit values and a constant-filled array.
    mat = nd.array(((1, 2, 3), (5, 6, 7)))
    filled = nd.full((2, 3), 2.0)
    filled.shape, filled.size, filled.dtype

    # Element-wise and matrix operations.
    ones = nd.ones((2, 3))
    rand = nd.random.uniform(-1, 1, (2, 3))
    ones * rand
    rand.exp()
    nd.dot(ones, rand.T)

    # Reading and writing slices.
    rand[1, 2]
    rand[:, 1:3]
    rand[:, 1:3] = 2
    rand[1:2, 0:2] = 4

    # Round-trip between MXNet NDArray and NumPy.
    as_np = ones.asnumpy()
    nd.array(as_np)
# REF [site] >> https://gluon-crash-course.mxnet.io/autograd.html
def autograd_example():
    """Demonstrate MXNet autograd on a simple function and on dynamic control flow."""
    # Differentiating f(x) = 2*x^2 with respect to parameter x.
    x = nd.array([[1, 2], [3, 4]])
    # attach_grad() allocates gradient storage on x.
    x.attach_grad()
    # To let MXNet store y, so that we can compute gradients later, we need to put the definition inside a autograd.record() scope.
    with autograd.record():
        y = 2 * x * x
    # Invoke back propagation (backprop).
    # When y has more than one entry, y.backward() is equivalent to y.sum().backward().
    y.backward()
    print('x.grad =', x.grad)

    # Using Python control flows: the recorded graph follows the actual
    # branches/loops taken for this particular input.
    def f(a):
        b = a * 2
        while b.norm().asscalar() < 1000:
            b = b * 2
        if b.sum().asscalar() >= 0:
            c = b[0]
        else:
            c = b[1]
        return c

    a = nd.random.uniform(shape=2)
    a.attach_grad()
    with autograd.record():
        c = f(a)
    c.backward()
def main():
    # Run both demo walkthroughs in order.
    ndarray_example()
    autograd_example()
#---------------------------------------------------------------------
if '__main__' == __name__:
    # Script entry point.
    main()
| sangwook236/SWDT | sw_dev/python/rnd/test/machine_learning/mxnet/mxnet_basic.py | mxnet_basic.py | py | 1,497 | python | en | code | 17 | github-code | 6 | [
{
"api_name": "mxnet.nd.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "mxnet.nd",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "mxnet.nd.full",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "mxnet.nd",
"line_number": 8,
... |
15418635020 | # -*- coding: utf-8 -*-
#!/usr/bin/env python3.5
from django.shortcuts import render
from django.http import HttpResponseRedirect
from .forms import RegPeopleForm, RegUserForm
def createuser(request):
if request.method == "POST":
uform = RegUserForm(data=request.POST)
pform = RegPeopleForm(data=request.POST)
if uform.is_valid() and pform.is_valid():
user = uform.save()
people = pform.save(commit=False)
people = user
people.save()
return HttpResponseRedirect('/')
else:
uform=RegUserForm()
pform=RegPeopleForm()
return render(request, 'registration/registration.html', {'uform': uform, 'pform': pform})
| MyriamBel/testwork | Reg/views.py | views.py | py | 725 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "forms.RegUserForm",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "forms.RegPeopleForm",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name":... |
32724214710 | from setuptools import find_packages, setup
VERSION = "0.1"
INSTALL_REQUIRES = [
"alembic==1.9.4",
"apischema==0.15.6",
"asyncio==3.4.3",
"configparser==5.3.0",
"fastapi[all]==0.92.0",
"psycopg2==2.9.1",
"python-binance==1.0.16",
"python-telegram-bot==20.0a2",
"SQLAlchemy==1.4.37",
]
setup(
name="report-calculation",
version=VERSION,
python_requires=">=3.9.0",
packages=find_packages(exclude=["tests"]),
author="Daniel Ducuara",
author_email="daniel14015@gmail.com",
description="Get a report of my porfolio",
include_package_data=True,
entry_points={
"console_scripts": [
"report-calculation = report_calculation.main:main",
# "console = report_calulation.main:console",
]
},
install_requires=INSTALL_REQUIRES,
extras_require={
"dev": [
"alembic==1.9.4",
"bandit==1.7.0",
"mypy==0.931",
"pre-commit==3.1.0",
"pylint==2.7.0",
"black==22.10.0",
"isort==5.10.1",
"beautysh==6.2.1",
"autoflake==1.7.7",
],
"test": [
"pytest==6.2.4",
"pytest-mock==3.6.1",
"pytest-cov==2.12.1",
"pytest-asyncio==0.15.1",
],
},
)
| DanielDucuara2018/report_calculation | setup.py | setup.py | py | 1,329 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "setuptools.setup",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 21,
"usage_type": "call"
}
] |
21097762911 | """
Honk Settings.
"""
import environ
from pathlib import Path
from google.oauth2 import service_account
env = environ.Env(
# set casting, default value
DEBUG=(bool, False),
)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR: Path = Path(__file__).resolve().parent.parent
# reading .env files
environ.Env.read_env(BASE_DIR / '.env')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY: str = env('SECRET_KEY')
DEBUG: bool = env('DEBUG')
ALLOWED_HOSTS: list[str] = ['localhost', '127.0.0.1', 'honk.rafaelmc.net']
CSRF_TRUSTED_ORIGINS: list[str] = ['https://honk.rafaelmc.net']
# Application definition
INSTALLED_APPS: list[str] = [
'circus.apps.CircusConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'rest_framework',
'rest_framework.authtoken',
]
MIDDLEWARE: list[str] = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'honk.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / "templates"],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'honk.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'sqlite_data' / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.2/ref/settings/#auth-password-validators
VALIDATOR_PATH = 'django.contrib.auth.password_validation.'
AUTH_PASSWORD_VALIDATORS = [
{'NAME': VALIDATOR_PATH + 'UserAttributeSimilarityValidator'},
{'NAME': VALIDATOR_PATH + 'MinimumLengthValidator'},
{'NAME': VALIDATOR_PATH + 'CommonPasswordValidator'},
{'NAME': VALIDATOR_PATH + 'NumericPasswordValidator'},
]
# Internationalization
# https://docs.djangoproject.com/en/4.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.2/howto/static-files/
STATIC_URL = 'static/'
STATICFILES_DIRS = [
BASE_DIR / "static",
]
# Default primary key field type
# https://docs.djangoproject.com/en/4.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LOGIN_REDIRECT_URL = "/"
STORAGES = {
"default": {"BACKEND": "storages.backends.gcloud.GoogleCloudStorage"},
"staticfiles": {
"BACKEND": "storages.backends.gcloud.GoogleCloudStorage"
},
}
# GOOGLE_APPLICATION_CREDENTIALS =
GS_BUCKET_NAME = 'honkhonk'
MEDIA_URL = "/media/"
MEDIA_ROOT = BASE_DIR / "media"
GS_CREDENTIALS = service_account.Credentials.from_service_account_file(
f"{BASE_DIR}/gcp-honk-credentials.json"
)
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
}
| rafamoreira/honk | honk-web/honk/settings.py | settings.py | py | 3,860 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "environ.Env",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "environ.Env.read_env",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "environ.Env",
"line... |
26528967131 | import collections
from oneview_redfish_toolkit.api.errors import \
OneViewRedfishException
from oneview_redfish_toolkit.api.errors import \
OneViewRedfishResourceNotFoundException
from oneview_redfish_toolkit.api.redfish_json_validator import \
RedfishJsonValidator
from oneview_redfish_toolkit import config
class RedfishError(RedfishJsonValidator):
"""Creates a Redfish Error Dict
Populates self.redfish with errors. Will not validate as there's no
schema to validate against.
"""
SCHEMA_NAME = None
def __init__(self, code, message):
"""Constructor
Populates self.redfish with error message.
"""
super().__init__(self.SCHEMA_NAME)
self.redfish["error"] = collections.OrderedDict()
# Check if Code is a valid Code Error in the registry
if code not in config.get_registry_dict()["Base"]["Messages"]:
raise OneViewRedfishResourceNotFoundException(
"Registry {} not found.".format(code)
)
self.redfish["error"]["code"] = "Base.1.1." + code
self.redfish["error"]["message"] = message
self.redfish["error"]["@Message.ExtendedInfo"] = list()
def add_extended_info(
self,
message_id,
message_args=[],
related_properties=[]):
"""Adds an item to ExtendedInfo list using values from DMTF registry
Adds an item to ExtendedInfo list using the values for Message,
Severity and Resolution from DMTF Base Registry.
Parameters:
message_id: Id of the message; oneOf the keys in Redfish
Registry Messages
message_args: List of string to replace markers on Redfish
messages. Must have the same length as the number of %
signs found in the registry Message field
related_properties: Properties relates to this e error if
necessary
"""
messages = config.get_registry_dict()["Base"]["Messages"]
# Verify if message_id exists in registry
try:
severity = messages[message_id]["Severity"]
except Exception:
raise OneViewRedfishResourceNotFoundException(
"Message id {} not found.".format(message_id)
)
message = messages[message_id]["Message"]
# Check if numbers of replacements and message_args length match
replaces = message.count('%')
replacements = len(message_args)
if replaces != replacements:
raise OneViewRedfishException(
'Message has {} replacements to be made but {} args '
'where sent'.format(replaces, replacements)
)
# Replacing the marks in the message. A better way to do this
# is welcome.
for i in range(replaces):
message = message.replace('%' + str(i + 1), message_args[i])
# Construct the dict
extended_info = collections.OrderedDict()
extended_info["@odata.type"] = "#Message.v1_0_5.Message"
extended_info["MessageId"] = "Base.1.1." + message_id
extended_info["Message"] = message
extended_info["RelatedProperties"] = related_properties
extended_info["MessageArgs"] = message_args
extended_info["Severity"] = severity
extended_info["Resolution"] = messages[message_id]["Resolution"]
# Append it to the list
self.redfish["error"]["@Message.ExtendedInfo"].append(extended_info)
| HewlettPackard/oneview-redfish-toolkit | oneview_redfish_toolkit/api/redfish_error.py | redfish_error.py | py | 3,585 | python | en | code | 16 | github-code | 6 | [
{
"api_name": "oneview_redfish_toolkit.api.redfish_json_validator.RedfishJsonValidator",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "oneview_redfish_toolkit.config.get_registry_dict"... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.