seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
31077263477 | def fatorial(n, boolean=False):
'''
-> Calcula o fatorial de um número
:param n: numero do fatorial
:param boolean: Verdadeiro para mostrar a conta, falso para mostrar apenas o resultado
:return:
'''
cont = 1
for n in range(n, 0, -1):
cont *= n
if boolean == True:
if n > 1:
print(f'{n} x ', end='')
else:
print(f'{n} = ', end='')
print(cont)
fatorial(5, True) | tainanbogo/Python-Scripts | desafio102.py | desafio102.py | py | 470 | python | pt | code | 0 | github-code | 13 |
28521822550 | import os
import sys
import syslog
import hashlib
import xml.etree.ElementTree as tree
import time
from time import sleep
def calcHashNorm(path_normal_files, update):
    """Append one DB record per path: a full sha256 content hash for regular
    files, or a hash of the os.stat() metadata for directories.

    Args:
        path_normal_files: list of filesystem paths to fingerprint.
        update: list of "path:...;sha256:...;stat:..." record strings;
            mutated in place and also returned.

    Returns:
        The same *update* list, extended with one record per readable path.
    """
    # Iterate the paths directly instead of indexing via range(len(...)).
    for path in path_normal_files:
        # Tiny sleep deliberately throttles the scanner so it does not
        # monopolise I/O on large trees.
        sleep(0.0001)
        try:
            if os.path.isfile(path):
                hashsha256 = hashlib.sha256()
                # Hash in 512 KiB chunks so large files stay memory-bounded.
                with open(path, 'rb') as f:
                    for chunk in iter(lambda: f.read(524288), b""):
                        hashsha256.update(chunk)
                update.append(f"path:{path};sha256:{hashsha256.hexdigest()};stat:")
            elif os.path.isdir(path):
                # Directories have no content to hash; fingerprint metadata.
                meta_hash = hashlib.sha256(str(os.stat(path)).encode('utf-8'))
                update.append(f"path:{path};sha256:;stat:{meta_hash.hexdigest()}")
        except Exception as err:
            # Best-effort scan: log unreadable/vanished paths and continue.
            syslog.syslog(syslog.LOG_CRIT, f"pyfim-[ERROR] {err}")
    return update
def calcHashMeta(path_meta__files, update):
    """Append one DB record per path with BOTH a sha256 content hash and a
    hash of the os.stat() metadata (for directories, metadata only).

    Args:
        path_meta__files: list of filesystem paths to fingerprint.
        update: list of "path:...;sha256:...;stat:..." record strings;
            mutated in place and also returned.

    Returns:
        The same *update* list, extended with one record per readable path.
    """
    for path in path_meta__files:
        # Deliberate throttle, same as calcHashNorm.
        sleep(0.0001)
        try:
            if os.path.isfile(path):
                # Snapshot the metadata BEFORE reading the file, so the
                # read itself (atime update) does not perturb the hash.
                meta_hash = hashlib.sha256(str(os.stat(path)).encode('utf-8'))
                hashsha256 = hashlib.sha256()
                with open(path, 'rb') as f:
                    for chunk in iter(lambda: f.read(524288), b""):
                        hashsha256.update(chunk)
                update.append(f"path:{path};sha256:{hashsha256.hexdigest()};stat:{meta_hash.hexdigest()}")
            elif os.path.isdir(path):
                meta_hash = hashlib.sha256(str(os.stat(path)).encode('utf-8'))
                update.append(f"path:{path};sha256:;stat:{meta_hash.hexdigest()}")
        except Exception as err:
            # Best-effort scan: log and keep going.
            syslog.syslog(syslog.LOG_CRIT, f"pyfim-[ERROR] {err}")
    return update
def removeNewLine(lines):
    """Return a new list with every newline character stripped out of each string."""
    return [line.replace("\n", "") for line in lines]
def getListOfFiles(dirNames):
    """Recursively expand a comma-separated string of paths into a flat list
    of all contained files and directories.

    Directories are themselves included in the result.  Entries are skipped
    when they appear in the module-level ``path_ignore`` string or contain a
    backslash.

    NOTE(review): the ignore check is a substring match against the whole
    comma-joined ignore string, so "/tmp/a" also suppresses "/tmp/ab".
    Preserved as-is because existing databases depend on this behaviour —
    confirm before tightening.

    Args:
        dirNames: comma-separated path string (a single path on recursion).

    Returns:
        list of path strings.
    """
    allFiles = list()
    for directory in dirNames.split(","):
        if not directory:
            continue
        if not os.path.exists(directory):
            syslog.syslog(syslog.LOG_CRIT, f"pyfim-[ERROR] Dir not found:{directory}, Dir deleted or check Config")
            continue
        if os.path.isdir(directory):
            allFiles.append(directory)
        # Bug fix: rebuild the entry list for EVERY path.  Previously a
        # plain-file path appended to the list left over from the last
        # directory iteration, re-processing stale entries.
        if not os.path.isfile(directory):
            listOfFile = os.listdir(directory)
        else:
            listOfFile = [directory]
        for entry in listOfFile:
            fullPath = os.path.join(directory, entry)
            if fullPath in path_ignore or '\\' in fullPath:
                continue
            if os.path.isdir(fullPath):
                # Recurse into sub-directories (single path, no comma).
                allFiles = allFiles + getListOfFiles(fullPath)
            else:
                allFiles.append(fullPath)
    return allFiles
def writeDB(update):
    """Persist the scan records to ./pyfim.db, one record per line."""
    with open('./pyfim.db', 'w') as db_file:
        for record in update:
            db_file.write(f"{record}\n")
def compareAndUpdateDB(update):
    """Diff the freshly scanned records in *update* against ./pyfim.db and
    emit a syslog alert for every added / modified / deleted entry.

    Returns:
        *update* when any difference was found, ``None`` when the stored
        DB is identical to the new scan.
    """
    # Load the previously stored records (newline-stripped).
    dbcompare = list()
    with open("./pyfim.db", "r") as f:
        for line in f:
            dbcompare.append(line.strip("\n"))
    # Fast path: nothing changed at all.
    if dbcompare == update:
        return None
    # Records in the new scan but not in the stored DB: added OR modified.
    sdc = set(dbcompare)
    diffModAdd = [x for x in update if x not in sdc]
    # Records in the stored DB but absent from the new scan: deleted OR modified.
    sdu = set(update)
    diffsDel = [x for x in dbcompare if x not in sdu]
    if not diffsDel and not diffModAdd:
        return None
    if diffModAdd:
        sizediffsmodadd = len(diffModAdd)
        for i in range(sizediffsmodadd):
            entry = diffModAdd[i]
            # Record layout: "path:<p>;sha256:<h>;stat:<m>"
            parts = entry.split(";")
            # "path:<p>;" prefix; a hit in diffsDel means the same path
            # changed (modified) rather than being newly added.
            pathformodcheck = "%s;" % (parts[0])
            # The `sleep(...) is None` clause only throttles the comparison loop.
            if [True for s in diffsDel if pathformodcheck in s and sleep(0.0005) is None]:
                # modified
                # NOTE(review): entrysplit re-splits the *new* entry, so the
                # "old" hashes logged below are identical to the new ones.
                # They should instead be parsed from the matching record in
                # diffsDel — as written, the tag classification below can
                # never take the first two branches for modifications.
                entrysplit = entry.split(";")
                file = parts[0].replace('path:', '')
                oldFileHash = entrysplit[1].replace("sha256:", "")
                newFileHash = parts[1].replace("sha256:", "")
                oldStatHash = entrysplit[2].replace("stat:", "")
                newStatHash = parts[2].replace("stat:", "")
                # Classify: content-only change, dir-metadata change, or both.
                if not oldStatHash and oldFileHash != newFileHash:
                    tag = "[FILE]"
                elif not oldFileHash and oldStatHash != newStatHash:
                    tag = "[DIR]"
                else:
                    tag = "[FILE,META]"
                syslog.syslog(syslog.LOG_CRIT,
                              f"pyfim-{tag} Modified:{file}, Old-File-Hash:{oldFileHash}, New-File-Hash:{newFileHash}, Old-Meta-Hash:{oldStatHash}, New-Meta-Hash:{newStatHash}")
            else:
                # added
                file = parts[0].replace('path:', '')
                newFileHash = parts[1].replace("sha256:", "")
                newStatHash = parts[2].replace("stat:", "")
                if newFileHash and not newStatHash:
                    tag = "[FILE]"
                elif not newFileHash and os.path.isdir(file):
                    tag = "[DIR]"
                else:
                    tag = "[FILE,META]"
                syslog.syslog(syslog.LOG_CRIT,
                              f"pyfim-{tag} Added:{file}, File-Hash:{newFileHash}, Meta-Hash:{newStatHash}")
    if diffsDel:
        sizediffsdel = len(diffsDel)
        for i in range(sizediffsdel):
            entry = diffsDel[i]
            parts = entry.split(";")
            pathfordelcheck = "%s;" % (parts[0])
            # Only report a deletion when the path did NOT reappear in the
            # added/modified set (i.e. it is genuinely gone, not modified).
            if not [s for s in diffModAdd if pathfordelcheck in s and sleep(0.0005) is None]:
                file = parts[0].replace('path:', '')
                FileHash = parts[1].replace("sha256:", "")
                StatHash = parts[2].replace("stat:", "")
                if FileHash and not StatHash:
                    tag = "[FILE]"
                elif not FileHash and StatHash and os.path.isdir(file):
                    tag = "[DIR]"
                else:
                    tag = "[FILE,META]"
                syslog.syslog(syslog.LOG_CRIT,
                              f"pyfim-{tag} Deleted:{file}, File-Hash:{FileHash}, Meta-Hash:{StatHash}")
    return update
# Insert alternative Path here
xmlconfig = "./config.xml"
#
# Run at maximum scheduling priority.
# NOTE(review): a negative nice value requires root privileges — confirm
# the service runs privileged, otherwise os.nice raises PermissionError.
niceVal = -19
os.nice(niceVal)
syslog.syslog(syslog.LOG_WARNING, "pyfim-[START]")
start_time = time.time()
# Bail out early when the config file is missing or empty.
if not os.path.exists(xmlconfig) or os.path.getsize(xmlconfig) < 1:
    syslog.syslog(syslog.LOG_CRIT, f"pyfim-[ERROR,END] config.xml doesn't exist or is empty")
    sys.exit(-1)
dbpath = "./pyfim.db"
dbupdate = list()
# Comma-joined path strings accumulated from config.xml below.
path_ignore = ""
path_norm = ""
path_meta = ""
xmldata = tree.parse(xmlconfig)
xmlroot = xmldata.getroot()
for x in xmlroot:
    try:
        ignore = x.findtext('ignore')
        checkmeta = x.findtext('checkmeta')
        path = x.findtext('path')
        # Each config entry routes its path into exactly one bucket:
        # ignored, metadata-checked, or content-only.
        if ignore == "yes" and path:
            path_ignore = path_ignore + f"{path},"
        elif checkmeta == "yes" and path:
            path_meta = path_meta + f"{path},"
        elif checkmeta == "no" and path:
            path_norm = path_norm + f"{path},"
    except Exception as e:
        syslog.syslog(syslog.LOG_CRIT, f"pyfim-[ERROR] Failure when reading config.xml {e}")
if not path_meta and not path_norm:
    # Nothing configured to scan — exit without touching the DB.
    syslog.syslog(syslog.LOG_WARNING,
                  f"pyfim-[ERROR,END] Skipping Scan or Database because no Paths/Files are configured.")
    sys.exit(1)
else:
    syslog.syslog(syslog.LOG_WARNING, f"pyfim-[CONFIG] Scanning:{path_norm}")
    syslog.syslog(syslog.LOG_WARNING, f"pyfim-[CONFIG] Scanning with Meta:{path_meta}")
    syslog.syslog(syslog.LOG_WARNING, f"pyfim-[CONFIG] Ignoring:{path_ignore}")
    path_meta_files = getListOfFiles(path_meta)
    path_norm_files = getListOfFiles(path_norm)
    # Both helpers extend dbupdate in place with fingerprint records.
    calcHashNorm(path_norm_files, dbupdate)
    calcHashMeta(path_meta_files, dbupdate)
    # Compare against the previous DB when one exists, otherwise create it;
    # either way the DB is rewritten with the fresh scan.
    if os.path.exists("./pyfim.db") and os.path.getsize("./pyfim.db") > 0:
        compareAndUpdateDB(dbupdate)
        syslog.syslog(syslog.LOG_WARNING, "pyfim-[INIT] Update Database")
        writeDB(dbupdate)
    else:
        syslog.syslog(syslog.LOG_WARNING, "pyfim-[INIT] Create Database")
        writeDB(dbupdate)
syslog.syslog(syslog.LOG_WARNING, f"pyfim-[END] Scan took:{round((time.time() - start_time), 5)} Sec")
sys.exit(1)
| wolle604/pyfim | pyfim.py | pyfim.py | py | 8,469 | python | en | code | 1 | github-code | 13 |
8585861525 | from musicXmatch import musicXmatch
# Interactive demo: look up songs by a lyrics fragment and let the user
# pick one by index.
mm = musicXmatch()
lyrics = "Tonight I'm gonna have myself a real good time"
song_list = mm.get_song_list_by_lyrics(lyrics)
print('Select one of the following options: ')
# Each entry is (artist, title); enumerate avoids range(len(...)).
for i, song in enumerate(song_list):
    print(f'[{i:>2}] : {song[1]} by {song[0]}')
option = ''
# Fixed: the original compared `option is ''` — identity comparison with a
# string literal is unreliable (and a SyntaxWarning on modern CPython).
while option == '':
    inp = input('\n>> ')
    if inp:
        try:
            option = int(inp)
        except ValueError:
            # Non-numeric input is invalid rather than a crash.
            option = ''
        else:
            # Reject out-of-range indices, including negatives, which the
            # original silently accepted as Python negative indexing.
            if not 0 <= option < len(song_list):
                option = ''
    if option == '':
        print("Please select a valid option")
print(type(option))
print(option)
| dvilelaf/SpaceRocksBot | original/test.py | test.py | py | 592 | python | en | code | 2 | github-code | 13 |
38950753598 | #!/usr/bin/env python
# coding: utf-8
# # Predicting Student Admissions with Neural Networks
# In this notebook, we predict student admissions to graduate school at UCLA based on three pieces of data:
# - GRE Scores (Test)
# - GPA Scores (Grades)
# - Class rank (1-4)
#
# The dataset originally came from here: http://www.ats.ucla.edu/
#
# ## Loading the data
# To load the data and format it nicely, we will use two very useful packages called Pandas and Numpy. You can read on the documentation here:
# - https://pandas.pydata.org/pandas-docs/stable/
# - https://docs.scipy.org/
# In[1]:
# Importing pandas and numpy
import pandas as pd
import numpy as np
# Reading the csv file into a pandas DataFrame
data = pd.read_csv('student_data.csv')
# Printing out the first 10 rows of our data
data[:10]
# ## Plotting the data
#
# First let's make a plot of our data to see how it looks. In order to have a 2D plot, let's ingore the rank.
# In[2]:
# Importing matplotlib
import matplotlib.pyplot as plt
# Function to help us plot
def plot_points(data):
    """Scatter-plot GRE vs GPA, red for rejected and cyan for admitted rows."""
    features = np.array(data[["gre", "gpa"]])
    labels = np.array(data["admit"])
    # argwhere yields index arrays, hence the p[0][...] indexing below.
    admitted = features[np.argwhere(labels == 1)]
    rejected = features[np.argwhere(labels == 0)]
    plt.scatter([p[0][0] for p in rejected], [p[0][1] for p in rejected],
                s=25, color='red', edgecolor='k')
    plt.scatter([p[0][0] for p in admitted], [p[0][1] for p in admitted],
                s=25, color='cyan', edgecolor='k')
    plt.xlabel('Test (GRE)')
    plt.ylabel('Grades (GPA)')
# Plotting the points
plot_points(data)
plt.show()
# Roughly, it looks like the students with high scores in the grades and test passed, while the ones with low scores didn't, but the data is not as nicely separable as we hoped it would. Maybe it would help to take the rank into account? Let's make 4 plots, each one for each rank.
# In[3]:
# Separating the ranks
data_rank1 = data[data["rank"]==1]
data_rank2 = data[data["rank"]==2]
data_rank3 = data[data["rank"]==3]
data_rank4 = data[data["rank"]==4]
# Plotting the graphs
plot_points(data_rank1)
plt.title("Rank 1")
plt.show()
plot_points(data_rank2)
plt.title("Rank 2")
plt.show()
plot_points(data_rank3)
plt.title("Rank 3")
plt.show()
plot_points(data_rank4)
plt.title("Rank 4")
plt.show()
# This looks more promising, as it seems that the lower the rank, the higher the acceptance rate. Let's use the rank as one of our inputs. In order to do this, we should one-hot encode it.
#
# ## TODO: One-hot encoding the rank
# Use the `get_dummies` function in Pandas in order to one-hot encode the data.
# In[4]:
# TODO: Make dummy variables for rank
one_hot_data = pd.concat([data, pd.get_dummies(data['rank'], prefix='rank')], axis=1)
# TODO: Drop the previous rank column
one_hot_data = one_hot_data.drop('rank', axis=1)
# Print the first 10 rows of our data
one_hot_data[:10]
# ## TODO: Scaling the data
# The next step is to scale the data. We notice that the range for grades is 1.0-4.0, whereas the range for test scores is roughly 200-800, which is much larger. This means our data is skewed, and that makes it hard for a neural network to handle. Let's fit our two features into a range of 0-1, by dividing the grades by 4.0, and the test score by 800.
# In[5]:
# Making a copy of our data
processed_data = one_hot_data[:]
# TODO: Scale the columns
processed_data['gre'] = processed_data['gre']/800
processed_data['gpa'] = processed_data['gpa']/4.0
# Printing the first 10 rows of our procesed data
processed_data[:10]
# ## Splitting the data into Training and Testing
# In order to test our algorithm, we'll split the data into a Training and a Testing set. The size of the testing set will be 10% of the total data.
# In[6]:
sample = np.random.choice(processed_data.index, size=int(len(processed_data)*0.9), replace=False)
train_data, test_data = processed_data.iloc[sample], processed_data.drop(sample)
print("Number of training samples is", len(train_data))
print("Number of testing samples is", len(test_data))
print(train_data[:10])
print(test_data[:10])
# ## Splitting the data into features and targets (labels)
# Now, as a final step before the training, we'll split the data into features (X) and targets (y).
# In[7]:
features = train_data.drop('admit', axis=1)
targets = train_data['admit']
features_test = test_data.drop('admit', axis=1)
targets_test = test_data['admit']
print(features[:10])
print(targets[:10])
# ## Training the 2-layer Neural Network
# The following function trains the 2-layer neural network. First, we'll write some helper functions.
# In[8]:
# Activation (sigmoid) function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_prime(x):
return sigmoid(x) * (1-sigmoid(x))
def error_formula(y, output):
return - y*np.log(output) - (1 - y) * np.log(1-output)
# # TODO: Backpropagate the error
# Now it's your turn to shine. Write the error term. Remember that this is given by the equation $$ (y-\hat{y}) \sigma'(x) $$
# In[9]:
# TODO: Write the error term formula
def error_term_formula(x, y, output):
    """Error term delta = (y - y_hat) * y_hat * (1 - y_hat).

    The original cell defined error_term_formula twice; the first version
    ((y - output) * sigmoid_prime(x)) applied the sigmoid derivative to the
    raw inputs x — a different quantity — and was silently shadowed by this
    one.  Only the effective definition is kept.  *x* is unused but retained
    so the training loop's call signature is unchanged.
    """
    return (y - output) * output * (1 - output)
# In[10]:
# Neural Network hyperparameters
epochs = 1000
learnrate = 0.5
# Training function
# Training function
def train_nn(features, targets, epochs, learnrate):
    """Train a single-layer sigmoid network with batch gradient descent.

    Args:
        features: pandas DataFrame of scaled inputs (uses .values/.shape).
        targets: iterable of 0/1 labels, aligned with *features* rows.
        epochs: number of full passes over the data.
        learnrate: gradient-descent step size.

    Returns:
        The learned weight vector (numpy array of length n_features).
    """
    # Use to same seed to make debugging easier
    np.random.seed(42)

    n_records, n_features = features.shape
    last_loss = None
    # Initialize weights; scale ~ 1/sqrt(n_features) keeps activations sane.
    weights = np.random.normal(scale=1 / n_features**.5, size=n_features)

    for e in range(epochs):
        del_w = np.zeros(weights.shape)
        for x, y in zip(features.values, targets):
            # Loop through all records, x is the input, y is the target
            # Activation of the output unit
            # Notice we multiply the inputs and the weights here
            # rather than storing h as a separate variable
            output = sigmoid(np.dot(x, weights))

            # The error, the target minus the network output
            # NOTE(review): `error` is computed but never used below; only
            # `error_term` feeds the weight update.
            error = error_formula(y, output)

            # The error term
            error_term = error_term_formula(x, y, output)

            # The gradient descent step, the error times the gradient times the inputs
            del_w += error_term * x

        # Update the weights here. The learning rate times the
        # change in weights, divided by the number of records to average
        weights += learnrate * del_w / n_records

        # Printing out the mean square error on the training set
        if e % (epochs / 10) == 0:
            out = sigmoid(np.dot(features, weights))
            loss = np.mean((out - targets) ** 2)
            print("Epoch:", e)
            if last_loss and last_loss < loss:
                print("Train loss: ", loss, "  WARNING - Loss Increasing")
            else:
                print("Train loss: ", loss)
            last_loss = loss
            print("=========")
    print("Finished training!")
    return weights
weights = train_nn(features, targets, epochs, learnrate)
# ## Calculating the Accuracy on the Test Data
# In[11]:
# Calculate accuracy on test data
test_out = sigmoid(np.dot(features_test, weights))
predictions = test_out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
# In[ ]:
| ruhiawasthi/Udacity--Introduction-To-Machine-Learning-With-Tensorflow | Predicting Student Admissions with Neural Networks/StudentAdmissions.py | StudentAdmissions.py | py | 7,445 | python | en | code | 4 | github-code | 13 |
31167833172 | from selenium import webdriver
import time
from selenium.webdriver.support.ui import WebDriverWait
# Smoke test: drive the locally hosted tweet-search app through Firefox.
with webdriver.Firefox() as driver:
    # Explicit-wait helper (created but never used below).
    wait = WebDriverWait(driver, 10)
    # Local dev server of the Flask app under test.
    tweet_fill = "http://127.0.0.1:5000/"
    driver.get(tweet_fill)
    driver.maximize_window()
    # Fixed sleep to let the page render; an explicit wait on the form
    # element would be more robust.
    time.sleep(15)
    # NOTE(review): the find_element_by_* helpers were removed in Selenium 4;
    # confirm the pinned selenium version still provides them.
    driver.find_element_by_name("query").send_keys("BJP")
    driver.find_element_by_id("search").click()
time.sleep(20) | thisislohith6/tweet | test_selenium.py | test_selenium.py | py | 416 | python | en | code | 0 | github-code | 13 |
43356131713 | import sys
# BOJ "GoodBye2020" problem A: check that consecutive words form a valid
# word chain; print 1 if the whole sequence chains, 0 otherwise.
read = sys.stdin.readline
N = int(read())
palindroms = list((read().rstrip().split()))
for i in range(1,N):
    # Last letter of the previous word vs first letter of the current word.
    if palindroms[i-1][-1] != palindroms[i][0]:
        print(0)
        exit()
print(1) | w00sung/Algorithm | BOJ/GoodBye2020_A.py | GoodBye2020_A.py | py | 276 | python | ko | code | 0 | github-code | 13 |
1249599288 | import logging
from typing import Dict, List
from kedro.io import MemoryDataSet
import numpy as np
import pandas as pd
from google.cloud import bigquery
def train_model(data: pd.DataFrame, parameters: Dict) -> MemoryDataSet:
    """Train a BigQuery ML linear-regression model on the master table.

    NOTE(review): the *data* argument is unused — training runs entirely
    inside BigQuery via CREATE OR REPLACE MODEL.  The return value is a
    one-row 'ready' flag dataset used only to sequence the downstream
    evaluation node.

    Args:
        data: upstream Kedro input (ignored here).
        parameters: Parameters defined in parameters.yml; must contain
            bq_model_name and bq_master_table.

    Returns:
        MemoryDataSet wrapping a {'state': 'ready'} DataFrame.
    """
    client = bigquery.Client()
    # Model/table identifiers are interpolated with .format(); they come
    # from parameters.yml, so that file must stay trusted (no user input).
    query_job = client.query(
        """
        CREATE OR REPLACE MODEL
        `{}`
        OPTIONS(
            model_type='linear_reg',
            input_label_cols=['price'],
            data_split_method='random',
            subsample=0.2)
        AS SELECT
            engines,
            passenger_capacity,
            crew,
            d_check_complete,
            moon_clearance_complete,
            price
        FROM
            `{}`
        """.format(parameters["bq_model_name"], parameters["bq_master_table"])
    )
    # Block until BigQuery finishes training.
    results = query_job.result()
    input_ml_eval_data = pd.DataFrame({'state': 'ready'}, index=[0])
    input_ml_eval = MemoryDataSet(data=input_ml_eval_data)
    return input_ml_eval
def evaluate_model(data: pd.DataFrame, parameters: Dict) -> None:
    """Evaluate the trained BQML model and log its coefficient of determination.

    NOTE(review): the *data* argument is unused — evaluation runs entirely
    inside BigQuery against the configured master table.

    Args:
        data: upstream Kedro input (ignored here; it only sequences this
            node after train_model).
        parameters: Parameters defined in parameters.yml; must contain
            bq_model_name and bq_master_table.
    """
    client = bigquery.Client()
    # Identifiers interpolated via .format() come from parameters.yml only.
    query_job = client.query(
        """
        SELECT r2_score FROM
        ML.EVALUATE(MODEL `{}`, (
            SELECT
                engines,
                passenger_capacity,
                crew,
                d_check_complete,
                moon_clearance_complete,
                price
            FROM
                `{}`)
        )
        """.format(parameters["bq_model_name"], parameters["bq_master_table"])
    )
    results = query_job.result()
    logger = logging.getLogger(__name__)
    # ML.EVALUATE returns a single row here; log its R^2.
    for row in results:
        logger.info("Model has a coefficient R^2 of %.3f.", row.r2_score)
| mahurtado/spaceflights | src/spaceflights/pipelines/data_science/nodes.py | nodes.py | py | 1,963 | python | en | code | 2 | github-code | 13 |
31672495915 | from car import Car
from player import Player
from menu import menu
import time
from colorama import Back
from subprocess import call
import os
def clear():
    """Clear the terminal screen on both POSIX and Windows.

    Fix: 'cls' is a cmd.exe builtin, not an executable, so the Windows
    branch of the original `call(...)` could never succeed without
    shell=True; 'clear' on POSIX remains a direct program invocation.
    """
    call('clear' if os.name == 'posix' else 'cls', shell=(os.name != 'posix'))
#Menu.main_menu()
# name = (input("Enter your Name: "))
# player = Player(name)
# print(f"\nWelcome to RACING WORLD {player.name}!! You have ${player.bank_account} in your account!\n")
#======Races your car against an opponent of your choice
#======Races your car against an opponent of your choice
def race():
    """Run one interactive race: pick an opponent, bet, race for N seconds,
    then settle the bet and apply car wear.

    NOTE(review): this function reads the module globals `opponent`,
    `my_car` and `player`, which in this file are only created inside
    commented-out setup code — calling race() as-is raises NameError.
    Confirm the setup block is meant to be re-enabled.
    """
    menu.race_menu() #=====opponent menu
    seconds= int(input("Enter race duration in seconds: "))
    winnings = int(input("Enter amount to bet:"))
    print("*********Your Opponent is Starting his Engine!!!!***********")
    time.sleep(0.5)
    # Crude countdown: iterating the string "fiv" prints 3 ticks.
    clock = "fiv"
    for i in clock:
        print("*")
        time.sleep(0.5)
    i = 3
    print("*********Race is about to Start!!!***********")
    time.sleep(0.5)
    # Second countdown: 4 ticks of green background (colorama).
    clock = "five"
    for i in clock:
        print(Back.GREEN)
        time.sleep(0.5)
    # One move per car per simulated second.
    i = 0
    while i < seconds:
        opponent.move()
        my_car.move()
        i += 1
    print("*********Race is over!!!***********")
    # Higher final position wins; settle the bet against the player account.
    if my_car.position > opponent.position:
        print(f"\n !!!!!!You Won $${winnings}!!!!!!")
        player.wins(winnings)
    else:
        print(f"\n :(:(:(You lose $${winnings}:(:(:(")
        player.loses(winnings)
    print(f"Account balance {player.bank_account}")
    # Racing always degrades the car a little.
    my_car.wear_tear()
    print(f"Your car health is at {my_car.wear}")
menu.options_menu()
menu.main_menu()
#options_menu()
#print(buy_car_menu())
#buy_mods()
#buy_decals()
| Matt-Robinson-byte/python-text-rpg | game.py | game.py | py | 1,597 | python | en | code | 0 | github-code | 13 |
16993165913 | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 27 11:47:22 2020
@author: William
"""
#%%
#imports
from drlAgents import DDQNAgent
import matplotlib.pyplot as plt
import gym
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
#%%
#Set a seed to compare results
seed = 42
torch.manual_seed(seed)
random.seed(seed)
#%%
#Create a pytorch neural network:
class Net(nn.Module):
    """Small MLP Q-network for CartPole: 4 state inputs -> 2 action values."""

    def __init__(self):
        super(Net, self).__init__()
        hidden = 20
        self.out_features = 2
        # Attribute names (and registration order) are kept identical so
        # checkpoints and external code remain compatible.
        self.in_layer = nn.Linear(4, hidden)
        self.h1 = nn.Linear(hidden, hidden)
        self.bn1 = nn.BatchNorm1d(hidden)
        self.h2 = nn.Linear(hidden, hidden)
        self.bn2 = nn.BatchNorm1d(hidden)
        self.out_layer = nn.Linear(hidden, 2)

    def forward(self, x):
        """Map a batch of states (N, 4) to Q-values (N, 2)."""
        act1 = F.relu(self.in_layer(x))
        act2 = F.relu(self.bn1(self.h1(act1)))
        act3 = F.relu(self.bn2(self.h2(act2)))
        return self.out_layer(act3)
#%%
#create the environment:
env = gym.make('CartPole-v0')
#Set the environment's seed:
env.seed(seed)
env.action_space.seed(seed)
#Create and train the model:
agent = DDQNAgent(env, Net)
agent.train(epochs=100)
#%%
plt.plot(agent.agent.logger.get('ep_reward'))
#%%
agent.play(length=100) | williambankes/drlAgents | scripts/ddqn_agent_test.py | ddqn_agent_test.py | py | 1,237 | python | en | code | 0 | github-code | 13 |
32566213339 | from tkinter import *
from PIL import ImageTk, Image
root = Tk()
root.title('Radio Buttons')
root.iconbitmap('Images/WillRagB.ico')
MODES = [
# "Label", "Value"
# for the examble:
# (Toppings, Value)
("Pepperoni", "Pepperoni"),
("Peppers", "Peppers"),
("Mushroom", "Mushroom"),
("Onion", "Onion"),
]
pizza = StringVar()
pizza.set("Pepperoni")
for toppings, value in MODES:
Radiobutton(root, text=toppings, variable=pizza, value=value, anchor=W).pack()
def clicked(value):
    """Append a Label showing *value* (the selected pizza topping) to the
    main window (module-level `root`)."""
    myLabel = Label(root, text=value)
    myLabel.pack()
# allows us not have to use test.get()
# Can also do test = StringVar, if you wanted a string variable
#test = IntVar()
#test.set(2)
# tkinter variables
#Radiobutton(root, text="Option 1", variable=test, value=1, command=lambda: clicked(test.get())).pack()
#Radiobutton(root, text="Option 2", variable=test, value=2, command=lambda: clicked(test.get())).pack()
#myLabel = Label(root, text=test.get())
#myLabel.pack()
#myButton = Button(root, text="Click", command=lambda: clicked(test.get())).pack()
#myLabel = Label(root, text=pizza.get())
#myLabel.pack()
myButton = Button(root, text="Click", command=lambda: clicked(pizza.get())).pack()
root.mainloop() | WilliamW5/Tkinter | radio.py | radio.py | py | 1,228 | python | en | code | 0 | github-code | 13 |
20191102479 |
import tensorflow as tf
import numpy as np
from vqvae import soft_em
from model import transformer_relative_position
from audio_io import utils as audio_utils
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
import os
"""
https://github.com/huseinzol05/NLP-Models-Tensorflow/blob/master/speech-to-text/wav2vec.ipynb
"""
class ConformerConfig(object):
    """Hyper-parameter container for the Conformer ASR model.

    Follows the BERT-style config pattern: plain attributes, plus
    `from_dict` / `from_json_file` constructors and `to_dict` /
    `to_json_string` serialisers.

    NOTE(review): the list-valued defaults (subsampling_filters,
    subsampling_kernel_sizes, subsampling_strides) are mutable default
    arguments shared across calls — callers must not mutate them in place.
    NOTE(review): `is_bidirectional` / `is_rnn_batch_norm` are stored under
    those names, but the Conformer model below references
    `config.is_rnn_bidirectional` — confirm which spelling is intended.
    """

    def __init__(self,
                 char_vocab_size,
                 pinyin_vocab_size=None,
                 subsampling_filters=[144, 144],
                 subsampling_kernel_sizes=[[3, 3], [3, 3]],
                 subsampling_strides=[[2, 1], [2, 1]],
                 subsampling_dropout=0.1,
                 proj_dropout=0.1,
                 ffm_expansion_factor=4,
                 ffm_dropout=0.1,
                 ffm_hidden_size=256,
                 ffm_fc_factor=0.5,
                 mha_hidden_size=256,
                 mha_num_attention_heads=4,
                 mha_max_relative_position=64,
                 mha_num_buckets=32,
                 mha_bidirectional=True,
                 mha_initializer_range=0.02,
                 mha_relative_position_type="relative_t5",
                 mha_relative_position_embedding_type="sinusoidal_trainable",
                 mha_num_hidden_layers=4,
                 mha_attention_probs_dropout_prob=0.1,
                 mha_hidden_dropout_prob=0.1,
                 mha_use_relative_position=True,
                 cnn_kernel_sizes=32,
                 cnn_strides=1,
                 cnn_depth_multiplier=1,
                 cnn_dropout_prob=0.1,
                 is_cnn_batch_norm=True,
                 is_cnn_padding=True,
                 fc_layers=1,
                 fc_hidden_size=1,
                 fc_dropout_rate=0.1,
                 rnn_hidden_size=512,
                 rnn_layers=1,
                 is_bidirectional=True,
                 is_rnn_batch_norm=False,
                 bottleneck_size=384,
                 bottleneck_dims=256,
                 vqvae_beta=0.25,
                 vqvae_gamma=0.1,
                 time_major=False,
                 output_mode="char"):
        # --- output vocabulary: chars vs pinyin tokens ---
        self.char_vocab_size = char_vocab_size
        self.pinyin_vocab_size = pinyin_vocab_size
        self.output_mode = output_mode
        # vocab_size is derived from whichever output mode is selected.
        if output_mode == "char":
            self.vocab_size = self.char_vocab_size
            tf.logging.info(output_mode)
            tf.logging.info(self.vocab_size)
        elif output_mode == "pinyin":
            self.vocab_size = self.pinyin_vocab_size
            tf.logging.info(output_mode)
            tf.logging.info(self.vocab_size)
        # --- convolutional subsampling front-end ---
        self.subsampling_filters = subsampling_filters
        self.subsampling_kernel_sizes = subsampling_kernel_sizes
        self.subsampling_strides = subsampling_strides
        self.subsampling_dropout = subsampling_dropout
        self.proj_dropout = proj_dropout
        # --- feed-forward (macaron) module ---
        self.ffm_expansion_factor = ffm_expansion_factor
        self.ffm_dropout = ffm_dropout
        self.ffm_hidden_size = ffm_hidden_size
        self.ffm_fc_factor = ffm_fc_factor
        # --- multi-head self-attention module ---
        self.mha_hidden_size = mha_hidden_size
        self.mha_num_attention_heads = mha_num_attention_heads
        self.mha_max_relative_position = mha_max_relative_position
        self.mha_num_buckets = mha_num_buckets
        self.mha_initializer_range = mha_initializer_range
        self.mha_bidirectional = mha_bidirectional
        self.mha_relative_position_type = mha_relative_position_type
        self.mha_relative_position_embedding_type = mha_relative_position_embedding_type
        self.mha_num_hidden_layers = mha_num_hidden_layers
        self.mha_attention_probs_dropout_prob = mha_attention_probs_dropout_prob
        self.mha_hidden_dropout_prob = mha_hidden_dropout_prob
        self.mha_use_relative_position = mha_use_relative_position
        # --- depthwise convolution module ---
        self.cnn_kernel_sizes = cnn_kernel_sizes
        self.cnn_strides = cnn_strides
        self.cnn_depth_multiplier = cnn_depth_multiplier
        self.cnn_dropout_prob = cnn_dropout_prob
        self.is_cnn_batch_norm = is_cnn_batch_norm
        self.is_cnn_padding = is_cnn_padding
        # --- fully-connected / RNN decoder heads ---
        self.fc_layers = fc_layers
        self.fc_hidden_size = fc_hidden_size
        self.fc_dropout_rate = fc_dropout_rate
        self.rnn_hidden_size = rnn_hidden_size
        self.rnn_layers = rnn_layers
        self.is_bidirectional = is_bidirectional
        self.is_rnn_batch_norm = is_rnn_batch_norm
        # --- VQ-VAE discrete bottleneck (pre-training) ---
        self.vqvae_beta = vqvae_beta
        self.vqvae_gamma = vqvae_gamma
        self.bottleneck_size = bottleneck_size
        self.bottleneck_dims = bottleneck_dims
        self.time_major = time_major
        # Total time-axis downsampling = product of the per-layer time strides.
        self.reduction_factor = 1
        for s in self.subsampling_strides:
            self.reduction_factor *= s[0]
        tf.logging.info("*** reduction_factor ***")
        tf.logging.info(self.reduction_factor)

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `ConformerConfig` from a Python dictionary of parameters."""
        # Start from an empty config, then overwrite every provided key.
        config = ConformerConfig(char_vocab_size=None)
        for (key, value) in six.iteritems(json_object):
            config.__dict__[key] = value
            print(key, value, '===model parameters===')
        # Re-derive vocab_size, since __init__ ran before the dict was applied.
        if config.__dict__['output_mode'] == 'char':
            config.__dict__['vocab_size'] = config.__dict__['char_vocab_size']
            tf.logging.info("** output_mode is char **")
        elif config.__dict__['output_mode'] == "pinyin":
            config.__dict__['vocab_size'] = config.__dict__['pinyin_vocab_size']
            tf.logging.info("** output_mode is pinyin **")
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `ConformerConfig` from a json file of parameters."""
        with tf.gfile.GFile(json_file, "r") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class Conformer(object):
def __init__(self,
config,
sequences,
input_length,
is_training=False,
is_pretraining=False,
time_feature_mask=None,
freq_feature_mask=None,
target_feature_mode='linear',
is_global_bn=False,
decoder_type="fc"):
config = copy.deepcopy(config)
self.config = copy.deepcopy(config)
for key in config.__dict__:
print(key, "==config==", config.__dict__[key])
if not is_training:
config.mha_hidden_dropout_prob = 0.0
config.mha_attention_probs_dropout_prob = 0.0
config.subsampling_dropout = 0.0
config.proj_dropout = 0.0
config.ffm_dropout = 0.0
config.cnn_dropout_prob = 0.0
config.fc_dropout_rate = 0.0
initializer = tf.truncated_normal_initializer(stddev=0.046875, dtype=tf.float32)
with tf.variable_scope('conformer', reuse=tf.AUTO_REUSE):
# since feature extraction will add extra dims on input:
# [batch_size, time, n_dims]--->[batch_size, time, n_dims, 1]
# so, no need for dimension expandims
sequences_shape = get_shape_list(sequences, expected_rank=[3,4])
if len(sequences_shape) == 4:
tf.logging.info("*** specturm input ***")
tf.logging.info(sequences)
# perform raw audio input
with tf.variable_scope('conv_downsampling'):
[self.conv_subsampling,
self.reduction_factor] = conv2d_block(sequences,
filters=config.subsampling_filters,
kernel_sizes=config.subsampling_kernel_sizes,
strides=config.subsampling_strides,
dropout_rate=config.subsampling_dropout,
is_training=is_training,
is_batch_norm=config.is_cnn_batch_norm,
is_padding=config.is_cnn_padding,
is_global_bn=is_global_bn)
conv_subsampling_shape = get_shape_list(self.conv_subsampling, expected_rank=[4])
tf.logging.info("*** conv down-sampling before merge last two dimensions ***")
tf.logging.info(self.conv_subsampling)
tf.logging.info(conv_subsampling_shape)
self.conv_subsampling = tf.reshape(self.conv_subsampling,
shape=[conv_subsampling_shape[0], -1,
conv_subsampling_shape[2] * conv_subsampling_shape[3]])
tf.logging.info("*** conv down-sampling after merge last two dimensions***")
tf.logging.info(self.conv_subsampling)
elif len(sequences_shape) == 3:
tf.logging.info("*** audio signal input ***")
with tf.variable_scope('conv_downsampling'):
[self.conv_subsampling,
self.reduction_factor] = conv1d_block(sequences,
filters=config.subsampling_filters,
kernel_sizes=config.subsampling_kernel_sizes,
strides=config.subsampling_strides,
dropout_rate=config.subsampling_dropout,
is_training=is_training,
is_batch_norm=False,
is_padding=True)
tf.logging.info("*** conv down-sampling ***")
tf.logging.info(self.conv_subsampling)
conv_subsampling_shape = get_shape_list(self.conv_subsampling, expected_rank=[3])
assert len(conv_subsampling_shape) == 3
self.unmasked_conv_subsampling = tf.identity(self.conv_subsampling)
if is_pretraining:
if target_feature_mode == 'soft-em':
# apply softem
tf.logging.info("*** apply soft em ***")
with tf.variable_scope('discrete_bottleneck'):
[self.code_book,
self.code_discrete,
self.code_dense,
self.code_loss_dict] = soft_em.discrete_bottleneck(
self.conv_subsampling,
config.bottleneck_size,
config.bottleneck_dims,
beta=config.vqvae_beta,
gamma=config.vqvae_gamma,
is_training=is_training)
elif target_feature_mode == 'linear':
tf.logging.info("*** apply linear proj ***")
with tf.variable_scope('pretrain_linear_proj'):
self.code_dense = tf.layers.dense(
self.conv_subsampling,
units=config.ffm_hidden_size
)
self.code_dense = layer_norm(self.code_dense)
self.code_loss_dict = {}
self.code_discrete = tf.identity(self.code_dense)
self.code_book = tf.identity(self.code_dense)
with tf.variable_scope('linear_proj'):
if is_pretraining:
tf.logging.info("*** apply mask before linear_proj ***")
if time_feature_mask is not None:
tf.logging.info("*** apply time mask before linear_proj ***")
tf.logging.info(time_feature_mask)
time_feature_mask = tf.cast(time_feature_mask, dtype=tf.float32)
# [B, T, 1]
self.conv_subsampling *= tf.expand_dims(time_feature_mask, axis=-1)
if freq_feature_mask is not None:
tf.logging.info("***@* apply freq mask before linear_proj ***")
tf.logging.info(freq_feature_mask)
freq_feature_mask = tf.cast(freq_feature_mask, dtype=tf.float32)
self.conv_subsampling *= freq_feature_mask
self.linear_proj = tf.layers.dense(
self.conv_subsampling,
units=config.ffm_hidden_size
)
self.linear_proj = layer_norm(self.linear_proj)
self.linear_proj = tf.nn.dropout(self.linear_proj,
keep_prob=1-config.proj_dropout)
tf.logging.info("**** linear_proj ****")
tf.logging.info(self.linear_proj)
if input_length is not None:
tf.logging.info("*** generate attention mask ***")
reduced_length = audio_utils.get_reduced_length(input_length, self.reduction_factor)
tf.logging.info("*** reduced_length ***")
tf.logging.info(reduced_length)
sequence_mask = tf.sequence_mask(reduced_length, conv_subsampling_shape[1])
sequence_mask = tf.cast(sequence_mask, dtype=tf.float32)
tf.logging.info("*** sequence_mask ***")
tf.logging.info(sequence_mask)
if time_feature_mask is not None:
sequence_mask *= time_feature_mask
self.attention_mask = transformer_relative_position.create_attention_mask_from_input_mask(
sequence_mask,
sequence_mask)
else:
self.attention_mask = None
tf.logging.info("*** attention_mask ***")
tf.logging.info(self.attention_mask)
with tf.variable_scope('encoder'):
tf.logging.info("*** mha encoder ***")
mha_attention_head_size = config.mha_hidden_size // config.mha_num_attention_heads
tf.logging.info("*** mha_attention_head_size ***")
tf.logging.info(mha_attention_head_size)
[self.relative_position_embeddings,
self.relative_position_table] = transformer_relative_position._generate_relative_positions_embeddings(
conv_subsampling_shape[1],
depth=config.mha_num_attention_heads,
max_relative_position=config.mha_max_relative_position,
name="relative_positions_bias",
num_buckets=config.mha_num_buckets,
initializer_range=config.mha_initializer_range,
cache=False,
bidirectional=config.mha_bidirectional,
relative_position_type=config.mha_relative_position_type,
relative_position_embedding_type=config.mha_relative_position_embedding_type)
tf.logging.info("****** relative_position_embeddings ***")
tf.logging.info(self.relative_position_embeddings)
pre_output = self.linear_proj
self.conformer_block = conformer(pre_output,
ffm_hidden_size=config.ffm_hidden_size,
ffm_dropout_rate=config.ffm_dropout,
ffm_fc_factor=config.ffm_fc_factor,
ffm_expansion_factor=config.ffm_expansion_factor,
mha_relative_position_embeddings=self.relative_position_embeddings,
mha_num_attention_heads=config.mha_num_attention_heads,
mha_attention_head_size=mha_attention_head_size,
mha_attention_probs_dropout_prob=config.mha_attention_probs_dropout_prob,
mha_hidden_dropout_prob=config.mha_hidden_dropout_prob,
mha_initializer_range=config.mha_initializer_range,
mha_use_relative_position=config.mha_use_relative_position,
mha_num_hidden_layers=config.mha_num_hidden_layers,
mha_attention_mask=self.attention_mask,
conv_strides=config.cnn_strides,
conv_depth_multiplier=config.cnn_depth_multiplier,
conv_dropout_prob=config.cnn_dropout_prob,
relative_position_type=config.mha_relative_position_type,
is_training=is_training,
is_global_bn=is_global_bn)
tf.logging.info("*** conformer_block ***")
tf.logging.info(self.conformer_block)
if not is_pretraining:
with tf.variable_scope('fc_module'):
self.fc_output = fc_block(self.conformer_block[-1],
fc_layers=config.fc_layers,
hidden_size=config.fc_hidden_size,
dropout_rate=config.fc_dropout_rate,
is_training=is_training)
self.fc_output = layer_norm(self.fc_output)
with tf.variable_scope('decoder'):
if decoder_type == 'fc':
tf.logging.info("**** fc_output ****")
self.decoder_output = tf.identity(self.fc_output)
tf.logging.info(self.decoder_output)
elif decoder_type == 'rnn':
rnn_cell = tf.nn.rnn_cell.GRUCell
with tf.variable_scope("rnn"):
self.decoder_output = rnn_block(self.fc_output,
rnn_cell=rnn_cell,
rnn_hidden_size=config.rnn_hidden_size,
rnn_layers=config.rnn_layers,
is_batch_norm=False if config.rnn_layers ==1 else config.is_rnn_batch_norm,
is_bidirectional=config.is_rnn_bidirectional,
is_training=is_training,
time_major=config.time_major,
sequence_length=reduced_length)
self.decoder_output = layer_norm(self.decoder_output)
tf.logging.info("**** rnn_output ****")
tf.logging.info(self.decoder_output)
with tf.variable_scope('cls/predictions'):
self.logits = tf.layers.dense(self.decoder_output,
config.vocab_size,
kernel_initializer=initializer)
tf.logging.info("*** logits ***")
tf.logging.info(self.logits)
def get_unmasked_linear_proj(self):
with tf.variable_scope('conformer', reuse=tf.AUTO_REUSE):
with tf.variable_scope('linear_proj'):
linear_proj = tf.layers.dense(
self.unmasked_conv_subsampling,
units=self.config.ffm_hidden_size
)
linear_proj = layer_norm(linear_proj)
return linear_proj
def get_linear_proj_encoder(self,
linear_proj,
input_length,
is_training,
time_feature_mask):
conv_subsampling_shape = get_shape_list(linear_proj, expected_rank=[3])
assert len(conv_subsampling_shape) == 3
with tf.variable_scope('conformer', reuse=tf.AUTO_REUSE):
if input_length is not None:
tf.logging.info("*** generate attention mask ***")
reduced_length = audio_utils.get_reduced_length(input_length, self.reduction_factor)
sequence_mask = tf.sequence_mask(reduced_length, conv_subsampling_shape[1])
tf.logging.info("*** sequence_mask ***")
tf.logging.info(sequence_mask)
if time_feature_mask is not None:
sequence_mask *= time_feature_mask
attention_mask = transformer_relative_position.create_attention_mask_from_input_mask(
sequence_mask,
sequence_mask)
else:
attention_mask = None
with tf.variable_scope('encoder'):
tf.logging.info("*** mha encoder ***")
mha_attention_head_size = self.config.mha_hidden_size // self.config.mha_num_attention_heads
tf.logging.info("*** mha_attention_head_size ***")
tf.logging.info(mha_attention_head_size)
pre_output = linear_proj
conformer_block = conformer(pre_output,
ffm_hidden_size=self.config.ffm_hidden_size,
ffm_dropout_rate=self.config.ffm_dropout,
ffm_fc_factor=self.config.ffm_fc_factor,
ffm_expansion_factor=self.config.ffm_expansion_factor,
mha_relative_position_embeddings=self.relative_position_embeddings,
mha_num_attention_heads=self.config.mha_num_attention_heads,
mha_attention_head_size=mha_attention_head_size,
mha_attention_probs_dropout_prob=self.config.mha_attention_probs_dropout_prob,
mha_hidden_dropout_prob=self.config.mha_hidden_dropout_prob,
mha_initializer_range=self.config.mha_initializer_range,
mha_use_relative_position=self.config.mha_use_relative_position,
mha_num_hidden_layers=self.config.mha_num_hidden_layers,
mha_attention_mask=self.attention_mask,
conv_strides=self.config.cnn_strides,
conv_depth_multiplier=self.config.cnn_depth_multiplier,
conv_dropout_prob=self.config.cnn_dropout_prob,
relative_position_type=self.config.mha_relative_position_type,
is_training=is_training,
is_global_bn=is_global_bn)
return conformer_block[-1]
    def get_conv_downsampling_output(self):
        """Unmasked conv-subsampling features (input to the linear projection)."""
        return self.unmasked_conv_subsampling
    def get_sequence_output(self):
        """Output of the last conformer encoder layer."""
        return self.conformer_block[-1]
    def get_conv_reduction_factor(self):
        """Time-axis downsampling factor of the convolutional front end."""
        return self.reduction_factor
    def get_logits(self):
        """Vocabulary-sized output logits from the 'cls/predictions' scope."""
        return self.logits
    def get_fc_output(self):
        """Output of the fc_module (built only when not pretraining)."""
        return self.fc_output
    # NOTE(review): the self.code_* attributes below are presumably created in
    # the pretraining branch of the constructor (outside this view); calling a
    # getter with is_pretraining=True otherwise would raise AttributeError.
    def get_code_book(self, is_pretraining):
        """Quantizer code book Tensor (pretraining only; None otherwise)."""
        if is_pretraining:
            tf.logging.info("** return code_book **")
            tf.logging.info(self.code_book)
            return self.code_book
        else:
            return None
    def get_code_discrete(self, is_pretraining):
        """Discrete code assignments (pretraining only; None otherwise)."""
        if is_pretraining:
            tf.logging.info("** return code_discrete **")
            tf.logging.info(self.code_discrete)
            return self.code_discrete
        else:
            return None
    def get_code_dense(self, is_pretraining):
        """Dense (quantized) code vectors (pretraining only; None otherwise)."""
        if is_pretraining:
            tf.logging.info("** return code_dense **")
            tf.logging.info(self.code_dense)
            return self.code_dense
        else:
            return None
    def get_code_loss(self, is_pretraining):
        """Dict of quantization losses (pretraining only; None otherwise)."""
        if is_pretraining:
            tf.logging.info("** return code_loss_dict **")
            tf.logging.info(self.code_loss_dict)
            return self.code_loss_dict
        else:
            return None
def rnn_layer(inputs, rnn_cell,
              rnn_hidden_size,
              is_batch_norm,
              is_bidirectional,
              sequence_length=None,
              is_training=False,
              time_major=False):
    """Single (optionally bidirectional) dynamic RNN layer.

    Args:
      inputs: [batch, time, depth] float Tensor.
      rnn_cell: cell constructor, e.g. tf.nn.rnn_cell.GRUCell.
      rnn_hidden_size: units per direction.
      is_batch_norm: apply sequecnce_batch_norm to the outputs.
      is_bidirectional: run forward and backward cells and concat outputs.
      sequence_length: optional per-example lengths.
      is_training: unused here; kept for interface parity with callers.
      time_major: forwarded to sequecnce_batch_norm only.

    Returns:
      [batch, time, hidden] outputs (hidden doubled when bidirectional).
    """
    fw_cell = rnn_cell(num_units=rnn_hidden_size,
                       name="forward")
    bw_cell = rnn_cell(num_units=rnn_hidden_size,
                       name="backward")
    if is_bidirectional:
        outputs, _ = tf.nn.bidirectional_dynamic_rnn(
            cell_fw=fw_cell, cell_bw=bw_cell, inputs=inputs, dtype=tf.float32,
            swap_memory=False,
            sequence_length=sequence_length)
        rnn_outputs = tf.concat(outputs, -1)
    else:
        # Bug fix: tf.nn.dynamic_rnn returns an (outputs, state) pair; the
        # original assigned the whole tuple to rnn_outputs instead of
        # unpacking it, which breaks every downstream consumer.
        rnn_outputs, _ = tf.nn.dynamic_rnn(
            fw_cell, inputs, dtype=tf.float32, swap_memory=False,
            sequence_length=sequence_length)
    if is_batch_norm:
        rnn_outputs = sequecnce_batch_norm(rnn_outputs, time_major=time_major)
    return rnn_outputs
def rnn_block(inputs,
              rnn_cell,
              rnn_hidden_size,
              rnn_layers,
              is_batch_norm,
              is_bidirectional,
              sequence_length=None,
              is_training=False,
              time_major=False):
    """Stack `rnn_layers` rnn_layer instances, one variable scope per layer."""
    hidden = inputs
    for idx in range(rnn_layers):
        with tf.variable_scope("layer_%d" % idx):
            hidden = rnn_layer(hidden,
                               rnn_cell=rnn_cell,
                               rnn_hidden_size=rnn_hidden_size,
                               is_batch_norm=is_batch_norm,
                               is_bidirectional=is_bidirectional,
                               is_training=is_training,
                               time_major=time_major,
                               sequence_length=sequence_length)
    return hidden
def sequecnce_batch_norm(inputs,
                         time_major=False,
                         variance_epsilon=1e-8):
    """Batch norm over (batch, time) with a correction for zero padding.

    Mean/variance are computed over all (padded) frames, then rescaled by
    (padded_frames * batch_size) / non_zero_count so the statistics
    approximate those of the non-padded frames only. Padded frames are
    assumed to be all-zero.

    Args:
      inputs: [batch, time, depth] float Tensor ([time, batch, depth] when
        time_major).
      time_major: whether the leading axis is time instead of batch.
      variance_epsilon: small constant added to the variance. Bug fix: this
        parameter was previously accepted but ignored (1e-8 was hard-coded);
        the default is changed from 1e-5 to 1e-8 so default behavior is
        unchanged while explicit values now take effect.
    """
    input_shape = get_shape_list(inputs, expected_rank=[3])
    beta = tf.get_variable(shape=[input_shape[-1]],
                           name='beta', initializer=tf.zeros_initializer(),
                           regularizer=None, constraint=None, trainable=True)
    gamma = tf.get_variable(shape=[input_shape[-1]],
                            name='gamma', initializer=tf.ones_initializer(),
                            regularizer=None, constraint=None, trainable=True)
    mean, variance = tf.nn.moments(inputs, axes=[0, 1], keep_dims=False)
    if time_major:
        total_padded_frames = tf.cast(input_shape[0], mean.dtype)
        batch_size = tf.cast(input_shape[1], mean.dtype)
    else:
        total_padded_frames = tf.cast(input_shape[1], mean.dtype)
        batch_size = tf.cast(input_shape[0], mean.dtype)
    # Per-feature count of non-zero entries across the batch and time axes.
    total_unpadded_frames_batch = tf.count_nonzero(
        inputs, axis=[0, 1], keepdims=False,
        dtype=mean.dtype
    )
    mean = (mean * total_padded_frames * batch_size) / total_unpadded_frames_batch
    variance = (variance * total_padded_frames * batch_size) / total_unpadded_frames_batch
    return tf.nn.batch_normalization(
        inputs, mean=mean, variance=variance,
        offset=beta, scale=gamma,
        variance_epsilon=variance_epsilon
    )
def conformer(inputs,
              ffm_hidden_size,
              ffm_dropout_rate,
              ffm_fc_factor,
              ffm_expansion_factor,
              mha_relative_position_embeddings,
              mha_num_attention_heads,
              mha_attention_head_size,
              mha_attention_probs_dropout_prob=0.1,
              mha_hidden_dropout_prob=0.1,
              mha_initializer_range=0.02,
              mha_use_relative_position=True,
              mha_num_hidden_layers=12,
              mha_attention_mask=None,
              conv_kernel_sizes=31,
              conv_strides=1,
              conv_dropout_prob=0.1,
              conv_depth_multiplier=1,
              relative_position_type="relative_normal",
              is_training=False,
              is_global_bn=False):
    """Stack of Conformer encoder layers.

    Each layer applies, in order: a feed-forward module, relative-position
    multi-head self-attention, a depthwise convolution module, and a second
    feed-forward module, with layer norm after each residual sum.

    Args:
      inputs: [batch, seq_length, hidden] float Tensor.
      mha_attention_mask: optional [batch, seq, seq] mask for self-attention.
      is_training: enables dropout inside the sub-modules.

    Returns:
      A list with every layer's output Tensor; callers typically take [-1].
    """
    input_shape = get_shape_list(inputs, expected_rank=[3])
    batch_size = input_shape[0]
    seq_length = input_shape[1]
    pre_output = inputs
    conformer_block = []
    tf.logging.info("*** pre_output ***")
    tf.logging.info(pre_output)
    for layer_idx in range(mha_num_hidden_layers):
        with tf.variable_scope("layer_%d" % layer_idx):
            # First (input-side) feed-forward module with scaled residual.
            with tf.variable_scope("residual_ffm_input"):
                ffm_outputs = residual_ffm_block(pre_output,
                                                 hidden_size=ffm_hidden_size,
                                                 dropout_rate=ffm_dropout_rate,
                                                 fc_factor=ffm_fc_factor,
                                                 expansion_factor=ffm_expansion_factor,
                                                 is_training=is_training)
                ffm_outputs = layer_norm(ffm_outputs)
                tf.logging.info("*** residual_ffm_input ***")
                tf.logging.info(ffm_outputs)
            # Multi-head self-attention with relative position embeddings.
            with tf.variable_scope("attention"):
                with tf.variable_scope("self"):
                    [attention_head, attention_scores] = transformer_relative_position.attention_layer(
                        from_tensor=ffm_outputs,
                        to_tensor=ffm_outputs,
                        attention_mask=mha_attention_mask,
                        num_attention_heads=mha_num_attention_heads,
                        size_per_head=mha_attention_head_size,
                        attention_probs_dropout_prob=mha_attention_probs_dropout_prob,
                        initializer_range=mha_initializer_range,
                        do_return_2d_tensor=False,
                        batch_size=batch_size,
                        from_seq_length=seq_length,
                        to_seq_length=seq_length,
                        use_relative_position=mha_use_relative_position,
                        dropout_name=tf.get_variable_scope().name,
                        relative_position_type=relative_position_type,
                        relative_position_embeddings=mha_relative_position_embeddings)
                    tf.logging.info("*** attention_head ***")
                    tf.logging.info(attention_head)
                attention_head = tf.nn.dropout(attention_head, keep_prob=1-mha_hidden_dropout_prob)
                # Residual around the attention module.
                attention_output = layer_norm(attention_head + ffm_outputs)
            # Depthwise convolution module.
            with tf.variable_scope("conformer_conv"):
                conv_output = conformer_conv(attention_output,
                                             kernel_size=conv_kernel_sizes,
                                             strides=conv_strides,
                                             depth_multiplier=conv_depth_multiplier,
                                             dropout_rate=conv_dropout_prob,
                                             is_training=is_training,
                                             is_global_bn=is_global_bn)
                tf.logging.info("****** conformer_conv ***")
                tf.logging.info(conv_output)
            # Residual around the conv module.
            conv_attention_output = layer_norm(conv_output + attention_output)
            # Second (output-side) feed-forward module.
            with tf.variable_scope("residual_ffm_output"):
                layer_output = residual_ffm_block(conv_attention_output,
                                                  hidden_size=ffm_hidden_size,
                                                  dropout_rate=ffm_dropout_rate,
                                                  fc_factor=ffm_fc_factor,
                                                  expansion_factor=ffm_expansion_factor,
                                                  is_training=is_training)
                tf.logging.info("*** residual_ffm_output ***")
                tf.logging.info(layer_output)
                layer_output = layer_norm(layer_output)
            conformer_block.append(layer_output)
            pre_output = layer_output
    return conformer_block
def fc_layer(inputs, hidden_size,
             dropout_rate,
             is_training=False):
    """Dense projection followed by swish activation and dropout."""
    projected = tf.layers.dense(inputs, units=hidden_size)
    activated = tf.nn.swish(projected)
    return tf.nn.dropout(activated, keep_prob=1 - dropout_rate)
def fc_block(inputs,
             fc_layers,
             hidden_size,
             dropout_rate,
             is_training=False):
    """Apply `fc_layers` stacked fc_layer transforms, one scope per layer."""
    hidden = inputs
    for idx in range(fc_layers):
        with tf.variable_scope("layer_%d" % idx):
            hidden = fc_layer(hidden,
                              hidden_size=hidden_size,
                              dropout_rate=dropout_rate,
                              is_training=is_training)
    return hidden
def glu(inputs, axis=-1):
    """Gated linear unit: split `inputs` in two along `axis`, gate the first
    half by the sigmoid of the second.

    Bug fix: the original accepted `axis` but always split along -1; the
    parameter is now honored (default -1 keeps existing behavior).
    """
    a, b = tf.split(inputs, 2, axis=axis)
    b = tf.nn.sigmoid(b)
    return tf.multiply(a, b)
def conformer_conv(inputs,
                   kernel_size,
                   strides=1,
                   depth_multiplier=1,
                   dropout_rate=0.1,
                   is_training=False,
                   is_global_bn=False):
    """Conformer convolution module.

    Pipeline: pointwise conv (2x channels) -> GLU (halves channels back) ->
    depthwise conv along time -> batch norm -> swish -> pointwise conv back
    to the input width -> dropout. Implemented with 2-D ops over a dummy
    spatial axis of size 1.

    NOTE(review): `strides` is accepted but never used — the depthwise conv
    below is hard-coded to stride (1,1,1,1). Confirm before relying on it.
    """
    # [batch, seq_len, dims]
    input_shape = get_shape_list(inputs, expected_rank=[3])
    input_dim = input_shape[-1]
    # [batch, seq_len, 1, dims]
    outputs = tf.expand_dims(inputs, 2)
    # Pointwise expansion to 2x channels; GLU will gate it back to 1x.
    # [batch, seq_len, 1, filters*2]
    outputs = tf.layers.conv2d(
        inputs=outputs,
        filters=input_dim*2,
        kernel_size=1,
        strides=1,
        padding="valid",
        use_bias=True,
        activation=None,
        kernel_initializer=tf.glorot_normal_initializer())
    tf.logging.info("** outputs conv2d **")
    tf.logging.info(outputs)
    # [batch, seq_len, 1, filters]
    outputs = glu(outputs, axis=-1)
    tf.logging.info("** outputs glu **")
    tf.logging.info(outputs)
    depthwise_filter = tf.get_variable("depthwise_filter",
                                       (kernel_size, 1, input_dim, depth_multiplier),
                                       dtype=tf.float32,
                                       initializer=tf.glorot_normal_initializer())
    # Symmetric zero padding along time so the VALID depthwise conv keeps
    # the sequence length (exact for odd kernel_size).
    paddings = kernel_size // 2
    outputs = tf.pad(
        outputs,
        [[0, 0], [paddings, paddings], [0, 0], [0, 0]])
    tf.logging.info("** outputs padding **")
    tf.logging.info(outputs)
    outputs = tf.nn.depthwise_conv2d(
        outputs, depthwise_filter, (1,1,1,1),
        "VALID"
    )
    outputs = batch_norm(outputs, is_training=is_training,
                         is_global_bn=is_global_bn)
    # outputs = gelu(outputs)
    outputs = tf.nn.swish(outputs)
    # Pointwise projection back to the input width.
    # [batch, seq_len, 1, dims]
    outputs = tf.layers.conv2d(
        inputs=outputs,
        filters=input_dim,
        kernel_size=1,
        strides=1,
        padding="valid",
        use_bias=True,
        activation=None,
        kernel_initializer=tf.glorot_normal_initializer())
    # [batch, seq_len, 1, dims]
    tf.logging.info("** outputs of conv **")
    tf.logging.info(outputs)
    # outputs = tf.squeeze(outputs, axis=2)
    outputs = tf.reshape(outputs, input_shape)
    outputs = tf.nn.dropout(outputs, keep_prob=1-dropout_rate)
    return outputs
def residual_ffm_block(inputs, hidden_size,
                       dropout_rate,
                       fc_factor,
                       expansion_factor,
                       is_training=False):
    """Feed-forward module with a scaled residual connection.

    Expands to expansion_factor * hidden_size, applies swish + dropout,
    projects back to the input width, and adds fc_factor * result to the
    input (macaron-style half-step residual).
    """
    in_shape = get_shape_list(inputs, expected_rank=[3])
    expanded = tf.layers.dense(inputs,
                               units=expansion_factor * hidden_size)
    expanded = tf.nn.swish(expanded)
    expanded = tf.nn.dropout(expanded, keep_prob=1 - dropout_rate)
    restored = tf.layers.dense(expanded, units=in_shape[-1])
    restored = tf.nn.dropout(restored, keep_prob=1 - dropout_rate)
    return inputs + fc_factor * restored
def batch_norm(inputs, is_training,
               batch_norm_decay=0.997,
               batch_norm_eps=1e-5,
               is_global_bn=False):
    """Batch norm, preferring the project's global (cross-replica) variant.

    Falls back to tf.layers.batch_normalization when model.global_bn_utils
    is not importable. Bug fix: the original wrapped both the import and the
    call in a bare `except:`, which silently swallowed *any* failure inside
    global_batch_norm (and even KeyboardInterrupt). Only the import is
    guarded now, and only against ImportError.
    """
    try:
        from model.global_bn_utils import batch_norm as global_batch_norm
    except ImportError:
        return tf.layers.batch_normalization(
            inputs=inputs,
            momentum=batch_norm_decay,
            epsilon=batch_norm_eps,
            fused=True,
            training=is_training)
    return global_batch_norm(inputs=inputs,
                             is_training=is_training,
                             batch_norm_decay=batch_norm_decay,
                             batch_norm_eps=batch_norm_eps,
                             is_global_bn=is_global_bn)
def conv2d_bn_layer(inputs,
                    filters,
                    kernel_size,
                    strides,
                    dropout_rate,
                    is_batch_norm=False,
                    is_training=False,
                    is_padding=True,
                    is_global_bn=False
                    ):
    """2-D conv layer: optional manual padding, conv, optional BN, ReLU6, dropout.

    Args:
      inputs: [batch, time, freq, channels] float Tensor.
      filters: output channel count.
      kernel_size: (time, freq) kernel dims; also drives the manual padding.
      strides: conv strides.
      dropout_rate: dropout probability after the activation.
      is_batch_norm: apply batch_norm after the conv (conv bias disabled then).
      is_padding: pre-pad symmetrically so the VALID conv emulates SAME.
    """
    if is_padding:
        # Manual symmetric padding so the conv itself can use "valid".
        paddings = [k_size//2 for k_size in kernel_size]
        inputs = tf.pad(
            inputs,
            [[0, 0], [paddings[0], paddings[0]], [paddings[1], paddings[1]], [0, 0]])
        tf.logging.info("** apply cnn padding **")
        tf.logging.info(paddings)
    inputs = tf.layers.conv2d(
        inputs=inputs,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding="valid",
        use_bias=not is_batch_norm,  # BN supplies its own shift; skip conv bias.
        activation=None,
        kernel_initializer=tf.glorot_normal_initializer())
    if is_batch_norm:
        inputs = batch_norm(inputs, is_training,
                            is_global_bn=is_global_bn)
    inputs = tf.nn.relu6(inputs)
    inputs = tf.nn.dropout(inputs, keep_prob=1-dropout_rate)
    return inputs
def conv2d_block(inputs,
                 filters=[144, 144],
                 kernel_sizes=[[3, 3], [3, 3]],
                 strides=[[2, 1], [2, 1]],
                 dropout_rate=0.1,
                 is_batch_norm=True,
                 is_training=False,
                 is_padding=True,
                 is_global_bn=False):
    """Stack conv2d_bn_layer blocks; also return the time-axis reduction factor."""
    assert len(kernel_sizes) == len(strides) == len(filters)
    outputs = inputs
    for idx, (n_filters, kernel_size, stride) in enumerate(
            zip(filters, kernel_sizes, strides)):
        with tf.variable_scope("layer_%d" % idx):
            outputs = conv2d_bn_layer(outputs,
                                      filters=n_filters,
                                      kernel_size=kernel_size,
                                      strides=stride,
                                      dropout_rate=dropout_rate,
                                      is_batch_norm=is_batch_norm,
                                      is_training=is_training,
                                      is_padding=is_padding,
                                      is_global_bn=is_global_bn
                                      )
    # Overall downsampling along time = product of each layer's first stride.
    reduction_factor = 1
    for stride in strides:
        reduction_factor *= stride[0]
    return outputs, reduction_factor
def conv1d_bn_layer(inputs,
                    filters,
                    kernel_size,
                    strides,
                    layer_id,
                    dropout_rate,
                    is_batch_norm,
                    is_training=False,
                    is_padding=False
                    ):
    """1-D convolution followed by layer norm, GELU and dropout.

    Args:
      inputs: [batch, time, channels] float Tensor.
      filters: output channel count.
      kernel_size: temporal kernel width.
      strides: temporal stride.
      layer_id: layer index (unused in the body; kept for the interface).
      dropout_rate: dropout probability after the activation.
      is_batch_norm: accepted but unused — layer_norm is always applied.
        NOTE(review): confirm whether batch norm was actually intended.
      is_padding: if True, left-pad so the convolution is causal.
    """
    if is_padding:
        ka = kernel_size // 2
        kb = ka - 1 if kernel_size % 2 == 0 else ka
        # Bug fix: the original referenced the undefined name `x` when
        # building the pad tensor; the intended tensor is `inputs`.
        # NOTE(review): the pad's channel dim uses `filters` (output
        # channels); tf.concat requires it to equal the *input* channel
        # count — confirm callers always satisfy this.
        pad = tf.zeros([tf.shape(inputs)[0], kb + ka, filters])
        inputs = tf.concat([pad, inputs], 1)
    inputs = tf.layers.conv1d(
        inputs=inputs,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding='valid',
        use_bias=False,
        activation=None,
        kernel_initializer=tf.glorot_normal_initializer())
    inputs = layer_norm(inputs)
    inputs = gelu(inputs)
    inputs = tf.nn.dropout(inputs, keep_prob=1-dropout_rate)
    return inputs
def conv1d_block(inputs,
                 filters=[512,512,512,512,512,512,512],
                 kernel_sizes=[10,3,3,3,3,2,2],
                 strides=[5,2,2,2,2,2,2],
                 conv_dropout_rate=0.1,
                 is_batch_norm=True,
                 is_training=False,
                 is_padding=False):
    """Stack of 1-D conv feature-encoder layers (wav2vec-style defaults).

    Returns the encoded features and the overall time reduction factor
    (product of the per-layer strides).
    """
    assert len(kernel_sizes) == len(strides) == len(filters)
    pre_output = inputs
    for layer_idx in range(len(filters)):
        with tf.variable_scope("layer_%d" % layer_idx):
            # Bug fixes: the original indexed the undefined name
            # `kernel_size` instead of this function's `kernel_sizes`, and
            # omitted conv1d_bn_layer's required `layer_id` argument.
            pre_output = conv1d_bn_layer(pre_output,
                                         filters=filters[layer_idx],
                                         kernel_size=kernel_sizes[layer_idx],
                                         strides=strides[layer_idx],
                                         layer_id=layer_idx,
                                         dropout_rate=conv_dropout_rate,
                                         is_training=is_training,
                                         is_padding=is_padding,
                                         is_batch_norm=is_batch_norm)
    reduction_factor = 1
    for stride in strides:
        # Bug fix: strides entries are plain ints here (unlike conv2d_block's
        # [time, freq] pairs), so the original `s[0]` raised TypeError.
        reduction_factor *= stride
    return pre_output, reduction_factor
def gelu(input_tensor):
    """Gaussian Error Linear Unit.

    This is a smoother version of the RELU.
    Original paper: https://arxiv.org/abs/1606.08415

    Args:
      input_tensor: float Tensor to perform activation.
    Returns:
      `input_tensor` with the GELU activation applied.
    """
    scaled = input_tensor / tf.sqrt(2.0)
    gaussian_cdf = 0.5 * (1.0 + tf.math.erf(scaled))
    return input_tensor * gaussian_cdf
def layer_norm(input_tensor, name=None):
    """Run layer normalization on the last dimension of the tensor."""
    # Thin delegation to tf.contrib (TF 1.x); both normalization and the
    # learned scale/shift parameters live on the final axis.
    return tf.contrib.layers.layer_norm(
        inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
    """Compute the union of the current variables and checkpoint variables.

    Args:
      tvars: list of tf.Variable in the current graph.
      init_checkpoint: path to the checkpoint to initialize from.

    Returns:
      (assignment_map, initialized_variable_names): the checkpoint-name map
      for tf.train.init_from_checkpoint, and a dict marking matched
      variables both with and without the ':0' suffix.
    """
    initialized_variable_names = {}
    # Map "scope/var" (device suffix like ":0" stripped) -> variable.
    name_to_variable = collections.OrderedDict()
    for var in tvars:
        name = var.name
        m = re.match("^(.*):\\d+$", name)
        if m is not None:
            name = m.group(1)
        name_to_variable[name] = var
    init_vars = tf.train.list_variables(init_checkpoint)
    # (The original also pre-initialized assignment_map to a plain dict that
    # was immediately shadowed here; that dead assignment is removed.)
    assignment_map = collections.OrderedDict()
    for x in init_vars:
        (name, var) = (x[0], x[1])
        if name not in name_to_variable:
            continue
        assignment_map[name] = name
        initialized_variable_names[name] = 1
        initialized_variable_names[name + ":0"] = 1
    return (assignment_map, initialized_variable_names)
def get_shape_list(tensor, expected_rank=None, name=None):
    """Returns a list of the shape of tensor, preferring static dimensions.

    Args:
      tensor: A tf.Tensor object to find the shape of.
      expected_rank: (optional) int or list of ints. If given and the
        tensor's rank differs, assert_rank raises ValueError.
      name: Optional name of the tensor for the error message.

    Returns:
      A list of dimensions: static dims as python ints, dynamic (None)
      dims as scalar tf.Tensors.
    """
    if name is None:
        name = tensor.name
    if expected_rank is not None:
        assert_rank(tensor, expected_rank, name)
    dims = tensor.shape.as_list()
    dynamic_indexes = [index for index, dim in enumerate(dims) if dim is None]
    if not dynamic_indexes:
        return dims
    runtime_shape = tf.shape(tensor)
    for index in dynamic_indexes:
        dims[index] = runtime_shape[index]
    return dims
def reshape_to_matrix(input_tensor):
    """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
    rank = input_tensor.shape.ndims
    if rank < 2:
        raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                         (input_tensor.shape))
    if rank == 2:
        # Already a matrix; return it unchanged.
        return input_tensor
    last_dim = input_tensor.shape[-1]
    return tf.reshape(input_tensor, [-1, last_dim])
def reshape_from_matrix(output_tensor, orig_shape_list):
    """Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
    if len(orig_shape_list) == 2:
        # The original was already a matrix; nothing to undo.
        return output_tensor
    width = get_shape_list(output_tensor)[-1]
    return tf.reshape(output_tensor, orig_shape_list[0:-1] + [width])
def assert_rank(tensor, expected_rank, name=None):
    """Raises an exception if the tensor rank is not of the expected rank.

    Args:
      tensor: A tf.Tensor to check the rank of.
      expected_rank: Python integer or list of integers, expected rank.
      name: Optional name of the tensor for the error message.
    Raises:
      ValueError: If the expected shape doesn't match the actual shape.
    """
    if name is None:
        name = tensor.name
    if isinstance(expected_rank, six.integer_types):
        allowed_ranks = {expected_rank: True}
    else:
        allowed_ranks = {rank: True for rank in expected_rank}
    actual_rank = tensor.shape.ndims
    if actual_rank not in allowed_ranks:
        scope_name = tf.get_variable_scope().name
        raise ValueError(
            "For the tensor `%s` in scope `%s`, the actual rank "
            "`%d` (shape = %s) is not equal to the expected rank `%s`" %
            (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
| yyht/deepspeech | model/conformer.py | conformer.py | py | 41,450 | python | en | code | 2 | github-code | 13 |
36467208493 | import re
import requests
class TronGazer:
    """Scraper for TTU's TronClass e-learning platform.

    Logs in through the student portal (stucis.ttu.edu.tw), then talks to
    the TronClass REST API (ilearn.ttu.edu.tw) to enumerate courses and
    their videos, and to report videos as watched.
    """
    # Base URL prefix of the TronClass REST API.
    apiPrev = 'https://ilearn.ttu.edu.tw/api/'
    def __init__(self):
        # A single session carries the auth cookies across the portal login
        # and the subsequent API calls.
        self.session = requests.session()
        headers = {
            "User-Agent":
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.152 Safari/537.36"
        }
        self.session.headers.update(headers)
    def login_ttu(self, uid: str, password: str) -> bool:
        """Log into the student portal; return True on success."""
        # Assemble the login form payload.
        login_info = {
            "ID": uid,
            "PWD": password,
            "Submit": "%B5n%A4J%A8t%B2%CE",  # Big5 URL-encoded "log in to system" button value
        }
        r = self.session.post(
            'https://stucis.ttu.edu.tw/login.php', data=login_info)
        # The TronClass bridge page doubles as a session check; the response
        # is kept because getUserInfo later parses it.
        self.check_login = self.session.get(
            'https://stucis.ttu.edu.tw/menu/TronClass.php')
        if self.check_login.text == 'Not login or session expire!':
            return False
        return True
    def getUserInfo(self):
        """Extract the display name and numeric user id from the bridge page."""
        self.userName = re.findall(
            '<root-scope-variable name="currentUserName" value="(.*)"></root-scope-variable>', self.check_login.text)[0]
        self.userId = re.findall(
            'id="userId" data-id="(\d+)"', self.check_login.text)[0]
    def getAllCourse(self):
        """Fetch all of the user's courses as a list of {id, name} dicts."""
        r = self.session.get('%susers/%s/courses' %
                             (TronGazer.apiPrev, self.userId))
        course_list = r.json()
        self.all_course = [{
            'id': i['id'],
            'name': i['name']
        } for i in course_list['courses']]
    def getVideos(self, cid: int):
        """Collect the online videos (id, title, duration) of course `cid`."""
        action_json_url = '%scourses/%s/activities' % (TronGazer.apiPrev, cid)
        r = self.session.get(action_json_url)
        course_resources = r.json()
        videos = []
        video_iter = filter(lambda i: i['type'] == "online_video",
                            course_resources['activities'])
        # TronClass stores two kinds of videos, whose durations live in
        # different places:
        # 1. Videos uploaded to TronClass: metadata is in `uploads`. The
        #    system re-encodes each video into several resolutions, so
        #    `uploads` may contain multiple files that are really the same
        #    video — reading index 0 is sufficient.
        # 2. External (YouTube) links: TronClass only keeps the title and
        #    duration in the `data` field, so read it directly.
        for v in video_iter:
            # Read the video duration.
            if len(v['uploads']) == 0:
                # Observed on older courses: if a linked YouTube video has
                # been taken down, the duration field is missing. The video
                # can't be watched anyway, so skip it.
                if 'duration' not in v['data']:
                    continue
                duration = v['data']['duration']
            else:
                # As with the YouTube case, the upload may have been removed
                # by the teacher and the field layout is not guaranteed, so
                # guard the nested access with try/except.
                try:
                    duration = v['uploads'][0]['videos'][0]['duration']
                except:
                    continue
            videos.append({
                'id': v['id'],
                'title': v['title'],
                'duration': duration
            })
        self.videos = videos
    def watchVideo(self, video):
        """Report `video` as fully watched (start=0 .. end=duration)."""
        self.session.post('%scourse/activities-read/%d' %
                          (TronGazer.apiPrev, video['id']),
                          json={
                              "start": 0,
                              "end": video['duration']
                          })
| ssrtw/TronGazer | TronGazer.py | TronGazer.py | py | 3,686 | python | zh | code | 0 | github-code | 13 |
28352167049 | from aiogram.dispatcher import FSMContext
from loader import dp, db
from aiogram import filters, types
from bot.keyboards.inline.PurchasesKeyboards.PurchaseKeyboard import CreatePurchase
from bot.states.FormToJoinAsBuyer import FormToJoinAsBuyer
from bot.utils.misc.decorators import check_if_user_is_registered
from bot.utils.misc.parsers import get_buyers_payers_amount_from_purchase
from bot.utils.misc.additional_functions import make_purchase_text, calculate, make_calculate_text, check_if_purchase_correct
from bot.utils.misc.REGULAR_EXPRESSIONS import DELETE_PURCHASE, JOIN_AS_BUYER, JOIN_AS_PAYER, PURCHASE, CALCULATE
@dp.callback_query_handler(filters.Regexp(PURCHASE), state='*')
@check_if_user_is_registered
async def show_purchase_button(call: types.CallbackQuery):
    """Render a purchase card with its action keyboard.

    Answers with a fallback alert when the purchase no longer exists.
    """
    purchase_id = call.data.split()[1]
    purchase = db.get_purchase(purchase_id)
    if purchase:
        # Only the amount feeds the keyboard; the card text itself comes
        # from make_purchase_text. (The unused `title` unpack was removed.)
        value = purchase.get("amount")
        keyboard = CreatePurchase(purchase_id, value).keyboard
        text = make_purchase_text(purchase_id)
        await call.message.answer(text=text, reply_markup=keyboard, parse_mode="markdown")
    else:
        await call.answer("This purchase is unavailable")
@dp.callback_query_handler(filters.Regexp(DELETE_PURCHASE), state='*')
@check_if_user_is_registered
async def delete_purchase_button(call: types.CallbackQuery):
    """Delete a purchase and replace the card with a confirmation message.

    Bug fix: the decorator order was inverted relative to every sibling
    handler. Decorators apply bottom-up, so with @check_if_user_is_registered
    outermost the dispatcher registered the *bare* function and the
    registration check was silently bypassed. The dispatcher decorator is
    now outermost, matching the other handlers in this module.
    NOTE(review): there is still no ownership check — any registered user
    can delete any purchase; confirm whether that is intended.
    """
    purchase_id = call.data.split()[1]
    title = db.get_purchase(purchase_id).get("title")
    db.delete_purchase(purchase_id=purchase_id, group_id=call.message.chat.id)
    await call.answer("Purchase was deleted!")
    await call.message.edit_text(text=f"Purchase `{title}` was deleted.", parse_mode="markdown")
@dp.callback_query_handler(filters.Regexp(JOIN_AS_BUYER), state='*')
@check_if_user_is_registered
async def join_as_buyer_button(call: types.CallbackQuery, state: FSMContext):
    """Toggle the caller's buyer status on a purchase.

    Leaving is immediate: the user is removed and the card re-rendered.
    Joining starts an FSM dialog (FormToJoinAsBuyer.amount_payed) that asks
    how much the user paid; the callback context is stashed in the state so
    the follow-up handler can finish the join.
    """
    purchase_id = call.data.split()[1]
    amount = float(call.data.split()[2])
    if db.check_if_user_joined_as_buyer(telegram_id=call.from_user.id, purchase_id=purchase_id):
        db.remove_user_as_buyer(telegram_id=call.from_user.id, purchase_id=purchase_id)
        message_text = make_purchase_text(purchase_id)
        await call.answer("You have removed yourself from buyers list")
        await call.message.edit_text(text=message_text, parse_mode="markdown")
        # edit_text drops the inline keyboard, so re-attach the original markup.
        await call.message.edit_reply_markup(reply_markup=call.message.reply_markup)
    else:
        # Stash context for the FSM step; amount_max bounds what the user may enter.
        await state.update_data(purchase_id=purchase_id, message=call.message, amount_max=amount, call=call)
        await call.message.answer(text=f"{call.from_user.full_name}, how much did you pay?")
        await FormToJoinAsBuyer.amount_payed.set()
@dp.callback_query_handler(filters.Regexp(JOIN_AS_PAYER), state='*')
@check_if_user_is_registered
async def join_as_payer_button(call: types.CallbackQuery):
    """Toggle the caller's membership in a purchase's payers list and
    re-render the purchase card."""
    purchase_id = call.data.split()[1]
    payer = (call.from_user.id, call.from_user.full_name)
    already_joined = db.check_if_user_joined_as_payer(user=payer, purchase_id=purchase_id)
    if already_joined:
        db.remove_user_as_payer(user=payer, purchase_id=purchase_id)
        await call.answer("You have removed yourself from payers list")
    else:
        db.join_to_purchase_as_payer(user=payer, purchase_id=purchase_id)
        await call.answer("You have joined the purchase as a payer")
    updated_text = make_purchase_text(purchase_id)
    await call.message.edit_text(text=updated_text, parse_mode="markdown")
    await call.message.edit_reply_markup(reply_markup=call.message.reply_markup)
@dp.callback_query_handler(filters.Regexp(CALCULATE), state='*')
async def calculate_purchase_button(call: types.CallbackQuery):
    """Reply with the settlement breakdown for a purchase.

    When validation flags the purchase as unusable (error code -2), only the
    validation message is sent; otherwise the computed split is appended.
    """
    purchase_id = call.data.split()[1]
    purchase = db.get_purchase(purchase_id)
    error_code, error_message = check_if_purchase_correct(purchase)
    output = error_message
    if error_code != -2:
        buyers, payers, amount = get_buyers_payers_amount_from_purchase(purchase)
        split_result = calculate(buyers=buyers, payers=payers, amount=amount)
        output += make_calculate_text(split_result)
    await call.message.reply(text=output, parse_mode="markdown")
| nazfurdychka/share-bot | bot/handlers/groups/callbacks/purchases.py | purchases.py | py | 4,140 | python | en | code | 0 | github-code | 13 |
33794470134 | from __future__ import print_function, division
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn
from sklearn.cross_validation import train_test_split
import random
# Model hyper-parameters.
learning_rate = 0.001
training_epochs = 50
batch_size = 100
display_step = 1
# Model parameters.
# NOTE(review): n_features, n_class, n_samples, X_train and y_train are not
# defined anywhere in this file as shown — presumably produced by an omitted
# data-preparation step (pandas / train_test_split are imported above).
# Confirm before running.
x = tf.placeholder(tf.float32, [None, n_features])
y = tf.placeholder(tf.float32, [None, n_class])
W = tf.Variable(tf.zeros([n_features, n_class]))
b = tf.Variable(tf.zeros([n_class]))
# Linear model (logits).
pred = tf.matmul(x, W) + b
# Loss function: softmax first, then cross entropy.
# NOTE(review): recent TF versions require keyword arguments here
# (labels=..., logits=...); positional use only works on very old releases.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
# cost = tf.nn.sigmoid_cross_entropy_with_logits(pred, y)
# Gradient descent optimizer.
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Accuracy: fraction of rows where the predicted class matches the label.
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Initialize all variables (deprecated alias of global_variables_initializer).
init = tf.initialize_all_variables()
# Train the model in mini-batches; note the "test" accuracy below is in
# fact evaluated on the training set.
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        total_batch = int(n_samples / batch_size)
        for i in range(total_batch):
            _, c = sess.run([optimizer, cost],
                            feed_dict={x: X_train[i * batch_size : (i+1) * batch_size],
                                       y: y_train[i * batch_size : (i+1) * batch_size, :].eval()})
    print("Optimization Finished!")
    print("Testing Accuracy:", accuracy.eval({x: X_train, y:y_train.eval()}))
| kEva0v0/Prepare-For-Interview | LR.py | LR.py | py | 1,631 | python | en | code | 0 | github-code | 13 |
20625989485 | from pathlib import Path
import numpy as np
import pandas as pd
import tensorflow as tf
if __name__ == "__main__":
    ratings_path = Path("data", "ratings_clean.csv")
    ratings = pd.read_csv(ratings_path)
    # Rating 0 marks implicit feedback; keep explicit ratings only.
    explicit_ratings = ratings.loc[ratings.rating != 0]

    def get_and_save_mapping(values, filename):
        """Map each unique value to a dense integer id; persist as value,id CSV."""
        with open(filename, "w") as file_to_write:
            value_to_id = {
                value: value_id for value_id, value in enumerate(values.unique())
            }
            for value, value_id in value_to_id.items():
                file_to_write.write("{},{}\n".format(value, value_id))
        return value_to_id

    def write_sparse_rows(grouped, filename, indices_column):
        """Write one tf.train.Example per group with key/indices/values features.

        Extracted helper: the original duplicated this TFRecord-writing loop
        verbatim for the item-major and user-major files.
        """
        with tf.io.TFRecordWriter(filename) as record_to_write:
            for key, grouped_rows in grouped:
                example = tf.train.Example(
                    features=tf.train.Features(
                        feature={
                            "key": tf.train.Feature(
                                int64_list=tf.train.Int64List(value=[key])
                            ),
                            "indices": tf.train.Feature(
                                int64_list=tf.train.Int64List(
                                    value=grouped_rows[indices_column].values
                                )
                            ),
                            "values": tf.train.Feature(
                                float_list=tf.train.FloatList(
                                    value=grouped_rows["rating"].values
                                )
                            ),
                        }
                    )
                )
                record_to_write.write(example.SerializeToString())

    user_mapping = get_and_save_mapping(
        explicit_ratings["user_id"], "data/users_mapping.csv"
    )
    item_mapping = get_and_save_mapping(
        explicit_ratings["isbn"], "data/books_mapping.csv"
    )
    explicit_ratings = explicit_ratings.assign(
        visitor_id=explicit_ratings.loc[:, "user_id"].map(user_mapping.get),
        item_id=explicit_ratings.loc[:, "isbn"].map(item_mapping.get),
    )
    id_transformed_explicit_ratings = explicit_ratings[
        ["visitor_id", "item_id", "rating"]
    ]
    id_transformed_explicit_ratings.to_csv(
        path_or_buf="data/id_transformed_explicit_ratings.csv",
        index=False,
        header=False,
    )
    # NOTE(review): n_users/n_items are computed but never used downstream in
    # this script; kept for parity with the original — confirm before removing.
    n_users, n_items = (
        id_transformed_explicit_ratings.visitor_id.nunique(),
        id_transformed_explicit_ratings.item_id.nunique(),
    )
    # Item-major records: for each item, the users who rated it.
    write_sparse_rows(
        id_transformed_explicit_ratings.groupby("item_id"),
        "data/users_for_item.tfrecords",
        indices_column="visitor_id",
    )
    # User-major records: for each user, the items they rated.
    write_sparse_rows(
        id_transformed_explicit_ratings.groupby("visitor_id"),
        "data/items_for_user.tfrecords",
        indices_column="item_id",
    )
| coinflip112/bookrecommender | preprocessing/wals_prepare.py | wals_prepare.py | py | 3,583 | python | en | code | 0 | github-code | 13 |
26640563172 | import pytest
from app import schemas
from app import models
def test_get_all_posts(authorized_client, test_posts):
    """Listing posts returns 200, one row per seeded post, each parseable
    as schemas.PostOut."""
    res = authorized_client.get("/posts/")
    assert res.status_code == 200
    # Parsing validates the response schema; the original built this list
    # but never used it in an assertion (and carried commented-out prints).
    posts = [schemas.PostOut(**post) for post in res.json()]
    assert len(posts) == len(test_posts)
def test_unauthorized_user_get_all_posts(client, test_posts):
    """Listing posts without a token is rejected with 401."""
    res = client.get("/posts/")
    assert res.status_code == 401
def test_unauthorized_user_get_one_post(client, test_posts):
    """Fetching a single post without a token is rejected with 401."""
    res = client.get(f"/posts/{test_posts[0].id}")
    assert res.status_code == 401
def test_get_one_post_not_exist(authorized_client, test_posts):
    """Fetching a non-existent post id returns 404."""
    res = authorized_client.get(f"/posts/88888")
    assert res.status_code == 404
def test_get_one_post(authorized_client, test_posts):
    """Fetching an existing post returns its id, context and title."""
    res = authorized_client.get(f"/posts/{test_posts[0].id}")
    post = schemas.PostOut(**res.json())
    assert post.Post.id == test_posts[0].id
    assert post.Post.context == test_posts[0].context
    assert post.Post.title == test_posts[0].title
@pytest.mark.parametrize("title, context, published", [
    ("awesome new title", "awesome new content", True),
    ("favorite pizza", "i love pepperoni", False),
    ("tallest skyscrapers", "wahoo", True),
])
def test_create_post(authorized_client, test_user, test_posts, title, context, published):
    """Creating a post echoes the supplied fields and sets the caller as owner."""
    res = authorized_client.post(
        "/posts/createpost", json={"title": title, "context": context, "published": published})
    # The endpoint wraps the created post under a "message" key.
    created_post = models.Post(**res.json().get("message"))
    assert res.status_code == 201
    assert created_post.title == title
    assert created_post.context == context
    assert created_post.published == published
    assert created_post.owner_id == test_user['id']
def test_create_post_default_published_true(authorized_client, test_user, test_posts):
res = authorized_client.post(
"/posts/createpost", json={"title": "arbitrary title", "context": "aasdfjasdf"})
created_post = models.Post(**res.json().get("message"))
assert res.status_code == 201
assert created_post.title == "arbitrary title"
assert created_post.context == "aasdfjasdf"
assert created_post.published == True
assert created_post.owner_id == test_user['id']
def test_unauthorized_user_create_post(client, test_user, test_posts):
res = client.post(
"/posts/createpost", json={"title": "arbitrary title", "context": "aasdfjasdf"})
assert res.status_code == 401
# NOTE(review): name breaks the snake_case convention of the sibling tests
# ("delete_Post"); consider renaming to test_unauthorized_user_delete_post.
def test_unauthorized_user_delete_Post(client, test_user, test_posts):
    """Deleting a post without an auth token must return 401."""
    res = client.delete(
        f"/posts/{test_posts[0].id}")
    assert res.status_code == 401
def test_delete_post_success(authorized_client, test_user, test_posts):
    """The owner can delete their own post; the API answers 204 No Content."""
    res = authorized_client.delete(
        f"/posts/{test_posts[0].id}")
    assert res.status_code == 204
def test_delete_post_non_exist(authorized_client, test_user, test_posts):
    """Deleting a post id that does not exist must return 404."""
    res = authorized_client.delete(
        f"/posts/8000000")
    assert res.status_code == 404
def test_delete_other_user_post(authorized_client, test_user, test_posts):
    """Deleting a post owned by someone else must be forbidden (403)."""
    # test_posts[3] is assumed to belong to a different user — see fixtures.
    res = authorized_client.delete(
        f"/posts/{test_posts[3].id}")
    assert res.status_code == 403
def test_update_post(authorized_client, test_user, test_posts):
    """The owner can update a post and receives the updated fields back."""
    data = {
        "title": "updated title",
        "context": "update context",
        "id": test_posts[0].id
    }
    res = authorized_client.put(f"/posts/{test_posts[0].id}", json=data)
    # Check the status before parsing so a failure reports the real cause.
    assert res.status_code == 200
    updated_post = schemas.Post(**res.json())
    assert updated_post.title == data['title']
    assert updated_post.context == data['context']
def test_update_other_user_post(authorized_client, test_user, test_user2, test_posts):
    """Updating a post owned by someone else must be forbidden (403)."""
    # test_posts[3] is assumed to belong to test_user2 — see fixtures.
    data = {
        "title": "updated title",
        "context": "update context",
        "id": test_posts[3].id
    }
    res = authorized_client.put(f"/posts/{test_posts[3].id}", json=data)
    assert res.status_code == 403
def test_unauthorized_user_update_post(client, test_user, test_posts):
    """Updating a post without an auth token must return 401."""
    res = client.put(
        f"/posts/{test_posts[0].id}")
    assert res.status_code == 401
def test_update_post_non_exist(authorized_client, test_user, test_posts):
    """Updating a post id that does not exist must return 404."""
    # NOTE(review): the payload carries the id of an existing fixture post
    # while the URL targets 8000000 — the URL id is what the API checks here.
    data = {
        "title": "updated title",
        "context": "update context",
        "id": test_posts[0].id
    }
    res = authorized_client.put(
        f"/posts/8000000", json=data)
    assert res.status_code == 404
| trungtruc123/FASTAPI | tests/test_posts.py | test_posts.py | py | 4,684 | python | en | code | 0 | github-code | 13 |
37946592628 | # This file was automatically created by FeynRules $Revision: 573 $
# Mathematica version: 8.0 for Microsoft Windows (32-bit) (February 24, 2011)
# Date: Tue 2 Jul 2013 00:32:00
from object_library import all_parameters, Parameter
from function_library import complexconjugate, re, im, csc, sec, acsc, asec
# This is a default parameter object representing 0.
ZERO = Parameter(name = 'ZERO',
nature = 'internal',
type = 'real',
value = '0.0',
texname = '0')
# User-defined parameters.
CKM11 = Parameter(name = 'CKM11',
nature = 'external',
type = 'complex',
value = 0.97428,
texname = '\\text{CKM11}',
lhablock = 'CKMBlock',
lhacode = [ 1, 1 ])
CKM12 = Parameter(name = 'CKM12',
nature = 'external',
type = 'complex',
value = 0.2253,
texname = '\\text{CKM12}',
lhablock = 'CKMBlock',
lhacode = [ 1, 2 ])
CKM13 = Parameter(name = 'CKM13',
nature = 'external',
type = 'complex',
value = 0.00347,
texname = '\\text{CKM13}',
lhablock = 'CKMBlock',
lhacode = [ 1, 3 ])
CKM21 = Parameter(name = 'CKM21',
nature = 'external',
type = 'complex',
value = 0.2252,
texname = '\\text{CKM21}',
lhablock = 'CKMBlock',
lhacode = [ 2, 1 ])
CKM22 = Parameter(name = 'CKM22',
nature = 'external',
type = 'complex',
value = 0.97345,
texname = '\\text{CKM22}',
lhablock = 'CKMBlock',
lhacode = [ 2, 2 ])
CKM23 = Parameter(name = 'CKM23',
nature = 'external',
type = 'complex',
value = 0.041,
texname = '\\text{CKM23}',
lhablock = 'CKMBlock',
lhacode = [ 2, 3 ])
CKM31 = Parameter(name = 'CKM31',
nature = 'external',
type = 'complex',
value = 0.00862,
texname = '\\text{CKM31}',
lhablock = 'CKMBlock',
lhacode = [ 3, 1 ])
CKM32 = Parameter(name = 'CKM32',
nature = 'external',
type = 'complex',
value = 0.0403,
texname = '\\text{CKM32}',
lhablock = 'CKMBlock',
lhacode = [ 3, 2 ])
CKM33 = Parameter(name = 'CKM33',
nature = 'external',
type = 'complex',
value = 0.999152,
texname = '\\text{CKM33}',
lhablock = 'CKMBlock',
lhacode = [ 3, 3 ])
KX = Parameter(name = 'KX',
nature = 'external',
type = 'real',
value = 1,
texname = '\\text{KX}',
lhablock = 'Kappa',
lhacode = [ 1 ])
KT = Parameter(name = 'KT',
nature = 'external',
type = 'real',
value = 1,
texname = '\\text{KT}',
lhablock = 'Kappa',
lhacode = [ 2 ])
KB = Parameter(name = 'KB',
nature = 'external',
type = 'real',
value = 1,
texname = '\\text{KB}',
lhablock = 'Kappa',
lhacode = [ 3 ])
KY = Parameter(name = 'KY',
nature = 'external',
type = 'real',
value = 1,
texname = '\\text{KY}',
lhablock = 'Kappa',
lhacode = [ 4 ])
aEWM1 = Parameter(name = 'aEWM1',
nature = 'external',
type = 'real',
value = 127.9,
texname = '\\text{aEWM1}',
lhablock = 'SMINPUTS',
lhacode = [ 1 ])
Gf = Parameter(name = 'Gf',
nature = 'external',
type = 'real',
value = 0.00001166,
texname = 'G_f',
lhablock = 'SMINPUTS',
lhacode = [ 2 ])
aS = Parameter(name = 'aS',
nature = 'external',
type = 'real',
value = 0.118,
texname = '\\text{aS}',
lhablock = 'SMINPUTS',
lhacode = [ 3 ])
xitpw = Parameter(name = 'xitpw',
nature = 'external',
type = 'real',
value = 0.4,
texname = '\\text{xitpw}',
lhablock = 'Xi',
lhacode = [ 1 ])
xitpz = Parameter(name = 'xitpz',
nature = 'external',
type = 'real',
value = 0.3,
texname = '\\text{xitpz}',
lhablock = 'Xi',
lhacode = [ 2 ])
xitph = Parameter(name = 'xitph',
nature = 'external',
type = 'real',
value = 0.3,
texname = '\\text{xitph}',
lhablock = 'Xi',
lhacode = [ 3 ])
xibpw = Parameter(name = 'xibpw',
nature = 'external',
type = 'real',
value = 0.4,
texname = '\\text{xibpw}',
lhablock = 'Xi',
lhacode = [ 4 ])
xibpz = Parameter(name = 'xibpz',
nature = 'external',
type = 'real',
value = 0.3,
texname = '\\text{xibpz}',
lhablock = 'Xi',
lhacode = [ 5 ])
xibph = Parameter(name = 'xibph',
nature = 'external',
type = 'real',
value = 0.3,
texname = '\\text{xibph}',
lhablock = 'Xi',
lhacode = [ 6 ])
ymb = Parameter(name = 'ymb',
nature = 'external',
type = 'real',
value = 4.2,
texname = '\\text{ymb}',
lhablock = 'YUKAWA',
lhacode = [ 5 ])
ymt = Parameter(name = 'ymt',
nature = 'external',
type = 'real',
value = 174.3,
texname = '\\text{ymt}',
lhablock = 'YUKAWA',
lhacode = [ 6 ])
ymtau = Parameter(name = 'ymtau',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{ymtau}',
lhablock = 'YUKAWA',
lhacode = [ 15 ])
zetaXuL = Parameter(name = 'zetaXuL',
nature = 'external',
type = 'real',
value = 0.3,
texname = '\\text{zetaXuL}',
lhablock = 'Zeta',
lhacode = [ 1 ])
zetaXcL = Parameter(name = 'zetaXcL',
nature = 'external',
type = 'real',
value = 0.3,
texname = '\\text{zetaXcL}',
lhablock = 'Zeta',
lhacode = [ 2 ])
zetaXtL = Parameter(name = 'zetaXtL',
nature = 'external',
type = 'real',
value = 0.4,
texname = '\\text{zetaXtL}',
lhablock = 'Zeta',
lhacode = [ 3 ])
zetaTuL = Parameter(name = 'zetaTuL',
nature = 'external',
type = 'real',
value = 0.3,
texname = '\\text{zetaTuL}',
lhablock = 'Zeta',
lhacode = [ 4 ])
zetaTcL = Parameter(name = 'zetaTcL',
nature = 'external',
type = 'real',
value = 0.3,
texname = '\\text{zetaTcL}',
lhablock = 'Zeta',
lhacode = [ 5 ])
zetaTtL = Parameter(name = 'zetaTtL',
nature = 'external',
type = 'real',
value = 0.4,
texname = '\\text{zetaTtL}',
lhablock = 'Zeta',
lhacode = [ 6 ])
zetaBdL = Parameter(name = 'zetaBdL',
nature = 'external',
type = 'real',
value = 0.3,
texname = '\\text{zetaBdL}',
lhablock = 'Zeta',
lhacode = [ 7 ])
zetaBsL = Parameter(name = 'zetaBsL',
nature = 'external',
type = 'real',
value = 0.3,
texname = '\\text{zetaBsL}',
lhablock = 'Zeta',
lhacode = [ 8 ])
zetaBbL = Parameter(name = 'zetaBbL',
nature = 'external',
type = 'real',
value = 0.4,
texname = '\\text{zetaBbL}',
lhablock = 'Zeta',
lhacode = [ 9 ])
zetaYdL = Parameter(name = 'zetaYdL',
nature = 'external',
type = 'real',
value = 0.3,
texname = '\\text{zetaYdL}',
lhablock = 'Zeta',
lhacode = [ 10 ])
zetaYsL = Parameter(name = 'zetaYsL',
nature = 'external',
type = 'real',
value = 0.3,
texname = '\\text{zetaYsL}',
lhablock = 'Zeta',
lhacode = [ 11 ])
zetaYbL = Parameter(name = 'zetaYbL',
nature = 'external',
type = 'real',
value = 0.4,
texname = '\\text{zetaYbL}',
lhablock = 'Zeta',
lhacode = [ 12 ])
zetaXuR = Parameter(name = 'zetaXuR',
nature = 'external',
type = 'real',
value = 0,
texname = '\\text{zetaXuR}',
lhablock = 'Zeta',
lhacode = [ 13 ])
zetaXcR = Parameter(name = 'zetaXcR',
nature = 'external',
type = 'real',
value = 0,
texname = '\\text{zetaXcR}',
lhablock = 'Zeta',
lhacode = [ 14 ])
zetaXtR = Parameter(name = 'zetaXtR',
nature = 'external',
type = 'real',
value = 0,
texname = '\\text{zetaXtR}',
lhablock = 'Zeta',
lhacode = [ 15 ])
zetaTuR = Parameter(name = 'zetaTuR',
nature = 'external',
type = 'real',
value = 0,
texname = '\\text{zetaTuR}',
lhablock = 'Zeta',
lhacode = [ 16 ])
zetaTcR = Parameter(name = 'zetaTcR',
nature = 'external',
type = 'real',
value = 0,
texname = '\\text{zetaTcR}',
lhablock = 'Zeta',
lhacode = [ 17 ])
zetaTtR = Parameter(name = 'zetaTtR',
nature = 'external',
type = 'real',
value = 0,
texname = '\\text{zetaTtR}',
lhablock = 'Zeta',
lhacode = [ 18 ])
zetaBdR = Parameter(name = 'zetaBdR',
nature = 'external',
type = 'real',
value = 0,
texname = '\\text{zetaBdR}',
lhablock = 'Zeta',
lhacode = [ 19 ])
zetaBsR = Parameter(name = 'zetaBsR',
nature = 'external',
type = 'real',
value = 0,
texname = '\\text{zetaBsR}',
lhablock = 'Zeta',
lhacode = [ 20 ])
zetaBbR = Parameter(name = 'zetaBbR',
nature = 'external',
type = 'real',
value = 0,
texname = '\\text{zetaBbR}',
lhablock = 'Zeta',
lhacode = [ 21 ])
zetaYdR = Parameter(name = 'zetaYdR',
nature = 'external',
type = 'real',
value = 0,
texname = '\\text{zetaYdR}',
lhablock = 'Zeta',
lhacode = [ 22 ])
zetaYsR = Parameter(name = 'zetaYsR',
nature = 'external',
type = 'real',
value = 0,
texname = '\\text{zetaYsR}',
lhablock = 'Zeta',
lhacode = [ 23 ])
zetaYbR = Parameter(name = 'zetaYbR',
nature = 'external',
type = 'real',
value = 0,
texname = '\\text{zetaYbR}',
lhablock = 'Zeta',
lhacode = [ 24 ])
MTA = Parameter(name = 'MTA',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{MTA}',
lhablock = 'MASS',
lhacode = [ 15 ])
MT = Parameter(name = 'MT',
nature = 'external',
type = 'real',
value = 174.3,
texname = '\\text{MT}',
lhablock = 'MASS',
lhacode = [ 6 ])
MB = Parameter(name = 'MB',
nature = 'external',
type = 'real',
value = 4.2,
texname = '\\text{MB}',
lhablock = 'MASS',
lhacode = [ 5 ])
MX = Parameter(name = 'MX',
nature = 'external',
type = 'real',
value = 600,
texname = '\\text{MX}',
lhablock = 'MASS',
lhacode = [ 6000005 ])
MTP = Parameter(name = 'MTP',
nature = 'external',
type = 'real',
value = 600,
texname = '\\text{MTP}',
lhablock = 'MASS',
lhacode = [ 6000006 ])
MBP = Parameter(name = 'MBP',
nature = 'external',
type = 'real',
value = 600,
texname = '\\text{MBP}',
lhablock = 'MASS',
lhacode = [ 6000007 ])
MY = Parameter(name = 'MY',
nature = 'external',
type = 'real',
value = 600,
texname = '\\text{MY}',
lhablock = 'MASS',
lhacode = [ 6000008 ])
MZ = Parameter(name = 'MZ',
nature = 'external',
type = 'real',
value = 91.1876,
texname = '\\text{MZ}',
lhablock = 'MASS',
lhacode = [ 23 ])
MH = Parameter(name = 'MH',
nature = 'external',
type = 'real',
value = 125,
texname = '\\text{MH}',
lhablock = 'MASS',
lhacode = [ 25 ])
WT = Parameter(name = 'WT',
nature = 'external',
type = 'real',
value = 1.5101349,
texname = '\\text{WT}',
lhablock = 'DECAY',
lhacode = [ 6 ])
WX = Parameter(name = 'WX',
nature = 'external',
type = 'real',
value = 1,
texname = '\\text{WX}',
lhablock = 'DECAY',
lhacode = [ 6000005 ])
WTP = Parameter(name = 'WTP',
nature = 'external',
type = 'real',
value = 1,
texname = '\\text{WTP}',
lhablock = 'DECAY',
lhacode = [ 6000006 ])
WBP = Parameter(name = 'WBP',
nature = 'external',
type = 'real',
value = 1,
texname = '\\text{WBP}',
lhablock = 'DECAY',
lhacode = [ 6000007 ])
WY = Parameter(name = 'WY',
nature = 'external',
type = 'real',
value = 1,
texname = '\\text{WY}',
lhablock = 'DECAY',
lhacode = [ 6000008 ])
WZ = Parameter(name = 'WZ',
nature = 'external',
type = 'real',
value = 2.44639985,
texname = '\\text{WZ}',
lhablock = 'DECAY',
lhacode = [ 23 ])
WW = Parameter(name = 'WW',
nature = 'external',
type = 'real',
value = 2.0353557,
texname = '\\text{WW}',
lhablock = 'DECAY',
lhacode = [ 24 ])
WH = Parameter(name = 'WH',
nature = 'external',
type = 'real',
value = 0.00679485838,
texname = '\\text{WH}',
lhablock = 'DECAY',
lhacode = [ 25 ])
gamma0bph = Parameter(name = 'gamma0bph',
nature = 'internal',
type = 'real',
value = '(1 - MH**2/MBP**2)**2/2.',
texname = '\\text{gamma0bph}')
gamma0bpz = Parameter(name = 'gamma0bpz',
nature = 'internal',
type = 'real',
value = '((1 - MZ**2/MBP**2)*(1 + MZ**2/MBP**2 - (2*MZ**4)/MBP**4))/2.',
texname = '\\text{gamma0bpz}')
gamma0tph = Parameter(name = 'gamma0tph',
nature = 'internal',
type = 'real',
value = '(1 - MH**2/MTP**2)**2/2.',
texname = '\\text{gamma0tph}')
gamma0tpz = Parameter(name = 'gamma0tpz',
nature = 'internal',
type = 'real',
value = '((1 - MZ**2/MTP**2)*(1 + MZ**2/MTP**2 - (2*MZ**4)/MTP**4))/2.',
texname = '\\text{gamma0tpz}')
aEW = Parameter(name = 'aEW',
nature = 'internal',
type = 'real',
value = '1/aEWM1',
texname = '\\text{aEW}')
G = Parameter(name = 'G',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aS)*cmath.sqrt(cmath.pi)',
texname = 'G')
MW = Parameter(name = 'MW',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(MZ**2/2. + cmath.sqrt(MZ**4/4. - (aEW*cmath.pi*MZ**2)/(Gf*cmath.sqrt(2))))',
texname = 'M_W')
ee = Parameter(name = 'ee',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aEW)*cmath.sqrt(cmath.pi)',
texname = 'e')
KBbLh = Parameter(name = 'KBbLh',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt((xibph*zetaBbL)/gamma0bph)',
texname = '\\text{KBbLh}')
KBbRh = Parameter(name = 'KBbRh',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt((xibph*zetaBbR)/gamma0bph)',
texname = '\\text{KBbRh}')
KBdLh = Parameter(name = 'KBdLh',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt((xibph*zetaBdL)/gamma0bph)',
texname = '\\text{KBdLh}')
KBdRh = Parameter(name = 'KBdRh',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt((xibph*zetaBdR)/gamma0bph)',
texname = '\\text{KBdRh}')
KBsLh = Parameter(name = 'KBsLh',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt((xibph*zetaBsL)/gamma0bph)',
texname = '\\text{KBsLh}')
KBsRh = Parameter(name = 'KBsRh',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt((xibph*zetaBsR)/gamma0bph)',
texname = '\\text{KBsRh}')
KTcLh = Parameter(name = 'KTcLh',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt((xitph*zetaTcL)/gamma0tph)',
texname = '\\text{KTcLh}')
KTcRh = Parameter(name = 'KTcRh',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt((xitph*zetaTcR)/gamma0tph)',
texname = '\\text{KTcRh}')
KTtLh = Parameter(name = 'KTtLh',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt((xitph*zetaTtL)/gamma0tph)',
texname = '\\text{KTtLh}')
KTtRh = Parameter(name = 'KTtRh',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt((xitph*zetaTtR)/gamma0tph)',
texname = '\\text{KTtRh}')
KTuLh = Parameter(name = 'KTuLh',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt((xitph*zetaTuL)/gamma0tph)',
texname = '\\text{KTuLh}')
KTuRh = Parameter(name = 'KTuRh',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt((xitph*zetaTuR)/gamma0tph)',
texname = '\\text{KTuRh}')
gamma0bpw = Parameter(name = 'gamma0bpw',
nature = 'internal',
type = 'real',
value = '(1 - MW**2/MBP**2)*(1 + MW**2/MBP**2 - (2*MW**4)/MBP**4)',
texname = '\\text{gamma0bpw}')
gamma0tpw = Parameter(name = 'gamma0tpw',
nature = 'internal',
type = 'real',
value = '(1 - MW**2/MTP**2)*(1 + MW**2/MTP**2 - (2*MW**4)/MTP**4)',
texname = '\\text{gamma0tpw}')
gamma0xw = Parameter(name = 'gamma0xw',
nature = 'internal',
type = 'real',
value = '(1 - MW**2/MX**2)*(1 - (2*MW**4)/MX**4 + MW**2/MX**2)',
texname = '\\text{gamma0xw}')
gamma0yw = Parameter(name = 'gamma0yw',
nature = 'internal',
type = 'real',
value = '(1 - MW**2/MY**2)*(1 - (2*MW**4)/MY**4 + MW**2/MY**2)',
texname = '\\text{gamma0yw}')
sw2 = Parameter(name = 'sw2',
nature = 'internal',
type = 'real',
value = '1 - MW**2/MZ**2',
texname = '\\text{sw2}')
cw = Parameter(name = 'cw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(1 - sw2)',
texname = 'c_w')
sw = Parameter(name = 'sw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(sw2)',
texname = 's_w')
g1 = Parameter(name = 'g1',
nature = 'internal',
type = 'real',
value = 'ee/cw',
texname = 'g_1')
gw = Parameter(name = 'gw',
nature = 'internal',
type = 'real',
value = 'ee/sw',
texname = 'g_w')
KBdLw = Parameter(name = 'KBdLw',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt((xibpw*zetaBdL)/gamma0bpw))/(sw*cmath.sqrt(2))',
texname = '\\text{KBdLw}')
KBdRw = Parameter(name = 'KBdRw',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt((xibpw*zetaBdR)/gamma0bpw))/(sw*cmath.sqrt(2))',
texname = '\\text{KBdRw}')
KBsLw = Parameter(name = 'KBsLw',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt((xibpw*zetaBsL)/gamma0bpw))/(sw*cmath.sqrt(2))',
texname = '\\text{KBsLw}')
KTcLw = Parameter(name = 'KTcLw',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt((xitpw*zetaTcL)/gamma0tpw))/(sw*cmath.sqrt(2))',
texname = '\\text{KTcLw}')
KTcLz = Parameter(name = 'KTcLz',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt((xitpz*zetaTcL)/gamma0tpz))/(2.*cw*sw)',
texname = '\\text{KTcLz}')
KTcRw = Parameter(name = 'KTcRw',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt((xitpw*zetaTcR)/gamma0tpw))/(sw*cmath.sqrt(2))',
texname = '\\text{KTcRw}')
KTcRz = Parameter(name = 'KTcRz',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt((xitpz*zetaTcR)/gamma0tpz))/(2.*cw*sw)',
texname = '\\text{KTcRz}')
KTtLw = Parameter(name = 'KTtLw',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt((xitpw*zetaTtL)/gamma0tpw))/(sw*cmath.sqrt(2))',
texname = '\\text{KTtLw}')
KTtLz = Parameter(name = 'KTtLz',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt((xitpz*zetaTtL)/gamma0tpz))/(2.*cw*sw)',
texname = '\\text{KTtLz}')
KTtRw = Parameter(name = 'KTtRw',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt((xitpw*zetaTtR)/gamma0tpw))/(sw*cmath.sqrt(2))',
texname = '\\text{KTtRw}')
KTtRz = Parameter(name = 'KTtRz',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt((xitpz*zetaTtR)/gamma0tpz))/(2.*cw*sw)',
texname = '\\text{KTtRz}')
KTuLw = Parameter(name = 'KTuLw',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt((xitpw*zetaTuL)/gamma0tpw))/(sw*cmath.sqrt(2))',
texname = '\\text{KTuLw}')
KTuLz = Parameter(name = 'KTuLz',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt((xitpz*zetaTuL)/gamma0tpz))/(2.*cw*sw)',
texname = '\\text{KTuLz}')
KTuRw = Parameter(name = 'KTuRw',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt((xitpw*zetaTuR)/gamma0tpw))/(sw*cmath.sqrt(2))',
texname = '\\text{KTuRw}')
KTuRz = Parameter(name = 'KTuRz',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt((xitpz*zetaTuR)/gamma0tpz))/(2.*cw*sw)',
texname = '\\text{KTuRz}')
KXcL = Parameter(name = 'KXcL',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt(zetaXcL/gamma0xw))/(sw*cmath.sqrt(2))',
texname = '\\text{KXcL}')
KXcR = Parameter(name = 'KXcR',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt(zetaXcR/gamma0xw))/(sw*cmath.sqrt(2))',
texname = '\\text{KXcR}')
KXtL = Parameter(name = 'KXtL',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt(zetaXtL/gamma0xw))/(sw*cmath.sqrt(2))',
texname = '\\text{KXtL}')
KXtR = Parameter(name = 'KXtR',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt(zetaXtR/gamma0xw))/(sw*cmath.sqrt(2))',
texname = '\\text{KXtR}')
KXuL = Parameter(name = 'KXuL',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt(zetaXuL/gamma0xw))/(sw*cmath.sqrt(2))',
texname = '\\text{KXuL}')
KXuR = Parameter(name = 'KXuR',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt(zetaXuR/gamma0xw))/(sw*cmath.sqrt(2))',
texname = '\\text{KXuR}')
KYbL = Parameter(name = 'KYbL',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt(zetaYbL/gamma0yw))/(sw*cmath.sqrt(2))',
texname = '\\text{KYbL}')
KYbR = Parameter(name = 'KYbR',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt(zetaYbR/gamma0yw))/(sw*cmath.sqrt(2))',
texname = '\\text{KYbR}')
KYdL = Parameter(name = 'KYdL',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt(zetaYdL/gamma0yw))/(sw*cmath.sqrt(2))',
texname = '\\text{KYdL}')
KYdR = Parameter(name = 'KYdR',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt(zetaYdR/gamma0yw))/(sw*cmath.sqrt(2))',
texname = '\\text{KYdR}')
KYsL = Parameter(name = 'KYsL',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt(zetaYsL/gamma0yw))/(sw*cmath.sqrt(2))',
texname = '\\text{KYsL}')
KYsR = Parameter(name = 'KYsR',
nature = 'internal',
type = 'real',
value = '(ee*cmath.sqrt(zetaYsR/gamma0yw))/(sw*cmath.sqrt(2))',
texname = '\\text{KYsR}')
v = Parameter(name = 'v',
nature = 'internal',
type = 'real',
value = '(2*MW*sw)/ee',
texname = 'v')
KBbLw = Parameter(name = 'KBbLw',
nature = 'internal',
type = 'real',
value = '(gw*cmath.sqrt((xibpw*zetaBbL)/gamma0bpw))/cmath.sqrt(2)',
texname = '\\text{KBbLw}')
KBbLz = Parameter(name = 'KBbLz',
nature = 'internal',
type = 'real',
value = '(gw*cmath.sqrt((xibpz*zetaBbL)/gamma0bpz))/(2.*cw)',
texname = '\\text{KBbLz}')
KBbRw = Parameter(name = 'KBbRw',
nature = 'internal',
type = 'real',
value = '(gw*cmath.sqrt((xibpw*zetaBbR)/gamma0bpw))/cmath.sqrt(2)',
texname = '\\text{KBbRw}')
KBbRz = Parameter(name = 'KBbRz',
nature = 'internal',
type = 'real',
value = '(gw*cmath.sqrt((xibpz*zetaBbR)/gamma0bpz))/(2.*cw)',
texname = '\\text{KBbRz}')
KBdLz = Parameter(name = 'KBdLz',
nature = 'internal',
type = 'real',
value = '(gw*cmath.sqrt((xibpz*zetaBdL)/gamma0bpz))/(2.*cw)',
texname = '\\text{KBdLz}')
KBdRz = Parameter(name = 'KBdRz',
nature = 'internal',
type = 'real',
value = '(gw*cmath.sqrt((xibpz*zetaBdR)/gamma0bpz))/(2.*cw)',
texname = '\\text{KBdRz}')
KBsLz = Parameter(name = 'KBsLz',
nature = 'internal',
type = 'real',
value = '(gw*cmath.sqrt((xibpz*zetaBsL)/gamma0bpz))/(2.*cw)',
texname = '\\text{KBsLz}')
KBsRw = Parameter(name = 'KBsRw',
nature = 'internal',
type = 'real',
value = '(gw*cmath.sqrt((xibpw*zetaBsR)/gamma0bpw))/cmath.sqrt(2)',
texname = '\\text{KBsRw}')
KBsRz = Parameter(name = 'KBsRz',
nature = 'internal',
type = 'real',
value = '(gw*cmath.sqrt((xibpz*zetaBsR)/gamma0bpz))/(2.*cw)',
texname = '\\text{KBsRz}')
lam = Parameter(name = 'lam',
nature = 'internal',
type = 'real',
value = 'MH**2/(2.*v**2)',
texname = '\\text{lam}')
yb = Parameter(name = 'yb',
nature = 'internal',
type = 'real',
value = '(ymb*cmath.sqrt(2))/v',
texname = '\\text{yb}')
yt = Parameter(name = 'yt',
nature = 'internal',
type = 'real',
value = '(ymt*cmath.sqrt(2))/v',
texname = '\\text{yt}')
ytau = Parameter(name = 'ytau',
nature = 'internal',
type = 'real',
value = '(ymtau*cmath.sqrt(2))/v',
texname = '\\text{ytau}')
muH = Parameter(name = 'muH',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(lam*v**2)',
texname = '\\mu')
| rushioda/PIXELVALID_athena | athena/Generators/MadGraphModels/python/models/VLQ_UFO/parameters.py | parameters.py | py | 34,221 | python | en | code | 1 | github-code | 13 |
16951742507 | import csv
import matplotlib.pyplot as plt
import gender_guesser.detector as gender
DELIMITER = ";"
FILE = "list.csv"
ALL_STUDENTS_AUD = "all_students_aud.csv"
ALL_STUDENTS_CC = "all_students_cc.csv"
ALL_GROUPS_AUD = "all_groups_aud.csv"
ALL_GROUPS_CC = "all_groups_cc.csv"
students = []
all_students = {}
class Student:
    """One student record assembled from the Stud.IP CSV exports.

    ``group`` holds the Testat group number and ``codingclass`` the Coding
    Class number; both stay at ``-1`` until ``add_to_group`` assigns them.
    ``courses`` is a list of dicts with "area", "degree" and "semester" keys
    (see ``strip_courses``).
    """

    def __init__(self, name, surname, gender, id, login, courses, group=-1, codingclass=-1):
        self.group = group
        self.name = name
        self.surname = surname
        self.gender = gender
        self.id = id
        self.login = login
        self.courses = courses
        self.codingclass = codingclass

    def get_group(self):
        return self.group

    def get_codingclass(self):
        return self.codingclass

    def get_name(self):
        return self.name

    def get_surname(self):
        return self.surname

    def get_gender(self):
        return self.gender

    def get_id(self):
        return self.id

    def get_login(self):
        return self.login

    def get_courses(self):
        return self.courses

    def get_areas(self):
        """Return the subject area of every enrolled course."""
        return [course["area"] for course in self.courses]

    def get_degree(self):
        """Return the degree of every enrolled course."""
        return [course["degree"] for course in self.courses]

    def get_semesters(self):
        """Return the semester of every enrolled course."""
        return [course["semester"] for course in self.courses]

    def add_to_group(self, group):
        """Assign the student to a Testat group or a Coding Class.

        ``group`` is the raw Stud.IP label: a two-word label such as
        "Gruppe <n>" sets the Testat group, a longer label such as
        "Coding Class <n>" sets the Coding Class, and the placeholder for
        unassigned students is ignored.  Returns -1 for unassigned students,
        otherwise None (the return value is ignored by callers).
        """
        if group == "keiner Funktion oder Gruppe zugeordnet":
            return -1
        elements = group.split(" ")
        if len(elements) == 2:
            # "Gruppe <n>" -> Testat group number.
            self.set_group(elements[1])
        else:
            # e.g. "Coding Class <n>" -> Coding Class number.
            self.set_codingclass(elements[2])
        # Bug fix: the original had an unreachable `return -1` after the
        # if/else above (both branches returned); it has been removed.

    def set_group(self, group):
        self.group = group

    def set_codingclass(self, codingclass):
        self.codingclass = codingclass
def strip_courses(course):
    """Parse a semicolon-separated course string into a list of course dicts.

    Each entry must be "area, degree, semester"; entries with a different
    number of comma-separated fields are silently skipped.
    """
    parsed = []
    for entry in course.split(";"):
        fields = [part.strip() for part in entry.split(",")]
        if len(fields) == 3:
            parsed.append({"area": fields[0],
                           "degree": fields[1],
                           "semester": fields[2]})
    return parsed
def read_all_students(file):
    """Read a Stud.IP student export CSV and fill the module-level
    ``all_students`` dict, keyed by the "Nutzernamen" column.

    Students already present (e.g. enrolled in both courses) are kept
    as-is; only the first occurrence wins.
    """
    # utf-8-sig strips the BOM that Stud.IP exports prepend.
    with open(file, mode='r', encoding='utf-8-sig') as csv_file:
        csv_reader = csv.DictReader(csv_file, delimiter=DELIMITER)
        for row in csv_reader:
            name = row["Vorname"]
            surname = row["Nachname"]
            # Gender is derived from the salutation column; anything other
            # than "Herr"/"Frau" is recorded as -1 (unknown).
            if row["Anrede"] == "Herr":
                gender = "male"
            elif row["Anrede"] == "Frau":
                gender = "female"
            else:
                gender = -1
            # NOTE(review): `id` shadows the builtin of the same name.
            id = row["Nutzernamen"]
            login = row["Anmeldedatum"]
            courses = strip_courses(row["Studiengänge"])
            if id not in all_students:
                all_students[id] = Student(
                    name, surname, gender, id, login, courses)
def read_all_groups(file):
    """Read a Stud.IP group export CSV and attach the group / Coding Class
    assignment to the matching entries in the module-level ``all_students``.

    Rows whose user is unknown are skipped; rows without a "Gruppe" column
    only have their name printed (debug aid for malformed exports).
    """
    with open(file, mode='r', encoding='utf-8-sig') as csv_file:
        csv_reader = csv.DictReader(csv_file, delimiter=DELIMITER)
        for row in csv_reader:
            id = row["Nutzernamen"]
            if id in all_students:
                student = all_students[id]
                if 'Gruppe' in row:
                    student.add_to_group(row["Gruppe"])
                else:
                    # NOTE(review): debug output for rows missing the
                    # "Gruppe" column — consider logging instead.
                    print(row["Vorname"])
                    print(row["Nachname"])
# NOTE(review): the gender_guesser detector is instantiated but never used;
# gender is taken from the "Anrede" column in read_all_students instead.
gender_guesser = gender.Detector()

# Load both course exports, then attach group / Coding-Class assignments.
read_all_students(ALL_STUDENTS_AUD)
read_all_students(ALL_STUDENTS_CC)
read_all_groups(ALL_GROUPS_AUD)
read_all_groups(ALL_GROUPS_CC)

# Per-course counts: all selected students / female only / male only.
data = {}
dataM = {}
dataF = {}

ccmin = -1
ccmax = -1
testatmin = -1
testatmax = -1
user_input = -1
print_all_students = False
print_cc = False
print_testate = False

# Ask which subset of students should be plotted.
while user_input not in (1, 2, 3):
    print("Deine Möglichkeiten:")
    print("1. Alle Studenten plotten")
    print("2. Bestimmte CC plotten")
    print("3. Bestimmte Testate plotten")
    user_input = int(input("Was möchtest du tun?: "))

if user_input == 1:
    print_all_students = True
if user_input == 2:
    print_cc = True
    while ccmin < 1:
        ccmin = int(input("Kleinste Coding Class( >= 1 ): "))
    while ccmax > 14 or ccmax < 1:
        ccmax = int(input("Größte Coding Class( <= 14 ): "))
if user_input == 3:
    print_testate = True
    while testatmin < 1:
        testatmin = int(input("Kleinstes Testat( >= 1 ): "))
    while testatmax > 300 or testatmax < 1:
        # Prompt fixed: the loop accepts values up to 300 ("<= 300"),
        # the original text wrongly said ">= 300".
        testatmax = int(input("Größtes Testat( <= 300 ): "))

# Should a 2-Fächer-Bachelor student count as 0.5 per enrolled course?
zfb_doppelt = ""
while zfb_doppelt != "j" and zfb_doppelt != "n":
    zfb_doppelt = input(
        "Sollen 2FB pro Studiengang nur als 0.5 Personen zählen? (j/n): ")

for student in all_students.values():
    # NOTE(review): assumes group / codingclass labels parse as integers;
    # a non-numeric label would raise ValueError here (same as original).
    testat_in_range = testatmin <= int(student.get_group()) <= testatmax
    cc_in_range = ccmin <= int(student.get_codingclass()) <= ccmax
    if print_all_students or (print_testate and testat_in_range) or (print_cc and cc_in_range):
        student_gender = student.get_gender()
        for course in student.get_courses():
            full_course = course["area"]
            student_life_value = 1
            if course["degree"] == "2-Fächer-Bachelor":
                full_course += " 2FB"
                if zfb_doppelt == "j":
                    student_life_value = 0.5
            data[full_course] = data.get(full_course, 0) + student_life_value
            if student_gender == "female":
                dataF[full_course] = dataF.get(full_course, 0) + student_life_value
            elif student_gender == "male":
                dataM[full_course] = dataM.get(full_course, 0) + student_life_value

# Sort by count (descending), ties broken by course name.
data = sorted(data.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
dataF = sorted(dataF.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
dataM = sorted(dataM.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)

print(data)
print(dataF)
print(dataM)

# Plot total counts with the female counts overlaid on top.
# NOTE(review): dataM is computed and printed but never plotted.
if data:
    x, y = zip(*data)
    p1 = plt.bar(x, y, color=(1.0, 0.64, 0.0, 0.6))
if dataF:
    x, y = zip(*dataF)
    p2 = plt.bar(x, y, color=(0.0, 0.0, 1.0, 0.6))
plt.xticks(rotation=90)
plt.savefig("graph.png")
plt.show()
| lkoehl/studip-csv-analyzer | csv-analyzer.py | csv-analyzer.py | py | 7,133 | python | en | code | 0 | github-code | 13 |
17072588067 | import tkinter as tk
root = tk.Tk() # This creates the main window of the tkinter application
total_expenses_label = tk.Label(root, text="Total Expenses: $0.00")
total_expenses_label.pack()
# NOTE(review): mainloop() blocks here, so everything below this line only
# runs after this first window is closed by the user — likely unintended.
root.mainloop() # This runs the Tkinter event loop which handles user input and updates the GUI
# create a Tkinter window
# NOTE(review): this creates a SECOND Tk root alongside `root` above; this
# window is never given its own mainloop() in this file, and the Add/Remove
# buttons have no command callbacks — TODO confirm intended behaviour.
window = tk.Tk()
window.title("Budget Tracker")
# create input fields for income, expenses, and savings
income_label = tk.Label(window, text="Income:")
income_label.grid(row=0, column=0)
income_entry = tk.Entry(window)
income_entry.grid(row=0, column=1)
cash_label = tk.Label(window, text="Cash (Pocket cash):")
cash_label.grid(row=1, column=0)
cash_entry = tk.Entry(window)
cash_entry.grid(row=1, column=1)
car_payment1_label = tk.Label(window, text="Car payment 1:")
car_payment1_label.grid(row=2, column=0)
car_payment1_entry = tk.Entry(window)
car_payment1_entry.grid(row=2, column=1)
car_payment2_label = tk.Label(window, text="Car payment 2:")
car_payment2_label.grid(row=3, column=0)
car_payment2_entry = tk.Entry(window)
car_payment2_entry.grid(row=3, column=1)
car_insurance_label = tk.Label(window, text="Car insurance:")
car_insurance_label.grid(row=4, column=0)
car_insurance_entry = tk.Entry(window)
car_insurance_entry.grid(row=4, column=1)
electricity_heating_label = tk.Label(window, text="Electricity/Heating:")
electricity_heating_label.grid(row=5, column=0)
electricity_heating_entry = tk.Entry(window)
electricity_heating_entry.grid(row=5, column=1)
mortgage_label = tk.Label(window, text="Mortgage:")
mortgage_label.grid(row=6, column=0)
mortgage_entry = tk.Entry(window)
mortgage_entry.grid(row=6, column=1)
medical_insurance_label = tk.Label(window, text="Medical insurance:")
medical_insurance_label.grid(row=7, column=0)
medical_insurance_entry = tk.Entry(window)
medical_insurance_entry.grid(row=7, column=1)
water_label = tk.Label(window, text="Water:")
water_label.grid(row=8, column=0)
water_entry = tk.Entry(window)
water_entry.grid(row=8, column=1)
landscaping_label = tk.Label(window, text="Landscaping:")
landscaping_label.grid(row=9, column=0)
landscaping_entry = tk.Entry(window)
landscaping_entry.grid(row=9, column=1)
gas_label = tk.Label(window, text="Gas:")
gas_label.grid(row=10, column=0)
gas_entry = tk.Entry(window)
gas_entry.grid(row=10, column=1)
groceries_label = tk.Label(window, text="Groceries:")
groceries_label.grid(row=11, column=0)
groceries_entry = tk.Entry(window)
groceries_entry.grid(row=11, column=1)
# create buttons to add and remove items
add_button = tk.Button(window, text="Add")
add_button.grid(row=12, column=0)
remove_button = tk.Button(window, text="Remove")
remove_button.grid(row=12, column=1)
# create a label to display the budget information
budget_label = tk.Label(window, text="")
budget_label.grid(row=13, column=0, columnspan=2)
# create a function to update the budget information
def update_budget():
# Get the current values from the input fields
income = float(income_entry.get())
cash = float(cash_entry.get())
car_payment1 = float(car_payment1_entry.get())
car_payment2 = float(car_payment2_entry.get())
car_insurance = float(car_insurance_entry.get())
electricity_heating = float(electricity_heating_entry.get())
mortgage = float(mortgage_entry.get())
medical_insurance = float(medical_insurance_entry.get())
water = float(water_entry.get())
landscaping = float(landscaping_entry.get())
gas = float(gas_entry.get())
groceries = float(groceries_entry.get())
# Calculate total expenses
total_expenses = cash + car_payment1 + car_payment2 + car_insurance + electricity_heating + mortgage + medical_insurance + water + landscaping + gas + groceries
calculation_var = tk.StringVar()
calculation = calculation_var.get()
# Calculate net income
if calculation_var.get() == "Monthly":
net_income = income - total_expenses
elif calculation_var.get() == "Yearly":
net_income = (income - total_expenses) * 12
# Update the labels with the new information
total_expenses_label.config(text="Total Expenses: $%.2f" % total_expenses)
net_income_label.config(text="Net Income: $%.2f" % net_income)
# Label showing the running expense total (updated by update_budget)
# NOTE(review): every other widget in this file is parented to `window` and laid
# out with .grid(); these two use `root` and .pack() -- confirm `root` is a
# separate container, because mixing grid() and pack() in one parent raises.
total_expenses_label = tk.Label(root, text="Total Expenses: $0.00")
total_expenses_label.pack()
# Label showing the net income (updated by update_budget)
# BUG FIX: the initial text said "Total Expenses" on the net-income label
net_income_label = tk.Label(root, text="Net Income: $0.00")
net_income_label.pack()
7835052930 | import time
from pathlib import Path
from typing import Generator, Tuple
import pytest
from tests.factories import SecureDropConfigFactory
from tests.functional.app_navigators.source_app_nav import SourceAppNavigator
from tests.functional.conftest import SdServersFixtureResult, spawn_sd_servers
from tests.functional.pageslayout.utils import list_locales, save_static_data
# Very short session expiration time, in seconds (the fixture below converts it
# to minutes for SESSION_EXPIRATION_MINUTES)
SESSION_EXPIRATION_SECONDS = 3
@pytest.fixture(scope="session")
def sd_servers_with_short_timeout(
    setup_journalist_key_and_gpg_folder: Tuple[str, Path],
    setup_rqworker: Tuple[str, str],
) -> Generator[SdServersFixtureResult, None, None]:
    """Spawn the source and journalist apps as separate processes with a short session timeout.

    Session-scoped: the servers start once for the whole test session and are
    torn down when the spawn_sd_servers context manager exits.
    """
    # Generate a securedrop config with a very short session timeout
    journalist_key_fingerprint, gpg_key_dir = setup_journalist_key_and_gpg_folder
    worker_name, _ = setup_rqworker
    config_with_short_timeout = SecureDropConfigFactory.create(
        # the config expects minutes, so convert the seconds constant
        SESSION_EXPIRATION_MINUTES=SESSION_EXPIRATION_SECONDS / 60,
        SECUREDROP_DATA_ROOT=Path("/tmp/sd-tests/functional-session-timeout"),
        GPG_KEY_DIR=gpg_key_dir,
        JOURNALIST_KEY=journalist_key_fingerprint,
        RQ_WORKER_NAME=worker_name,
    )
    # Spawn the apps in separate processes
    with spawn_sd_servers(config_to_use=config_with_short_timeout) as sd_servers_result:
        yield sd_servers_result
@pytest.mark.parametrize("locale", list_locales())
@pytest.mark.pagelayout()
class TestSourceAppSessionTimeout:
    def test_source_session_timeout(self, locale, sd_servers_with_short_timeout):
        """A source who goes idle past the timeout sees the logged-out notice on reload."""
        # Given an SD server with a very short session timeout and a source user
        # accessing the source app with the requested locale
        accept_languages = locale.replace("_", "-")
        with SourceAppNavigator.using_tor_browser_web_driver(
            source_app_base_url=sd_servers_with_short_timeout.source_app_base_url,
            accept_languages=accept_languages,
        ) as navigator:
            # Walk the normal submission flow so a session exists
            navigator.source_visits_source_homepage()
            navigator.source_clicks_submit_documents_on_homepage()
            navigator.source_continues_to_submit_page()
            # Let the session lapse, then reload the page
            time.sleep(SESSION_EXPIRATION_SECONDS + 1)
            navigator.driver.refresh()
            # The "session expired" flash message must be shown
            notification = navigator.driver.find_element_by_class_name("error")
            assert notification.text
            if locale == "en_US":
                assert "You were logged out due to inactivity." in notification.text
            save_static_data(navigator.driver, locale, "source-session_timeout")
| freedomofpress/securedrop | securedrop/tests/functional/pageslayout/test_source_session_layout.py | test_source_session_layout.py | py | 2,917 | python | en | code | 3,509 | github-code | 13 |
6287697497 | # sort subjects by their performance in the first part of the experiment
# this script creates a variable that indicates whether they were better than random in the first 20 rounds and plots
# the effect of revealing/allowing an additional variable.
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy as sp
from pathlib import Path
import statsmodels.api as sm
import statsmodels.formula.api as smf
from stargazer.stargazer import Stargazer
# load the data in long format for each part
part1 = pd.read_csv('data/clean/part1.csv')
all_rounds = pd.read_csv('data/clean/all.csv')
part2 = pd.read_csv('data/clean/part2.csv')
# count of correct guesses per participant over the first 20 rounds
first20 = part1.groupby('participant.code').sum()['player.correct']

def _flag_better_than_random(df):
    """Merge the first-20 correct count into df and flag above-chance players.

    Adds 'first20_count' and a 0/1 'better_random' column (more than 10 of 20
    correct, i.e. better than the 50% chance level).
    """
    df = df.merge(first20, on='participant.code').rename(
        # BUG FIX: the part1 rename previously produced a 'playe.correct' column
        # (typo), inconsistent with the all_rounds/part2 merges
        columns={'player.correct_y': 'first20_count',
                 'player.correct_x': 'player.correct'})
    df.loc[df['first20_count'] > 10, 'better_random'] = 1
    df.loc[df['first20_count'] <= 10, 'better_random'] = 0
    return df

part1 = _flag_better_than_random(part1)
all_rounds = _flag_better_than_random(all_rounds)
part2 = _flag_better_than_random(part2)
# plot the chance of guessing correctly by round (binned into 5 bins) for the first 20 rounds
sns.lmplot(data=all_rounds[all_rounds['round_number_modif'] <= 20], x="round_number_modif",
           y="player.correct", x_bins=5, hue='better_random')
# save the plot
plt.savefig('computed_objects/figures/performance_part1.png')

# plot the two groups throughout the experiment (binned into 10 bins),
# split by performance in part 1
sns.lmplot(data=all_rounds, x="round_number_modif", y="player.correct", x_bins=10, hue='better_random')
plt.title('Learning by performance in part 1')
plt.xlabel('round number')
plt.ylabel('share correct')
plt.savefig('computed_objects/figures/p1_performance_throughout.png')

# accuracy by number of variables they were ALLOWED to choose, by part-1 performance
fig, axs = plt.subplots(figsize=(15, 5))
# BUG FIX: the estimator keyword was misspelled 'eestimator' in both pointplot calls
sns.pointplot(data=all_rounds[all_rounds['round_number_modif'] > 20], x='player.number_variables',
              y='player.correct', hue='better_random', estimator='mean', join=False)
axs.set_ylim(.35, .75)
axs.set_title('performance by number of available variables after 20 rounds')
axs.axhline(.5, 0, 1, color='grey')
# NOTE(review): 'performace' typo kept in the output filenames for backward compatibility
fig.savefig('computed_objects/figures/p1_performace_effect_assigned.png')

# accuracy by number of variables actually REVEALED
fig, axs = plt.subplots(figsize=(15, 5))
sns.pointplot(data=all_rounds[all_rounds['round_number_modif'] > 20], x='revealed_variables_count',
              y='player.correct', hue='better_random', estimator='mean', join=False)
axs.set_ylim(.35, .75)
axs.set_title('performance by number of variables revealed')
axs.axhline(.5, 0, 1, color='grey')
fig.savefig('computed_objects/figures/p1_performace_effect_revealed.png')
12947806619 | import socket
from time import strftime
# Simple line-based time server: replies to every message with a timestamped
# greeting until the client sends "quit".
host = ''
port = 12345
addr = (host, port)

server = socket.socket()
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(addr)
server.listen(2)

while True:
    conn, peer = server.accept()
    while True:
        data = conn.recv(1024)
        if data.decode().strip() == 'quit':
            break
        print(data.decode())
        reply = '你好[%s]\r\n' % strftime('%H:%M:%S')
        conn.send(reply.encode())
    conn.close()
# server.close()
| HLQ1102/MyPython | devops/py01/tcp_time_serv.py | tcp_time_serv.py | py | 514 | python | en | code | 0 | github-code | 13 |
def my_project_path(my_project):
    """Return the project's home folder as a Path, or 0 when no project is loaded."""
    project_file = my_project.fileName()
    if project_file == '':
        print('No project available')
        return 0  # sentinel kept for backward compatibility with callers
    print('project', Path(project_file).stem, 'loaded')
    return Path(my_project.homePath())
def my_find_layer(ln):
    """Return the first project layer named exactly ln, or None (with a warning)."""
    matches = QgsProject().instance().mapLayersByName(ln)
    if not matches:
        print('Warning: no matches for', ln)
        return None
    if len(matches) > 1:
        print('Warning: there is more than one layer with name', ln)
    return matches[0]
# variation over the previous function, to make it more flexible
def my_find_approx_layer(approx_ln):
    """Return the first project layer whose name contains approx_ln, or None."""
    for layer in QgsProject().instance().mapLayers().values():
        if approx_ln in layer.name():
            # delegate to my_find_layer so duplicate-name warnings still fire
            return my_find_layer(layer.name())
    return None  # no match found
def my_remove_layer(layer):
    """Remove layer from the current project (no-op if it is not registered)."""
    project = QgsProject().instance()
    if layer in project.mapLayers().values():
        project.removeMapLayer(layer.id())
def my_create_path_from_list(path_as_list, prefix, suffix, ext="shp"):
    """Build the file path <prefix>_<suffix>.<ext> under the folder given by path_as_list.

    inputs:
        path_as_list: list of path components (joined with Path(*...))
        prefix, suffix: filename parts
        ext: filename extension (default "shp")
    The folder (and any missing parents) is created if it does not exist.
    output: full Path to the file
    """
    folder = Path(*path_as_list)
    if not folder.exists():
        folder.mkdir(parents=True)
    return folder / f"{prefix}_{suffix}.{ext}"
def my_processing_run(operation, ln_input, dict_params, layer_name):
    """Run a processing algorithm into a temporary (in-memory) layer and add it to the project.

    operation: processing algorithm id, e.g. "native:buffer"
    ln_input: value for the algorithm's 'INPUT' parameter
    dict_params: remaining algorithm parameters (everything except INPUT/OUTPUT)
    layer_name: name for the output layer
    output: the output QgsVectorLayer
    """
    # BUG FIX: copy the dict so the caller's dict_params is not mutated with
    # INPUT/OUTPUT keys as a side effect
    params = dict(dict_params)
    params['INPUT'] = ln_input
    params['OUTPUT'] = QgsProcessing.TEMPORARY_OUTPUT
    mylayer = processing.run(operation, params)['OUTPUT']
    mylayer.setName(layer_name)
    QgsProject().instance().addMapLayer(mylayer)
    return mylayer
def my_export_layer_as_file(mylayer, fn):
    """Save a QgsVectorLayer to the file fn; silently ignores non-vector inputs."""
    if not isinstance(mylayer, QgsVectorLayer):
        return
    # the OUTPUT parameter must be a plain string, not a Path
    processing.run("native:savefeatures", {'INPUT': mylayer, 'OUTPUT': str(fn)})
def my_create_project(my_folder, project_name):
    """Clear the current project, set its title, and save it as <my_folder>/<project_name>.qgz."""
    project = QgsProject.instance()
    project.clear()
    project.setTitle(project_name)
    project.write(str(my_folder / project_name) + '.qgz')
def my_create_project_with_crs(my_folder, project_name, crs_epsg=4326):
    """Clear the current project, set its CRS from an EPSG code, title it, and save it.

    Saved as <my_folder>/<project_name>.qgz; default CRS is EPSG:4326.
    """
    project = QgsProject.instance()
    project.clear()
    project.setCrs(QgsCoordinateReferenceSystem(crs_epsg))
    project.setTitle(project_name)
    project.write(str(my_folder / project_name) + '.qgz')
def my_add_vector_layer_from_shapefile(fn, ln):
    """Load a shapefile, force UTF-8 encoding, name it ln, and add it to the project.

    fn: path to the file (anything str() accepts)
    ln: name for the new layer
    output: the loaded QgsVectorLayer
    """
    layer = QgsVectorLayer(str(fn), "", "ogr")
    if layer.dataProvider().encoding() != 'UTF-8':
        layer.dataProvider().setEncoding('UTF-8')
    layer.setName(ln)
    QgsProject().instance().addMapLayer(layer)
    return layer
def my_add_layer_from_csv(fn, ln, params):
    """Load a delimited-text layer from a CSV file and add it to the project.

    fn: Path to the CSV file (as_uri() is used to build the provider URI)
    ln: name for the new layer
    params: URI query string appended after the file URI (delimiter, fields, ...)
    output: the loaded QgsVectorLayer
    """
    uri = fn.as_uri() + params
    layer = QgsVectorLayer(uri, '', "delimitedtext")
    if layer.dataProvider().encoding() != 'UTF-8':
        layer.dataProvider().setEncoding('UTF-8')
    layer.setName(ln)
    QgsProject().instance().addMapLayer(layer)
    return layer
def my_add_to_memory_vector_layer_from_shapefile(fn, ln):
    """Load a shapefile and add a memory-layer copy of it to the project.

    fn: path to the file
    ln: name for the memory copy
    output: the in-memory QgsVectorLayer clone
    """
    disk_layer = QgsVectorLayer(str(fn), "", "ogr")
    # copy all features into a 'memory:' layer via saveselectedfeatures
    disk_layer.selectAll()
    memory_copy = processing.run("native:saveselectedfeatures",
                                 {'INPUT': disk_layer, 'OUTPUT': 'memory:'})['OUTPUT']
    disk_layer.removeSelection()
    memory_copy.setName(ln)
    QgsProject().instance().addMapLayer(memory_copy)
    return memory_copy
def my_zoom_to_layer(layer_name):
    """Set the project CRS and canvas extent from the first layer named layer_name.

    Does nothing if no layer with that name exists.
    """
    layers = QgsProject().instance().mapLayersByName(layer_name)
    if not layers:
        return
    layer = layers[0]
    QgsProject.instance().setCrs(layer.crs())
    iface.mapCanvas().setExtent(layer.extent())
    iface.mapCanvas().refresh()
def my_add_string_attribute_and_compute_value(layer):
    '''
    input: layer (vector layer that already has a numeric 'Total' field)
    creates a new string field called 'produces' and sets it to 'yes'/'no'
    depending on whether 'Total' is non-zero; returns the same layer
    '''
    # Create new categorized attribute 'produces' with values 'yes' or 'no'
    fld=QgsField('produces',QVariant.String)
    # edit() opens an edit session and commits the changes when the block exits
    with edit(layer):
        layer.addAttribute(fld)
        layer.updateFields()
        for feat in layer.getFeatures():
            if feat['Total'] == 0:
                feat['produces'] = 'no'
            else:
                feat['produces'] = 'yes'
            # “update-after-change”: write the modified feature back to the layer
            layer.updateFeature(feat) # 'res' to be silent
    return layer
def my_INE_preprocessing(layer):
    """
    Edits the vector layer in place and returns it.
    The goal is to compute the values of attribute di_co from the values of attribute NUTS_2013.
    Only the features whose NUTS_2013 code has the maximum length are of interest
    (those are the counties); di_co gets the last 4 digits of that code.
    """
    # 1st pass: determine maximum length of NUTS_2013 over all features
    maxDigits=0
    for feat in layer.getFeatures():
        if len(feat['NUTS_2013']) > maxDigits:
            maxDigits=len(feat['NUTS_2013'])
    # 2nd pass: for the longest codes, compute and store new 4-digit code (last 4 digits) in di_co
    # edit() opens an edit session and commits the changes when the block exits
    with edit(layer):
        for feat in layer.getFeatures():
            if len(feat['NUTS_2013']) == maxDigits:
                feat['di_co'] = feat['NUTS_2013'][-4:] # last 4 digits
                # “update-after-change”: write the modified feature back to the layer
                res=layer.updateFeature(feat) # 'res' to be silent
    # return output layer
    return layer
def my_create_sturges_graduated_legend_dict(vlayer,attrib,colormap,myopacity,units):
    '''
    This function creates a dictionary to be used as input for setting up the legend:
    {label: (classMin, classMax, QColor, opacity)}.
    The number of classes is given by Sturges' rule (log2 of the feature count).
    # The inputs for the function are:
    # 1. the layer and one of its attributes
    # 2. the colormap from matplotlib, e.g. 'viridis'
    # 3. a value between 0 and 1 for the opacity of the symbols, e.g. 0.7
    # 4. a string to add information to the classes in the legend, e.g. 'hl' for the units
    '''
    from matplotlib.cm import get_cmap
    import numpy as np
    # determine what is the range of values for the legend
    idx = vlayer.fields().indexOf(attrib)
    values = list(vlayer.uniqueValues(idx)) # uniqueValues returns a "set"
    # determine min and max
    # NOTE(review): mymin is computed but never used -- classes always start at 0
    # (see the linspace below); confirm that is the intended behaviour
    mymin=min(values)
    mymax=max(values)
    D={} # initializes dictionary
    count=0
    # number of classes as the logarithm of the number of observations (Sturges)
    N = int(1+np.ceil(np.log2(vlayer.featureCount())))
    breaks=np.linspace(0,mymax,num=N) # linspace divides interval in N parts
    # color using colormap defined in the header of the script
    mycolormap=get_cmap(colormap,N) # get N colors from colormap
    mycolors=mycolormap.colors*255 # numpy.ndarray of RGBA rows scaled to 0-255
    for i in range(1,len(breaks)):
        # determine class minimum and maximum value (each class starts where the previous ended)
        if i==1:
            classMin = 0
        else:
            classMin = classMax
        classMax = breaks[i]
        # define label
        mylabel = f'from {round(classMin)} to {round(classMax)} {units}'
        # choose count-th color from mycolors
        mycolor=mycolors[count]
        count +=1
        # create QColor object
        myQColor=QColor(mycolor[0],mycolor[1],mycolor[2]) #RGB
        # insert a new entry to the dictionary
        D[mylabel]= (classMin,classMax,myQColor,myopacity)
    return D # dictionary
############################# Create symbology using all years between year_start and year_end
def my_create_jenks_symbology(fn, N, year_start, year_end, my_colormap, my_opacity, my_units):
    """Build a graduated-legend dict with N Jenks classes from the year columns of a vector file.

    fn: path to a geopandas-readable vector file
    N: number of Jenks classes
    year_start, year_end: inclusive bounds for the columns to use
        (NOTE(review): compared directly against column labels -- assumes the
        year columns are strings of the same form; confirm)
    my_colormap / my_opacity / my_units: passed through to the legend builder
    output: dict {label: (classMin, classMax, QColor, opacity)}
    """
    import jenkspy
    # 1. Read data with geopandas and select the columns of production (years)
    gdf = gpd.read_file(fn)
    df_all = pd.DataFrame(gdf.drop(columns='geometry'))  # drop geometries, keep attributes
    cols = df_all.columns[(df_all.columns >= year_start) & (df_all.columns <= year_end)]
    df = df_all[cols]
    # 2. Compute Jenks breaks over all values
    x = df.to_numpy().flatten()
    # BUG FIX: n_classes was hard-coded to 10, ignoring the N parameter
    breaks = jenkspy.jenks_breaks(x, n_classes=N)
    # 3. Create the dictionary for the legend
    return my_create_graduated_legend_from_breaks_dict(breaks, my_colormap, my_opacity, my_units)
# adapted from my_create_sturges_graduated_legend_dict(vlayer,attrib,colormap,myopacity,units) (T19)
def my_create_graduated_legend_from_breaks_dict(breaks,colormap,myopacity,units):
    '''
    This function creates a dictionary to be used as input for setting up the legend:
    {label: (classMin, classMax, QColor, opacity)}.
    The classes are defined by the 'breaks' input (len(breaks)-1 classes).
    # The inputs for the function are:
    # 1. breaks that define classes
    # 2. the colormap from matplotlib, e.g. 'viridis'
    # 3. a value between 0 and 1 for the opacity of the symbols, e.g. 0.7
    # 4. a string to add information to the classes in the legend, e.g. 'hl' for the units
    '''
    from matplotlib.cm import get_cmap
    import numpy as np
    D={} # initializes dictionary
    count=0
    # N colors are sampled from the colormap (one more than the number of classes,
    # since len(breaks) breaks define len(breaks)-1 classes)
    N=len(breaks)
    # color using colormap defined in the header of the script
    mycolormap=get_cmap(colormap,N) # get N colors from colormap
    mycolors=mycolormap.colors*255 # numpy.ndarray of RGBA rows scaled to 0-255
    for i in range(1,len(breaks)):
        # determine class minimum and maximum value (each class starts where the previous ended)
        if i==1:
            classMin = 0
        else:
            classMin = classMax
        classMax = breaks[i]
        # define label
        mylabel = f'from {round(classMin)} to {round(classMax)} {units}'
        # choose count-th color from mycolors
        mycolor=mycolors[count]
        count +=1
        # create QColor object
        myQColor=QColor(mycolor[0],mycolor[1],mycolor[2]) #RGB
        # insert a new entry to the dictionary
        D[mylabel]= (classMin,classMax,myQColor,myopacity)
    return D # dictionary
# creates graduated symbology from dictionary with structure as above
def my_set_graduated_legend(vlayer, attrib, dict):
    """Apply graduated symbology from dict {label: (classMin, classMax, QColor, opacity)}.

    vlayer: vector layer to style (renderer replaced, layer repainted)
    attrib: attribute name driving the classification
    """
    myRangeList = []
    # FIX: removed the unused local 'count' from the original
    for mylabel, (classMin, classMax, myQColor, myopacity) in dict.items():
        mySymbol = QgsSymbol.defaultSymbol(vlayer.geometryType())
        mySymbol.setColor(myQColor)
        mySymbol.setOpacity(myopacity)
        # graduated symbols pair a QgsClassificationRange with a symbol
        myRangeList.append(
            QgsRendererRange(QgsClassificationRange(mylabel, classMin, classMax), mySymbol))
    # build the graduated renderer over the chosen attribute and apply it
    vlayer.setRenderer(QgsGraduatedSymbolRenderer(attrib, myRangeList))
    # Refresh layer
    vlayer.triggerRepaint()
# updates single band raster layer symbology by stretching to min and max
def my_stretch_single_band_raster_symbology_to_min_max(rlayer):
    '''
    input: single band raster layer
    no output (the layer's contrast enhancement is updated in place and repainted)
    '''
    ############################################# reset min and max
    renderer = rlayer.renderer()
    myType = renderer.dataType(1)
    myEnhancement = QgsContrastEnhancement(myType)
    myEnhancement.setContrastEnhancementAlgorithm(
        QgsContrastEnhancement.StretchToMinimumMaximum, True)
    # BUG FIX: 'provider' was used without ever being defined (NameError at runtime)
    provider = rlayer.dataProvider()
    # statistics for band 1 of the raster layer over its full extent
    stats = provider.bandStatistics(1, QgsRasterBandStats.All, rlayer.extent(), 0)
    # stretch between the band's actual minimum and maximum values
    myEnhancement.setMinimumValue(stats.minimumValue)
    myEnhancement.setMaximumValue(stats.maximumValue)
    # set the enhancement on the renderer and refresh
    rlayer.renderer().setContrastEnhancement(myEnhancement)
    rlayer.triggerRepaint()
# This function does not require a dictionary of ranges; it mimics the QGIS interface
def my_set_vector_graduated_ramp_legend(vlayer,value_field,num_classes,ramp_name):
    '''
    Apply a Jenks-classified graduated symbology with a named color ramp.
    input: vector layer, field with values for symbology, number of classes, color ramp name (see below)
    no output (the layer's renderer is replaced and the layer repainted)
    '''
    # https://gis.stackexchange.com/questions/342352/apply-a-color-ramp-to-vector-layer-using-pyqgis3
    # see possible ramps: print(QgsStyle().defaultStyle().colorRampNames())
    classification_method = QgsClassificationJenks()
    #You can use any of these classification method classes:
    #QgsClassificationQuantile()
    #QgsClassificationEqualInterval()
    #QgsClassificationJenks()
    #QgsClassificationPrettyBreaks()
    #QgsClassificationLogarithmic()
    #QgsClassificationStandardDeviation()
    #
    # change format settings as necessary; "%1 - %2" shows "lower - upper" per class
    format = QgsRendererRangeLabelFormat()
    format.setFormat("%1 - %2")
    format.setPrecision(2)
    format.setTrimTrailingZeroes(True)
    # color ramp, looked up by name in the default style
    default_style = QgsStyle().defaultStyle()
    color_ramp = default_style.colorRamp(ramp_name)
    # renderer: classify the field, then apply labels and the ramp
    my_renderer = QgsGraduatedSymbolRenderer()
    my_renderer.setClassAttribute(value_field)
    my_renderer.setClassificationMethod(classification_method)
    my_renderer.setLabelFormat(format)
    my_renderer.updateClasses(vlayer, num_classes)
    my_renderer.updateColorRamp(color_ramp)
    # set renderer
    vlayer.setRenderer(my_renderer)
    # Refresh layer
    vlayer.triggerRepaint()
##################################################### Layout manager
def my_create_layout_manager(layout_name):
    """Create (or replace) a print layout named layout_name; return (manager, layout)."""
    manager = QgsProject.instance().layoutManager()
    # drop any existing layout with the same name so we start from a clean slate
    for existing in manager.printLayouts():
        if existing.name() == layout_name:
            manager.removeLayout(existing)
    layout = QgsPrintLayout(QgsProject.instance())
    layout.initializeDefaults()
    layout.setName(layout_name)
    manager.addLayout(layout)
    return manager, layout
############################################## create map item in the layout
def my_add_map_to_layout(layout,layer,X,Y,width,height):
    """Add a map item showing `layer` to `layout` at (X, Y) mm with the given size in mm.

    Returns the QgsLayoutItemMap. The extent is the layer's full extent scaled
    by 1.7 to add a margin around it.
    """
    # NOTE: 'map' shadows the builtin; kept unchanged here (doc-only edit)
    map = QgsLayoutItemMap(layout)
    # placeholder rectangle; the real position/size are set by attemptMove/attemptResize below
    map.setRect(20, 20, 20, 20)
    # set the map extent
    ms = QgsMapSettings()
    ms.setLayers([layer]) # set layers to be mapped
    rect = QgsRectangle(ms.fullExtent())
    rect.scale(1.7)  # 1.7x margin around the layer extent
    ms.setExtent(rect)
    map.setExtent(rect)
    # fully transparent white background (alpha 0)
    map.setBackgroundColor(QColor(255, 255, 255, 0))
    layout.addLayoutItem(map)
    # resize and move (layout units are millimeters)
    map.attemptMove(QgsLayoutPoint(X, Y, QgsUnitTypes.LayoutMillimeters))
    map.attemptResize(QgsLayoutSize(width,height, QgsUnitTypes.LayoutMillimeters))
    return map
############################################ add scale bar
def my_add_scale_bar_to_layout(layout,map,size, X,Y,segments,units):
    """Add a km scale bar linked to `map` at (X, Y) mm.

    size: font size in points; segments: number of bar segments;
    units: kilometers represented by each segment.
    """
    scalebar = QgsLayoutItemScaleBar(layout)
    scalebar.setStyle('Line Ticks Up')
    scalebar.setUnits(QgsUnitTypes.DistanceKilometers)
    scalebar.setNumberOfSegments(segments)
    scalebar.setNumberOfSegmentsLeft(0)
    scalebar.setUnitsPerSegment(units)
    # link to the map item so the bar tracks its scale
    scalebar.setLinkedMap(map)
    scalebar.setUnitLabel('km')
    scalebar.setFont(QFont('Arial', size))
    scalebar.update()
    layout.addLayoutItem(scalebar)
    scalebar.attemptMove(QgsLayoutPoint(X,Y, QgsUnitTypes.LayoutMillimeters))
########################################## add label to layout
def my_add_label_to_layout(layout,label,size,X,Y,minsizeX,minsizeY):
    """Add a text label to `layout` at (X, Y) mm.

    label: the text; size: font size in points;
    minsizeX/minsizeY: minimum box size in mm (applied after fitting to the text).
    """
    title = QgsLayoutItemLabel(layout)
    title.setText(label)
    text_format = QgsTextFormat()
    text_format.setFont(QFont("Arial"))
    text_format.setSize(size)
    title.setTextFormat(text_format)
    title.adjustSizeToText()
    # minimum box size for label
    title.setMinimumSize(QgsLayoutSize(minsizeX,minsizeY, QgsUnitTypes.LayoutMillimeters))
    title.update()
    layout.addLayoutItem(title)
    title.attemptMove(QgsLayoutPoint(X, Y, QgsUnitTypes.LayoutMillimeters))
# add picture to layout
def my_add_picture_to_layout(layout, fn, X, Y, width, height):
    """Add the picture file fn to layout at (X, Y) mm with the given size in mm."""
    picture = QgsLayoutItemPicture(layout)
    picture.setPicturePath(str(fn))
    layout.addLayoutItem(picture)
    picture.attemptResize(QgsLayoutSize(width, height, QgsUnitTypes.LayoutMillimeters))
    picture.attemptMove(QgsLayoutPoint(X, Y, QgsUnitTypes.LayoutMillimeters))
# add legend to layout
def my_add_legend_to_layout(layout, layerTree, X, Y):
    """Add a legend for the given layer tree to layout at (X, Y) mm, with no title."""
    legend = QgsLayoutItemLegend(layout)
    legend.setTitle("")
    legend.model().setRootGroup(layerTree)
    layout.addLayoutItem(legend)
    legend.attemptMove(QgsLayoutPoint(X, Y, QgsUnitTypes.LayoutMillimeters))
| isa-ulisboa/qgis-python | my_functions.py | my_functions.py | py | 19,060 | python | en | code | 0 | github-code | 13 |
34737807299 | import math
import torch
from torch import nn
import my_plt
import test_attention_cues
import train_framework
# To keep only meaningful tokens in the attention pooling, a valid sequence
# length (number of tokens) can be given so that positions beyond it are
# filtered out when computing the softmax.
#@save
def masked_softmax(X, valid_lens):
    """Softmax over the last axis, masking out positions beyond valid_lens.

    X: 3-D tensor of scores.
    valid_lens: None (no masking), a 1-D tensor with one length per batch row
    (broadcast over every query), or a 2-D tensor with one length per query.
    Masked positions receive probability 0.
    """
    if valid_lens is None:
        return nn.functional.softmax(X, dim=-1)
    shape = X.shape
    if valid_lens.dim() == 1:
        # one length per batch element: repeat it for every query row
        valid_lens = torch.repeat_interleave(valid_lens, shape[1])
    else:
        valid_lens = valid_lens.reshape(-1)
    # replace masked positions with a very large negative value so that
    # their softmax output becomes 0
    X = train_framework.sequence_mask(X.reshape(-1, shape[-1]), valid_lens,
                                      value=-1e6)
    return nn.functional.softmax(X.reshape(shape), dim=-1)
if __name__ == '__main__':
    # Consider two samples given as 2 x 4 matrices; their valid lengths are 2 and 3.
    maskSoftMax = masked_softmax(torch.rand(2, 2, 4), torch.tensor([2, 3]))
    # After the masked softmax, entries beyond the valid length are masked to 0.
    print("maskSoftMax : ", maskSoftMax)
    # sample output:
    # maskSoftMax :
    # tensor([[[0.4881, 0.5119, 0.0000, 0.0000],
    #          [0.4542, 0.5458, 0.0000, 0.0000]],
    #
    #         [[0.2827, 0.4817, 0.2357, 0.0000],
    #          [0.2787, 0.2590, 0.4623, 0.0000]]])
    # A 2-D tensor can also be used to give a valid length for every row.
    maskSoftMax = masked_softmax(torch.rand(2, 2, 4), torch.tensor([[1, 3], [2, 4]]))
    print("maskSoftMax : ", maskSoftMax)
    # sample output:
    # maskSoftMax :
    # tensor([[[1.0000, 0.0000, 0.0000, 0.0000],
    #          [0.2168, 0.3288, 0.4543, 0.0000]],
    #
    #         [[0.3199, 0.6801, 0.0000, 0.0000],
    #          [0.2193, 0.2319, 0.3128, 0.2360]]])
# Implementation of additive attention.
#@save
class AdditiveAttention(nn.Module):
    """Additive attention: scores queries against keys through a learned tanh MLP."""
    def __init__(self, key_size, query_size, num_hiddens, dropout, **kwargs):
        super(AdditiveAttention, self).__init__(**kwargs)
        self.W_k = nn.Linear(key_size, num_hiddens, bias=False)
        self.W_q = nn.Linear(query_size, num_hiddens, bias=False)
        self.w_v = nn.Linear(num_hiddens, 1, bias=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, queries, keys, values, valid_lens):
        """Return the attention pooling of `values`; also stores attention_weights."""
        q, k = self.W_q(queries), self.W_k(keys)
        # broadcast-add after expanding dims:
        # (batch, #queries, 1, hidden) + (batch, 1, #kv pairs, hidden)
        combined = torch.tanh(q.unsqueeze(2) + k.unsqueeze(1))
        # w_v has a single output; drop that trailing dimension so that
        # scores has shape (batch, #queries, #kv pairs)
        scores = self.w_v(combined).squeeze(-1)
        self.attention_weights = masked_softmax(scores, valid_lens)
        # values: (batch, #kv pairs, value dim) -> output (batch, #queries, value dim)
        return torch.bmm(self.dropout(self.attention_weights), values)
if __name__ == '__main__':
    # Demonstrate the AdditiveAttention class above with a small example.
    # Queries, keys and values have shape (batch size, number of steps or
    # token sequence length, feature size): here (2, 1, 20), (2, 10, 2) and (2, 10, 4).
    # The attention-pooling output has shape (batch size, #queries, value dimension).
    queries, keys = torch.normal(0, 1, (2, 1, 20)), torch.ones((2, 10, 2))
    # Mini-batch of values: the two value matrices are identical
    values = torch.arange(40, dtype=torch.float32).reshape(1, 10, 4).repeat(
        2, 1, 1)
    valid_lens = torch.tensor([2, 6])
    attention = AdditiveAttention(key_size=2, query_size=20, num_hiddens=8,
                                  dropout=0.1)
    # eval() disables dropout for the demo
    attention.eval()
    print("attention(queries, keys, values, valid_lens) : ",
          attention(queries, keys, values, valid_lens))
    test_attention_cues.show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)),
                                      xlabel='Keys', ylabel='Queries', usingION = False)
    my_plt.plt.ioff()
    my_plt.plt.show()
# Scaled dot-product attention.
#@save
class DotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(d)) V."""
    def __init__(self, dropout, **kwargs):
        super(DotProductAttention, self).__init__(**kwargs)
        self.dropout = nn.Dropout(dropout)

    def forward(self, queries, keys, values, valid_lens=None):
        """queries: (batch, #queries, d); keys: (batch, #kv, d);
        values: (batch, #kv, value dim); valid_lens: (batch,) or (batch, #queries)."""
        d = queries.shape[-1]
        # swap the last two dims of keys so bmm computes Q K^T, then scale by sqrt(d)
        scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d)
        self.attention_weights = masked_softmax(scores, valid_lens)
        return torch.bmm(self.dropout(self.attention_weights), values)
if __name__ == '__main__':
    # Demo of scaled dot-product attention; reuses keys, values and valid_lens
    # defined at module level in the additive-attention demo above.
    queries = torch.normal(0, 1, (2, 1, 2))
    attention = DotProductAttention(dropout=0.5)
    # eval() disables dropout for the demo
    attention.eval()
    print("attention(queries, keys, values, valid_lens) : ",
          attention(queries, keys, values, valid_lens))
    test_attention_cues.show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)),
                                      xlabel='Keys', ylabel='Queries', usingION = False)
    my_plt.plt.ioff()
    my_plt.plt.show()
| lucelujiaming/luceluDiveIntoDeepLearning | ch14_natural_language_processing_pretraining/test_attention_scoring_functions.py | test_attention_scoring_functions.py | py | 5,994 | python | zh | code | 0 | github-code | 13 |
15726111250 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Python 程式的第一、二行一定是如上所示,請不要略過了。
#file.py
f=open("./sample.txt","r")
b=f.read()
f.close()
"""------------------------------------讀取資料"""
# Keep characters in the range 'A'..'z' and turn everything else into a space.
# (Note: this range also keeps the symbols between 'Z' and 'a', i.e. [ \ ] ^ _ `.)
cleaned = ''.join(ch if 'A' <= ch <= 'z' else ' ' for ch in b)
# split into words and keep only those with at least five characters
sampleWordList = [word for word in cleaned.split() if len(word) >= 5]
print (sampleWordList )
"""----------------------------------列表"""
# Query loop: ask for words until the user submits an empty line.
while(True):
    s=input('Please input the word more then five letters\n')
    # BUG FIX: test for the empty "quit" input before the length check.
    # Previously len('') < 5 hit `continue` first, so the break was
    # unreachable and the loop could never end.
    if(s==''):
        break
    if len(s)<5:
        print ("your input is lesser than five letters")
        continue
    # membership test against the word list built above
    if s in sampleWordList:
        print ('The word exists in the list')
    else:
        print ('The word doesn\'t exist in the list')
"""---------------------------------查詢有無(while迴圈)"""
24847002900 | #!/usr/bin/python
# encoding: utf-8
import sys
from workflow import Workflow
log = None
def main(wf):
    """Alfred script filter: list .sublime-project files matching the query.

    wf: an alfred-workflow Workflow instance; results are emitted via
    wf.add_item / wf.send_feedback.
    """
    import subprocess
    # Get args from Workflow as normalized Unicode
    args = wf.args
    # mdfind returns one absolute path per line (as bytes)
    project_paths = subprocess.check_output([
        'mdfind',
        'kMDItemFSName=*.sublime-project'
    ]).splitlines()
    # decode every path to normalised unicode
    # BUG FIX: the original used range(len(project_paths) - 1), which left the
    # last path undecoded
    project_paths = [wf.decode(p) for p in project_paths]
    if not args:
        query_matches = project_paths  # empty query: return all project files
    else:
        query_matches = wf.filter(args[0], project_paths)
    # Add matches to Alfred feedback
    for match_path in query_matches:
        # file name without directory or extension
        file_name = match_path.split('/')[-1].split('.')[0]
        wf.add_item(file_name,
                    match_path,
                    uid=match_path,
                    arg=match_path,
                    valid=True,
                    icon="icon.png")
    # Send output to Alfred
    wf.send_feedback()
if __name__ == '__main__':
    wf = Workflow()
    # Assign Workflow logger to a global variable, so all module
    # functions can access it without having to pass the Workflow
    # instance around
    log = wf.logger
    # wf.run wraps main with the workflow's error handling; its return value
    # becomes the process exit code
    sys.exit(wf.run(main))
24660421699 | import asyncio
from asyncio import Task, CancelledError
from typing import Optional
from store import Store
from vk_bot.data_classes import TypeMessage
class Poller:
    """Background worker that relays the bot's outgoing messages to VK.

    Consumes items from the bot manager's output queue and dispatches each
    one through the appropriate VK API call.
    """

    def __init__(self, store: Store):
        self.store = store
        self.is_running = False
        self.poll_task: Optional[Task] = None

    async def start(self):
        """Mark the poller as running and launch the polling task."""
        self.is_running = True
        self.poll_task = asyncio.create_task(self.poll())

    async def stop(self):
        """Cancel the polling task, wait for it to wind down, and log it."""
        self.is_running = False
        self.poll_task.cancel()
        await self.poll_task
        self.store.bots_manager.logger.info("Stopped Bot manager polling")

    async def poll(self):
        """Drain the output queue until cancelled or stopped."""
        while self.is_running:
            try:
                outgoing = await self.store.bots_manager.vk_bot.queue_output.get()
                self.store.bots_manager.vk_bot.queue_output.task_done()
                # Button-press events need the dedicated event-answer endpoint.
                if outgoing.type.value == TypeMessage.message_event.value:
                    await self.store.vk_api.send_message_event_answer(outgoing)
                else:
                    await self.store.vk_api.send_message(outgoing)
            except CancelledError:
                # stop() cancelled us at the await point: finish the loop.
                self.is_running = False
| VIVERA83/game_sapper | game_sapper/store/manager/poller.py | poller.py | py | 1,169 | python | en | code | 0 | github-code | 13 |
7385707177 | import nextcord
from nextcord import Interaction, SlashOption
from nextcord.ext import commands
from discord_utils.embed_validation import validate_embed
from scrapers.discgolfbagbuilder import DiscgolfBagBuilder
from .bagconfig import BagConfig
class Bag(commands.Cog):
    """Cog exposing /bag slash commands backed by discgolfbagbuilder.com."""

    def __init__(self, bot):
        self.bot = bot

    @nextcord.slash_command(name="bag", description="Get bag from discgolfbagbuilder", guild_ids=[])
    async def bag_slash_command(self, interaction:Interaction):
        """/bag slash command (container for the subcommands below)"""

    @bag_slash_command.subcommand(name="show", description="Show your bag, or from another player")
    async def show(
        self,
        interaction:Interaction,
        user:nextcord.Member=SlashOption(description="user", required=False)
    ):
        """/bag show [user] - render the stored bag of *user* (default: caller)."""
        await interaction.response.defer()
        if user is None:
            user = interaction.user  # no user given: show the caller's own bag
        bag_scraper = self.scrape_bag(interaction.guild.name, user.id)
        if bag_scraper is not None:
            embed = self.get_embed(bag_scraper)
            if embed is not None:
                file = nextcord.File(bag_scraper.image_file, filename="flight.png")
                await interaction.followup.send(file=file, embed=embed)
            else:
                # Embed failed validation (presumably too many discs/too long)
                await interaction.followup.send('https://giphy.com/embed/32mC2kXYWCsg0')
                await interaction.followup.send('WOW, thats a lot of discs in the bag!')
        else:
            await interaction.followup.send(f'Could not find any bag for player {user.display_name}')

    @bag_slash_command.subcommand(name="add", description="Add or modify your bag from discgolfbagbuilder.com")
    async def add(
        self,
        interaction:Interaction,
        bag_url=SlashOption(description="discgolfbagbuilder.com url", required=True)
    ):
        """/bag add <url> - store (or replace) the caller's bag URL."""
        user = interaction.user
        cfg = BagConfig(interaction.guild.name)
        modified = cfg.add_bag(user.id, bag_url)
        if modified:
            await interaction.response.send_message(f'Modified your bag {user.mention}')
        else:
            await interaction.response.send_message(f'Added your bag {user.mention}')

    def scrape_bag(self, guild_name, user):
        """Scrape the stored bag of *user* from discgolfbagbuilder.com.

        :return: the populated scraper, or None when no bag URL is stored
        """
        cfg = BagConfig(guild_name)
        bag = cfg.get_bag(user)
        if bag is not None:
            bag_scraper = DiscgolfBagBuilder(bag)
            bag_scraper.scrape_discs()
            return bag_scraper
        return None

    def _add_disc_fields(self, embed, title, discs):
        """Append one embed row (disc links / flights / spacer) for a category.

        Factored out of get_embed, which previously repeated this identical
        block four times (drivers, fairways, midranges, putters). Does
        nothing when *discs* is empty, matching the old per-category guards.
        """
        if not discs:
            return
        names = ''
        flights = ''
        for disc in discs:
            names += f'[{disc}]({disc.url})\n'
            flights += f'{disc.flight}\n'
        embed.add_field(name=title, value=names, inline=True)
        embed.add_field(name="Flight", value=flights, inline=True)
        embed.add_field(name="\u200b", value="\u200b", inline=True)

    def get_embed(self, bag_scraper):
        """Build the bag embed; return None when it fails validation."""
        embed = nextcord.Embed(title=bag_scraper.bag_name, description=bag_scraper.bag_description, url=bag_scraper.scrape_url, color=0x004899)
        embed.set_image(url="attachment://flight.png")
        self._add_disc_fields(embed, "Drivers", bag_scraper.distance_drivers)
        self._add_disc_fields(embed, "Fairways", bag_scraper.fairway_drivers)
        self._add_disc_fields(embed, "Midranges", bag_scraper.midranges)
        self._add_disc_fields(embed, "Putt and Approach", bag_scraper.putt_approach)
        embed.set_footer(text="discgolfbagbuilder.com", icon_url=bag_scraper.icon_url)
        if validate_embed(embed):
            return embed
        return None
| skaretk/disc-score-bot | discgolfbot/bag/bag.py | bag.py | py | 4,992 | python | en | code | 2 | github-code | 13 |
9019693878 | import cv2
import os
IMG_SIZE = 224
def resize(dirname1, dirname2):
    """Resize every readable image in imgs/<dirname1><dirname2> to an
    IMG_SIZE x IMG_SIZE square, overwriting each file in place.

    :param dirname1: first path fragment under 'imgs/' (e.g. 'sushi/test_images/')
    :param dirname2: second path fragment, appended directly to dirname1
    """
    # Build the directory path once instead of re-concatenating the same
    # string for every file; also drops the previously unused local.
    src_dir = os.getcwd() + '/imgs/' + dirname1 + dirname2 + '/'
    files = os.listdir(src_dir)
    print("resize start!")
    for file in files:
        # Load the image (cv2.imread returns None for unreadable/non-image
        # files, which are silently skipped)
        img = cv2.imread(src_dir + file)
        if img is not None:
            # Resize to an IMG_SIZE x IMG_SIZE square
            img = cv2.resize(img, dsize=(IMG_SIZE, IMG_SIZE), interpolation=cv2.INTER_NEAREST)
            cv2.imwrite(src_dir + file, img)
    print("completed!!")
resize("sushi/test_images/", "kinmedai") | ryu1sazae/Sushi_Deep_Learning | resize.py | resize.py | py | 663 | python | en | code | 0 | github-code | 13 |
19522551398 | #-----------------------------------------------------------
# SEABED2030 - B3
# Update metadata table with extent information from B2 (tiling)
#
# (C) 2021 Sacha Viquerat, Alfred Wegener Institute Bremerhaven, Germany
# sacha.vsop@gmail.com & fynn.warnke@awi.de
#-----------------------------------------------------------
import os
import sys
import glob
import argparse
import pandas as pd
from GENERAL.lib.MySQL import MySQL_Handler
def read_minmax_files(dir_files):
    """
    Combine all '*.minmax' extent files found in *dir_files* per cruise RID.

    Each file holds whitespace-separated rows of
    xmin xmax ymin ymax zmin zmax rid time_tiling.

    Parameters
    ----------
    dir_files : str
        Input directory with all '*.minmax' metadata files

    Returns
    -------
    results : dict
        Maps "dataset_rid" to a tuple of (integer extent list, mean tiling time)
    """
    paths = glob.glob(os.path.join(dir_files, '*.minmax'))
    col_names = ['xmin', 'xmax', 'ymin', 'ymax', 'zmin', 'zmax', 'rid', 'time_tiling']
    frames = [pd.read_csv(p, sep=' ', header=None, names=col_names) for p in paths]
    combined = pd.concat(frames, ignore_index=True)
    results = {}
    for rid, grp in combined.groupby('rid'):
        # Overall bounding extent across all splits of this cruise.
        extent = [int(v) for v in (grp['xmin'].min(), grp['xmax'].max(),
                                   grp['ymin'].min(), grp['ymax'].max(),
                                   grp['zmin'].min(), grp['zmax'].max())]
        # Average epoch timestamp of the tiling runs for this cruise.
        results[rid] = (extent, grp['time_tiling'].mean())
    return results
def get_cruise_RID_per_tile(dir_files):
    """
    Read all '*.til' files and extract the cruise RID from each filename.

    Filenames are underscore-separated; field 1 is the tile ID and field 3
    the cruise RID (e.g. 'tile_<ID>_cruise_<RID>_... .til').

    Parameters
    ----------
    dir_files : str
        Input directory with all '*.til' metadata files

    Returns
    -------
    results : dict
        Dictionary with "tile ID" as key and string of all featured RIDs
        (e.g. '11102;12578;11001') as item
    """
    files = glob.glob(os.path.join(dir_files, '*.til'))
    results = {}
    for f in files:
        # os.path.basename replaces the old os.path.split() whose directory
        # part was never used.
        f_name = os.path.basename(f)
        f_name_split = f_name.split('_')
        tile_id = int(f_name_split[1])  # extract tile ID from filename
        rid = f_name_split[3]           # extract RID from filename
        # Append to the existing ';'-joined string, or start a new one.
        if tile_id in results:
            results[tile_id] = results[tile_id] + ';' + rid
        else:
            results[tile_id] = rid
    return results
def update_minmax(results_dict, sql_handler):
    """
    Update mysql with extents for X,Y,Z data of every cruise RID
    Parameters
    ----------
    results_dict : dict
        Dictionary with "dataset_rid" as key and tuple of (minmax values, time_tiling) as item
    sql_handler : SQL_Handler
        Handler for SQL access
    """
    # Clear every extent column first, so cruises absent from results_dict
    # end up with NULL values rather than stale ones.
    sql_handler.query('UPDATE metadata SET x_min=NULL, x_max=NULL, y_min=NULL, y_max=NULL, z_min=NULL, z_max=NULL, date_tiled=NULL;')
    for rid, (values, time_tiling) in results_dict.items():
        # NOTE(review): values/rid are interpolated straight into the SQL
        # string; acceptable for pipeline-internal data, but parameterised
        # queries would be safer if MySQL_Handler supports them.
        query_update = f'UPDATE metadata SET x_min = {values[0]}, x_max = {values[1]}, y_min = {values[2]}, y_max = {values[3]}, z_min = {values[4]}, z_max = {values[5]} WHERE dataset_rid = "{rid}";'
        sql_handler.query(query_update)
        # Convert the epoch tiling timestamp via the handler (presumably into
        # a SQL-quotable datetime literal - confirm in MySQL_Handler).
        time_tiled = sql_handler.fromTimeStamp(time_tiling)
        sql_handler.query(f'UPDATE metadata SET date_tiled = {time_tiled} WHERE dataset_rid = "{rid}";')
def update_featured_cruises(results_dict, sql_handler):
    """
    Write, for every tile, the cruise RIDs located in it into info_tiles.

    Parameters
    ----------
    results_dict : dict
        Maps a tile ID to a ';'-joined string of cruise RIDs
        (e.g. '11102;12578;11001')
    sql_handler : SQL_Handler
        Handler for SQL access
    """
    # Reset all per-tile aggregates before writing the fresh values.
    sql_handler.query('UPDATE info_tiles SET featured_cruises=NULL, number_of_cruises=0, filesize_MB=0, avg_weight=0, sum_weight=0;')
    for tile_id, rid_string in results_dict.items():
        cruise_count = len(rid_string.split(';'))
        sql_handler.query(f'UPDATE info_tiles SET featured_cruises="{rid_string}", number_of_cruises={cruise_count} WHERE ID = "{tile_id}";')
def defineInputArguments():
    """Build and return the CLI parser for the minmax/tile sync step."""
    cli = argparse.ArgumentParser(description='sync minmax and tile info with db')
    cli.add_argument('--minmax', '-m', nargs='?', type=str, help='Folder containing minmax files')
    cli.add_argument('--tiledir', '-t', nargs='?', type=str, help='Folder containing tiles')
    return cli
# Entry point: parse folders from the CLI, aggregate the B2 tiling output,
# then sync the metadata and info_tiles tables.
if __name__ =='__main__':
    parser = defineInputArguments()
    args = parser.parse_args()
    minmax_dir=args.minmax
    tile_dir=args.tiledir
    results_minmax = read_minmax_files(minmax_dir) # combine minmax values per cruise RID
    results_featured_cruises = get_cruise_RID_per_tile(tile_dir) # extract RIDs per tile
    sql_handler = MySQL_Handler() # create mysql handler
    update_minmax(results_minmax, sql_handler) # update metadata table with min/max values
    update_featured_cruises(results_featured_cruises, sql_handler) # update info_tiles table with info about featured cruises
| SeaBed2030/IBCSO_v2_Dorschel_et_al_2022 | SEAHORSE/PYTHON/B4_update_metadata.py | B4_update_metadata.py | py | 5,199 | python | en | code | 6 | github-code | 13 |
34060766582 | #!/usr/bin/env python
# Author: Dogacan S. Ozturk
# Function to calculate integrals.
def calculate_point_integral(numberOfPoints, delta, val):
    """Return sum over i in [0, numberOfPoints) of i*delta*val/(numberOfPoints-1).

    Raises ZeroDivisionError when numberOfPoints == 1, matching the original
    per-term division.
    """
    denom = numberOfPoints - 1
    # Each term is divided individually (same accumulation order as before).
    return sum(((i * delta * val) / denom for i in range(numberOfPoints)), 0.0)
# Function to calculate distance between two points on Earth.
def calc_distance(lat1,lat2,lon1,lon2):
    '''
    Great-circle (haversine) distance between two points on Earth.

    Parameters:
    ===========
    lat1: Float latitude of the first point in degrees.
    lat2: Float latitude of the second point in degrees.
    lon1: Float longitude of the first point in degrees.
    lon2: Float longitude of the second point in degrees.

    Returns:
    ========
    dist: Float distance between the two points in km (Earth radius 6371 km).

    Example:
    ========
    >> from hime_helper_functions import calc_distance
    >> distance = calc_distance(lat1, lat2, lon1, lon2)
    '''
    import numpy as np
    # Haversine term: sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlon/2)
    hav = np.sin(np.deg2rad(lat1-lat2)/2.)**2+np.cos(np.deg2rad(lat1))*np.cos(np.deg2rad(lat2))*np.sin(np.deg2rad(lon1-lon2)/2)**2
    central_angle = 2*np.arcsin(hav**0.5)
    return central_angle*6371
# Function to calculate averages.
def smooth(y,box_pts):
    """Boxcar-average *y* over a window of box_pts samples (same-length output)."""
    import numpy as np
    kernel = np.ones(box_pts) / box_pts  # uniform averaging window
    return np.convolve(y, kernel, mode='same')
| dcsozturk/hime | Code/hime_helper_functions.py | hime_helper_functions.py | py | 1,399 | python | en | code | 4 | github-code | 13 |
1816406581 | import torch
import torch.nn as nn
# MomentumOptimizer
class MomentumOptimizer(torch.optim.Optimizer):
    """SGD with classical momentum: v <- momentum*v - lr*grad, then p <- p + v."""

    # Init Method:
    def __init__(self, params, lr=1e-3, momentum=0.9):
        """
        :param params: iterable of parameters (or param groups) to optimize
        :param lr: learning rate
        :param momentum: momentum coefficient, typically in [0, 1)
        """
        super(MomentumOptimizer, self).__init__(params, defaults={'lr': lr})
        self.momentum = momentum
        self.state = dict()
        # Pre-allocate a zero momentum buffer for every parameter.
        for group in self.param_groups:
            for p in group['params']:
                self.state[p] = dict(mom=torch.zeros_like(p.data))

    # Step Method
    def step(self, closure=None):
        """Perform one optimization step (closure accepted for API parity)."""
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue  # parameter took no part in the backward pass
                if p not in self.state:
                    self.state[p] = dict(mom=torch.zeros_like(p.data))
                mom = self.momentum * self.state[p]['mom'] - group['lr'] * p.grad.data
                # BUGFIX: write the buffer back. Previously the updated
                # momentum only lived in a local variable, so the stored
                # buffer stayed at zero and momentum never accumulated
                # across steps.
                self.state[p]['mom'] = mom
p.data += mom | can-git/LossFunctionsComparison | MomentumOptimizer.py | MomentumOptimizer.py | py | 846 | python | en | code | 0 | github-code | 13 |
21373658758 | import numpy as np
"""I've created 3 phase maps varying sinusoidally from 0 to 255, but when I
plot them they aren't in grey scale and I'm not sure how to do that. These
are the current 3 phase maps."""
w = (2 * np.pi) / 50  # spatial angular frequency: one fringe period per 50 pixels

# Three sinusoidal phase maps, mutually shifted by 2*pi/3 and scaled into the
# 0..255 8-bit grey range via 127.5*sin(phase) + 127.5.
# BUGFIX: the first map previously had the 127.5 offset *inside* the sine
# (127.5*sin(w*x + 127.5)), which yields values in [-127.5, 127.5]; the
# offset is now applied outside, consistent with Px2/Px3 and the stated
# 0-to-255 intent.
Px1 = [127.5 * np.sin(w * (i + 1)) + 127.5 for i in range(800)]
Px2 = [127.5 * np.sin(w * (i + 1) + (2 * np.pi / 3)) + 127.5 for i in range(800)]
Px3 = [127.5 * np.sin(w * (i + 1) + (4 * np.pi / 3)) + 127.5 for i in range(800)]

# Repeat each 800-sample row 600 times to form (600, 800) fringe images.
repetitions = 600
px1 = np.tile(Px1, (repetitions, 1))
px2 = np.tile(Px2, (repetitions, 1))
px3 = np.tile(Px3, (repetitions, 1))

# 1-based x coordinate for each column, tiled to the same image shape.
x = [i + 1 for i in range(800)]
x1 = np.tile(x, (repetitions, 1))
print(x1)

# Test object: a flat plane with a rectangular bump of the given height.
Height = 1
obj = np.zeros((800, 600))
obj[4:14, 4:12] = Height
print(obj)
#then the equations started but need some clarifying
"""
Ixy = IIxy = o1 = (wx)+obj
o2 = (wx)+obj+((2np.pi)/3)
o3 = (wx)+obj+((4np.pi)/3)
I1 = Ixy +(IIxy(cos(o1)))
I2 = Ixy +(IIxy*(cos(o2)))
I3 = Ixy +(IIxy*(cos(o3)))
oxy = atan(sqrt(3)(I1-I3)/((2*I2)-I1-I3))
""" | StevenJohnsonOptics/Otoscope_Imaging | Sinewave_plot.py | Sinewave_plot.py | py | 1,301 | python | en | code | 0 | github-code | 13 |
29380053603 | # *** Practical task 3 ***
# Write a function that calculates the number
# of characters included in given string.
def get_num_repeat(phrase: str) -> dict:
    '''
    Function that calculates the number
    of characters included in given string
    :param phrase: str
    :return: dictionary with the number of characters included in phrase,
        keyed by character in order of first appearance
    '''
    # Counter does a single O(n) pass; the previous loop called
    # phrase.count(char) per distinct character, which is O(n^2) overall.
    from collections import Counter
    return dict(Counter(phrase))
if __name__ == "__main__":
    # Interactive demo: count the characters of a user-supplied string.
    string = input("Enter an arbitrary string: ")
    print(get_num_repeat(string))
| kolyasalubov/Lv-14.03.PythonFundamentals | Stanislav_Hryhorskiy/HW7/hw7_practical_task_3.py | hw7_practical_task_3.py | py | 622 | python | en | code | 0 | github-code | 13 |
20209101264 | import os
import sys
import json
import pytorch_lightning as pl
from bert_utils import create_dataframe, get_stat_details, Model, ToSDataModule
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from transformers import BertTokenizerFast as BertTokenizer
def main():
    """Train the ToS BERT classifier described by a config JSON.

    Expects exactly one CLI argument: the path to a JSON file with `csv`,
    `knowledgedistillation` and `train` sections.
    """
    if len(sys.argv) != 2:
        print("\nPass Config JSON as argument!")
        print("--------------------\nModel training failed!\n--------------------\n")
        sys.exit()
    filename = sys.argv[1]
    with open(filename, "rt") as fjson:
        hyper_params = json.load(fjson)
    # --- Base train/test/val CSV paths, resolved relative to rootDir ---
    root_dir = hyper_params["csv"]["rootDir"]
    train_csv_path = hyper_params["csv"]["trainDataframePath"]
    test_csv_path = hyper_params["csv"]["testDataframePath"]
    val_csv_path = hyper_params["csv"]["valDataframePath"]
    train_csv_path = os.path.join(root_dir, train_csv_path)
    test_csv_path = os.path.join(root_dir, test_csv_path)
    val_csv_path = os.path.join(root_dir, val_csv_path)
    # --- Knowledge-distillation stage selection (stage "1" vs stage 2 CSVs) ---
    stage_flag = hyper_params["knowledgedistillation"]["stageFlag"]
    if stage_flag == "1":
        stage = "Stage1"
        stage_train_csv_path = hyper_params["knowledgedistillation"][
            "stage1trainDataframePath"
        ]
        stage_test_csv_path = hyper_params["knowledgedistillation"][
            "stage1testDataframePath"
        ]
        stage_val_csv_path = hyper_params["knowledgedistillation"][
            "stage1valDataframePath"
        ]
    else:
        stage = "Stage2"
        stage_train_csv_path = hyper_params["knowledgedistillation"][
            "stage2trainDataframePath"
        ]
        stage_test_csv_path = hyper_params["knowledgedistillation"][
            "stage2testDataframePath"
        ]
        stage_val_csv_path = hyper_params["knowledgedistillation"][
            "stage2valDataframePath"
        ]
    stage_train_csv_path = os.path.join(root_dir, stage_train_csv_path)
    stage_test_csv_path = os.path.join(root_dir, stage_test_csv_path)
    stage_val_csv_path = os.path.join(root_dir, stage_val_csv_path)
    # --- Training hyper-parameters from the `train` section ---
    model_name = hyper_params["train"]["modelName"]
    label_column = hyper_params["train"]["labelColumn"]
    batch_size = hyper_params["train"]["batchSize"]
    token_length = hyper_params["train"]["tokenLength"]
    epochs = hyper_params["train"]["noEpochs"]
    learning_rate = hyper_params["train"]["learningRate"]
    classes = hyper_params["train"]["noClasses"]
    checkpoint_dir = hyper_params["train"]["checkpointDir"]
    checkpoint_filename = hyper_params["train"]["checkpointFileName"]
    save_top_k = hyper_params["train"]["saveTopK"]
    verbose_flag = hyper_params["train"]["verboseFlag"]
    checkpoint_monitor = hyper_params["train"]["checkpointMonitor"]
    checkpoint_mode = hyper_params["train"]["checkpointMode"]
    paitence_value = hyper_params["train"]["paitenceValue"]
    gpu_count = hyper_params["train"]["gpuCount"]
    refresh_rate = hyper_params["train"]["refreshRate"]
    checkpoint_dir = os.path.join(root_dir, checkpoint_dir)
    # Build the train/test/val dataframes for the selected stage.
    train_df, test_df, val_df = create_dataframe(
        stage=stage,
        stage_train_csv_path=stage_train_csv_path,
        stage_test_csv_path=stage_test_csv_path,
        stage_val_csv_path=stage_val_csv_path,
        train_csv_path=train_csv_path,
        test_csv_path=test_csv_path,
        val_csv_path=val_csv_path,
    )
    # Stage 1 only: record distribution statistics for each split
    # (helper from bert_utils).
    if stage == "Stage1":
        get_stat_details(
            dataframe=train_df,
            root_dir=root_dir,
            title="{}TrainDistribution".format(stage),
        )
        get_stat_details(
            dataframe=test_df,
            root_dir=root_dir,
            title="{}TestDistribution".format(stage),
        )
        get_stat_details(
            dataframe=val_df, root_dir=root_dir, title="{}ValDistribution".format(stage)
        )
    # Tokenizer and LightningDataModule wrapping the three splits.
    tokenizer = BertTokenizer.from_pretrained(model_name)
    data_module = ToSDataModule(
        train_df=train_df,
        test_df=test_df,
        val_df=val_df,
        tokenizer=tokenizer,
        label_column=label_column,
        batch_size=batch_size,
        max_token_len=token_length,
        stage_flag=stage_flag,
    )
    # LR schedule: warm up over the first fifth of all training steps.
    steps_per_epoch = len(train_df) // batch_size
    total_training_steps = steps_per_epoch * epochs
    warmup_steps = total_training_steps // 5
    model = Model(
        model_name=model_name,
        learning_rate=learning_rate,
        n_classes=classes,
        n_training_steps=total_training_steps,
        n_warmup_steps=warmup_steps,
    )
    # Checkpointing and early stopping, both driven by the monitored metric.
    checkpoint_callback = ModelCheckpoint(
        dirpath=checkpoint_dir,
        filename=checkpoint_filename,
        save_top_k=save_top_k,
        verbose=verbose_flag,
        monitor=checkpoint_monitor,
        mode=checkpoint_mode,
    )
    early_stopping_callback = EarlyStopping(
        monitor=checkpoint_monitor, patience=paitence_value
    )
    trainer = pl.Trainer(
        checkpoint_callback=checkpoint_callback,
        callbacks=[early_stopping_callback],
        max_epochs=epochs,
        gpus=gpu_count,
        progress_bar_refresh_rate=refresh_rate,
    )
    trainer.fit(model, data_module)
    print(
        "--------------------\n{} training successfull!\n--------------------\n".format(
            stage
        )
    )
# Script entry point: `python bert_train.py <config.json>`.
if __name__ == "__main__":
    main()
| MBadriNarayanan/TermsOfServiceClassification | Scripts/bert_train.py | bert_train.py | py | 5,298 | python | en | code | 0 | github-code | 13 |
7315743054 | """
pre_process_datafiles.py
This script preprocesses the generated data files.
"""
import pandas as pd
import numpy as np
import sys
import argparse
import os
import pareto
import subprocess
################ Test Case Setup ###################
# Path
header_file = r"header_files/GTV_CTD_Liver_setup_2_header.txt"
# Update the input_path to be dynamic and based on current directory, eliminates the needs for update when running from other folders
input_path = os.getcwd()
output_path = input_path
script_path = os.path.dirname(os.path.realpath(__file__))
#b1_columns = ["b1_0","b1_1","b1_2","b1_3","b1_4","b1_5","b1_6","b1_7","b1_8","b1_9","b1_10","b1_11","b1_12","b1_13","b1_14","b1_15","b1_16","b1_17","b1_18","b1_19","b1_20","b1_21","b1_22","b1_23","b1_24","b1_25","b1_26","b1_27","b1_28","b1_29","b1_30","b1_31","b1_32","b1_33","b1_34","b1_35","b1_36","b1_37","b1_38","b1_39","b1_40","b1_41","b1_42","b1_43","b1_44","b1_45","b1_46","b1_47","b1_48","b1_49","b1_50","b1_51","b1_52","b1_53"]
#b2_columns = ["b2_0","b2_1","b2_2","b2_3","b2_4","b2_5","b2_6","b2_7","b2_8","b2_9","b2_10","b2_11","b2_12","b2_13","b2_14","b2_15","b2_16","b2_17","b2_18","b2_19","b2_20","b2_21","b2_22","b2_23","b2_24","b2_25","b2_26","b2_27","b2_28","b2_29","b2_30","b2_31","b2_32","b2_33","b2_34","b2_35","b2_36","b2_37","b2_38","b2_39","b2_40","b2_41","b2_42","b2_43","b2_44","b2_45","b2_46","b2_47","b2_48","b2_49","b2_50","b2_51","b2_52"]
#b3_columns = ["b3_0","b3_1","b3_2","b3_3","b3_4","b3_5","b3_6","b3_7","b3_8","b3_9","b3_10","b3_11","b3_12","b3_13","b3_14","b3_15","b3_16","b3_17","b3_18","b3_19","b3_20","b3_21","b3_22","b3_23","b3_24","b3_25","b3_26","b3_27","b3_28","b3_29","b3_30","b3_31","b3_32","b3_33","b3_34","b3_35","b3_36","b3_37","b3_38","b3_39","b3_40","b3_41","b3_42","b3_43","b3_44","b3_45","b3_46","b3_47","b3_48","b3_49","b3_50","b3_51","b3_52","b3_53","b3_54"]
b1_columns = ["b1_0","b1_1","b1_2","b1_3","b1_4","b1_5","b1_6","b1_7","b1_8","b1_9","b1_10","b1_11","b1_12","b1_13","b1_14","b1_15","b1_16","b1_17","b1_18","b1_19","b1_20","b1_21","b1_22","b1_23","b1_24","b1_25","b1_26","b1_27","b1_28","b1_29","b1_30","b1_31","b1_32","b1_33","b1_34","b1_35","b1_36","b1_37","b1_38","b1_39","b1_40","b1_41","b1_42","b1_43","b1_44","b1_45","b1_46","b1_47","b1_48","b1_49","b1_50","b1_51","b1_52","b1_53"]
b2_columns = ["b2_0","b2_1","b2_2","b2_3","b2_4","b2_5","b2_6","b2_7","b2_8","b2_9","b2_10","b2_11","b2_12","b2_13","b2_14","b2_15","b2_16","b2_17","b2_18","b2_19","b2_20","b2_21","b2_22","b2_23","b2_24","b2_25","b2_26","b2_27","b2_28","b2_29","b2_30","b2_31","b2_32","b2_33","b2_34","b2_35","b2_36","b2_37","b2_38","b2_39","b2_40","b2_41","b2_42","b2_43","b2_44","b2_45","b2_46","b2_47","b2_48","b2_49","b2_50","b2_51","b2_52","b3_53","b3_54"]
b3_columns = ["b3_0","b3_1","b3_2","b3_3","b3_4","b3_5","b3_6","b3_7","b3_8","b3_9","b3_10","b3_11","b3_12","b3_13","b3_14","b3_15","b3_16","b3_17","b3_18","b3_19","b3_20","b3_21","b3_22","b3_23","b3_24","b3_25","b3_26","b3_27","b3_28","b3_29","b3_30","b3_31","b3_32","b3_33","b3_34","b3_35","b3_36","b3_37","b3_38","b3_39","b3_40","b3_41","b3_42","b3_43","b3_44","b3_45","b3_46","b3_47","b3_48","b3_49","b3_50","b3_51","b3_52","b3_53","b3_54"]
objective_columns = ["Liver","CTD_combined"]
CTD_columns = ["CTD_L0","CTD_CTV","CTD_PTV"]
beamlet_list = [[1, b1_columns],
[2, b2_columns],
[3, b3_columns]]
####################################################
################### Functions ######################
def append_columns(dataframe, header_list):
    """Pad *dataframe* in place with empty-string columns until it has as many
    columns as *header_list*.

    It is expected that len(header_list) >= len(dataframe.columns).

    :param dataframe: pandas DataFrame to be padded (mutated in place)
    :param header_list: list of headers defining the target column count
    :return: the padded input dataframe
    """
    # Plain loop instead of a side-effect list comprehension; each new column
    # is named by its integer position and filled with empty strings.
    for pos in range(len(dataframe.columns), len(header_list)):
        dataframe.insert(pos, pos, "")
    # BUGFIX: return the function's own argument. This used to return the
    # *global* `dataset`, which only worked by accident inside the main loop
    # and raised NameError anywhere else.
    return dataframe
# Faster than is_pareto_efficient_simple, but less readable.
def is_pareto_efficient(costs, return_mask = True):
    """
    Identify the Pareto-efficient rows of a cost matrix (lower is better).

    :param costs: An (n_points, n_costs) array
    :param return_mask: True to return a boolean mask, False for indices
    :return: an (n_points,) boolean mask when return_mask is True, otherwise
        an integer array holding the indices of the efficient points
    """
    surviving = np.arange(costs.shape[0])  # original indices still in play
    total = costs.shape[0]
    cursor = 0  # position (within the filtered arrays) of the next candidate
    while cursor < len(costs):
        # Keep a row only if it beats the current candidate in some cost
        # (strictly lower in at least one column); dominated rows drop out.
        keep = np.any(costs < costs[cursor], axis=1)
        keep[cursor] = True  # the candidate itself always survives this round
        surviving = surviving[keep]
        costs = costs[keep]
        # Advance past the candidate, accounting for rows removed before it.
        cursor = np.sum(keep[:cursor]) + 1
    if return_mask:
        mask = np.zeros(total, dtype=bool)
        mask[surviving] = True
        return mask
    return surviving
####################################################
###################### Main ########################
if __name__ == "__main__":
    """
    Main section of the pre-processing script
    Each steps of the pre-processing to be run sequentially
    """
    # Here we use subprocess to call the CLI command to run the command to append
    # the filenames to the end of each of the files in the current directory
    # Please take note that the working directory will be pointing to the
    # directory from which the script is called.
    #print("Running concat filename script")
    #subprocess.run([script_path + "/concat_filename.sh"])
    #subprocess.run([script_path + "/column_arrange.sh"])
    #print("Done running concat filename script")
    # Read the header file
    with open(header_file) as f:
        header_names = f.read().splitlines()
    data_directory = os.fsencode(input_path)
    # Iterate through all files in the data folder
    for file in os.listdir(data_directory):
        if(os.fsdecode(file) == ".DS_Store"):
            continue
        filename = input_path + '/' + os.fsdecode(file)
        print(filename)
        # Read the input file into a dataframe
        dataset = pd.read_csv(filename, header = None)
        # Appending the columns to ensure it matches the number of columns listed in the headers
        dataset = append_columns(dataset, header_names)
        # Assigning the header from the external header files
        dataset.columns = header_names
        # Here we calculate the statistical values of each of the beamlets
        # (element[0] is the beam number, element[1] the beamlet column names)
        for element in beamlet_list:
            dataset['b{beam_num}_avg'.format(beam_num=element[0])] = dataset.loc[:, element[1]].mean(axis=1)
            dataset['b{beam_num}_max'.format(beam_num=element[0])] = dataset.loc[:, element[1]].max(axis=1)
            dataset['b{beam_num}_min'.format(beam_num=element[0])] = dataset.loc[:, element[1]].min(axis=1)
            dataset['b{beam_num}_std'.format(beam_num=element[0])] = dataset.loc[:, element[1]].std(axis=1)
        print(dataset)
        # Here we calculate the combined objectives value of the CTD
        dataset['CTD_combined'] = dataset.loc[:, CTD_columns].sum(axis=1)
        # Here we call the pareto script to assign the pareto
        objectives_idx = [dataset.columns.get_loc(c) for c in objective_columns if c in dataset]
        print(objectives_idx)
        np_objevtive = dataset.iloc[:, objectives_idx].to_numpy()
        print(np_objevtive)
        #dataset['pareto'] = pareto.flag_nondominated(np_objevtive)
        dataset['pareto'] = is_pareto_efficient(np_objevtive)
        # Saving the dataframe to h
        # NOTE(review): this overwrites the input file in place, headerless.
        dataset.to_csv(filename, header=False, index=False)
    # We will use subprocess to call the CLI command to concatenate all the files into a master csv
    #subprocess.run(["concat_content"])
| MarcusLG/RT_ConNav_Scripts | pre_process_datafiles.py | pre_process_datafiles.py | py | 8,194 | python | en | code | 0 | github-code | 13 |
5276136446 | #!/usr/bin/env python3
#
# Show difference made by solver tolerances
#
from __future__ import division, print_function
import os
import sys
import pints
import numpy as np
# Load project modules
sys.path.append(os.path.abspath(os.path.join('..', '..', 'python')))
import cells
import data
import model
import results
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
#
# Check input arguments
#
base = os.path.splitext(os.path.basename(__file__))[0]  # script name, used for output filenames
args = sys.argv[1:]
# Usage: <script>.py [cell [protocol]]; defaults to cell 5 and protocol 7.
if len(args) > 2:
    print('Syntax: ' + base + '.py <cell> <protocol>')
    sys.exit(1)
if len(args) < 2:
    protocol = 7
else:
    protocol = int(args[1])
if len(args) < 1:
    cell = 5
else:
    cell = int(args[0])
print('Selected cell ' + str(cell))
print('Selected protocol ' + str(protocol))
if protocol < 1 or protocol > 7:
    print('Unknown protocol Pr' + str(protocol))
    sys.exit(1)
# Protocol Pr6 is not supported by this script.
if protocol == 6:
    raise NotImplementedError
#
# Define error function
#
# Create forward model
m = model.Model(
    data.load_myokit_protocol(protocol),
    cells.reversal_potential(cells.temperature(cell)),
    sine_wave=(protocol == 7),
    start_steady=True,
)
# Load data, create single output problem
# NOTE(review): the recording is always loaded for protocol 7 (sine wave),
# regardless of the selected protocol - confirm this is intended.
log = data.load(cell, 7)
time = log.time()
current = log['current']
voltage = log['voltage']
del(log)
# Create single output problem
problem = pints.SingleOutputProblem(m, time, current)
# Define error function
f = pints.RootMeanSquaredError(problem)
#
# Load solution from sine wave fitting
#
popt = results.load_parameters(cell, 4) # Always from sine wave fitting!
#
# Create figure
#
# Set font
font = {'family': 'arial', 'size': 9}
matplotlib.rc('font', **font)
# Matplotlib figure sizes are in inches
def mm(*size):
    """Convert millimetre dimensions to (1.5x scaled) inches for matplotlib."""
    inches = [dim / 25.4 * 1.5 for dim in size]
    return tuple(inches)
# Create figure
#tolerances = ['1e-1', '1e-2', '1e-3', '1e-4', 1e-6, 1e-8]
tolerances = ['1e-4', 1e-6, 1e-8]  # solver tolerances to compare, one panel each
panels = len(tolerances) + (0 if protocol == 7 else 1)  # +1 panel for the analytical reference
fig = plt.figure(figsize=mm(170, 40), dpi=200)
fig.subplots_adjust(0.06, 0.17, 0.99, 0.92)
grid = GridSpec(1, panels, wspace=0.33)
# Sweep the first parameter +/- 0.5% around the fitted optimum.
n = 100
xlo = 0.995
xhi = 1.005
p1s = np.linspace(popt[0] * xlo, popt[0] * xhi, n)
ps = popt.reshape(1, 9).repeat(n, axis=0)
ps[:, 0] = p1s
# One panel per tolerance: plot the error along the p1 sweep.
for i, tol in enumerate(tolerances):
    print('Evaluating for ' + str(tol))
    m.set_tolerances(float(tol))
    fs = [f(p) for p in ps]
    ax = fig.add_subplot(grid[0, i])
    ax.set_xlabel('$p_1$')
    ax.set_ylabel('Score')
    ax.ticklabel_format(style='sci', scilimits=(0, 0))
    ax.plot(p1s, fs, lw=1, label='tol=' + str(tol))
    ax.legend().get_frame().set_alpha(1)
# For non-sine-wave protocols a solver-free analytical model is available;
# use it for the reference panel.
if protocol < 7:
    print('Evaluating in analytical mode')
    m.set_tolerances(1e-4)
    problem._model = model.Model(
        data.load_myokit_protocol(protocol),
        cells.reversal_potential(cells.temperature(cell)),
        sine_wave=False,
        start_steady=True,
        analytical=True,
    )
    fs = [f(p) for p in ps]
    ax = fig.add_subplot(grid[0, -1])
    ax.set_xlabel('$p_1$')
    ax.set_ylabel('Score')
    ax.ticklabel_format(style='sci', scilimits=(0, 0))
    ax.plot(p1s, fs, lw=1, label='analytical')
    ax.legend().get_frame().set_alpha(1)
plt.savefig(base + '-cell-' + str(cell) + '-pr-' + str(protocol) + '.png')
plt.savefig(base + '-cell-' + str(cell) + '-pr-' + str(protocol) + '.pdf')
#plt.show()
| CardiacModelling/FourWaysOfFitting | figures-unused/u6-solver-tolerance/u6-solver-tolerance.py | u6-solver-tolerance.py | py | 3,357 | python | en | code | 4 | github-code | 13 |
72283148819 | from django.contrib.auth import authenticate, login, get_user_model
from django.http import HttpResponse
from django.shortcuts import render,redirect
import requests
from Allorder.models import Product,Customer
from Allorder .form import UpdateForm
from django.http import HttpResponseRedirect, HttpResponse
def cutomer_page(request):
    """List all customers; on GET, first sync customer records from the
    Shopify orders API into the local Customer table."""
    query_set=Customer.objects.all()
    context={
        'customer':query_set
    }
    from django.db.utils import IntegrityError
    if request.method == 'GET':
        # NOTE(review): `headers` is built but never passed to requests.get -
        # presumably it was meant for the call below; confirm.
        headers = {"Accept": "application/json", "Content-Type": "application/json"}
        # SECURITY: API credentials are embedded in the URL (and thus in
        # source control); they should live in settings/environment variables.
        r = requests.get("https://ed132262e67d426893252a6a25146285:e498ddfb0638a28216e60c34b90cffef@cab-store12.myshopify.com/admin/api/2019-04/orders.json")
        data=r.json()
        try :
            # print("lenght : ",data["orders"][i]["customer"]["id"])
            # Create one local Customer row per order's customer record.
            for i in range(0,len(data["orders"])):
                email = data["orders"][i]["customer"]["email"]
                customer_id = data["orders"][i]["customer"]["id"]
                first_name = data["orders"][i]["customer"]["first_name"]
                last_name = data["orders"][i]["customer"]["last_name"]
                name = first_name + last_name
                contactno = data["orders"][i]["customer"]["phone"]
                Customer.objects.create(email=email,customer_id=customer_id,name=name,contactno=contactno)
            return render(request,"home_page.html",context)
        except IntegrityError :
            # A customer already exists (unique constraint): stop syncing and
            # render what we have.
            return render(request,"home_page.html",context)
    return render(request,"home_page.html",context)
def edit_customer(request, customer_id):
    """Update a customer's email and contact number, then redirect home.

    Expects ``email`` and ``contactno`` in the POST data.
    (Removed leftover debug prints and a redundant extra query that fetched
    the customer only to print its email.)
    """
    email = request.POST['email']
    contactno = request.POST['contactno']
    Customer.objects.filter(customer_id=customer_id).update(email=email, contactno=contactno)
    return HttpResponseRedirect('/')
def edit_customer_page(request,customer_id):
customer_id = customer_id
form = UpdateForm(request.POST or None)
context={
'customer_id': customer_id,
'form':form
}
return render(request,'updata_detail.html', context) | imdad000/shopifApp | shopifApp/views.py | views.py | py | 2,029 | python | en | code | 0 | github-code | 13 |
35845643557 | # silent bidding program
bids = {}
def check_winer(bids_):
    """Return ``(name, amount)`` of the highest bid in the ``bids_`` mapping.

    BUG FIX: the loop previously read amounts from the module-level ``bids``
    dict instead of the ``bids_`` parameter, so the argument was silently
    ignored. Assumes bid amounts are positive; returns ("", 0) when empty.
    """
    highest_bid = 0
    winner = ""
    for bidder in bids_:
        if bids_[bidder] > highest_bid:
            highest_bid = bids_[bidder]
            winner = bidder
    return winner, highest_bid
# Main auction loop: collect one bid per round until the user says no one is
# left (or enters anything other than yes/no), then announce the winner.
bidding_finished = False
while not bidding_finished:
    bidder = input("Input your name:\n")
    bidding_amount = input("Input your bidding amount:\n$ ")
    bids[bidder] = int(bidding_amount)
    next_person = input("Is there anyone else left for bidding? yes/no\n").lower()
    if next_person == "yes":
        continue
    elif next_person == "no":
        # BUG FIX: check_winer() was called without its required bids argument.
        winner, highest_bid = check_winer(bids)
        print(f"{winner} wins this bidding contest by an bidding amount of $ {highest_bid}")
        bidding_finished = True
    else:
        print("Incorrect entry! Finding the winner among the bidders.")
        winner, highest_bid = check_winer(bids)
        print(f"{winner} wins this bidding contest by an bidding amount of $ {highest_bid}")
        bidding_finished = True
| vasusen-code/100DaysOfCode | beginner/secret_auction.py | secret_auction.py | py | 1,037 | python | en | code | 1 | github-code | 13 |
9296952073 |
import ha
# Build a minimal HAProxy configuration with forced HTTP caching, then print
# the generated configuration to stdout.
_ha = ha.haproxy (
		minimal_configure = True,
)
# One basic HTTP frontend wired to one basic HTTP backend.
_fe = _ha.http_frontends.basic ()
_be = _ha.http_backends.basic (_frontend = _fe)
# Force caching of both requests and responses on the frontend.
_fe.requests.force_caching_enable ()
_fe.responses.force_caching ()
_ha.output_stdout ()
| cipriancraciun/haproxy-configurator | examples/http-caching-force.py | http-caching-force.py | py | 243 | python | en | code | 2 | github-code | 13 |
43230312156 | #!/usr/bin/env python
from os import path
import sys
# Directory containing this program.
PROGDIR = path.dirname(path.realpath(__file__))
# For click_common and common.
sys.path.insert(0, path.join(PROGDIR, ".."))
# For python_config.
sys.path.insert(0, path.join(PROGDIR, "..", "..", "etc"))
import click_common
import common
import python_config
# If True, then do not run experiments and instead only print configurations.
DRY_RUN = False
# If True, then collect tcpdump traces for every experiment.
TCPDUMP = False
# If True, then racks will be launched in serial.
SYNC = False
# The number of racks to mimic when creating the strobe schedule.
NUM_RACKS_FAKE = 8
# Run static buffer experiments up to buffer size 2**STATIC_POW_MAX.
STATIC_POW_MAX = 7
# Long prebuffering sweep bounds.
RESIZE_LONG_MIN_us = 0
RESIZE_LONG_MAX_us = 3000
RESIZE_LONG_DELTA_us = 300
ALL_VARIANTS_uss = [1200]
# Short prebuffering sweep bounds.
RESIZE_SHORT_MIN_us = 0
RESIZE_SHORT_MAX_us = 300
RESIZE_SHORT_DELTA_us = 50
# VOQ capacities.
SMALL_QUEUE_CAP = 16
BIG_QUEUE_CAP = 50
# Vary night len.
NIGHT_LEN_POW_MIN = 2
NIGHT_LEN_POW_MAX = 11
# Duration of prebuffering required for reTCP to achieve high utilization.
RETCP_RESIZE_us = 150
def maybe(fnc, do=not DRY_RUN):
    """Invoke the zero-argument callable ``fnc`` when ``do`` is truthy.

    Used to skip side effects during dry runs: ``do`` defaults to the
    inverse of the module-level DRY_RUN flag.
    """
    if not do:
        return
    fnc()
def main():
    """Assemble the full experiment sweep and run it on the testbed.

    Builds one Click configuration dict per experiment (night/day lengths,
    VOQ capacities, prebuffering, CC variant), derives flowgrind settings
    and durations from each, prints the estimated total runtime, then runs
    every experiment via ``maybe`` (a no-op when DRY_RUN is set).
    NOTE(review): uses ``xrange``, so this script targets Python 2.
    """
    # Experiments:
    # (1) Long nights/days, static buffers. CUBIC. Old optical switches.
    # (2) Very short nights/days, static buffers, CUBIC. Future optical
    #     switches.
    # (3) Short nights/days, static buffers, all TCP variants.
    # (4) Short nights/days, dynamic buffers, all TCP variants.
    # (5) Vary night/day length, static buffers + CUBIC and dynamic buffers + reTCP.
    # Assemble configurations. Generate the list of configurations first so that
    # we know the total number of experiments.
    cnfs = []
    # CC modes are the outside loop to minimize how frequently we change the CC
    # mode, since doing so requires restarting the cluster.
    for cc in python_config.CCS:
        # if cc in ["cubic"]:
        #     # (1) Old switches.
        #     cnfs += [{"type": "fake_strobe",
        #               "num_racks_fake": NUM_RACKS_FAKE,
        #               "night_len_us": 1000 * python_config.TDF,
        #               "day_len_us": 9000 * python_config.TDF,
        #               "cc": cc}]
        #     # (2) Future switches.
        #     cnfs += [{"type": "fake_strobe",
        #               "num_racks_fake": NUM_RACKS_FAKE,
        #               "night_len_us": 1 * python_config.TDF,
        #               "day_len_us": 9 * python_config.TDF, "cc": cc}]
        # # (3) Static buffers.
        # for exp in xrange(2, STATIC_POW_MAX + 1):
        #     # Only do full sweeps for CUBIC and reTCP, but capture 16 packets
        #     # for all variants.
        #     if cc in ["cubic", "retcp"] or exp == 4:
        #         cnfs += [{"type": "fake_strobe",
        #                   "num_racks_fake": NUM_RACKS_FAKE,
        #                   "small_queue_cap": 2**exp,
        #                   "big_queue_cap": 2**exp,
        #                   "cc": cc}]
        # # (4) Long prebuffering.
        # for us in xrange(RESIZE_LONG_MIN_us, RESIZE_LONG_MAX_us + 1,
        #                  RESIZE_LONG_DELTA_us):
        #     # Only do a full sweep for CUBIC, but capture a few key us's for all
        #     # variants.
        #     if cc == "cubic" or us in ALL_VARIANTS_uss:
        #         cnfs += [{"type": "fake_strobe",
        #                   "num_racks_fake": NUM_RACKS_FAKE,
        #                   "queue_resize": True,
        #                   "in_advance": int(round(us * python_config.TDF)),
        #                   "cc": cc}]
        # # (4) Short prebuffering, only for reTCP.
        # if cc == "retcp":
        #     for us in xrange(RESIZE_SHORT_MIN_us, RESIZE_SHORT_MAX_us + 1,
        #                      RESIZE_SHORT_DELTA_us):
        #         cnfs += [{"type": "fake_strobe",
        #                   "num_racks_fake": NUM_RACKS_FAKE,
        #                   "queue_resize": True,
        #                   "in_advance": int(round(us * python_config.TDF)),
        #                   "cc": cc}]
        # (5) Vary night len.
        for exp in xrange(NIGHT_LEN_POW_MIN, NIGHT_LEN_POW_MAX + 1):
            night_len_us = 2**exp
            day_len_us = 9 * night_len_us
            night_len_us_tdf = int(round(night_len_us * python_config.TDF))
            day_len_us_tdf = int(round(day_len_us * python_config.TDF))
            # CUBIC with static buffers.
            if cc == "cubic":
                cnfs += [{"type": "fake_strobe",
                          "num_racks_fake": NUM_RACKS_FAKE,
                          "small_queue_cap": SMALL_QUEUE_CAP,
                          "big_queue_cap": SMALL_QUEUE_CAP,
                          "night_len_us": night_len_us_tdf,
                          "day_len_us": day_len_us_tdf,
                          "cc": cc}]
            # reTCP with dynamic buffers.
            if cc == "retcp":
                cnfs += [{"type": "fake_strobe",
                          "num_racks_fake": NUM_RACKS_FAKE,
                          "small_queue_cap": SMALL_QUEUE_CAP,
                          "big_queue_cap": BIG_QUEUE_CAP,
                          "night_len_us": night_len_us_tdf,
                          "day_len_us": day_len_us_tdf,
                          "queue_resize": True,
                          # Use 150us or 75% of the circuit downtime length,
                          # whichever is less.
                          "in_advance": int(round(min(
                              0.75 * ((NUM_RACKS_FAKE - 1) *
                                      (night_len_us + day_len_us) - day_len_us),
                              RETCP_RESIZE_us) * python_config.TDF)),
                          "cc": cc}]
    # Set paramters that apply to all configurations.
    for cnf in cnfs:
        # Enable the hybrid switch's packet log. This should already be enabled
        # by default.
        cnf["packet_log"] = True
        # If the night and day lengths have not been set already, then do so
        # here. Explicitly set the night and day lengths instead of relying on
        # their defaults so that we can automatically calculate the experiment
        # duration, below.
        if "night_len_us" not in cnf:
            cnf["night_len_us"] = int(round(20 * python_config.TDF))
            cnf["day_len_us"] = int(round(180 * python_config.TDF))
        if "small_queue_cap" not in cnf:
            cnf["small_queue_cap"] = SMALL_QUEUE_CAP
            cnf["big_queue_cap"] = BIG_QUEUE_CAP
        if cnf["cc"] == "dctcp":
            # If the configuration uses DCTCP, then enable threshold-based ECN
            # marking.
            cnf["ecn"] = (
                python_config.DCTCP_THRESH,
                int(round(float(cnf["big_queue_cap"]) / cnf["small_queue_cap"] *
                          python_config.DCTCP_THRESH)))
    # Assemble settings. Generate the list of settings first so that we can
    # the estimated total duration.
    cnfs = [
        (cnf, {
            # Generate a flow from each machine on rack 1 to its corresponding
            # partner machine on rack 2.
            "flows": [{"src": "r2", "dst": "r3"}],
            # Run the flow for three thousand weeks plus 100 ms for good
            # measure, converted to seconds. The resulting duration is not under
            # TDF, but the flow will be under TDF when it is executed (i.e., the
            # flow will actually take TDF times longer than the value computed
            # here).
            "dur": (((cnf["night_len_us"] + cnf["day_len_us"]) # One night and day under TDF.
                     / python_config.TDF # Convert from TDF to real time.
                     * (cnf["num_racks_fake"] - 1) # Convert to a full week.
                     / 1e3 # Convert from us to ms.
                     * 3000 # 3000 weeks.
                     + 100) # Extra 100 ms, for good measure.
                    / 1e3), # Convert to seconds.
            "tcpdump": TCPDUMP
        }) for cnf in cnfs]
    # Total number of experiments.
    tot = len(cnfs)
    # Estimated total duration.
    dur = sum([stg["dur"] for cnf, stg in cnfs]) * python_config.TDF
    print("Estimated total duration: > {:.2f} seconds, > {:.2f} hours".format(
        dur, dur / 3600.))
    # Run experiments. Use the first experiment's CC mode to avoid unnecessarily
    # restarting the cluster.
    maybe(lambda: common.initializeExperiment(
        "flowgrindd", cc=cnfs[0][0]["cc"], sync=SYNC))
    for cnt, (cnf, stg) in enumerate(cnfs, start=1):
        maybe(lambda: click_common.setConfig(cnf))
        print("--- experiment {} of {}, config:\n{}".format(cnt, tot, cnf))
        maybe(lambda: common.flowgrind(settings=stg))
    maybe(common.finishExperiment)
if __name__ == "__main__":
main()
| mukerjee/etalon | experiments/buffers/nsdi2020.py | nsdi2020.py | py | 9,228 | python | en | code | 12 | github-code | 13 |
41130662343 | import pygame
import sys
import sqlite3
from class_boton import Button
from game import game, oscurecer_pantalla, draw_text2, draw_text_and_image
from utilidades import cambiar_musica
from vid.pyvidplayer import Video
from configuracion import ANCHO_PANTALLA, ALTO_PANTALLA
from class_game_over import GameOver
pygame.init()
font_obtenida = "fonts/font.ttf"
SCREEN = pygame.display.set_mode((ANCHO_PANTALLA, ALTO_PANTALLA))
pygame.display.set_caption("Dragon Ball Sprite")
background_main = pygame.image.load("asset/5795524.jpg")
background_main_rescalado = pygame.transform.scale(background_main, (ANCHO_PANTALLA, ALTO_PANTALLA))
game_over_respuesta = None
list_resp_score_game = []
over_game = GameOver(SCREEN) #score ejemplo
def get_font(size) -> pygame.font.Font:
    """
    Return the game's pygame font at the requested size.

    Args:
        size (int): Point size for the font.

    Returns:
        pygame.font.Font: Font loaded from ``font_obtenida`` (fonts/font.ttf).
    """
    return pygame.font.Font(font_obtenida, size)
def play()-> None:
    """
    Run the main gameplay loop, then show the game-over / win screen with
    the score returned by ``game()``.

    Receives: nothing. Returns: nothing (loops until the process exits).
    """
    while True:
        PLAY_MOUSE_POS = pygame.mouse.get_pos()
        SCREEN.fill("Black")
        PLAY_BACK = Button(image=None, pos=(640, 460),
                            text_input="BACK", font=get_font(75), base_color="White", hovering_color="Green")
        PLAY_BACK.changeColor(PLAY_MOUSE_POS)
        PLAY_BACK.update(SCREEN)
        # NOTE(review): no event handling here checks clicks on PLAY_BACK, so
        # the button is drawn but appears unreachable -- confirm intent.
        lista_game_over_respuesta = game()
        resp_game_over = lista_game_over_respuesta[0]
        list_resp_score_game = lista_game_over_respuesta[1]
        # Both outcomes currently show the same screen; the first element of
        # the result distinguishes "Game Over" from a win.
        if resp_game_over == "Game Over":
            over_game.show_game_over(resp_game_over, main_menu, list_resp_score_game)
        else:# Win
            over_game.show_game_over(resp_game_over, main_menu, list_resp_score_game)
        pygame.display.update()
def options()-> None:
    """
    Show the credits screen ("Creditos") until BACK is clicked, then
    return to the main menu.

    Receives: nothing. Returns: nothing.
    """
    background_main = pygame.image.load("asset\creditos pygame.jpg")
    background_main_rescalado = pygame.transform.scale(background_main, (ANCHO_PANTALLA, ALTO_PANTALLA))
    while True:
        OPTIONS_MOUSE_POS = pygame.mouse.get_pos()
        SCREEN.blit(background_main_rescalado, (0, 0))
        OPTIONS_TEXT = get_font(20).render("Creditos", True, "Grey")
        OPTIONS_RECT = OPTIONS_TEXT.get_rect(center=(ANCHO_PANTALLA / 2, 150))
        SCREEN.blit(OPTIONS_TEXT, OPTIONS_RECT)
        OPTIONS_BACK = Button(image=None, pos=(ANCHO_PANTALLA / 2, 460),
                            text_input="BACK", font=get_font(40), base_color="White", hovering_color="Orange")
        OPTIONS_BACK.changeColor(OPTIONS_MOUSE_POS)
        OPTIONS_BACK.update(SCREEN)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                if OPTIONS_BACK.checkForInput(OPTIONS_MOUSE_POS):
                    main_menu()
        pygame.display.update()
def main_menu()-> None:
    """
    Show the game's main menu (Play / Credits / Quit) and dispatch on
    button clicks; also starts the looping menu music.

    Receives: nothing. Returns: nothing.
    """
    pygame.mixer.music.load('sonido/DRAGON BALL Z Cha-La Head Guitarra Christianvib.mp3')
    pygame.mixer.music.play(-1)
    pygame.mixer.music.set_volume(0.3)
    while True:
        SCREEN.blit(background_main_rescalado, (0, 0))
        MENU_MOUSE_POS = pygame.mouse.get_pos()
        MENU_TEXT = get_font(40).render("Dragon Ball Sprite", True, (247, 35, 12))
        MENU_RECT = MENU_TEXT.get_rect(center=(ANCHO_PANTALLA / 2, 100))
        # NOTE(review): button images are reloaded from disk on every frame;
        # hoisting the loads out of the loop would avoid repeated file I/O.
        PLAY_BUTTON = Button(image=pygame.image.load("asset/Play Rect.png"), pos=(ANCHO_PANTALLA / 2, 200),
                             text_input="Jugar", font=get_font(20), base_color="White",
                             hovering_color=(248, 209, 5))
        OPTIONS_BUTTON = Button(image=pygame.image.load("asset/Options Rect.png"), pos=(ANCHO_PANTALLA / 2, 350),
                                text_input="Creditos", font=get_font(20), base_color="White",
                                hovering_color=(248, 209, 5))
        QUIT_BUTTON = Button(image=pygame.image.load("asset/Quit Rect.png"), pos=(ANCHO_PANTALLA / 2, 500),
                             text_input="Salir", font=get_font(20), base_color="White",
                             hovering_color=(248, 209, 5))
        SCREEN.blit(MENU_TEXT, MENU_RECT)
        for button in [PLAY_BUTTON, OPTIONS_BUTTON, QUIT_BUTTON]:
            button.changeColor(MENU_MOUSE_POS)
            button.update(SCREEN)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                if PLAY_BUTTON.checkForInput(MENU_MOUSE_POS):
                    preludio(SCREEN)
                if OPTIONS_BUTTON.checkForInput(MENU_MOUSE_POS):
                    options()
                if QUIT_BUTTON.checkForInput(MENU_MOUSE_POS):
                    pygame.quit()
                    sys.exit()
        pygame.display.update()
def intro()-> None:
    """
    Play the game's intro video; jump to the main menu when it ends or on
    any mouse click.

    Receives: nothing. Returns: nothing.
    """
    vid = Video("vid\INTRO GAME UTN V2.mp4")
    vid.set_size((ANCHO_PANTALLA, ALTO_PANTALLA))
    vid.set_volume(0.5)
    while True:
        if vid.active:
            vid.draw(SCREEN, (0, 0))
        else:
            # Video finished on its own: clean up and open the menu.
            vid.close()
            main_menu()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                # A click skips the intro.
                vid.close()
                main_menu()
        pygame.display.update()
def intro_2(path : str, go_game: bool)-> None:
    """
    Play a secondary intro video, then optionally start the game and show
    its game-over / win screen. A mouse click skips straight to the game.

    Args:
        path (str): Path of the intro video to play.
        go_game (bool): If True, start the game once the video ends.

    Returns: nothing.
    """
    vid = Video(path)
    vid.set_size((ANCHO_PANTALLA, ALTO_PANTALLA))
    while True:
        if vid.active == True:
            vid.draw(SCREEN, (0, 0))
            vid.set_volume(0.5)
        else:
            vid.close()
            if(go_game):
                lista_game_over_respuesta = game()
                resp_game_over = lista_game_over_respuesta[0]
                list_resp_score_game = lista_game_over_respuesta[1]
                if resp_game_over == "Game Over":
                    over_game.show_game_over(resp_game_over, main_menu, list_resp_score_game)
                else:# Win
                    over_game.show_game_over(resp_game_over, main_menu, list_resp_score_game)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Skip the video and launch the game immediately.
                vid.close()
                lista_game_over_respuesta = game()
                resp_game_over = lista_game_over_respuesta[0]
                list_resp_score_game = lista_game_over_respuesta[1]
                if resp_game_over == "Game Over":
                    over_game.show_game_over(resp_game_over, main_menu, list_resp_score_game)
                else:# Win
                    over_game.show_game_over(resp_game_over, main_menu, list_resp_score_game)
        pygame.display.update()
def preludio(screen: pygame.Surface)-> None:
    """
    Show the pre-game prelude: Krillin slides in and speaks, then Goku
    replies; once both lines have been shown, play the stage-0 video and
    start the game via intro_2().

    Args:
        screen (pygame.Surface): Game screen surface to draw on.

    Returns: nothing.
    """
    background_main = pygame.image.load("asset\kamehouse.jpg")
    background_main_rescalado = pygame.transform.scale(background_main, (ANCHO_PANTALLA, ALTO_PANTALLA))
    cambiar_musica("sonido\intro_karaoke_dragonball_buscar_esferas (mp3cut.net).mp3", 0.2)
    fps = 30
    relog = pygame.time.Clock()
    index_stage = 0
    text_color = (0, 0, 0)
    text_index = 0
    balloon_position_krillin = (250, 300)
    balloon_color = (255, 255, 255)
    path_goku_intro = "asset/goku_intro_game_res.png"
    text = ["¡Hola, Goku!\nEstaba pensando que \n quizas seria bueno que practiquemos para el Gran Torneo."]
    text_goku = ["Es verdad tenes mucha razon Krillin,\n hay que prepararse... Empecemos!"]
    dx_slide_boss = 20
    slide_krillin = 800
    contador_escena_start_game = 0
    path_krillin = "asset/krillin_intro_game.png"
    path_por_defecto = path_krillin
    time_text = 180
    time_text_limit = 180
    running = True
    finished_animation = False # flag set once the prelude animation finishes
    while running and not finished_animation: # exit the loop once the animation is done
        SCREEN.blit(background_main_rescalado, (0, 0))
        for evento in pygame.event.get():
            if evento.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
        if index_stage == 0 and contador_escena_start_game < 2:
            # load_music_intro = True
            # (music switch used to happen here)
            font = pygame.font.Font(None, 36)
            image = pygame.image.load(path_por_defecto)
            oscurecer_pantalla(screen)
            # Slide the current character in from the right until x == 400.
            if slide_krillin > 400:
                slide_krillin -= dx_slide_boss
            draw_text_and_image(screen, image, slide_krillin, 300)
            if slide_krillin == 400:
                # Hold each dialogue line on screen for time_text frames.
                if time_text > 0:
                    if text_index < len(text):
                        draw_text2(screen, text[text_index], font, text_color,
                                   balloon_position_krillin, balloon_color, max_width=350)
                    time_text -= 1
                else:
                    time_text = time_text_limit
                    text_index += 1
                    if text_index >= len(text):
                        # Switch to Goku's sprite and dialogue for scene 2.
                        path_por_defecto = path_goku_intro
                        slide_krillin = 800
                        text_index = 0
                        text = text_goku
                        contador_escena_start_game += 1
                        if contador_escena_start_game >= 2:
                            finished_animation = True # the animation has finished
                            running = False
        relog.tick(fps)
        pygame.display.update()
    pygame.mixer.music.stop()
    intro_2("vid/stage_0.avi", True)
intro()
| HoracioxBarrios/mi_juego_final_limpio | main.py | main.py | py | 10,634 | python | es | code | 2 | github-code | 13 |
26652442026 | from bloghandler import BlogHandler
from models.user import User
from models.post import Post
from models.like import Like
from models.comment import Comment
from helper import *
from google.appengine.ext import db
class NewPost(BlogHandler):
    """Handler for creating a new blog post."""

    def get(self):
        """Render the new-post form, or the login page for anonymous visitors."""
        if not self.user:
            self.render("login-form.html",
                        error="Please Login to Create Your Blog")
            return
        self.render("newpost.html")

    def post(self):
        """Persist the submitted post and redirect to its permalink.

        Re-renders the form with an error when subject or content is missing.
        """
        if not self.user:
            self.redirect('/blog')
            return
        subject = self.request.get('subject')
        content = self.request.get('content')
        post_id = self.request.get('post_id')
        author_name = User.by_id(int(self.get_uid())).name
        if not (subject and content):
            self.render("newpost.html", subject=subject,
                        content=content,
                        error="subject and content, please!")
            return
        new_post = Post(parent=blog_key(), subject=subject,
                        content=content, author=author_name)
        new_post.put()
        self.redirect('/blog/%s' % str(new_post.key().id()))
| xUansen/udacity-full-stack-nanodegree | @Project3/app/newposthandler.py | newposthandler.py | py | 1,216 | python | en | code | 0 | github-code | 13 |
6922717828 | '''
Dynamic Programming Question.
'''
#INterview Question:
#Leetcode
#Dynamic Problem Question
#O(n²) --> Brute Force Solution
#Dynamic Problem solution:
'''
For example we have:
[-2,-3,4,1,-2,1,5,-3]
max Subarray = [4,-1,-2,5]
#Lets look at the problem stepwise
Initially: max = -inf., currSum = 0
[-2] --> max = -2, currSum = -2
[-2,-3] --> max = [-2] ,curSum = -2 - 3 = -5
[-2,-3,4] --> max = 4, curSum = -2 - 4 + 4 = -1
[-2,-3,4,1,-2] --> max = - 2+4+4 =-1
'''
class Solution():
    def maxSubarray(self, numbers):
        """Return the maximum subarray sum of ``numbers`` (Kadane's algorithm).

        BUG FIX: the previous O(n^2) version read ``sum[j-1]`` before it was
        set for the current window (picking up ``sum[-1]`` from the previous
        one) and produced wrong answers. Kadane's scan is O(n) time, O(1)
        space.

        Args:
            numbers: non-empty list of (possibly negative) integers.

        Raises:
            ValueError: if ``numbers`` is empty.
        """
        if not numbers:
            raise ValueError("maxSubarray() requires a non-empty list")
        best = current = numbers[0]
        for value in numbers[1:]:
            # Either extend the running subarray or start fresh at `value`.
            current = max(value, current + value)
            best = max(best, current)
        return best
if __name__ == "__main__":
    numbers = [-2, -3, 4, 1, -2, 1, 5, -3]
    solver = Solution()
    # BUG FIX: maxSubarray() returns an int, so indexing the result with [0]
    # raised TypeError; print the value directly. The message now says "sum"
    # because the method returns the sum, not the subarray itself.
    print('The maximum subarray sum is:', solver.maxSubarray(numbers))
| Oushesh/CODING_INTERVIEW | interviewing.io/SlackEngineer_maxSubarray.py | SlackEngineer_maxSubarray.py | py | 1,108 | python | en | code | 0 | github-code | 13 |
17053456974 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
class InvoiceLineInfoOrder(object):
    """Value object describing one invoice line (product, quantity, amounts)."""

    # Attribute names handled by the generic dict (de)serialisation below.
    _FIELDS = (
        'duty_free_flag',
        'duty_free_type',
        'line_amt',
        'measurement_unit',
        'product_name',
        'product_specification',
        'quantity',
        'tax_rate',
        'unit_amt',
    )

    def __init__(self):
        self._duty_free_flag = None
        self._duty_free_type = None
        self._line_amt = None
        self._measurement_unit = None
        self._product_name = None
        self._product_specification = None
        self._quantity = None
        self._tax_rate = None
        self._unit_amt = None

    @property
    def duty_free_flag(self):
        return self._duty_free_flag

    @duty_free_flag.setter
    def duty_free_flag(self, value):
        self._duty_free_flag = value

    @property
    def duty_free_type(self):
        return self._duty_free_type

    @duty_free_type.setter
    def duty_free_type(self, value):
        self._duty_free_type = value

    @property
    def line_amt(self):
        return self._line_amt

    @line_amt.setter
    def line_amt(self, value):
        # Coerce plain dicts into the money model so callers may pass either.
        if isinstance(value, MultiCurrencyMoneyOpenApi):
            self._line_amt = value
        else:
            self._line_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)

    @property
    def measurement_unit(self):
        return self._measurement_unit

    @measurement_unit.setter
    def measurement_unit(self, value):
        self._measurement_unit = value

    @property
    def product_name(self):
        return self._product_name

    @product_name.setter
    def product_name(self, value):
        self._product_name = value

    @property
    def product_specification(self):
        return self._product_specification

    @product_specification.setter
    def product_specification(self, value):
        self._product_specification = value

    @property
    def quantity(self):
        return self._quantity

    @quantity.setter
    def quantity(self, value):
        self._quantity = value

    @property
    def tax_rate(self):
        return self._tax_rate

    @tax_rate.setter
    def tax_rate(self, value):
        self._tax_rate = value

    @property
    def unit_amt(self):
        return self._unit_amt

    @unit_amt.setter
    def unit_amt(self, value):
        self._unit_amt = value

    def to_alipay_dict(self):
        """Serialise every populated (truthy) field, recursing into nested models."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field] = value.to_alipay_dict()
                else:
                    params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; return None for empty input."""
        if not d:
            return None
        o = InvoiceLineInfoOrder()
        for field in InvoiceLineInfoOrder._FIELDS:
            if field in d:
                # Assigning through the property runs any coercion (line_amt).
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/InvoiceLineInfoOrder.py | InvoiceLineInfoOrder.py | py | 5,414 | python | en | code | 241 | github-code | 13 |
17049177184 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BeneficialEntity(object):
    """Value object holding a beneficial owner's name and certificate data."""

    # Attribute names handled by the generic dict (de)serialisation below.
    _FIELDS = (
        'cert_effective_date',
        'cert_expiration_date',
        'cert_no',
        'cert_type',
        'name',
    )

    def __init__(self):
        self._cert_effective_date = None
        self._cert_expiration_date = None
        self._cert_no = None
        self._cert_type = None
        self._name = None

    @property
    def cert_effective_date(self):
        return self._cert_effective_date

    @cert_effective_date.setter
    def cert_effective_date(self, value):
        self._cert_effective_date = value

    @property
    def cert_expiration_date(self):
        return self._cert_expiration_date

    @cert_expiration_date.setter
    def cert_expiration_date(self, value):
        self._cert_expiration_date = value

    @property
    def cert_no(self):
        return self._cert_no

    @cert_no.setter
    def cert_no(self, value):
        self._cert_no = value

    @property
    def cert_type(self):
        return self._cert_type

    @cert_type.setter
    def cert_type(self, value):
        self._cert_type = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    def to_alipay_dict(self):
        """Serialise every populated (truthy) field, recursing into nested models."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field] = value.to_alipay_dict()
                else:
                    params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; return None for empty input."""
        if not d:
            return None
        o = BeneficialEntity()
        for field in BeneficialEntity._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/BeneficialEntity.py | BeneficialEntity.py | py | 3,020 | python | en | code | 241 | github-code | 13 |
27074944285 | import openai
from openpyxl import Workbook
from openpyxl import load_workbook
from time import sleep
import os
# SECURITY FIX: the OpenAI API key was previously hard-coded here and is
# therefore leaked with the source (rotate it!). Read it from the
# environment instead.
openai.api_key = os.environ["OPENAI_API_KEY"]

print('please input interval time:')
sleeptime = int(input())

# Engines to try, in order of preference; the second is a cheaper fallback.
enginels = ['text-davinci-001',
            'text-curie-001'
            ]

wb = load_workbook('chat.xlsx')
ws = wb.active
for i in range(2, ws.max_row + 1):
    # Skip rows that already have an answer in column B.
    if ws.cell(row=i, column=2).value is not None:
        print(ws.cell(row=i, column=1).value + " pass")
        continue
    # Stop at the first empty question cell.
    if ws.cell(row=i, column=1).value is None:
        break
    print("==============================")
    print(ws.cell(row=i, column=1).value)
    question = ws.cell(row=i, column=1).value
    try:
        responses = openai.Completion.create(
            engine=enginels[0],
            prompt=question,
            max_tokens=2000,
            stop=None,
            n=1
        )
    except Exception:
        # BUG FIX: was a bare `except:` (also swallowed KeyboardInterrupt).
        # Fall back to the second engine if the first request fails.
        responses = openai.Completion.create(
            engine=enginels[1],
            prompt=question,
            max_tokens=2000,
            stop=None,
            n=1
        )
    # Throttle requests by the user-supplied interval.
    sleep(sleeptime)
    print(responses.choices[0].text)
    print("==============================")
    # Store the answer (newlines stripped) and checkpoint after every row.
    ws.cell(row=i, column=2).value = (responses.choices[0].text).replace('\n', '')
    wb.save('chat.xlsx')

wb.close()
os.system('pause')
| carryyangorz/pythonprojects | chat.py | chat.py | py | 1,369 | python | en | code | 0 | github-code | 13 |
29113453086 | import logging
import os
from datetime import datetime
from zoneinfo import ZoneInfo
from slack_bolt import App
from slack_bolt.adapter.aws_lambda import SlackRequestHandler
from slack_bolt.adapter.socket_mode import SocketModeHandler
from slack_sdk.web import WebClient
# from ./constants import ZUNDA_EMOJI
ZUNDA_EMOJI = "zundamon"
app = App(
token=os.environ.get("SLACK_BOT_TOKEN"),
# signing_secret=os.environ.get("SLACK_SIGNING_SECRET"),
# process_before_response=True
)
bot_user = os.environ.get("SLACK_BOT_USER")
register = App(
token=os.environ.get("SLACK_USER_TOKEN"),
# process_before_response=True
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
@app.event("reaction_added")
def reply_nanoda(client: WebClient, event: dict) -> None:
"""
特定の絵文字(ZUNDA_EMOJI)が押された際に
ボットから定型文メッセージをスレッドに返信
"""
logger.info("reply_nanoda start") # debug
reaction: str = event["reaction"]
logger.info("reaction", reaction) # debug
mes = _greeting()
if reaction == ZUNDA_EMOJI:
logger.info("ZUNDA EMOJI reaction") # debug
client.chat_postMessage(
channel=event["item"]["channel"],
thread_ts=event["item"]["ts"],
text=mes,
)
logger.info("reply_nanoda end") # debug
def _greeting() -> str:
    """Return Zundamon's self-introduction, prefixed with a time-of-day
    greeting based on the current hour in Asia/Tokyo."""
    now = datetime.now(ZoneInfo("Asia/Tokyo"))
    # BUG FIX: was logger.info("now", now) -- a positional argument without a
    # %-placeholder makes logging raise a format error and drop the record.
    logger.info("now: %s", now)  # debug
    now_hour: int = now.hour
    # now.hour is 0..23, so the 0..5 range falls through to the else branch.
    if 6 <= now_hour <= 10:
        prefix = "おはようございます。\n"
    elif 11 <= now_hour <= 16:
        prefix = "こんにちは。\n"
    elif 17 <= now_hour <= 24:
        prefix = "こんばんは。\n"
    else:
        prefix = "くそねみぃのだ...\n"
    return prefix + "僕、ずんだもんなのだ。"
@app.event("channel_created")
def register_nanoda(event: dict) -> None:
"""
新しいチャンネルが作成されるたびに、
ずんだもんbotを該当のチャンネルに登録(招待)
"""
logger.info("register_nanoda start") # debug
channel = event.get("channel", dict())
channel_id = channel.get("id")
if channel_id and bot_user:
res = register.client.conversations_invite(
channel=channel_id,
users=bot_user,
)
logger.info("res", res) # debug
logger.info("register_nanoda end") # debug
# ソケットモードにしてアプリ起動
if __name__ == "__main__":
SocketModeHandler(app, os.environ["SLACK_APP_TOKEN"]).start()
| Kumamoto-Hamachi/zunda-fairy-bot | src/handler.py | handler.py | py | 2,665 | python | en | code | 0 | github-code | 13 |
70988772497 | import pandas as pd
import numpy as np
import torch
from torch import nn
from torch.nn.functional import interpolate
from torch.utils.data import Dataset, DataLoader
import torch.autograd as autograd
import torch.nn.functional as F
import torch.optim as optim
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report, confusion_matrix
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.loggers import TensorBoardLogger
import torchmetrics
from torchmetrics.functional import accuracy
######## Net and Model ########
def flatten_size(max_trial_length):
    """Return the flattened feature count produced by the CNN's conv/pool
    stack for a 14-channel input of length ``max_trial_length``.

    Runs one dummy forward pass through a throwaway copy of the feature
    extractor and reads off the resulting width, so the value stays in sync
    with the layer hyper-parameters used by CNNnet.
    """
    probe = torch.rand(size=(1, 14, max_trial_length), dtype=torch.float32)
    feature_stack = nn.Sequential(
        nn.BatchNorm1d(14),
        nn.Conv1d(14, 84, kernel_size=128 * 3, stride=2, padding=0),
        nn.AvgPool1d(kernel_size=64, stride=2),
        nn.Conv1d(84, 200, kernel_size=32, stride=4, padding=0),
        nn.AvgPool1d(kernel_size=4, stride=8),
        nn.LeakyReLU(),
        nn.Dropout(),
        nn.Flatten(),
    )
    return feature_stack(probe).shape[1]
class CNNnet(torch.nn.Module):
    """1-D CNN: two conv/pool stages followed by two fully connected layers.

    Takes input of shape (batch, num_inputs, length) and emits
    (batch, num_outputs) scores. ``flatten_size`` must equal the flattened
    width of the conv stack for the trial length in use (see flatten_size()).
    """

    def __init__(self, num_inputs, num_outputs, flatten_size):
        super(CNNnet, self).__init__()
        # Layer attribute names (and registration order) are kept identical
        # to the original so saved checkpoints stay loadable.
        self.conv1 = nn.Conv1d(num_inputs, 84, kernel_size=128 * 3, stride=2, padding=0)
        self.conv2 = nn.Conv1d(84, 200, kernel_size=32, stride=4, padding=0)
        self.rl = nn.LeakyReLU()
        self.avgpool1 = nn.AvgPool1d(kernel_size=64, stride=2)
        self.avgpool2 = nn.AvgPool1d(kernel_size=4, stride=8)
        self.drop = nn.Dropout()
        self.bn0 = nn.BatchNorm1d(num_inputs)
        self.bn1 = nn.BatchNorm1d(84)
        self.bn2 = nn.BatchNorm1d(200)
        self.fl = nn.Flatten()
        self.linear1 = nn.Linear(flatten_size, 100)
        self.linear2 = nn.Linear(100, num_outputs)

    def forward(self, x):
        # Apply the layers as a fixed pipeline (same order as before).
        stages = (
            self.bn0,
            self.conv1, self.bn1, self.rl, self.avgpool1, self.drop,
            self.conv2, self.bn2, self.rl, self.avgpool2,
            self.fl, self.linear1, self.linear2,
        )
        out = x
        for stage in stages:
            out = stage(out)
        return out
class CNNmodel(pl.LightningModule):
    """LightningModule wrapper around CNNnet: cross-entropy loss plus
    accuracy logging for the train/val/test loops."""
    def __init__(self, num_features: int, num_classes: int, flatten_size:int):
        super().__init__()
        self.model = CNNnet(num_features, num_classes, flatten_size)
        self.criterion = nn.CrossEntropyLoss()
    def forward(self, x, labels = None):
        """Run the network; when labels are given, also compute the loss.

        Returns (loss, logits); loss is 0 when no labels are supplied.
        """
        output = self.model(x)
        loss = 0
        if labels is not None:
            loss = self.criterion(output, labels)
        return loss, output
    def training_step(self, batch, batch_idx):
        # NOTE(review): batch appears to be a dict with 'feature' inputs and
        # integer 'label' targets supplied by the dataloader — confirm.
        sequences = batch['feature']
        labels = batch['label']
        loss, outputs = self(sequences, labels)
        predictions = torch.argmax(outputs, dim = 1)
        step_accuracy = accuracy(predictions, labels)
        self.log('train_loss', loss, prog_bar=True, logger=True)
        self.log('train_accuracy', step_accuracy, prog_bar=True, logger=True)
        return {'loss':loss, 'accuracy': step_accuracy}
    def validation_step(self, batch, batch_idx):
        # Identical to training_step except for the logged metric names.
        sequences = batch['feature']
        labels = batch['label']
        loss, outputs = self(sequences, labels)
        predictions = torch.argmax(outputs, dim = 1)
        step_accuracy = accuracy(predictions, labels)
        self.log('val_loss', loss, prog_bar=True, logger=True)
        self.log('val_accuracy', step_accuracy, prog_bar=True, logger=True)
        return {'loss':loss, 'accuracy': step_accuracy}
    def test_step(self, batch, batch_idx):
        # Identical to training_step except for the logged metric names.
        sequences = batch['feature']
        labels = batch['label']
        loss, outputs = self(sequences, labels)
        predictions = torch.argmax(outputs, dim = 1)
        step_accuracy = accuracy(predictions, labels)
        self.log('test_loss', loss, prog_bar=True, logger=True)
        self.log('test_accuracy', step_accuracy, prog_bar=True, logger=True)
        return {'loss':loss, 'accuracy': step_accuracy}
def configure_optimizers(self):
return optim.Adam(self.parameters(), lr = 0.001 , weight_decay =0) | hangming1992/QMUL-Project | project_CNN.py | project_CNN.py | py | 4,603 | python | en | code | 0 | github-code | 13 |
74449868176 | import requests
BASE_URL = "https://opendata.paris.fr/api/explore/v2.1/catalog/datasets"
def get_dataset_info(dataset_id, **kwargs):
    """Fetch records for one dataset from the Paris open-data explore API.

    Args:
        dataset_id: Identifier of the dataset on opendata.paris.fr.
        **kwargs: Extra query parameters forwarded verbatim to the API
            (e.g. ``limit``, ``offset``, ``where``).

    Returns:
        The list found under the response's ``results`` key, or ``None``
        when the HTTP status is not 200 (the status code is printed).
    """
    url = f"{BASE_URL}/{dataset_id}/records?"
    response = requests.get(url, params=kwargs)
    if response.status_code == 200:
        return response.json().get("results")
    else:
        print(f"Erreur: {response.status_code}")
        return None
| bricefotzo/velib-subscribe | api/external.py | external.py | py | 471 | python | en | code | 0 | github-code | 13 |
73806564818 | #!/usr/bin/env python
import sys
import os
import io
import argparse
from collections import Counter, defaultdict
import pickle
from scorer.reader import *
from scorer.truth import *
from scorer.sDCG import *
from scorer.expected_utility import *
from scorer.cubetest import *
def _years(runs_path):
    """Year sub-directories (names starting with '2') directly under *runs_path*."""
    found = []
    for entry in os.listdir(runs_path):
        if entry.startswith('2'):
            found.append(entry)
    return found


# Session track: run files live in <runs_path>/<year>/ and end with 'run';
# the run name is the file name with its 4-character extension dropped.
def _runs_S(runs_path, year):
    year_dir = runs_path + '/' + year + '/'
    runs = []
    for name in os.listdir(year_dir):
        if name.endswith('run'):
            runs.append((year_dir + name, name[:-4]))
    return runs


# Dynamic Domain track: exactly one payload file per <year>/<sub> directory
# (dot-files ignored).
def _file_path(root_dir, year, sub):
    sub_dir = root_dir + '/' + year + '/' + sub + '/'
    visible = [name for name in os.listdir(sub_dir) if not name.startswith('.')]
    return sub_dir + visible[0]


def _runs_DD(root_dir, year, sub):
    """All (path, name) run files under <root_dir>/<year>/<sub>/, skipping dot-files."""
    sub_dir = root_dir + '/' + year + '/' + sub + '/'
    return [(sub_dir + name, name) for name in os.listdir(sub_dir)
            if not name.startswith('.')]
def str2bool(v):
    """Parse a human-friendly truth string into a bool (argparse `type=`).

    Accepts yes/no, true/false, t/f, y/n, 1/0 in any case; raises
    argparse.ArgumentTypeError for anything else.
    """
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def main():
    """Score every run of a track (DD or S) with sDCG, Expected Utility and
    CubeTest — each both normalised and un-normalised, with and without
    subtopics — and write one TSV row per (run, topic) to
    <out_path>/<track>.new.max.eval.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--runs_path', type=str, required=True)
    parser.add_argument('--out_path', type=str, required=True)
    parser.add_argument('--cutoff', type=int, default=10)
    parser.add_argument('--list_depth', type=int, default=10)
    # DD=dynamic domain track, S=session track
    parser.add_argument('--track', type=str, required=True)
    parser.add_argument('--maxrel', type=str2bool, default=True)
    args = parser.parse_args()
    runs_path = args.runs_path
    out_path = args.out_path
    cutoff = args.cutoff
    list_depth = args.list_depth
    track = args.track
    max_doc_rel = args.maxrel
    print(max_doc_rel)
    out_file_path = os.path.join(out_path, track + '.new.max.eval')
    with io.open(out_file_path, 'w', encoding='utf8') as out_f:
        # TSV header row.
        out_f.write('dataset\tyear\trun\ttopic\tsDCG\tnsDCG\tEU\tnEU\tCT\tnCT'
                    '\tsDCGs\tnsDCGs\tEUs\tnEUs\tCTs\tnCTs\n')
        for year in _years(runs_path):
            print(year)
            # NOTE(review): `truth` and `runs` are only assigned for the DD
            # track; running with --track S would hit an unbound `runs`
            # below — confirm only DD is supported here.
            if track == 'DD':
                truth_file_path = _file_path(runs_path, year, 'groundtruth')
                dd_info_path = _file_path(runs_path, year, 'params')
                doc_length = pickle.load(open(dd_info_path, 'rb'))
                truth = DDTruth(truth_file_path, 'DD', doc_length, max_doc_rel)
                runs = _runs_DD(runs_path, year, 'runs')
            for run, r_name in runs:
                print(run)
                print(r_name)
                itercorr = False
                run_result = DDReader(run, itercorr).run_result
                # sort by topic no
                sorted_results = sorted(run_result.items(), key=lambda x: \
                    int(x[0].split('-')[1]))
                # sDCG params
                bq, b = 4, 2
                # EU params
                a, eu_gamma, p = 0.001, 0.5, 0.5
                # CT params
                ct_gamma, max_height = 0.5, 50
                for topic_id, topic_result in sorted_results:
                    sdcg, normalized_sdcg, utility, normalized_eu, ct, normalized_ct = 0,0,0,0,0,0
                    if track == 'DD':
                        # sDCG (with subtopics), normalised by its per-topic
                        # upper bound.
                        sdcg = sDCG_per_topic(truth.truth4SDCG(topic_id), \
                            topic_result, bq, b, cutoff, list_depth)
                        sdcg_bound = sDCG_bound_per_topic( \
                            truth.truth4SDCG_bound(topic_id), bq, b, \
                            cutoff, list_depth)
                        normalized_sdcg = 0
                        if sdcg_bound != 0:
                            normalized_sdcg = sdcg / sdcg_bound
                        else:
                            # Degenerate topic: no positive gain possible;
                            # skip the rest of this run's topics.
                            print('Optimal dcg is equal to 0')
                            print(topic_id)
                            print(truth.truth4SDCG(topic_id))
                            print(topic_result)
                            break
                        # EU, min-max normalised between its per-topic
                        # lower and upper bounds.
                        clear_prob()
                        utility = eu_per_topic(truth.truth4EU(topic_id), \
                            topic_result, a, eu_gamma, p, cutoff, list_depth)
                        upper, lower = eu_bound_per_topic( \
                            truth.truth4EU_bound(topic_id), a, eu_gamma, p, \
                            cutoff, list_depth)
                        normalized_eu = 0
                        if (upper - lower) != 0:
                            normalized_eu = (utility - lower) / (upper - lower)
                        # CT
                        gain, ct, act = cubetest_per_topic( \
                            truth.truth4CT(topic_id), \
                            topic_result, ct_gamma, max_height, \
                            cutoff, list_depth)
                        bound = ct_bound_per_topic(truth.truth4CT_bound(topic_id),\
                            ct_gamma, max_height, cutoff, list_depth)
                        normalized_ct = 0
                        if bound != 0:
                            normalized_ct = ct / bound
                    # and again, without subtopics
                    # sDCG
                    sdcg_s = sDCG_per_topic(truth.truth4SDCG_simple(topic_id),\
                        topic_result, bq, b, cutoff, list_depth)
                    sdcg_bound_s = sDCG_bound_per_topic( \
                        truth.truth4SDCG_bound_simple(topic_id), \
                        bq, b, cutoff, list_depth)
                    normalized_sdcg_s = 0
                    if sdcg_bound_s != 0:
                        normalized_sdcg_s = sdcg_s / sdcg_bound_s
                    # EU
                    clear_prob()
                    utility_s = eu_per_topic(truth.truth4EU_simple(topic_id), \
                        topic_result, a, eu_gamma, p, cutoff, \
                        list_depth)
                    upper_s, lower_s = eu_bound_per_topic( \
                        truth.truth4EU_bound_simple(topic_id),\
                        a, eu_gamma, p, cutoff, list_depth)
                    # NOTE(review): unlike the subtopic case above, this
                    # division is not guarded against upper_s == lower_s.
                    normalized_eu_s = (utility_s - lower_s)/(upper_s - lower_s)
                    # CT
                    gain_s, ct_s, act_s = cubetest_per_topic( \
                        truth.truth4CT_simple(topic_id), \
                        topic_result, ct_gamma, \
                        max_height, cutoff, list_depth)
                    bound_s = ct_bound_per_topic( \
                        truth.truth4CT_bound_simple(topic_id), \
                        ct_gamma, max_height, cutoff, list_depth)
                    normalized_ct_s = 0
                    if bound_s != 0:
                        normalized_ct_s = ct_s / bound_s
                    # write measurements
                    out_f.write(
                        '{dataset}\t{year}\t{run}\t{topic}'
                        '\t{sDCG}\t{nsDCG}'
                        '\t{EU}\t{nEU}'
                        '\t{CT}\t{nCT}'
                        '\t{sDCGs}\t{nsDCGs}'
                        '\t{EUs}\t{nEUs}'
                        '\t{CTs}\t{nCTs}'
                        '\n'.format(
                            dataset = track,
                            year = year,
                            run = r_name,
                            topic = topic_id,
                            sDCG = sdcg,
                            nsDCG = normalized_sdcg,
                            EU = utility,
                            nEU = normalized_eu,
                            CT = ct,
                            nCT = normalized_ct,
                            sDCGs = sdcg_s,
                            nsDCGs = normalized_sdcg_s,
                            EUs = utility_s,
                            nEUs = normalized_eu_s,
                            CTs = ct_s,
                            nCTs = normalized_ct_s,
                        ))
if __name__ == "__main__":
sys.exit(main())
| ekanou/Markovian-Session-Measures | scoring/eval_runs.py | eval_runs.py | py | 8,512 | python | en | code | 0 | github-code | 13 |
74330140176 | import copy
from datetime import datetime, timedelta, time
from package import Package
from hashmap import HashMap
import csv
# Boolean used to prevent updating package 9's address more than once during the optimization algorithm
_address_updated_9 = False
# Will point to copied instance of package 9 with old address after all deliveries are completed.
# This allows the old address to be used in lookups performed before 10:20 am.
_package_9_old = None
# Create appropriate data structures for use in the program
_packages = HashMap()
_locations = []
_distances = []
_trucks = {}
"""
Datetime object used in creating, comparing, and manipulating
all relevant datetime objects throughout the program
"""
today = datetime.combine(datetime.today(), time(0))
# Time: O(n^2) - Space: O(n^2)
# Time: O(n^2) - Space: O(n^2)
def deliver_packages(truck):
    """Deliver every package loaded on *truck* using a nearest-neighbour
    route, then record the truck's delivery metrics.

    Side effects: marks each package delivered with a timestamp, may update
    package 9's address once the simulated clock reaches 10:20 am (module
    flag `_address_updated_9` guards the one-time update), and calls
    truck.set_metrics() with miles/time totals.
    """
    start_time = truck.get_departure_time()
    packages = truck.get_packages()
    # Cancel package delivery if package list is empty
    if not packages:
        return
    # If truck contains package 9, ensure that the address is correct before running delivery.
    global _address_updated_9
    if not _address_updated_9 and start_time >= today.replace(hour=10, minute=20):
        get_package(9).update_address('410 S State St', 'Salt Lake City', 'UT', '84111')
        _address_updated_9 = True
    # Create list of all package locations for use in nearest neighbor algorithm
    # (package_locations maps location id -> list of package ids at that stop).
    package_locations = {}
    locations = set()
    for pkg in packages: # Time: O(n) - Space: O(n)
        pkg_loc = int(get_location(get_package(pkg).address)['id'])
        if pkg_loc not in package_locations:
            package_locations[pkg_loc] = []
        package_locations[pkg_loc].append(pkg)
        locations.add(pkg_loc)
    # Create appropriate variables and data structures for use in algorithm
    # (location id 0 is the hub, where every route starts and ends).
    current_location = 0
    visited_locations = set()
    current_time = start_time
    total_miles = 0
    delivered = set()
    """
    Nearest Neighbor Algorithm
    Time: O(n^2)
    Space: O(n^2)
    """
    while len(visited_locations) < len(locations): # Time: O(n) - Space: O(n)
        # If current delivery time is on or after 10:20 am, update package 9's address.
        if not _address_updated_9 and current_time >= today.replace(hour=10, minute=20):
            get_package(9).update_address('410 S State St', 'Salt Lake City', 'UT', '84111')
            _address_updated_9 = True
        neighbors = []
        for loc in locations:
            if loc not in visited_locations:
                neighbors.append({'id': loc, 'distance': get_distance(current_location, loc)})
        # Generate dictionary list of all unvisited neighbors (possible locations for next delivery)
        # neighbors = get_available(current_location, locations, visited_locations) # Time: O(n)
        # Find nearest neighbor (location with the shortest distance)
        nearest = {'id': neighbors[0]['id'], 'distance': neighbors[0]['distance']}
        for neighbor in neighbors: # Time: O(n)
            if neighbor['distance'] < nearest['distance']:
                nearest = {'id': neighbor['id'], 'distance': neighbor['distance']}
        # Calculate delivery distance and time (speed is in mph).
        travel_minutes = (nearest['distance'] / truck.speed) * 60
        current_time += timedelta(minutes=travel_minutes)
        total_miles += nearest['distance']
        # Deliver and update all packages found at current delivery location
        for pkg in package_locations.get(nearest['id']): # Time: O(n) - Space: O(n)
            nearest_package = get_package(pkg)
            delivered.add(nearest_package.get_id())
            nearest_package.set_delivered(True, current_time.replace(second=0))
        # Mark current location as visited
        current_location = nearest['id']
        visited_locations.add(current_location)
    # Return truck to hub after all deliveries are complete
    return_miles = get_distance(current_location, 0)
    total_miles += return_miles
    travel_minutes = (return_miles / truck.speed) * 60
    current_time += timedelta(minutes=travel_minutes)
    # Calculate total time traveled using miles traveled and truck speed
    hours = int((total_miles // truck.speed))
    minutes = int(((total_miles / truck.speed) * 60) % 60)
    # Finally, set the delivery metrics for the truck
    truck.set_metrics({'delivered': delivered, 'miles': total_miles, 'end_time': current_time,
                       'time_spent': f'{hours}h {minutes}m'})
"""
------------------------
PACKAGE METHODS
------------------------
"""
# Time: O(n^2) - Space: O(n)
# Scan and store all package data found in 'packages.csv' file
def scan_packages():
    """Load every row of data/packages.csv into the module-level _packages map.

    Deadline strings are parsed into datetimes anchored on `today`; the
    sentinel 'EOD' (end of day) is treated as 5:00 PM.
    """
    with open('data/packages.csv') as file:
        reader = csv.DictReader(file)
        for line in reader:
            # Instantiate proper datetime object for deadline
            deadline = line['deadline']
            if deadline == 'EOD':
                deadline = "5:00 PM"
            deadline_time = datetime.combine(today, datetime.strptime(deadline, '%I:%M %p').time())
            package = Package(int(line['id']), line['address'], line['city'], line['state'], line['zip_code'],
                              deadline_time, int(line['weight']))
            _packages.put(package.package_id, package) # O(n)
# Time: O(1) - Space: O(1)
def get_all_packages():
    """Return the HashMap of all packages, keyed by package id."""
    return _packages
# Time: O(n) - Space: O(1)
def get_package(package_id):
    """Look up a single package by id (delegates to HashMap.get)."""
    return _packages.get(package_id) # O(n)
# ---- Package lookup helpers (each scans every package id; the underlying
# HashMap.get is O(n), so each lookup is O(n^2) time, O(n) space) ----
def _package_for_lookup(package_id, time_req):
    """Fetch a package, substituting the pre-10:20 am copy of package 9.

    Before the 10:20 am address correction, package #9 must be reported
    with its old (incorrect) address.
    """
    package = _packages.get(package_id)
    if package.get_id() == 9 and time_req < today.replace(hour=10, minute=20):
        package = _package_9_old
    return package
def get_packages_by_address(address, time_req):
    """All packages whose street address matches (case-insensitive)."""
    wanted = address.lower()
    matches = []
    for package_id in range(1, len(_packages) + 1):
        package = _package_for_lookup(package_id, time_req)
        if package.address.lower() == wanted:
            matches.append(package)
    return matches
def get_packages_by_deadline(deadline):
    """All packages with exactly the given deadline datetime."""
    matches = []
    for package_id in range(1, len(_packages) + 1):
        package = _packages.get(package_id)
        if package.deadline == deadline:
            matches.append(package)
    return matches
def get_packages_by_city(city):
    """All packages destined for *city* (case-insensitive)."""
    wanted = city.lower()
    matches = []
    for package_id in range(1, len(_packages) + 1):
        package = _packages.get(package_id)
        if package.city.lower() == wanted:
            matches.append(package)
    return matches
def get_packages_by_zip(zip_code, time_req):
    """All packages with the given ZIP code, honouring package 9's old address."""
    matches = []
    for package_id in range(1, len(_packages) + 1):
        package = _package_for_lookup(package_id, time_req)
        if package.zip_code == zip_code:
            matches.append(package)
    return matches
def get_packages_by_weight(weight):
    """All packages of exactly the given weight."""
    matches = []
    for package_id in range(1, len(_packages) + 1):
        package = _packages.get(package_id)
        if package.weight == weight:
            matches.append(package)
    return matches
def get_packages_by_status(status, time_req):
    """All packages whose delivery status at *time_req* matches (case-insensitive)."""
    wanted = status.lower()
    matches = []
    for package_id in range(1, len(_packages) + 1):
        package = _packages.get(package_id)
        if package.get_status(time_req).lower() == wanted:
            matches.append(package)
    return matches
# Time: O(n) - Space: O(1)
# Set deep copy of package 9 with initial known address,
# for use in lookup functions when time is before 10:20 am
def set_old_package_9():
    """Snapshot package 9 with its original (wrong) address into _package_9_old."""
    global _package_9_old
    _package_9_old = copy.deepcopy(get_package(9)) # O(n)
    _package_9_old.update_address('300 State St', 'Salt Lake City', 'UT', '84103')
# Time: O(1) - Space: O(1)
def get_old_package_9():
    """Return the saved pre-correction copy of package 9 (None until set)."""
    return _package_9_old
"""
----------------------
TRUCK METHODS
----------------------
"""
# Time: O(1) - Space: O(1)
def put_truck(truck):
    """Register *truck* in the module-level _trucks dict, keyed by its id."""
    _trucks[truck.id] = truck
# Time: O(1) - Space: O(1)
def get_truck(truck_id):
    """Return the truck registered under *truck_id* (KeyError if unknown)."""
    return _trucks[truck_id]
# Time: O(1) - Space: O(1)
def get_all_trucks():
    """Return the dict of all registered trucks."""
    return _trucks
# Time: O(1) - Space: O(1)
# Prints out per-truck delivery metrics
def print_metrics(truck_id):
    """Pretty-print one truck's delivery metrics (set by deliver_packages)."""
    metrics = get_truck(truck_id).get_metrics()
    print("\n\tTruck", truck_id, "delivery metrics:")
    print("\t\t" + f"{'Departure time: ':<25}" + f"{get_truck(truck_id).get_departure_time().strftime('%I:%M %p'):<20}")
    print("\t\t" + f"{'Packages delivered: ':<25}" + f"{len(metrics['delivered']):<20}")
    print("\t\t" + f"{'Miles traveled: ':<25}" + f"{metrics['miles']:<20}")
    print("\t\t" + f"{'Time spent: ':<25}" + f"{metrics['time_spent']:<20}")
    print("\t\t" + f"{'Return to hub time: ':<25}" + f"{metrics['end_time'].strftime('%I:%M %p'):<20}")
# Time: O(1) - Space: O(1)
# Prints out all delivery metrics
def print_all_metrics():
    """Print per-truck metrics plus fleet totals.

    NOTE(review): assumes exactly trucks 1, 2 and 3 are registered, that
    truck 1 departs first and truck 3 returns last — confirm against setup.
    """
    print_metrics(1)
    print_metrics(2)
    print_metrics(3)
    total_miles = get_truck(1).get_metrics()['miles'] + get_truck(2).get_metrics()['miles'] + \
                  get_truck(3).get_metrics()['miles']
    total_td = str(get_truck(3).get_metrics()['end_time'] - get_truck(1).get_departure_time()).split(':')
    total_time = f"{int(total_td[0]):01}h {int(total_td[1]):02}m"
    print("\n\t" + f"{'Total miles traveled: ':<29}" + f"{total_miles:<20}")
    print("\t" + f"{'Total delivery time: ':<29}" + f"{total_time:<20}")
"""
-------------------------
LOCATION METHODS
-------------------------
"""
# Time: O(n) - Space: O(1)
# Scan and store all location data found in 'locations.csv' file
def scan_locations():
    """Load data/locations.csv rows into the module-level _locations list."""
    with open('data/locations.csv') as file:
        reader = csv.DictReader(file)
        for line in reader:
            _locations.append({'id': line['id'], 'name': line['name'], 'address': line['address']})
# Time: O(1) - Space: O(1)
def get_all_locations():
    """Return the list of all location dicts ({'id', 'name', 'address'})."""
    return _locations
# Time: O(n) - Space: O(1)
def get_location(address):
    """Return the location dict matching *address* (case-insensitive), or None."""
    for location in _locations:
        if location['address'].lower() == address.lower():
            return location
"""
-------------------------
DISTANCE METHODS
-------------------------
"""
# Time: O(n^2) - Space: O(n)
# Scan and store all distance data found in 'distances.csv' file
def scan_distances():
    """Load the triangular distance matrix from data/distances.csv into
    the module-level _distances list (empty cells are skipped)."""
    with open('data/distances.csv') as file:
        reader = csv.reader(file)
        for line in reader:
            _curr_distances = []
            for j in range(len(line)):
                if line[j]:
                    _curr_distances.append({'id': j, 'distance': float(line[j])})
            _distances.append(_curr_distances)
# Time: O(1) - Space: O(1)
def get_all_distances():
    """Return the raw (triangular) distance matrix."""
    return _distances
# Time: O(1) - Space: O(1)
# Return the distance (in miles) between two points, A and B
def get_distance(a, b):
    """Distance in miles between location ids *a* and *b*.

    The matrix is triangular, so the lookup is mirrored when a <= b.
    """
    if a > b:
        return _distances[a][b]['distance']
    return _distances[b][a]['distance']
| alexmbright/C950-PackageDeliveryAlgorithm | logistics.py | logistics.py | py | 11,031 | python | en | code | 0 | github-code | 13 |
3239306291 | # -*- coding: utf-8 -*-
from app.bootstrap import Bootstrap
from data.config import Config
from routing.router import Router
from net.request import Request, Builder
from error.errors import Error
class LambdaApp(object):
    """Application entry point for AWS Lambda."""

    def __build_request(self, event: dict) -> Request:
        """Build a Request from the Lambda event payload.

        Only the keys actually present in the event are applied to the
        builder; everything else keeps the builder's defaults.
        """
        builder = Builder()
        for key, apply_field in (
            ('url', builder.url),
            ('method', builder.method),
            ('headers', builder.headers),
            ('queries', builder.queries),
            ('body', builder.body),
        ):
            if key in event:
                apply_field(event[key])
        return builder.build()

    def run(self, config: Config, event: dict) -> dict:
        """Dispatch the event to its handler and return the response dict.

        Any Error-family exception is re-raised as a plain Exception with
        a formatted "[code] ClassName: message" string.
        """
        try:
            request = self.__build_request(event)
            Bootstrap(config, request)
            router = Router(config.get('routes.path'))
            receiver = router.dispatch(request.url)
            handler = receiver.instantiate(config, request)
            return handler().to_dict()
        except Error as e:
            raise Exception(f'[{e.code}] {e.__class__.__name__}: {e.message}')
| rog-works/python-ws | app/lambdaapp.py | lambdaapp.py | py | 1,514 | python | ja | code | 0 | github-code | 13 |
34278527461 | from datetime import datetime, timedelta
from time import mktime
from test_utils import NellTestCase
from scheduler.models import *
from utils import create_blackout
from utilities.TimeAgent import dst_boundaries, tz_to_tz
class TestBlackout(NellTestCase):
def setUp(self):
super(TestBlackout, self).setUp()
# create some user blackouts
self.u = User(first_name = "Test"
, last_name = "User"
)
self.u.save()
self.blackout1 = create_blackout(user = self.u,
repeat = "Once",
start = datetime(2009, 1, 1, 11),
end = datetime(2009, 1, 3, 11))
self.blackout2 = create_blackout(user = self.u,
repeat = 'Weekly',
start = datetime(2009, 1, 4, 11),
end = datetime(2009, 1, 4, 13),
until = datetime(2009, 5, 4, 11))
# create some project blackouts
semester = Semester.objects.get(semester = "08C")
ptype = Project_Type.objects.get(type = "science")
self.pcode = "GBT08C-01"
self.project = Project(semester = semester
, project_type = ptype
, pcode = self.pcode
)
self.project.save()
self.blackout3 = create_blackout(project = self.project,
timezone = 'UTC',
repeat = 'Once',
start = datetime(2008, 10, 1, 11),
end = datetime(2008, 10, 3, 11))
    def test_isActive(self):
        """Every blackout is active 2h after its start; only the weekly
        repeater is still active 2h after its (first-occurrence) end."""
        results = [(b.isActive(b.getStartDate() + timedelta(hours = 2))
                  , b.isActive(b.getEndDate() + timedelta(hours = 2)))
                       for b in Blackout.objects.all()]
        self.assertEqual(results, [(True, False), (True, True), (True, False)])
def test_generateDates(self):
# no repeats are easy ...
dts = [(self.blackout1.getStartDate(), self.blackout1.getEndDate())]
calstart = datetime(2009, 1, 1)
calend = datetime(2009, 1, 30)
gdts = self.blackout1.generateDates(calstart, calend)
self.assertEquals(dts, gdts)
# should be none in June.
calstart = datetime(2009, 6, 1)
calend = datetime(2009, 6, 30)
gdts = self.blackout1.generateDates(calstart, calend)
self.assertEquals(0, len(gdts))
# repeats are more complicated
# how does January look?
calstart = datetime(2009, 1, 1)
calend = datetime(2009, 1, 30)
start = self.blackout2.getStartDate()
end = self.blackout2.getEndDate()
dts = [(start, end)]
for i in [1,2,3]:
dts.append((start + timedelta(days = 7 * i)
, end + timedelta(days = 7 * i)))
gdts = self.blackout2.generateDates(calstart, calend)
self.assertEquals(dts, gdts)
# outside of calendar start/end, but weekly until May
calstart = datetime(2009, 2, 1)
calend = datetime(2009, 2, 28)
gdts = self.blackout2.generateDates(calstart, calend)
self.assertEquals(4, len(gdts))
# should be none in June.
calstart = datetime(2009, 6, 1)
calend = datetime(2009, 6, 30)
gdts = self.blackout2.generateDates(calstart, calend)
self.assertEquals(0, len(gdts))
# should be none in previous June.
calstart = datetime(2008, 6, 1)
calend = datetime(2008, 6, 30)
gdts = self.blackout2.generateDates(calstart, calend)
self.assertEquals(0, len(gdts))
# no repeats are easy ... even for project blackouts
dts = [(self.blackout3.getStartDate(), self.blackout3.getEndDate())]
calstart = datetime(2008, 10, 1)
calend = datetime(2008, 10, 30)
gdts = self.blackout3.generateDates(calstart, calend)
self.assertEquals(dts, gdts)
# test filter outside of blackouts
calstart = datetime(2011, 10, 1)
calend = datetime(211, 10, 30)
gdts = self.blackout1.generateDates(calstart, calend)
self.assertEquals(0, len(gdts))
gdts = self.blackout2.generateDates(calstart, calend)
self.assertEquals(0, len(gdts))
gdts = self.blackout3.generateDates(calstart, calend)
self.assertEquals(0, len(gdts))
def test_projectBlackout(self):
"Repeat some of the other tests, but for project blackouts"
self.assertEquals(self.blackout3.forName(), self.project.pcode)
self.assertEquals(self.blackout3.forUrlId(), self.project.pcode)
def test_ut_dst(self):
blackout = create_blackout(user = self.u,
repeat = 'Weekly',
start = datetime(2011, 1, 1, 11),
end = datetime(2011, 1, 4, 13),
until = datetime(2011, 12, 4, 11),
timezone = 'UTC')
# This is a UTC blackout. Every start time and end time
# generated should equal the start and end times above, 11:00
# and 13:00.
dates = blackout.generateDates(blackout.getStartDate(), blackout.getUntil())
start_time = blackout.getStartDate().time()
end_time = blackout.getEndDate().time()
for i in dates:
self.assertEquals(i[0].time(), start_time)
self.assertEquals(i[1].time(), end_time)
def test_pt_dst(self):
# dates are given as UTC dates even though the timezone is
# given as a local timezone. This is the way the blackout
# view works. :/
localstart = datetime(2011, 1, 1, 11)
localend = datetime(2011, 1, 4, 13)
localuntil = datetime(2011, 12, 4, 11)
utcstart = tz_to_tz(localstart, 'US/Pacific', 'UTC', naive = True)
utcend = tz_to_tz(localend, 'US/Pacific', 'UTC', True)
utcuntil = tz_to_tz(localuntil, 'US/Pacific', 'UTC', True)
spring, fall = dst_boundaries('US/Pacific', utcstart, utcuntil)
my_bo = create_blackout(user = self.u,
repeat = 'Weekly',
start = utcstart,
end = utcend,
until = utcuntil,
timezone = 'US/Pacific')
# generate 'UTC' sequence of blackout dates for standard time
# until spring transition.
dates = my_bo.generateDates(utcstart,
spring,
local_timezone = False)
self.assertNotEquals(len(dates), 0)
# All the dates except the last one are in standard time.
for i in range(0, len(dates) - 1):
self.assertEquals(dates[i][0].time(), utcstart.time())
self.assertEquals(dates[i][1].time(), utcend.time())
# the last one straddles DST, so end should be an hour earlier in UTC.
self.assertEquals(dates[-1][0].time(), utcstart.time())
self.assertEquals(dates[-1][1].time(), (utcend - timedelta(hours = 1)).time())
# generate 'UTC' sequence of blackout dates for spring DST
# transition until fall transition. This sequence will
# include 2 transition blackouts over both DST transitions:
one_hour = timedelta(hours = 1)
dates = my_bo.generateDates(spring,
fall,
local_timezone = False)
self.assertNotEquals(len(dates), 0)
self.assertEquals(dates[0][0].time(), utcstart.time())
self.assertEquals(dates[0][1].time(), (utcend - one_hour).time())
for i in range(1, len(dates) - 1):
self.assertEquals(dates[i][0].time(), (utcstart - one_hour).time())
self.assertEquals(dates[i][1].time(), (utcend - one_hour).time())
self.assertEquals(dates[-1][0].time(), (utcstart - one_hour).time())
self.assertEquals(dates[-1][1].time(), utcend.time())
# generate 'UTC' sequence of blackout dates from fall
# transition until the 'until' time. Back to standard time.
# The first blackout in the range will be a transition
# blackout.
dates = my_bo.generateDates(fall,
utcuntil,
local_timezone = False)
self.assertNotEquals(len(dates), 0)
self.assertEquals(dates[0][0].time(), (utcstart - one_hour).time())
self.assertEquals(dates[0][1].time(), utcend.time())
for i in range(1, len(dates)):
self.assertEquals(dates[i][0].time(), utcstart.time())
self.assertEquals(dates[i][1].time(), utcend.time())
# generate local timezone sequence of blackout dates for the
# entire range. Despite the complexity of the underlying UTC
# representation, the local times should all be the same.
dates = my_bo.generateDates(utcstart,
utcuntil,
local_timezone = True)
self.assertNotEquals(len(dates), 0)
for i in dates:
self.assertEquals(i[0].time(), localstart.time())
self.assertEquals(i[1].time(), localend.time())
| nrao/nell | scheduler/tests/TestBlackout.py | TestBlackout.py | py | 9,711 | python | en | code | 0 | github-code | 13 |
12641859572 | import numpy as np
#from numba import jit
# GUPPI RAW / hashpipe layout constants.
HEADER_KEY_VAL_SIZE = 80           # bytes per "KEY = value" header record
DIRECT_IO_SIZE = 512               # DIRECTIO files pad headers to this boundary
HPGUPPI_HDR_SIZE = 5*80*512        # fixed size of the header region per block
HPGUPPI_DATA_SIZE = 128*1024*1024  # fixed size of the data region per block
HPGUPPI_N_BLOCKS = 24


class Dumpfile():
    """
    A very basic GUPPI RAW file reader.

    The file is a sequence of blocks: a fixed-size text header (80-byte
    "KEY = value" records terminated by an END record, padded out to
    HPGUPPI_HDR_SIZE) followed by a fixed-size data region of which the
    first BLOCSIZE bytes are payload.
    """
    def __init__(self, fname):
        """Open *fname* for binary reading; *fname* must be a string path."""
        if not isinstance(fname, str):
            raise RuntimeError("Please provide string filename")
        self.fname = fname
        self.file = open(fname, "rb")

    def __del__(self):
        # Guard: __init__ may have raised before self.file was assigned.
        if hasattr(self, "file"):
            self.file.close()

    def _parse_header(self):
        """Parse one header block into a dict, or return None at EOF.

        Values are converted to int/float when possible, otherwise kept as
        stripped strings.  Consumes the whole fixed-size header region,
        plus 512-byte alignment padding when the header sets DIRECTIO.
        """
        header = {}
        nbytes_read = 0
        hread = self.file.read(HEADER_KEY_VAL_SIZE).decode('UTF-8')
        if not hread: # we have reached the end of file
            return None
        nbytes_read += HEADER_KEY_VAL_SIZE
        while not hread.startswith("END"):
            # Split on the first '=' only, so values containing '=' survive.
            key, val = hread.split("=", 1)
            key = key.strip()
            val = val.strip()
            try:
                if "." in val:
                    val = float(val)
                else:
                    val = int(val)
            except ValueError:
                # Non-numeric: strip surrounding quotes and whitespace.
                val = val.strip("'").strip()
            header[key] = val
            hread = self.file.read(HEADER_KEY_VAL_SIZE).decode('UTF-8')
            nbytes_read += HEADER_KEY_VAL_SIZE
        assert hread == "END"+" "*77, "Not a GUPPI RAW format"
        # Skip the unused remainder of the fixed-size header region.
        _ = self.file.read(HPGUPPI_HDR_SIZE - nbytes_read)
        nbytes_read = HPGUPPI_HDR_SIZE
        # DIRECTIO may be absent; treat missing as 0 (no padding).
        if header.get('DIRECTIO', 0):
            remainder = nbytes_read % DIRECT_IO_SIZE
            to_seek = (DIRECT_IO_SIZE - remainder) % DIRECT_IO_SIZE
            _ = self.file.read(to_seek)
        return header

    def read_next_block(self):
        """Read the next block; returns (header, data) or None at EOF.

        Only NBITS=4 complex samples are supported: each byte packs the
        real part in the high nibble and the imaginary part in the low
        nibble (both signed), unpacked here into a complex64 array.
        """
        header = self._parse_header()
        if not header:
            return None
        blocsize = header['BLOCSIZE']
        nbits = header['NBITS']
        try:
            nants = header['NANTS']
        except KeyError:
            nants = -1  # NANTS is optional; -1 marks "not present"
        if nbits != 4:
            raise NotImplementedError("Only 4-bit data is implemented")
        data_raw = np.fromfile(self.file, dtype=np.int8, count=blocsize)
        data = np.zeros_like(data_raw, dtype=np.complex64)
        # Arithmetic shifts sign-extend each nibble: high -> real, low -> imag.
        data[:] = (data_raw >> 4) + 1j*(data_raw << 4 >> 4)
        # Skip the unused remainder of the fixed-size data region.
        self.file.seek(HPGUPPI_DATA_SIZE - blocsize, 1)
        return header, data
| wfarah/guppi | guppi/dumpfile.py | dumpfile.py | py | 2,377 | python | en | code | 1 | github-code | 13 |
34899519450 | #!/usr/bin/env python3
import argparse
import logging
import os
import select
from systemd import journal
class Exporter:
    """Tails the systemd journal (optionally filtered to a single unit)
    and prints every message line to stdout."""

    def __init__(
        self,
        target_systemd_unit: str,
        logger: logging.Logger = None,
        log_level: str = "INFO",
        systemd_log_level: str = "INFO",
    ):
        """
        :param target_systemd_unit: unit to filter on (e.g. "kubelet.service"),
            or a falsy value to stream every unit's logs
        :param logger: optional pre-configured logger; one is created if absent
        :param log_level: Python logging level name for this application
        :param systemd_log_level: journal priority name (INFO, DEBUG, ...)
        """
        # set up our logger
        if not logger:
            logger_level = getattr(logging, log_level.upper())
            self.logger = logging.getLogger(__name__)
            logging.basicConfig(level=logger_level)
        else:
            self.logger = logger

        # set up our journal
        journal_log_level = getattr(journal, f"LOG_{systemd_log_level.upper()}")
        self.reader = journal.Reader()
        self.reader.log_level(journal_log_level)

        # BUG FIX: always record the target unit (possibly None/empty).
        # Previously the attribute was only assigned inside the `if` below,
        # so log() raised AttributeError whenever no unit was provided.
        self._target_systemd_unit = target_systemd_unit
        # if there's a specific systemd unit to filter on, like the kubelet, set that filter
        if target_systemd_unit:
            logging.debug(
                "Target systemd service provided, targeting _SYSTEMD_UNIT [%s]",
                target_systemd_unit,
            )
            self.reader.add_match(_SYSTEMD_UNIT=target_systemd_unit)

    def log(self):
        """Follow the journal forever, printing each new MESSAGE to stdout."""
        self.logger.info("Starting log collection")
        if self._target_systemd_unit:
            self.logger.info(
                "Logging events to systemd service [%s]", self._target_systemd_unit
            )
        else:
            self.logger.info("Logging all systemd events - this will be noisy!")

        # start at the head of the journal
        self.logger.debug("Seeking head of systemd logs")
        self.reader.seek_head()

        # poll the stream for logs
        p = select.poll()
        p.register(self.reader, self.reader.get_events())
        self.logger.debug("Polling for events...")
        while p.poll():
            # BUG FIX: Exporter has no process() method — the journal event
            # type comes from journal.Reader.process(); previously this read
            # self.process() and crashed with AttributeError on first wake-up.
            if self.reader.process() != journal.APPEND:
                self.logger.debug("Found non-append event, continuing...")
                continue
            for entry in self.reader:
                self.logger.debug("Received entry [%s]", entry)
                if entry["MESSAGE"]:
                    print(f"{entry['__REALTIME_TIMESTAMP']} {entry['MESSAGE']}")
if __name__ == "__main__":
    # CLI entry point; every flag can also come from the environment.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-l",
        "--exported-log-level",
        dest="systemd_log_level",
        default=os.environ.get("EXPORTER_SYSTEMD_LOG_LEVEL", "INFO"),
        help="The log level to read systemd logs at; one of ALERT, CRIT, DEBUG, EMERG, ERR, INFO, LOG, WARNING",
    )
    # BUG FIX: "-t" was listed twice in this add_argument call; the
    # duplicate option string is dropped.
    parser.add_argument(
        "-t",
        "--target",
        dest="target_systemd_unit",
        default=os.environ.get("EXPORTER_SYSTEMD_TARGET"),
        help="The systemd unit to target",
    )
    parser.add_argument(
        "--log-level",
        dest="log_level",
        default=os.environ.get("EXPORTER_PYTHON_LOG_LEVEL", "INFO"),
        help="The **Python** log level to output application debug logs at",
    )
    args = parser.parse_args()

    exporter = Exporter(
        target_systemd_unit=args.target_systemd_unit,
        log_level=args.log_level,
        systemd_log_level=args.systemd_log_level,
    )
    exporter.log()
| blomquistr/python-node-exporter | exporter/exporter.py | exporter.py | py | 3,374 | python | en | code | 0 | github-code | 13 |
8902576740 | from turtle import Turtle
class Runer(Turtle):
    # NOTE(review): the class name looks like a typo for "Runner"; kept
    # as-is because renaming would break importers.
    def __init__(self) -> None:
        """Create the player turtle at the bottom of the screen, facing up."""
        super().__init__()
        self.hideturtle()  # hide while configuring to avoid a visible jump
        self.penup()       # don't draw a trail when moving
        self.shape("turtle")
        self.color("black")
        self.setheading(90)   # 90 degrees = pointing up (north)
        self.goto(0, -280)    # starting position at the bottom edge
        self.showturtle()
def run(self):
self.forward(10) | foxtailer/learn | 100day_of_code/TURTLE/turtle_cross/runer.py | runer.py | py | 336 | python | en | code | 0 | github-code | 13 |
40424588352 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Libraries dependancies :
#
# Import peewee ORM.
from peewee import *
# Import Base utils.base (Peewee ORM connector)
from utils.bdd import MyModel
# Importe datetime.datatime and datetime.timedelta
from datetime import datetime, timedelta
# Import logging library
import logging
#
#
class Context(MyModel):
"""
Concept of Haroun Context.
Allow to store value that can be shared between Intents.
Context info (key, value) must be defined by Domains skills.
"""
# Context info key.
key = CharField()
# Context info value.
value = CharField()
# Context domain, define a specific domain context info is reserved for.
domain = CharField(null = True)
# Context expiration date (store timestamp).
expire = FloatField()
class Meta:
""" Model-specific configuration class Meta. """
# Table indexes.
indexes = (
# Create unique index on key/domain
(('key', 'domain'), True),
)
@staticmethod
def add(key, value, domain = None, duration = 60):
"""
Add some info (key, value) to context table.
Update value if key already exist.
---
Parameters
key : String
Context key.
value : String
Context value.
domain : String (optionnal)
Domain name info is reserved for, by default context info is for all domains. [Default = None]
duration : Int (optionnal)
Context duration in minutes [Default = 60].
---
Return : Context
Created Context Object.
"""
# Get expire date from duration.
expire_date = datetime.now() + timedelta(minutes=duration)
expire_timestamp = datetime.timestamp(expire_date)
# If exist.
if Context.check(key, domain) :
# Retrieve.
context = Context.get(key, domain)
# Update value and expire.
context.value = value
context.expire = expire_timestamp
# Save.
context.save()
else:
# Create context entry.
context = Context.create(
key=key,
value=value,
domain=domain,
expire=expire_timestamp,
)
# [LOG]
logging.debug(f"Context.add : key = {key}, value = {value}, domain = {domain}, duration = {duration}")
# Return created context.
return context
@staticmethod
def remove(key, domain = None):
"""
Remove specific info (key, value) from context table.
---
Parameters
key : String
Context key.
domain : String (optionnal)
Domain name info is reserved for, by default context info is for all domains. [Default = None]
---
Return : Boolean
Context found and deleted.
"""
# Retrieve context object.
context = Context.get(key, domain)
# [LOG]
logging.debug(f"Context.remove : key = {key}, domain = {domain}")
# If exist.
if context :
context.delete_instance()
return True
else:
return False
@staticmethod
def get(key, domain = None):
"""
Retrieve some info from context using Context.key
---
Parameters
key : String
Context key.
domain : String (optionnal)
Domain name info is reserved for, by default context info is for all domains. [Default = None]
---
Return : Context/None
Context Object, None if no matching result.
"""
# Get current timestamp.
now_timestamp = datetime.timestamp(datetime.now())
# Create select query
query = Context.select().where((Context.key == key) & (Context.domain == domain) & (Context.expire > now_timestamp))
# Check if no result.
if query.exists() :
for context in query:
return context
else:
return None
@staticmethod
def reverse_get(value, domain = None):
"""
Retrieve some info from context using Context.value
---
Parameters
value : String
Context value.
domain : String (optionnal)
Domain name info is reserved for, by default context info is for all domains. [Default = None]
---
Return : List
Contexts Object in list, None if no matching result.
"""
# Get current timestamp.
now_timestamp = datetime.timestamp(datetime.now())
# Create query
query = Context.select().where((Context.value == value) & (Context.domain == domain) & (Context.expire >now_timestamp))
# Check query have result.
if query.exists() :
# Return query context objects.
return [context for context in query]
else :
return None
@staticmethod
def check(key, domain = None):
"""
Check if some info exist in context using Context.key
---
Parameters
key : String
Context key.
domain : String (optionnal)
Domain name info is reserved for, by default context info is for all domains. [Default = None]
---
Return : Boolean
Context Object exists.
"""
# First clean expired context.
Context.clean()
# Get current timestamp.
now_timestamp = datetime.timestamp(datetime.now())
# Create query
query = Context.select().where((Context.key == key) & (Context.domain == domain) & (Context.expire > now_timestamp))
# Return query exists value.
return query.exists()
@staticmethod
def reverse_check(value, domain = None):
"""
Check if some info exist in context using Context.value
---
Parameters
value : String
Context value.
domain : String (optionnal)
Domain name info is reserved for, by default context info is for all domains. [Default = None]
---
Return : Boolean
Context Object exists.
"""
# Get current timestamp.
now_timestamp = datetime.timestamp(datetime.now())
# Create query
query = Context.select().where((Context.value == value) & (Context.domain == domain) & (Context.expire > now_timestamp))
# Return query exists value.
return query.exists()
@staticmethod
def clean():
"""
Search for expired context info and remove them from table.
---
return Int
Number of rows removed.
"""
# Get current timestamp.
now_timestamp = datetime.timestamp(datetime.now())
# Create delete query.
query = Context.delete().where(Context.expire < now_timestamp)
# Execute query
nb_rows_deleted = query.execute()
# Return
return nb_rows_deleted
| LounisBou/haroun | core/concepts/Context.py | Context.py | py | 8,145 | python | en | code | 0 | github-code | 13 |
27953288364 | """
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
from __future__ import annotations
import typing
from typing import Literal
import aiohttp
import discord
from discord import app_commands
from discord.app_commands import locale_str as _
from discord.ext import commands, menus
from ...core.context import Context
from ...core.embed import ZEmbed
from ...core.errors import DefaultError, NotNSFWChannel
from ...core.menus import ZMenuView
from ...core.mixin import CogMixin
from ...utils import isNsfw
NEKO_API = "http://api.nekos.fun:8080/api/"
class NekoMenu(ZMenuView):
def __init__(self, ctx, source, **kwargs):
self._source = source
super().__init__(ctx, **kwargs)
@property
def source(self):
return self._source
def shouldAddButtons(self):
return self._source.is_paginating()
async def getNeko(self, interaction: discord.Interaction = None):
if interaction:
await interaction.response.defer()
page = await self._source.getNeko()
return page
async def sendInitialMessage(self, ctx):
e = await self.getNeko()
return await ctx.send(embed=e, view=self)
async def finalize(self, timedOut):
try:
if self._message:
for item in self.children:
item.disabled = True # type: ignore
await self._message.edit(view=self)
super().finalize(timedOut)
except discord.HTTPException:
pass
@discord.ui.button(emoji="\N{BLACK SQUARE FOR STOP}")
async def stopNeko(self, interaction: discord.Interaction, button: discord.ui.Button):
await self.stop()
@discord.ui.button(emoji="\N{BLACK RIGHT-POINTING TRIANGLE}")
async def getNewNeko(self, interaction: discord.Interaction, button: discord.ui.Button):
e = await self.getNeko(interaction)
if interaction.message:
return await interaction.message.edit(embed=e)
class NekoPageSource(menus.PageSource):
def __init__(self, session, endpoint, onlyOne: bool = False):
self.session = session or aiohttp.ClientSession()
self.endpoint = endpoint
self.onlyOne = onlyOne
def is_paginating(self):
return not self.onlyOne
async def getNeko(self):
for _ in range(5):
try:
async with self.session.get(NEKO_API + self.endpoint) as req:
img = await req.json()
return ZEmbed().set_image(url=img["image"].replace(" ", "%20")).set_footer(text="Powered by nekos.fun")
except KeyError:
continue
raise DefaultError("Can't find any image, please try again later.")
DEFAULT_NEKO = "lewd"
TAGS = Literal[
"pussy",
"feet",
"tits",
"boobs",
"yuri",
"lesbian",
"holo",
"ahegao",
"gasm",
"ass",
]
class NSFW(commands.Cog, CogMixin):
"""NSFW Commands."""
icon = "😳"
def __init__(self, bot) -> None:
super().__init__(bot)
async def cog_check(self, ctx):
"""Only for NSFW channels"""
if not isNsfw(ctx.channel):
raise NotNSFWChannel
return True
async def showHentai(self, ctx, tag: str):
endpoints = {
"any": DEFAULT_NEKO,
"pussy": "pussy",
"feet": "feet",
"boobs": "boobs",
"lesbian": "lesbian",
"holo": "holo",
"gasm": "gasm",
"ass": "ass",
}
menus = NekoMenu(
ctx,
NekoPageSource(
self.bot.session,
endpoints.get(tag, DEFAULT_NEKO),
),
)
await menus.start()
@app_commands.command(name=_("hentai"), nsfw=True, description=_("hentai-desc"))
async def hentaiSlash(self, inter: discord.Interaction, tag: TAGS):
ctx = await Context.from_interaction(inter)
return await self.showHentai(ctx, tag)
@commands.command(
aliases=typing.get_args(TAGS),
description="Get hentai images from nekos.fun",
help="\n\nTIPS: Use different alias to get images from different hentai category",
)
async def hentai(self, ctx: Context):
aliases = {"tits": "boobs", "yuri": "lesbian", "ahegao": "gasm"}
invokedWith = ctx.invoked_with or "any"
tag = aliases.get(invokedWith, invokedWith)
return await self.showHentai(ctx, tag)
| ZiRO-Bot/Z3R0 | src/zibot/exts/nsfw/nsfw.py | nsfw.py | py | 4,611 | python | en | code | 3 | github-code | 13 |
41588476221 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
admin.autodiscover()
urlpatterns = patterns('',
# social login stuff
url('', include('social.apps.django_app.urls', namespace='social')),
url('', include('django.contrib.auth.urls', namespace='auth')),
url(r'^$', 'rssplus.views.home', name='home'),
url(r'^wakemydyno.txt$', 'rssplus.views.home'),
url(r'^admin/', include(admin.site.urls)),
url(r'^subscribe/',include('subscribe.urls')),
url(r'^settings/',include('userSettings.urls')),
url(r'^rss/',include('getFeeds.urls')),
)
| Davidhw/infocatch | rssplus/urls.py | urls.py | py | 634 | python | en | code | 0 | github-code | 13 |
22856042519 | from collections import deque
from itertools import combinations
def find_subarray(data, target):
curr_sum = data[0]
start, idx = 0, 1
size = len(data)
while idx <= size:
while curr_sum > target and start < idx - 1:
curr_sum = curr_sum - data[start]
start += 1
if curr_sum == target:
return data[start:idx]
if idx < size:
curr_sum = curr_sum + data[idx]
idx += 1
return None
def find_sum(list, target):
for comb in combinations(list, 2):
if sum(comb) == target:
return True
return False
def decode(data, preamble):
window = deque()
for i in range(len(data)):
if len(window) < preamble:
window.append(data[i])
else:
if not find_sum(window, data[i]):
return data[i]
window.popleft()
window.append(data[i])
with open("./test.txt") as f:
data = f.read().split("\n")
data = [int(x) for x in data]
target = decode(data, 25)
print(target)
subarray = find_subarray(data, target)
subarray.sort()
print(subarray[0] + subarray[len(subarray) - 1])
| brandon-charest/AdventOfCode2020 | Python/Day_09/encoding.py | encoding.py | py | 1,172 | python | en | code | 0 | github-code | 13 |
32276610633 | # exercise 160: Weird Words
import string
def contains_adjacent_e_i(word):
"""
:param word: string
:return: True if the word contains adjacent 'e' and 'i' (e.g. ceiling, scientist, etc.), False otherwise
"""
if 'e' in word and 'i' in word:
for i in range(len(word)):
if i != len(word) - 1:
if (word[i] == 'e' and word[i+1] == 'i') or (word[i] == 'i' and word[i+1] == 'e'):
return True
return False
def rhyme_rule(word):
"""
it applies the <<I before E except after C>> rule
:param word: a string
:return: True if the word respects tbe rule, False otherwise
"""
word = word.lower()
if 'e' in word and 'i' in word:
for i in range(len(word)):
if word[i] == 'c' and word[i+1] == 'i' and word[i+2] == 'e':
return False
elif i != 0 and i != len(word)-1:
if word[i] == 'e' and word[i+1] == 'i' and word[i-1] != 'c':
return False
return True
def main():
inf = open('../files/emma.txt', 'r')
respecting = []
not_respecting = []
for line in inf:
line = line.strip()
for word in line.split():
# removing punctuations to left and right of the word in order to count the same words only once
word = word.strip(string.punctuation)
# if the word contains adjacent 'e' and 'i', it gets analyzed
if contains_adjacent_e_i(word):
# if the word respects the rule, it gets added to the list of respecting words (if not present)
if rhyme_rule(word):
if word.lower() not in respecting:
respecting.append(word)
# otherwise, if not already added before, the not respecting word gets added to the other list
else:
if word not in not_respecting:
not_respecting.append(word)
print('words that respect the rule: {}'.format(len(respecting)))
print(respecting)
print('word that do not respect the rule: {}'.format(len(not_respecting)))
print(not_respecting)
tot_adjacent_ei_words = len(respecting) + len(not_respecting)
respecting_proportion = len(respecting) / tot_adjacent_ei_words
not_respecting_proportion = len(not_respecting) / tot_adjacent_ei_words
print()
print("in total, {} words that include adjacent 'e' and 'i' were found".format(tot_adjacent_ei_words))
print("Among them, %.2f %% respect the rule and %.2f %% do not respect the rule"\
% (respecting_proportion * 100, not_respecting_proportion * 100))
inf.close()
if __name__ == '__main__':
main()
| sara-kassani/1000_Python_example | books/Python Workbook/files_and_exceptions/ex160.py | ex160.py | py | 2,729 | python | en | code | 1 | github-code | 13 |
2680437079 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.animation import FuncAnimation
def rastrigin_function(x, y):
"""Funkcja celu - Rastrigin"""
return 20 + x**2 - 10 * np.cos(2 * np.pi * x) + y**2 - 10 * np.cos(2 * np.pi * y)
class AntColony:
def __init__(self, num_ants, bounds, alpha, beta, evaporation_rate):
self.num_ants = num_ants
self.bounds = bounds
self.alpha = alpha
self.beta = beta
self.evaporation_rate = evaporation_rate
self.best_solution = None
self.best_fitness = float('inf')
self.initialize_ants()
def initialize_ants(self):
self.ants = []
for _ in range(self.num_ants):
ant = {'x': np.random.uniform(*self.bounds),
'y': np.random.uniform(*self.bounds),
'fitness': None}
self.ants.append(ant)
def update_ant_fitness(self):
for ant in self.ants:
x, y = ant['x'], ant['y']
ant['fitness'] = rastrigin_function(x, y)
if ant['fitness'] < self.best_fitness:
self.best_solution = (x, y)
self.best_fitness = ant['fitness']
def update_pheromone(self):
for ant in self.ants:
x, y = ant['x'], ant['y']
pheromone = self.evaporation_rate * rastrigin_function(x, y)
ant['x'] += np.random.uniform(-self.alpha, self.alpha) * pheromone
ant['y'] += np.random.uniform(-self.alpha, self.alpha) * pheromone
def optimize(self, num_iterations):
self.initialize_ants()
self.update_ant_fitness()
history = [(ant['x'], ant['y'], ant['fitness']) for ant in self.ants]
for _ in range(num_iterations):
self.update_pheromone()
self.update_ant_fitness()
history.extend([(ant['x'], ant['y'], ant['fitness']) for ant in self.ants])
return history
# Parametry algorytmu mrówkowego
num_ants = 10
bounds = (-5, 5) # Zakres dla x i y
alpha = 0.1
beta = 0.1
evaporation_rate = 0.5
num_iterations = 50
# Inicjalizacja algorytmu
ant_colony = AntColony(num_ants, bounds, alpha, beta, evaporation_rate)
# Optymalizacja
history = ant_colony.optimize(num_iterations)
# Tworzenie wykresu 3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X = np.linspace(*bounds, 100)
Y = np.linspace(*bounds, 100)
X, Y = np.meshgrid(X, Y)
Z = rastrigin_function(X, Y)
ax.plot_surface(X, Y, Z, cmap='viridis', alpha=0.8)
# Animacja ścieżek mrówek
scat = ax.scatter([], [], [], c='red', marker='o')
def update(frame):
if frame < len(history):
x, y, _ = zip(*history[:frame])
else:
x, y, _ = [], [], []
scat._offsets3d = (x, y, rastrigin_function(x, y))
return scat,
ani = FuncAnimation(fig, update, frames=len(history), blit=True)
# Wyświetlenie animacji
plt.show()
| kingasmi/algorytm_mrowkowy | ant2-3d.py | ant2-3d.py | py | 2,922 | python | en | code | 0 | github-code | 13 |
2112188030 |
from south.db import db
from django.db import models
from unidades.models import *
class Migration:
def forwards(self, orm):
# Adding model 'SistemaMedida'
db.create_table('unidades_sistemamedida', (
('id', models.AutoField(primary_key=True)),
('abrev_en', models.CharField(_('Abreviacion (Ing)'), max_length=5)),
('nombre_en', models.CharField(_('Nombre (Ing)'), max_length=150)),
('nombre_es', models.CharField(_('Nombre (Esp)'), max_length=150)),
('ref', models.CharField(_('Referencia'), max_length=5)),
('abrev_es', models.CharField(_('Abreviacion (Esp)'), max_length=5)),
))
db.send_create_signal('unidades', ['SistemaMedida'])
# Adding model 'UnidadTipoSistema'
db.create_table('unidades_unidadtiposistema', (
('id', models.AutoField(primary_key=True)),
('unidad', models.ForeignKey(orm.UnidadMedida, verbose_name=_('Unidad'))),
('tipo', models.ForeignKey(orm.TipoUnidad, verbose_name=_('Tipo'))),
('sistema', models.ForeignKey(orm.SistemaMedida, verbose_name=_('Sistema'))),
))
db.send_create_signal('unidades', ['UnidadTipoSistema'])
# Adding model 'TipoUnidad'
db.create_table('unidades_tipounidad', (
('id', models.AutoField(primary_key=True)),
('abrev_en', models.CharField(_('Abreviacion (Ing)'), max_length=5)),
('nombre_en', models.CharField(_('Nombre (Ing)'), max_length=150)),
('nombre_es', models.CharField(_('Nombre (Esp)'), max_length=150)),
('ref', models.CharField(_('Referencia'), max_length=5)),
('abrev_es', models.CharField(_('Abreviacion (Esp)'), max_length=5)),
))
db.send_create_signal('unidades', ['TipoUnidad'])
# Adding model 'Conversion'
db.create_table('unidades_conversion', (
('id', models.AutoField(primary_key=True)),
('unidad_origen', models.ForeignKey(orm.UnidadMedida, related_name='u_origen', verbose_name=_('Unidad Origen'))),
('unidad_destino', models.ForeignKey(orm.UnidadMedida, related_name='u_destino', verbose_name=_('Unidad Destino'))),
('factor', models.DecimalField(default=0, max_digits=30, decimal_places=10)),
))
db.send_create_signal('unidades', ['Conversion'])
# Adding model 'UnidadMedida'
db.create_table('unidades_unidadmedida', (
('id', models.AutoField(primary_key=True)),
('abrev_en', models.CharField(_('Abreviacion (Ing)'), max_length=5)),
('tipo', models.ForeignKey(orm.TipoUnidad, related_name='unidades', verbose_name=_('Tipo Unidad'))),
('nombre_en', models.CharField(_('Nombre (Ing)'), max_length=150)),
('nombre_es', models.CharField(_('Nombre (Esp)'), max_length=150)),
('sistema', models.ForeignKey(orm.SistemaMedida, related_name='unidades', verbose_name=_('Sistema'))),
('ref', models.CharField(_('Referencia'), max_length=5)),
('abrev_es', models.CharField(_('Abreviacion (Esp)'), max_length=5)),
))
db.send_create_signal('unidades', ['UnidadMedida'])
# Creating unique_together for [sistema, tipo] on UnidadTipoSistema.
db.create_unique('unidades_unidadtiposistema', ['sistema_id', 'tipo_id'])
def backwards(self, orm):
# Deleting model 'SistemaMedida'
db.delete_table('unidades_sistemamedida')
# Deleting model 'UnidadTipoSistema'
db.delete_table('unidades_unidadtiposistema')
# Deleting model 'TipoUnidad'
db.delete_table('unidades_tipounidad')
# Deleting model 'Conversion'
db.delete_table('unidades_conversion')
# Deleting model 'UnidadMedida'
db.delete_table('unidades_unidadmedida')
# Deleting unique_together for [sistema, tipo] on UnidadTipoSistema.
db.delete_unique('unidades_unidadtiposistema', ['sistema_id', 'tipo_id'])
models = {
'unidades.sistemamedida': {
'Meta': {'ordering': "['ref']"},
'abrev_en': ('models.CharField', ["_('Abreviacion (Ing)')"], {'max_length': '5'}),
'abrev_es': ('models.CharField', ["_('Abreviacion (Esp)')"], {'max_length': '5'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'nombre_en': ('models.CharField', ["_('Nombre (Ing)')"], {'max_length': '150'}),
'nombre_es': ('models.CharField', ["_('Nombre (Esp)')"], {'max_length': '150'}),
'ref': ('models.CharField', ["_('Referencia')"], {'max_length': '5'}),
'unidades_tipo': ('models.ManyToManyField', ["'TipoUnidad'"], {'null': 'True', 'through': "'UnidadTipoSistema'", 'blank': 'True'})
},
'unidades.unidadtiposistema': {
'Meta': {'ordering': "['sistema','tipo','unidad']", 'unique_together': "(('sistema','tipo'),)"},
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'sistema': ('models.ForeignKey', ['SistemaMedida'], {'verbose_name': "_('Sistema')"}),
'tipo': ('models.ForeignKey', ['TipoUnidad'], {'verbose_name': "_('Tipo')"}),
'unidad': ('models.ForeignKey', ['UnidadMedida'], {'verbose_name': "_('Unidad')"})
},
'unidades.tipounidad': {
'Meta': {'ordering': "['ref']"},
'abrev_en': ('models.CharField', ["_('Abreviacion (Ing)')"], {'max_length': '5'}),
'abrev_es': ('models.CharField', ["_('Abreviacion (Esp)')"], {'max_length': '5'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'nombre_en': ('models.CharField', ["_('Nombre (Ing)')"], {'max_length': '150'}),
'nombre_es': ('models.CharField', ["_('Nombre (Esp)')"], {'max_length': '150'}),
'ref': ('models.CharField', ["_('Referencia')"], {'max_length': '5'})
},
'unidades.conversion': {
'Meta': {'ordering': "['unidad_origen','unidad_destino']"},
'factor': ('models.DecimalField', [], {'default': '0', 'max_digits': '30', 'decimal_places': '10'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'unidad_destino': ('models.ForeignKey', ['UnidadMedida'], {'related_name': "'u_destino'", 'verbose_name': "_('Unidad Destino')"}),
'unidad_origen': ('models.ForeignKey', ['UnidadMedida'], {'related_name': "'u_origen'", 'verbose_name': "_('Unidad Origen')"})
},
'unidades.unidadmedida': {
'Meta': {'ordering': "['sistema','tipo','ref']"},
'abrev_en': ('models.CharField', ["_('Abreviacion (Ing)')"], {'max_length': '5'}),
'abrev_es': ('models.CharField', ["_('Abreviacion (Esp)')"], {'max_length': '5'}),
'conversiones': ('models.ManyToManyField', ["'self'"], {'through': "'Conversion'", 'verbose_name': "_('Conversiones')", 'symmetrical': 'False'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'nombre_en': ('models.CharField', ["_('Nombre (Ing)')"], {'max_length': '150'}),
'nombre_es': ('models.CharField', ["_('Nombre (Esp)')"], {'max_length': '150'}),
'ref': ('models.CharField', ["_('Referencia')"], {'max_length': '5'}),
'sistema': ('models.ForeignKey', ['SistemaMedida'], {'related_name': "'unidades'", 'verbose_name': "_('Sistema')"}),
'tipo': ('models.ForeignKey', ['TipoUnidad'], {'related_name': "'unidades'", 'verbose_name': "_('Tipo Unidad')"})
}
}
complete_apps = ['unidades']
| esw/SapInt | unidades/migrations/0001_initial.py | 0001_initial.py | py | 7,757 | python | es | code | 2 | github-code | 13 |
5452035538 | import re
def railfence_encrypt(text):
result= ""
echr = [] #even position letter list
ochr = [] #odd position letter list
for i in range(len(text)):
if i % 2 == 0:
echr.append(text[i])
else:
ochr.append(text[i])
echr.extend(ochr) #extending odd list to even list.
for i in echr: #converting list to string.
result += i
return result
def encrypt(text, k):
result = ""
#extract one character from string at a time and encrypt it.
for i in range(len(text)):
char = text[i]
# spaces are ignored in encryption process
# checks if character is upper case or lower case.
if (char.isupper()):
# formula : ciphertext = (plaintext * key ) mod 26
result += chr(((ord(char) - 65) * k) % 26 + 65)
else:
result += chr(((ord(char) - 97) * k) % 26 + 97)
return result
# function that will calculate and return multiplicative inverse of key.
def inverse(k1):
r1, r2, t1, t2 = 26, k1, 0, 1
while r2 > 0:
q = r1 // r2
r = r1 - (q * r2)
r1 = r2
r2 = r
t = t1 - (q * t2)
t1 = t2
t2 = t
if r1 == 1:
r1 = t1
# negative numbers are not used in cryptography so we make addition of modulo value and negative number to make it positive.
if t1 < 0 :
r1 = (26 + t1)
return r1
def railfence_decrypt(ct):
ciphertext = ct
if len(ciphertext) % 2 != 0:
ciphertext += " "
plain_text= ""
mid = len(ciphertext) // 2
for i in range(0,mid):
plain_text = plain_text + ciphertext[i] + ciphertext[i + mid]
plain_text = plain_text.replace(" ","")
return plain_text
def muldecrypt(enc, kinv):
result = ""
for i in range(len(enc)):
char = enc[i]
if (char.isupper()):
# formula: plaintext = (ciphertext * key⁻¹) mod 26
result += chr(((ord(char) - 65) * kinv) % 26 + 65)
else:
result += chr(((ord(char) - 97) * kinv) % 26 + 97 )
return result
if __name__ == "__main__":
plain_text = input("Enter the plaintext: ")
plain_text = plain_text.replace(" ","")
plain_text = plain_text.replace('.',"")
plain_text = re.sub(r'[0-9]','', plain_text)
keyrange = [1,3,5,7,9,11,15,17,19,21,23,25]
print("Enter key from range: ", keyrange)
key = int(input())
ct = railfence_encrypt((encrypt(plain_text, key)))
print("Encryption (Cipher text) after applying Multiplicative Cipher and Railfence Cipher: " + ct)
print("Decryption (Plain text) after applying Railfence Cipher and Multiplicative Cipher: " + muldecrypt(railfence_decrypt(ct), inverse(key))) | shreedhar37/Cryptography | product_cipher.py | product_cipher.py | py | 2,883 | python | en | code | 0 | github-code | 13 |
30121883161 | # -*- coding:utf-8 -*-
import argparse
import glob
import os
import collections
import re
import cPickle as pickle
import logging
from collections import defaultdict
logging.basicConfig(level=logging.INFO)
class ConvertFunctionError(RuntimeError):
pass
def extractFunctions(functions, inputPath):
functionRegex = {}
totalFunctions = 0
totalArguments = 0
for function in functions:
typeRegex = "\w+"
funcNameRegex = function
functionRegex[function] = re.compile("""
^ # Start of line
(%s) # Type (group 1)
\ # Space
%s # Function name
\s* # Optional whitespace
\( # Open bracket
([^()]*) # All of the arguments
\) # Close bracket
\s* # Optional whitespace
$ # End of line
""" % (typeRegex, funcNameRegex)
, re.VERBOSE)
inFunctionDeclaration = None
argumentTypes = None
arguments = None
with open(inputPath, 'r') as inputFile:
for line in inputFile:
if inFunctionDeclaration:
if line.startswith("{"):
argumentsWithTypes = []
logging.debug(argumentTypes)
for arg in arguments:
if arg in argumentTypes:
argumentsWithTypes.append((arg, argumentTypes[arg]))
elif arg + "[]" in argumentTypes:
# If we can't find the argument directly, check whether it was stored as an array type
argumentsWithTypes.append((arg + "[]", argumentTypes[arg + "[]"]))
else:
raise RuntimeError("Cannot find type for argument %s" % (arg,))
functionDescription = {
'returnType' : returnType,
'functionName' : inFunctionDeclaration,
'arguments' : argumentsWithTypes,
'origin' : inputPath
}
yield functionDescription
inFunctionDeclaration = None
else:
lineBeforeSemicolon = line.split(";")[0]
lineWords = lineBeforeSemicolon.split()
typeName = None
if lineWords[0] == "register":
typeName = lineWords[1]
lineWords = lineWords[2:]
logging.debug("REGISTER :" + typeName + " " + str(lineWords))
else:
typeName = lineWords[0]
lineWords = lineWords[1:]
logging.debug("NORMAL :" + typeName + " " + str(lineWords))
for argumentName in "".join(lineWords).strip().split(","):
if argumentName.startswith("*"):
argumentTypes[argumentName[1:]] = typeName + " *"
else:
argumentTypes[argumentName] = typeName
else:
for function in functions:
match = functionRegex[function].match(line)
if match:
logging.debug("Found function %s:" % (function,))
logging.debug(line)
logging.debug(match.groups())
returnType = match.group(1)
arguments = [ x.strip(" ,") for x in match.group(2).split(",") ]
totalArguments += len(arguments)
totalFunctions += 1
logging.debug("return type: " + returnType)
logging.debug("arguments " + str(arguments))
inFunctionDeclaration = function
argumentTypes = {}
parser = argparse.ArgumentParser()
parser.add_argument("functionList")
parser.add_argument("outputPath")
parser.add_argument("--input", action='append')
parser.add_argument("--debug", action='store_true')
args = parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
with open(args.functionList, 'r') as functionFile:
functions = functionFile.read().split()
allFunctions = defaultdict(lambda: [])
seenNames = set()
for inputDir in args.input:
sectionName = os.path.basename(inputDir)
logging.info("Processing directory %s" %(sectionName,))
for cFile in glob.glob(os.path.join(inputDir,"*.c")):
logging.info("Looking for functions in %s" %(cFile))
for function in extractFunctions(functions, cFile):
logging.info("Extracted function %s" % (function['functionName'],))
allFunctions[sectionName].append(function)
seenNames.add(function['functionName'])
logging.info("Writing function list to: " + str(args.outputPath))
with open(args.outputPath, 'w') as outputFile:
pickle.dump(dict(allFunctions), outputFile)
missingNames = set(functions) - seenNames
for name in missingNames:
logging.warn("Could not find %s" % (name,))
| deepmind/torch-cephes | makewrap/extractFunctions.py | extractFunctions.py | py | 5,357 | python | en | code | 46 | github-code | 13 |
31527946161 | # 导入nltk数据包
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
# 导入包
from sklearn.naive_bayes import MultinomialNB
import csv
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
text = '''As per your request 'Melle Melle (Oru Minnaminunginte Nurungu Vettam)' has been set as your callertune for all Callers. Press *9 to copy your friends Callertune'''
# 进行邮件预处理
def preprocessing(text):
text = text.decode("utf-8")
# 分词
tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
stops = stopwords.words('english') # 停用词
tokens = [token for token in tokens if token not in stops] # 去掉停用词
tokens = [token.lower() for token in tokens if len(token) >= 3] # 去掉短于3的词
# 词性还原
lmtzr = WordNetLemmatizer()
tokens = [lmtzr.lemmatize(token) for token in tokens]
# 将剩下的词重新连接成字符串
preprocessed_text = ' '.join(tokens)
return preprocessed_text
# 读数据
file_path = r' '
ems = open(file_path, 'r', encoding='utf-8')
ems_data = []
ems_label = [] # 保存
csv_reader = csv.reader(ems, delimiter='\t')
# 将数据分别存入数据列表和目标分类列表
for line in csv_reader:
ems_label.append(line[0])
ems_data.append(preprocessing(line[1]))
ems.close()
# 将数据分为训练集和测试集,再将其向量化
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(ems_data,ems_target, test_size=0.3, random_state=0,
startify=ems_target)
print(len(x_train, len(x_test)))
# 将其向量化
from sklearn.feature_extraction.text import TfidfVectorizer # 建立数据的特征向量
vectorizer = TfidfVectorizer(min_df=2, ngram_range=(1, 2), stop_words='english', strip_accents='unicode', norm='l2')
X_train = vectorizer.fit_transform(x_train)
X_test = vectorizer.transform(x_test)
import numpy as np # 观察向量
a = X_train.toarray()
for i in range(1000): # 输出不为0的列
for j in range(5984):
if a[i, j] != 0:
print(i, j, a[i, j])
# 朴素贝叶斯分类器
clf = MultinomialNB().fit(X_train, y_train)
y_nb_pred = clf.predict(X_test)
# 分类结果显示
print(y_nb_pred.shape, y_nb_pred) # x-test预测结果
print('nb_confusion_matrix:')
cm = confusion_matrix(y_test, y_nb_pred) # 混淆矩阵
print(cm)
print('nb_classification_repert:')
cr = classification_report(y_test, y_nb_pred) # 主要分类指标的文本报告
print(cr)
feature_names = vectorizer.get_feature_names() # 出现过的单词列表
coefs = clf.coef_ # 先验概率 p(x_ily),6034 feature_log_preb
intercept = clf.intercept_ # P(y),class_log_prior : array,shape(n...
coefs_with_fns = sorted(zip(coefs[0], feature_names)) # 对数概率P(x_i|y)与单词x_i映射
n = 10
top = zip(coefs_with_fns[:n], coefs_with_fns[:-(n + 1):-1]) # 最大的10个与最小的10个单词
for (coef_1, fn_1), (coef_2, fn_2) in top:
print('\t%.4f\t%-15s\t\t%.4f\t%-15s' % (coef_1, fn_1, coef_2, fn_2))
# 预测一封新邮件的类别。
new_email = ['新邮件']
vectorizer(new_email)
clf.predict(new_email) | wwyys/echarts | echarts/python/朴素贝叶斯。词向量/垃圾信息分类(scdn).py | 垃圾信息分类(scdn).py | py | 3,363 | python | en | code | 0 | github-code | 13 |
1119602241 | #https://www.udemy.com/course/complete-python-developer-zero-to-mastery/learn/lecture/16016662#questions
# for number in range(0, 90, 2):
# print(number)
#
# for i,char in enumerate(list(range(100))):
# print(i, char)
# if char == 50:
# print(f'index of 50 is: {i}')
i = 0
while i < 50:
print(i)
i += 1
else:
print('done with all the work') | tripura-kant/Python-Scripting | ZTM/Day3.py | Day3.py | py | 375 | python | en | code | 0 | github-code | 13 |
# Standard library
import os

# Third-party
import cv2 as cv
import numpy as np
import torch
from flask import Flask, app, render_template, request
UPLOAD_FOLDER = './static/uploads'
app = Flask(__name__, template_folder='./templates', static_folder='./static')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['SESSION_TYPE'] = 'filesystem'
app.config['SECRET_KEY'] = 'mlappisrunninghere'
model = torch.hub.load('ultralytics/yolov5', 'custom', path='trained_weights/best.pt')
def draw_bounding_boxes(image, results):
for detection in results.pandas().xyxy[0].to_dict(orient='records'):
x1, y1, x2, y2, conf, cls = int(detection['xmin']), int(detection['ymin']), int(detection['xmax']), int(detection['ymax']), float(detection['confidence']), int(detection['class'])
color = (0, 255, 0) # Green color for bounding box
cv.rectangle(image, (x1, y1), (x2, y2), color, 2)
return image
def detect_drowsiness(image):
results = model(image)
# Extracting the detected labels
labels = results.names # List of class names
# print(labels)
boxes = results.pandas().xyxy[0] # DataFrame with bounding box coordinates
# print("Boxes :",boxes)
label = boxes['name'][0] # label
# print("label : ",label[0])
# Draw bounding boxes on the detected image
detected_img = np.squeeze(results.render())
detected_img_with_boxes = draw_bounding_boxes(detected_img, results)
# Save the detected image with bounding boxes in the 'Upload' folder
output_path = os.path.join(app.config['UPLOAD_FOLDER'], 'detected_image.jpg')
cv.imwrite(output_path, detected_img_with_boxes)
return label
@app.route("/", methods=['GET', 'POST'])
def index():
data = {"text": "------", "res": False}
if request.method == "POST":
try:
file = request.files['image'].read()
file_bytes = np.fromstring(file, np.uint8)
img = cv.imdecode(file_bytes, cv.IMREAD_UNCHANGED)
# Perform drowsiness detection on the uploaded image
drowsiness_results = detect_drowsiness(img)
print("Result :", drowsiness_results)
# Assuming your drowsiness detection results contain the 'text' and 'res' fields
data = {"res": drowsiness_results}
# Display the detection results on the web page
return render_template("index.html", data=data)
except:
print("Exception occured")
# render_template("error_page.html")
return render_template("index.html", data=data)
if __name__ == '__main__':
app.run(host='0.0.0.0',port=5001, debug=True)
| Rameez0216j/Drowsiness_Detection_application | app.py | app.py | py | 2,661 | python | en | code | 0 | github-code | 13 |
19098230694 | import os
import csv
csvpath = os.path.join('budget_data.csv')
with open(csvpath, newline='') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
header=next(csvfile,None)
monthly_change = []
total_months = 0
total_profit = 0
greatest_increase = 0
greatest_decrease = 0
previous = 0
for row in csvreader:
current = float(row[1])
if previous == 0:
pass
else:
monthly_change.append(current - previous)
total_months = total_months + 1
total_profit = float(row[1]) + total_profit
if (current - previous) > greatest_increase:
greatest_increase = current - previous
increase_date = row[0]
elif current - previous < greatest_decrease:
greatest_decrease = current - previous
decrease_date = row[0]
previous = float(row[1])
average = round(sum(monthly_change) / len(monthly_change),2)
print("Financial Analysis")
print("------------------------------")
print("Total Months: " + str(total_months))
print("Total: " + str(total_profit))
print("Average: $" + str(average))
print("Greatest Increase in Profits: " + increase_date + " $" + str(greatest_increase))
print("Greatest Decrease in Profits: " + decrease_date + " $(" + str(greatest_decrease) + ")")
f = open('PyBank_Analysis.txt','w')
f.write("Financial Analysis\n")
f.write("------------------------------\n")
f.write("Total Months: " + str(total_months)+ "\n")
f.write("Total: " + str(total_profit) + "\n")
f.write("Average: " + str(average) + "\n")
f.write("Greatest Increase in Profits: " + increase_date + " $" + str(greatest_increase) + "\n")
f.write("Greatest Decrease in Profits: " + decrease_date + " $(" + str(greatest_decrease) + ")\n")
f.close() | JKaplan814/UTexas_Python_Homework | PyBank/main.py | main.py | py | 1,803 | python | en | code | 0 | github-code | 13 |
35860185781 | import urllib.parse
import os
import json
print('Loading function')
def lambda_handler(event, context):
print("Received event: " + json.dumps(event, indent=2))
# Get the object from the event and show its content type
bucket = event['detail']['requestParameters']['bucketName']
key = urllib.parse.unquote_plus(event['detail']['requestParameters']['key'], encoding='utf-8')
filename, file_extension = os.path.splitext(key)
print(f'File extension is: {file_extension}')
payload = {
"file_extension": file_extension,
"bucket": bucket,
"key": key
}
return payload
| PacktPublishing/Data-Engineering-with-AWS | Chapter10/dataeng-check-file-ext.py | dataeng-check-file-ext.py | py | 625 | python | en | code | 239 | github-code | 13 |
13597950222 | import os, qrcode
from qrcode.constants import ERROR_CORRECT_L
class QRCodeGenerator:
def __init__(self, box_size=10, border=1, version=1, error_correction=ERROR_CORRECT_L, fill_color="#000000"):
self.box_size = box_size
self.border = border
self.version = version
self.error_correction = error_correction
self.fill_color = fill_color
def generate_qr_code(self, text):
qr = qrcode.QRCode(
version=self.version,
error_correction=self.error_correction,
box_size=self.box_size,
border=self.border
)
qr.add_data(text)
qr.make(fit=True)
img = qr.make_image(fill_color=self.fill_color, back_color="white")
return img
def save_qr_code(self, text, file_name):
img = self.generate_qr_code(text)
img.save(file_name)
print(f"QR code saved as {file_name}")
# Use it however you want, then delete the file with the following command for space saving:
#self._delete_file(file_name)
def _delete_file(self, file_name):
try:
os.remove(file_name)
print(f"Deleted {file_name} successfully!")
except OSError as e:
print(f"Error deleting {file_name}: {e}")
if __name__ == '__main__':
text = "https://kwayservices.top" # Change this to whatever you want to encode
file_name = "qr-code.png" # Change this to whatever you want to name your file
qr_generator = QRCodeGenerator()
qr_generator.save_qr_code(text, file_name) | kWAYTV/qr-code-generator | main.py | main.py | py | 1,575 | python | en | code | 1 | github-code | 13 |
21841958063 | import torch
import deepdish as dd
import numpy as np
from tqdm import tqdm
def checkpoint(
model,
optimizer,
scheduler,
epoch,
curr_step,
save_path,
verbose,
metric_dict={},
tpu=False,
lean=False,
):
save_lib = torch
print_fn = print
if tpu:
import torch_xla.core.xla_model as xm
save_lib = xm
print_fn = xm.master_print
if verbose:
print_fn(f"Saving model checkpoint for step {curr_step}")
save_dict = {
"epoch": epoch,
"step": curr_step
}
save_dict.update(metric_dict)
if not lean:
save_dict.update({
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
})
filename = f"{save_path}/ckpt/step{curr_step}.tar"
save_lib.save(
save_dict, filename,
)
if tpu:
if xm.get_ordinal() == 0 and filename[0:5] == "gs://":
from utils.gcloud import post_file_to_bucket
post_file_to_bucket(filename, verbose)
# TODO: we maybe don't want to have the scheduler inside the train function
def train(
model,
loss,
optimizer,
scheduler,
dataloader,
device,
epoch,
verbose,
save,
save_freq,
save_begin_epoch,
save_path,
log_interval=10,
lean_ckpt=False,
test_loader=None,
**kwargs,
):
batch_size = kwargs.get("batch_size") # per core batch size
num_batches = kwargs.get("num_batches") # len(dataloader)
dataset_size = kwargs.get("dataset_size") # len(dataloader.dataset)
print_fn = print
if device.type == "xla":
import torch_xla.core.xla_model as xm
xrt_world_size = kwargs.get("xrt_world_size")
xm_ordinal = kwargs.get("xm_ordinal")
tracker = xm.RateTracker()
if verbose <= 1:
print_fn = xm.master_print
model.train()
total_loss = 0
total_samples = 0
correct1 = 0
correct5 = 0
for batch_idx, (data, target) in enumerate(dataloader):
curr_step = epoch * num_batches + batch_idx
###### Batch loading
if device.type != "xla":
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
train_loss = loss(output, target)
total_loss += train_loss.item() * data.size(0)
total_samples += data.size(0)
train_loss.backward()
if device.type == "xla":
xm.optimizer_step(optimizer)
tracker.add(batch_size)
else:
optimizer.step()
curr_step += 1
# Train accuracy
_, pred = output.topk(5, dim=1)
correct = pred.eq(target.view(-1, 1).expand_as(pred))
correct1 += correct[:, :1].sum().item()
correct5 += correct[:, :5].sum().item()
###### Logging
if verbose and (batch_idx % log_interval == 0):
examples_seen = batch_idx * batch_size
per_worker_header = ""
if device.type == "xla" and verbose >= 2:
per_worker_header = (
f"[xla:{xm_ordinal}, "
f"rate: {tracker.rate():.2f}, "
f"global_rate: {tracker.global_rate():.2f}]\t"
)
examples_seen *= xrt_world_size
examples_seen += xm_ordinal * batch_size
print_fn(
f"{per_worker_header}"
f"Train Epoch: {epoch} "
f"[{examples_seen}/{dataset_size} "
f"({100.0*batch_idx/num_batches:.0f}%)]"
f"\tLoss: {train_loss.item():.6f}"
f"\tStep: {curr_step}"
)
# TODO: this is just to be able to save at any step (even mid-epoch)
# it might make more sense to checkpoint only on epoch: makes
# for a cleaner codebase and can include test metrics
# TODO: additionally, could integrate tfutils.DBInterface here
# TODO: customize the metric dict based on flags
######## Checkpointing
if save and save_path is not None and save_freq is not None:
# Do this for consecutive steps
if curr_step % save_freq <= 0 and (epoch + batch_idx/num_batches) >= save_begin_epoch:
pos, vel = optimizer.track()
metric_dict = {
"pos_norm": torch.norm(pos),
"vel_norm": torch.norm(vel),
"pos_dot": torch.dot(pos, kwargs["theta_0"]),
"dist_from_start": torch.norm(pos - kwargs["theta_0"]),
}
if "eigenvectors" in kwargs.keys():
metric_dict["projected_pos"] = torch.matmul(kwargs["eigenvectors"], pos),
metric_dict["projected_vel"] = torch.matmul(kwargs["eigenvectors"], vel),
if kwargs["eval_mid_epoch"]:
test_loss, test_accuracy1, test_accuracy5 = eval(
model, loss, test_loader, device, verbose, epoch
)
model.train()
eval_metrics = {
"train_loss": train_loss.item(),
"train_batch_accuracy1": correct[:, :1].sum().item(),
"train_batch_accuracy5": correct[:, :5].sum().item(),
"test_loss": test_loss,
"test_accuracy1": test_accuracy1,
"test_accuracy5": test_accuracy5,
}
metric_dict.update(eval_metrics)
checkpoint(
model,
optimizer,
scheduler,
epoch,
curr_step,
save_path,
verbose,
metric_dict=metric_dict,
tpu=(device.type == "xla"),
lean=lean_ckpt,
)
if device.type == "xla":
total_loss = xm.mesh_reduce("total_train_loss", total_loss, np.sum)
total_samples = xm.mesh_reduce("total_train_samples", total_samples, np.sum)
correct1 = xm.mesh_reduce("total_train_correct1", correct1, np.sum)
correct5 = xm.mesh_reduce("total_train_correct5", correct5, np.sum)
average_loss = 1.0 * total_loss / total_samples
accuracy1 = 100.0 * correct1 / total_samples
accuracy5 = 100.0 * correct5 / total_samples
return average_loss, accuracy1, accuracy5
def eval(model, loss, dataloader, device, verbose, epoch, **kwargs):
print_fn = print
if device.type == "xla":
import torch_xla.core.xla_model as xm
print_fn = xm.master_print
model.eval()
total_loss = 0
correct1 = 0
correct5 = 0
total_samples = 0
with torch.no_grad():
for data, target in dataloader:
data, target = data.to(device), target.to(device)
output = model(data)
total_loss += loss(output, target).item() * data.size(0)
_, pred = output.topk(5, dim=1)
correct = pred.eq(target.view(-1, 1).expand_as(pred))
correct1 += correct[:, :1].sum().item()
correct5 += correct[:, :5].sum().item()
total_samples += data.size()[0]
if device.type == "xla":
total_loss = xm.mesh_reduce("total_test_loss", total_loss, np.sum)
total_samples = xm.mesh_reduce("total_test_samples", total_samples, np.sum)
correct1 = xm.mesh_reduce("total_test_correct1", correct1, np.sum)
correct5 = xm.mesh_reduce("total_test_correct5", correct5, np.sum)
average_loss = 1.0 * total_loss / total_samples
accuracy1 = 100.0 * correct1 / total_samples
accuracy5 = 100.0 * correct5 / total_samples
print_fn(
f"Epoch {epoch} evaluation: Average Test Loss: {average_loss:.4f}, "
f"Top 1 Test Accuracy: {correct1}/{total_samples} ({accuracy1:.2f}%)"
)
return average_loss, accuracy1, accuracy5
def train_eval_loop(
model,
loss,
optimizer,
scheduler,
train_loader,
test_loader,
device,
epochs,
verbose,
save,
save_freq=None,
save_begin_epoch=0,
save_path=None,
epoch_offset=0,
lean_ckpt=False,
**kwargs,
):
print_fn = print
if device.type == "xla":
import torch_xla.distributed.parallel_loader as pl
import torch_xla.core.xla_model as xm
print_fn = xm.master_print
train_loader = pl.MpDeviceLoader(train_loader, device)
test_loader = pl.MpDeviceLoader(test_loader, device)
# Get the weights at initialization
trainabe_weights = []
for name,param in model.named_parameters():
if param.requires_grad:
trainabe_weights.append(param.detach().clone())
theta_0 = torch.cat([p.reshape(-1) for p in trainabe_weights])
kwargs["theta_0"] = theta_0
# Also get the eigenvectors if a path is specified
if kwargs["spectral_path"]:
print_fn("Including evecs in kwargs")
evecs = dd.io.load(kwargs["spectral_path"], "/eigenvector")
kwargs["eigenvectors"] = torch.tensor(evecs.T, device=device)
# Initial eval
test_loss, test_accuracy1, test_accuracy5 = eval(model, loss, test_loader, device, verbose, 0)
metric_dict = {
"train_loss": 0,
"test_loss": test_loss,
"test_accuracy1": test_accuracy1,
"test_accuracy5": test_accuracy5,
}
if save:
checkpoint(
model,
optimizer,
scheduler,
0,
0,
save_path,
verbose,
metric_dict,
tpu=(device.type == "xla"),
)
for epoch in tqdm(range(epoch_offset, epoch_offset + epochs)):
train_loss, train_accuracy1, train_accuracy5 = train(
model,
loss,
optimizer,
scheduler,
train_loader,
device,
epoch,
verbose,
save,
save_freq=save_freq,
save_begin_epoch=save_begin_epoch,
save_path=save_path,
lean_ckpt=lean_ckpt,
test_loader=test_loader,
**kwargs,
)
print_fn(
f"Epoch {epoch}: Average Train Loss: {train_loss:.4f}, "
f"Top 1 Train Accuracy: {train_accuracy1:.2f}%"
)
test_loss, test_accuracy1, test_accuracy5 = eval(
model, loss, test_loader, device, verbose, epoch + 1
)
metric_dict = {
"train_loss": train_loss,
"train_accuracy1": train_accuracy1,
"train_accuracy5": train_accuracy5,
"test_loss": test_loss,
"test_accuracy1": test_accuracy1,
"test_accuracy5": test_accuracy5,
}
curr_step = (epoch + 1) * kwargs.get("num_batches")
if save:
checkpoint(
model,
optimizer,
scheduler,
epoch,
curr_step,
save_path,
verbose,
metric_dict,
tpu=(device.type == "xla"),
)
scheduler.step()
if epochs > 0:
print_fn(
f"Final performance: "
f"\tTrain Loss: {train_loss:.4f}"
f"\tTest Loss: {test_loss:.4f}"
f"\tTest Accuracy: {test_accuracy1:.2f}%"
)
| danielkunin/rethinking-SGD | utils/optimize.py | optimize.py | py | 11,518 | python | en | code | 6 | github-code | 13 |
71817367378 | #!/usr/bin/python
import logging, socket, sys
__NAME__ = "wbRedirector.py"
UDP_IP = "127.0.0.1"
UDP_PORT_SEND = 5005
UDP_PORT_RECV = 5006
sock_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock_recv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock_recv.bind((UDP_IP, UDP_PORT_RECV))
logging.basicConfig(filename='example.log',level=logging.INFO)
logging.debug('%s started...' % (__NAME__))
while True:
line = sys.stdin.readline().strip()
logging.debug('Received %s from Squid.' % (line))
list = line.split(' ')
redirect = '%s\n' % (list[0])
logging.info('Sending %s to GUI.' % list[0])
sock_send.sendto(redirect, (UDP_IP, UDP_PORT_SEND))
logging.info('Waiting for response from GUI...')
data, addr = sock_recv.recvfrom(1024)
logging.info('Received %s from %s.' % (data, addr))
if (data == "OK"):
logging.info('Received OK. Returning %s to squid.' % list[0])
sys.stdout.write(redirect)
sys.stdout.flush()
elif (data == "NO"):
logging.info('Received NO. Returning 404 to squid.')
sys.stdout.write("404\n")
sys.stdout.flush()
| dudemcbacon/wbRedirector | wbRedirector.py | wbRedirector.py | py | 1,139 | python | en | code | 0 | github-code | 13 |
2223158791 | import tokenizer
class CompileException(Exception):
def __init__(self, token, message, got=None):
if got:
message += ', got "{}" instead'.format(got.value)
msg = "Error line {} column {}: {}".format(token.line, token.column, message)
super().__init__(msg)
lastToken = None
def peek(tokens, throws=True):
if len(tokens):
return tokens[0]
elif throws:
raise CompileException(lastToken, 'Unexpected EOF')
else:
return None
def pop(tokens, throws=True):
global lastToken
if len(tokens):
lastToken = tokens[0]
return tokens.pop(0)
elif throws:
raise CompileException(lastToken, 'Unexpected EOF')
else:
return None
nextLabel = 0
def genLabel():
global nextLabel
label = ':L%d' % nextLabel
nextLabel += 1
return label
class exprlist:
def __init__(self, tokens, terminators=[]):
self.expressions = []
self.toplevel = not terminators
while len(tokens) > 0:
if peek(tokens).label in terminators: break
self.expressions.append(expr(tokens))
def gen(self, pgm):
labels = []
for expr in self.expressions:
label = genLabel()
pgm.append(label)
labels.append(label)
expr.gen(pgm)
if self.toplevel:
pgm.append('POP')
endLabel = genLabel()
pgm.append(endLabel)
labels.append(endLabel)
return labels
diceEncountered = False
class expr:
def __init__(self, tokens):
global diceEncountered
diceEncountered = False
tok = peek(tokens)
if tok.label in ('file_write', 'print'):
self.expr = printexpr(tokens)
elif tok.label == 'file_close':
self.expr = closeexpr(tokens)
elif tok.label == 'o_bracket':
self.expr = ifexpr(tokens)
else:
self.expr = storeexpr(tokens)
#if not diceEncountered:
# raise CompileException(tok, 'Expression does not contain a dice roll.')
def gen(self, pgm):
self.expr.gen(pgm)
class ifexpr:
def __init__(self, tokens):
tok = pop(tokens)
if tok.label != 'o_bracket':
raise CompileException(tok, 'Conditional block must start with \'{\'')
self.conditions = []
self.elseclause = None
while True:
# parse expression (either conditional field or else clause)
ifcond = expr(tokens)
thendo = None
tok = pop(tokens)
if tok.label == 'comma':
# normal conditional clause - parse the "do" portion of the condition
thendo = expr(tokens)
self.conditions.append((ifcond, thendo))
elif tok.label == 'c_bracket':
# ending else clause
self.elseclause = ifcond
break
else:
# error
raise CompileException(tok, 'Expected a comma or closing bracket', tok)
# handle separator
tok = pop(tokens)
if tok.label == 'v_bar':
# another condition/else clause
continue
elif tok.label == 'c_bracket':
# no else clause
break
else:
# error
raise CompileException(tok, 'Expected a vertical bar or closing bracket', tok)
def gen(self, pgm):
endLabel = genLabel()
for condition, expression in self.conditions:
condition.gen(pgm)
jumpLabel = genLabel()
pgm.append('JMPZ ' + jumpLabel)
pgm.append('POP')
expression.gen(pgm)
pgm.append('JMP ' + endLabel)
pgm.append(jumpLabel)
pgm.append('POP')
if self.elseclause:
self.elseclause.gen(pgm)
else:
# Add a dummy else clause loading 0 onto the stack
# necessary so the ifexpr will always yield a value
pgm.append('LOAD 0')
pgm.append(endLabel)
class storeexpr:
def __init__(self, tokens):
tok = peek(tokens)
if tok.label in ['file_open_r', 'file_open_w', 'file_read', 'prompt']:
self.type = pop(tokens).label
else:
self.type = 'basic'
if self.type == 'prompt':
self.value = None
elif self.type == 'file_read':
self.value = readref(tokens)
else:
self.value = mathexpr(tokens)
tok = peek(tokens, False)
if tok and tok.label == 'store':
self.store = storeref(tokens)
else:
self.store = None
def gen(self, pgm):
if self.type == 'prompt':
pgm.append('READ')
elif self.type == 'file_read':
self.value.gen(pgm)
pgm.append('FREAD')
elif self.type == 'file_open_r':
self.value.gen(pgm)
pgm.append('OPENR')
elif self.type == 'file_open_w':
self.value.gen(pgm)
pgm.append('OPENW')
else:
self.value.gen(pgm)
if self.store:
self.store.gen(pgm)
class closeexpr:
def __init__(self, tokens):
tok = pop(tokens)
if tok.label != 'file_close':
raise CompileException(tok, 'Close expression expected', tok)
self.file = readref(tokens)
def gen(self, pgm):
self.file.gen(pgm)
pgm.append('CLOSE')
class printexpr:
def __init__(self, tokens):
tok = pop(tokens)
if tok.label not in ['print', 'file_write']:
raise CompileException(tok, 'Expected print or file write', tok)
if tok.label == 'file_write':
self.file = readref(tokens)
else:
self.file = None
self.value = expr(tokens)
def gen(self, pgm):
if self.file:
self.value.gen(pgm)
self.file.gen(pgm)
pgm.append('FPRINT')
else:
self.value.gen(pgm)
pgm.append('PRINT')
class mathexpr:
def __init__(self, tokens):
self.left = addsub(tokens)
tok = peek(tokens, False)
if tok and tok.label in ('logical_and', 'logical_or'):
self.op = pop(tokens).label
self.right = mathexpr(tokens)
else:
self.op = None
self.right = None
def gen(self, pgm):
if not self.op:
self.left.gen(pgm)
else:
mapping = {'local_and': 'AND', 'logical_or': 'OR'}
self.right.gen(pgm)
self.left.gen(pgm)
pgm.append(mapping[self.op])
class addsub:
def __init__(self, tokens):
self.left = muldiv(tokens)
tok = peek(tokens, False)
if tok and tok.label in ('add', 'subtract'):
self.op = pop(tokens).label
self.right = addsub(tokens)
else:
self.op = None
self.right = None
def gen(self, pgm):
if not self.op:
self.left.gen(pgm)
else:
mapping = {'add': 'ADD', 'subtract': 'SUB'}
self.right.gen(pgm)
self.left.gen(pgm)
pgm.append(mapping[self.op])
class muldiv:
def __init__(self, tokens):
self.left = prefix(tokens)
tok = peek(tokens, False)
if tok and tok.label in ('multiply', 'divide', 'modulo'):
self.op = pop(tokens).label
self.right = muldiv(tokens)
else:
self.op = None
self.right = None
def gen(self, pgm):
if not self.op:
self.left.gen(pgm)
else:
mapping = {'multiply': 'MUL', 'divide': 'DIV', 'modulo': 'MOD'}
self.right.gen(pgm)
self.left.gen(pgm)
pgm.append(mapping[self.op])
class prefix:
def __init__(self, tokens):
if peek(tokens).label in ('invert', 'add'):
self.op = pop(tokens).label
else:
self.op = None
self.value = value(tokens)
def gen(self, pgm):
self.value.gen(pgm)
if self.op == 'invert':
pgm.append('INV')
elif self.op == 'add':
pgm.append('SUM')
class value:
def __init__(self, tokens):
if peek(tokens).label == 'load':
self.type = 'load'
self.value = readref(tokens)
elif peek(tokens).label == 'o_square':
self.type = 'list'
self.value = listgen(tokens)
else:
self.value = paren(tokens)
tok = peek(tokens, False)
self.type = 'basic'
if tok and tok.label == 'roll':
self.type = 'roll'
self.value = diceroll(tokens, count=self.value)
# perhaps we make sure we only do modifiers on dice rolls and list generators
# then pass a list to the gen() function of list generators to add labels to
# each label corresponds to each value of the list generator. We then jump
# to the corresponding label after testing it against the repeater
# provided its a r<critcheck> or ro<critcheck>
self.modifiers = modifiers(tokens)
def gen(self, pgm):
repeatLabel = genLabel()
self.value.gen(pgm)
self.modifiers.gen(pgm, repeatLabel)
class modifiers:
def __init__(self, tokens):
keys = ['repeat_once', 'repeat', 'keep_high', 'keep_low',
'discard_high', 'discard_low', 'sort', 'sort_descend',
'greater_than', 'less_than', 'equals', 'keep_front',
'keep_rear', 'discard_front', 'discard_rear']
self.sort = False
self.sortd = False
self.ro = None
self.r = None
self.critcheck = None
self.keepdiscard = []
token = peek(tokens, False)
while token and token.label in keys:
mod = pop(tokens).label
if mod == 'sort':
if self.sort or self.sortd:
raise CompileException(token, 'Only 1 sort allowed per expression')
self.sort = True
elif mod == 'sort_descend':
if self.sort or self.sortd:
raise CompileException(token, 'Only 1 sort allowed per expression')
self.sortd = True
elif mod == 'repeat':
if self.r or self.ro:
raise CompileException(token, 'Only 1 repeat allowed per expression')
if peek(tokens).value in '><=':
self.r = critcheck(tokens)
else:
self.r = paren(tokens)
elif mod == 'repeat_once':
if self.r or self.ro:
raise CompileException(token, 'Only 1 repeat allowed per expression')
self.ro = critcheck(tokens)
elif mod in ['less_than', 'greater_than', 'equals']:
# critcheck
if self.critcheck:
raise CompileException(token, 'Only 1 crit check allowed per expression')
self.critcheck = critcheck(tokens, token=token)
else:
self.keepdiscard.append(keepdiscard(tokens))
token = peek(tokens, False)
def gen(self, pgm, repeatLabel):
'''
Order of operations:
sort
keep/discard
repeat
critcheck
'''
if self.sort:
pgm.append('SORTA')
elif self.sortd:
pgm.append('SORTD')
for kd in self.keepdiscard:
kd.gen(pgm)
if self.r:
pass
if self.ro:
pass
if self.critcheck:
self.critcheck.gen(pgm)
class keepdiscard:
def __init__(self, tokens):
tok = pop(tokens)
if tok.value not in ('kh', 'kl', 'kf', 'kr', 'dh', 'dl', 'df', 'dr'):
raise CompileException(tok, 'Expected a keep/discard modifier', tok)
self.op = tok.value.upper()
self.quantity = paren(tokens)
def gen(self, pgm):
self.quantity.gen(pgm)
pgm.append(self.op)
class critcheck:
def __init__(self, tokens, token=None):
if not token: token = pop(tokens)
self.op = token.label
if self.op not in ['greater_than', 'less_than', 'equals']:
raise CompileException(token, 'Expected crit check', token)
self.value = paren(tokens)
def gen(self, pgm):
self.value.gen(pgm)
if self.op == 'greater_than':
pgm.append('CCGT')
elif self.op == 'less_than':
pgm.append('CCLT')
else:
pgm.append('CCEQ')
class listgen:
def __init__(self, tokens):
if peek(tokens).label != 'o_square':
raise CompileException(peek(tokens), 'Expected list generator', peek(tokens))
pop(tokens)
self.value = exprlist(tokens, terminators=['c_square'])
if pop(tokens).label != 'c_square':
raise CompileException(peek(tokens), 'Expected closing square bracket', peek(tokens))
def gen(self, pgm):
self.value.gen(pgm)
pgm.append('MLIST {}'.format(len(self.value.expressions)))
class storeref:
def __init__(self, tokens):
tok = pop(tokens)
if tok.label != 'store':
raise CompileException(tok, 'Expected write storage reference', tok)
self.ref = diceroll(tokens)
def gen(self, pgm):
self.ref.genSides(pgm)
self.ref.genCount(pgm)
pgm.append('POPV')
class readref:
def __init__(self, tokens):
tok = pop(tokens)
if tok.label != 'load':
raise CompileException(tok, 'Expected read storage reference', tok)
self.ref = diceroll(tokens)
def gen(self, pgm):
self.ref.genSides(pgm)
self.ref.genCount(pgm)
pgm.append('PUSHV')
class diceroll:
def __init__(self, tokens, count=None):
if count:
self.count = count
else:
self.count = paren(tokens)
tok = pop(tokens)
if tok.label != 'roll':
raise CompileException(tok, 'Expected dice roll', tok)
self.sides = paren(tokens)
def genSides(self, pgm):
self.sides.gen(pgm)
pgm.append('SUM')
def genCount(self, pgm):
self.count.gen(pgm)
pgm.append('SUM')
def gen(self, pgm):
self.genSides(pgm)
self.genCount(pgm)
pgm.append('ROLL')
class paren:
def __init__(self, tokens):
tok = pop(tokens)
if tok.label == 'number':
self.type = 'numeric'
self.inner = tok.value
return
if tok.label != 'o_paren':
raise CompileException(tok, "Expected a number or open parenthesis", tok)
self.type = 'expression'
self.inner = expr(tokens)
tok = pop(tokens)
if tok.label != 'c_paren':
raise CompileException(tok, "Expected a closing parenthesis", tok)
def gen(self, pgm):
if self.type == 'numeric':
pgm.append('PUSH {}'.format(self.inner))
else:
self.inner.gen(pgm)
| snyderdan/D20-Lang | src/compiler/parser.py | parser.py | py | 15,284 | python | en | code | 0 | github-code | 13 |
11369384665 | import re
from pydub import AudioSegment
import aiosqlite
import io
from config import *
from aiogram import Bot, types
from aiogram.types import ReplyKeyboardMarkup, KeyboardButton, Voice
kb_read_zn = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
button_1 = KeyboardButton(text='За сегодня')
button_2 = KeyboardButton(text='За вчера')
button_3 = KeyboardButton(text='За выбранную дату')
button_cansel = KeyboardButton(text='отмена')
kb_read_zn.add(button_1,button_2,button_3)
kb_read_zn.row(button_cansel)
kb_cansel = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
kb_cansel.add(button_cansel)
kb_main = ReplyKeyboardMarkup(resize_keyboard=True)
button_see = KeyboardButton(text='Просмотр з/н')
button_to = KeyboardButton(text='Просмотр ТО')
kb_main.add(button_to, button_see)
kb_to = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
bt_kamaz = KeyboardButton(text='KAMAZ')
bt_china = KeyboardButton(text='китайцы')
kb_to.add(bt_kamaz, bt_china)
kb_china = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
howo = KeyboardButton(text='HOWO')
sitrak = KeyboardButton(text='SITRAK')
faw_DM = KeyboardButton(text='FAW big DM')
faw_DL = KeyboardButton(text='FAW little DL')
kb_china.row(sitrak,howo)
kb_china.add(faw_DM,faw_DL)
kb_kamaz = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
orig_740 = KeyboardButton(text='740 ДВС')
old_740 = KeyboardButton(text='740 старые')
cummins = KeyboardButton(text='cummins')
dvs_5490 = KeyboardButton(text='5490')
dvs_54901 = KeyboardButton(text='54901')
hubs = KeyboardButton(text='Ступицы')
kompas = KeyboardButton(text='КОМПАС')
gbc = KeyboardButton(text='ГБЦ')
kb_kamaz.add(orig_740,old_740,cummins)
kb_kamaz.row(dvs_54901, dvs_5490, kompas)
kb_kamaz.row(hubs, gbc)
kb_hubs = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
hubs_43118 = KeyboardButton(text='43118')
hubs_6520 = KeyboardButton(text='6520')
kb_hubs.add(hubs_43118,hubs_6520)
def gbc_kb():
gbc_kamaz = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
bt_740 = KeyboardButton(text='ГБЦ 740')
bt_901 = KeyboardButton(text='ГБЦ 901')
gbc_kamaz.add(bt_740, bt_901)
return gbc_kamaz
async def set_default_commands(dip):
await dip.bot.set_my_commands([
types.BotCommand("start", "старт бот"),
])
def user_input_format(user_input):
a = [',', '.', '-']
b = [' ']
if user_input[2] in a:
cleaned_text = re.sub(r'(\d+)[^\w\s]*(\d+)', r'\1\2', user_input)
return cleaned_text
elif user_input[2] in b:
cleaned_text = re.sub(r'(\d+)\s+(\d+)', r'\1\2', user_input)
return cleaned_text
else:
return user_input
async def audio_to_text(file_path: str) -> str:
"""Принимает путь к аудио файлу, возвращает текст файла."""
with open(file_path, "rb") as audio_file:
transcript = await openai.Audio.atranscribe(
"whisper-1", audio_file
)
return transcript["text"]
async def save_voice_as_mp3(bot: Bot, voice: Voice) -> str:
"""Скачивает голосовое сообщение и сохраняет в формате mp3."""
voice_file_info = await bot.get_file(voice.file_id)
voice_ogg = io.BytesIO()
await bot.download_file(voice_file_info.file_path, voice_ogg)
voice_mp3_path = f"voice_files/voice-{voice.file_unique_id}.mp3"
AudioSegment.from_file(voice_ogg, format="ogg").export(
voice_mp3_path, format="mp3"
)
return voice_mp3_path
async def create_table_users():
conn = await aiosqlite.connect('kamaz.db')
cursor = await conn.cursor()
await cursor.executescript('''
CREATE TABLE IF NOT EXISTS users(
user_id INTEGER PRIMARY KEY,
user_name TEXT
)
''')
await conn.commit()
await conn.close()
async def write_to_db_user(user_id, user_name):
conn = await aiosqlite.connect('kamaz.db')
cursor = await conn.cursor()
select_user_id = await cursor.execute(
"SELECT user_id FROM users WHERE user_id = ?", (user_id,)
)
user_id1 = await select_user_id.fetchone()
if not user_id1:
await cursor.execute(
'INSERT INTO users (user_id, user_name) VALUES (?, ?)',
(user_id, user_name),)
await conn.commit()
await conn.close()
async def read_to_db_user_id():
conn = await aiosqlite.connect('kamaz.db')
cursor = await conn.cursor()
select_user_id = await cursor.execute(
"SELECT * FROM users "
)
select_order = await select_user_id.fetchall()
users_dict = []
for i in select_order:
users_dict.append(i[0])
await conn.commit()
await conn.close()
return users_dict
async def send_to_all_users(text):
    """Broadcast *text* to every user stored in the ``users`` table."""
    for chat_id in await read_to_db_user_id():
        await bot.send_message(chat_id=chat_id, text=text)
async def create_table():
    """Create the ``kamaz`` orders table when it does not exist yet."""
    db = await aiosqlite.connect('kamaz.db')
    cur = await db.cursor()
    await cur.executescript('''
    CREATE TABLE IF NOT EXISTS kamaz(
        id INTEGER PRIMARY KEY,
        order_outfit INTEGER,
        product TEXT,
        send_date DATE
    )
    ''')
    await db.commit()
    await db.close()
async def write_to_db(order_outfit, product2, join_date):
    """Insert a new (order, product, date) row, or append *product2* to the
    product string of an existing order (new text goes in front of the old)."""
    conn = await aiosqlite.connect('kamaz.db')
    cursor = await conn.cursor()
    order_outfit1 = await cursor.execute(
        "SELECT order_outfit FROM kamaz WHERE order_outfit = ?", (str(order_outfit),)
    )
    order_outfit1 = await order_outfit1.fetchone()
    if order_outfit1:
        select_product = await cursor.execute(
            "SELECT product FROM kamaz WHERE order_outfit = ?", (str(order_outfit),)
        )
        select_product = await select_product.fetchone()
        # NOTE(review): '? || ?' concatenates product2 + old_product with no
        # separator between them -- confirm this ordering is intended.
        await cursor.execute(
            'UPDATE kamaz SET product = ? || ? WHERE order_outfit = ?',
            (product2,
             ' '.join(select_product),  # select_product is a 1-tuple of str
             order_outfit
             ),
        )
    else:
        await cursor.execute(
            'INSERT INTO kamaz (order_outfit, product, send_date) VALUES (?, ?, ?)',
            (int(order_outfit), product2, join_date),)
    await conn.commit()
    await conn.close()
async def read_to_db(order_outfit):
    """Return the stored product string for *order_outfit*, or None if absent.

    The original returned from inside a loop over the result tuple, so the
    connection was never closed (and the dead commit never ran) whenever a
    row was found -- the handle leaked on every successful lookup.
    """
    conn = await aiosqlite.connect('kamaz.db')
    cursor = await conn.cursor()
    select_order = await cursor.execute(
        "SELECT product FROM kamaz WHERE order_outfit = ?", (str(order_outfit),)
    )
    row = await select_order.fetchone()
    await conn.close()
    if row:
        return row[0]
    return None
async def read_to_db_today(date):
    """Return every kamaz row whose send_date equals *date*."""
    db = await aiosqlite.connect('kamaz.db')
    cur = await db.cursor()
    result = await cur.execute(
        "SELECT * FROM kamaz WHERE send_date = ?", (date,)
    )
    rows = await result.fetchall()
    await db.commit()
    await db.close()
    return rows
| perep111/KAMAZ-BOT | func.py | func.py | py | 7,210 | python | en | code | 1 | github-code | 13 |
70047463058 | """GUI snake game. It uses external Snake library and pyGame
"""
import glob
import os
import sys

import pygame

from constants import *
from pygame.locals import *
from snakecore import *
from pygame import Color, display, time, font, draw, Surface, image
class MainUI:
    """Top-level window: owns the pygame surface, the frame clock and the
    current UI state object (menu, game, save dialog, error screen).
    """
    def __init__(self):
        """Create the window and the frame clock.

        ``self.state`` must be assigned a state object (anything exposing
        ``draw()`` and ``handle_events()``) before ``start()`` is called.
        """
        pygame.init()
        display.set_caption("Snake Game")
        self.surface = display.set_mode((1200, 650))
        self.fpsClock = time.Clock()
        self.state = None   # current UI state; swapped by the states themselves
        self.frame = 0      # frame counter, used by SnakeUI to pace game moves
        self.green_color = Color(0, 200, 0)
    def start(self):
        """Run the endless draw / event loop at 30 FPS, delegating rendering
        and input to the current ``self.state``.
        """
        while True:
            self.surface.fill(self.green_color)
            self.state.draw()
            self.state.handle_events()
            display.update()
            self.fpsClock.tick(30)
            self.frame += 1
class Menu:
    """Reusable vertical text menu: draws the items, tracks the selection
    and turns key presses into a selected index.
    """
    def __init__(self, menu_text_list, font_size=100,\
            distance=140, first=100, cursor=30):
        """menu_text_list - item labels; font_size - item font height;
        distance - vertical gap between items; first - y of the first item;
        cursor - half-height of the triangular selection cursor."""
        # Pre-computed y coordinate of every item.
        self.menu_items_pos = [x * distance + first\
            for x in range(len(menu_text_list))]
        self.menu_text_list= menu_text_list
        # selected_index == -1 means "nothing selectable" (empty menu).
        if len(menu_text_list) > 0:
            self.selected_index = 0
        else:
            self.selected_index = -1
        self.menu_font = font.Font(None, font_size)
        self.orange_color = Color(224, 76, 27)
        self.green_color = Color(42, 77, 6)
        self.cursor = cursor
    def draw(self, surface):
        """Draw the selection cursor (when applicable) and all items."""
        if self.selected_index != -1:
            self.__draw_menu_cursor(surface)
        for i, text in enumerate(self.menu_text_list):
            self.__draw_menu_item(surface, text, i)
    def get_events(self):
        """Consume pygame events; return the chosen index on Enter, -1 on
        Escape, or None when nothing was confirmed this frame."""
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == KEYDOWN:
                return self.__handle_key_press(event.key)
    def __handle_key_press(self, key):
        # Up/Down move the selection (clamped); Enter confirms; Esc cancels.
        if key == K_UP:
            if self.selected_index > 0:
                self.selected_index -= 1
        elif key == K_DOWN:
            if self.selected_index < len(self.menu_text_list) - 1:
                self.selected_index += 1
        elif key == K_RETURN:
            return self.selected_index
        elif key == K_ESCAPE:
            return -1
    def __draw_menu_cursor(self, surface):
        """Draw two facing triangles flanking the selected item."""
        cursor_height = self.menu_items_pos[self.selected_index]
        draw.polygon(surface, self.orange_color, [
            (400, cursor_height),
            (400, cursor_height + self.cursor * 2),
            (400 + self.cursor, cursor_height + self.cursor)])
        draw.polygon(surface, self.orange_color, [
            (780, cursor_height),
            (780, cursor_height + self.cursor * 2),
            (780 - self.cursor, cursor_height + self.cursor)])
    def __draw_menu_item(self, surface, text, item_pos):
        """Render one label horizontally centered at its pre-computed y."""
        menu_item = self.menu_font.render(text, False, self.green_color)
        menu_start_pos =\
            (surface.get_width() / 2 - menu_item.get_width() / 2,
            self.menu_items_pos[item_pos])
        surface.blit(menu_item, menu_start_pos)
class MainMenuUI:
    """Snake's game main menu (shown when the game is first opened)."""
    def __init__(self, main_ui):
        self.main_ui = main_ui
        self.menu = Menu(["START", "LOAD", "EXIT"])
        self.white_color = Color(255, 255, 255)
    def draw(self):
        """Render the menu onto the main window surface."""
        self.menu.draw(self.main_ui.surface)
    def handle_events(self):
        """Dispatch the selected item: new game, load screen, or quit."""
        choice = self.menu.get_events()
        if choice == 0:
            self.main_ui.state = SnakeUI(self.main_ui)
        elif choice == 1:
            self.main_ui.state = LoadMenuUI(self.main_ui)
        elif choice == 2:
            pygame.quit()
            sys.exit()
class LoadMenuUI:
    """The game's menu which is shown to load a saved game."""
    def __init__(self, main_ui):
        self.main_ui = main_ui
        self.filenames = glob.glob(SAVE_FILE_TEMPLATE.format('*'))
        # os.path keeps this portable: the old code did
        # name[name.rindex('\\') + 1:...] which raised ValueError on any
        # platform whose paths use '/' separators.
        names = [os.path.splitext(os.path.basename(name))[0]
                 for name in self.filenames]
        self.menu = Menu(names, 50, 40, 3, 16)
    def draw(self):
        """Render the list of save files."""
        self.menu.draw(self.main_ui.surface)
    def handle_events(self):
        """Escape returns to the main menu; Enter loads the chosen save."""
        index = self.menu.get_events()
        if index == -1:
            self.main_ui.state = MainMenuUI(self.main_ui)
        elif index is not None:
            filename = self.filenames[index]
            game = Game.load_game_from_file(filename)
            self.main_ui.state = SnakeUI(self.main_ui, game)
class SaveMenuUI:
    """Text-entry screen used to type a name and save the game's state."""
    def __init__(self, main_ui, snake_ui):
        self.main_ui = main_ui
        self.snake_ui= snake_ui
        self.name = ''  # save-file name typed so far
        self.text_font = font.Font(None, 50)
        self.black_color = Color(0, 0, 0)
        self.green_color = Color(42, 77, 6)
    def draw(self):
        """Render the "Save name:" prompt followed by the typed name."""
        text = self.name
        message_surface = self.text_font.render("Save name: ",\
            False, self.green_color)
        text_surface = self.text_font.render(text, False, self.black_color)
        self.main_ui.surface.blit(message_surface, (0, 3))
        text_pos = (message_surface.get_width(), 3)
        self.main_ui.surface.blit(text_surface, text_pos)
    def handle_events(self):
        """Forward key presses to the editing logic; allow window close."""
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == KEYDOWN:
                self.__handle_key_down(event.key)
    def __handle_key_down(self, key):
        # Esc cancels, Enter saves (if the name is long enough), Backspace
        # edits; any printable ASCII key outside the forbidden set is appended.
        if key == K_ESCAPE:
            self.main_ui.state = self.snake_ui
            self.snake_ui.is_running = True
        elif key == K_RETURN:
            if len(self.name) >= MIN_SAVEFILE_LEN:
                game = self.snake_ui.snake_game
                self.snake_ui.is_running = True
                filename = SAVE_FILE_TEMPLATE.format(self.name)
                game.save(filename)
                self.main_ui.state = self.snake_ui
        elif key == K_BACKSPACE:
            self.name = self.name[:-1]
        else:
            if len(self.name) <= MAX_SAVEFILE_LEN:
                key_press = key
                # 32..126 covers printable ASCII key codes.
                if key_press >= 32 and key_press <= 126 and\
                        key_press not in INVALID_SAVEFILE_CHARACTERS:
                    key_input = pygame.key.get_pressed()
                    # Shift held -> upper-case the character.
                    if key_input[K_LSHIFT]:
                        self.name += chr(key_press).upper()
                    else:
                        self.name += chr(key_press)
class GameMenuUI:
    """In-game pause menu, opened with Esc while playing."""
    def __init__(self, main_ui, snake_ui):
        self.main_ui = main_ui
        self.snake_ui = snake_ui
        self.menu = Menu(["SAVE", "EXIT", "CLOSE"])
        self.white_color = Color(255, 255, 255)
    def draw(self):
        """Freeze the game behind the menu, then draw the menu on top of it."""
        self.snake_ui.is_running = False
        self.snake_ui.draw()
        self.menu.draw(self.main_ui.surface)
    def handle_events(self):
        """SAVE opens the save screen, EXIT returns to the main menu,
        CLOSE (or Esc) resumes the game."""
        choice = self.menu.get_events()
        if choice == 0:
            self.main_ui.state = SaveMenuUI(self.main_ui, self.snake_ui)
        elif choice == 1:
            self.main_ui.state = MainMenuUI(self.main_ui)
        elif choice == 2 or choice == -1:
            self.main_ui.state = self.snake_ui
            self.snake_ui.is_running = True
class ErrorUI:
    """UI state that displays a single critical error message, centered."""
    def __init__(self, main_ui, message):
        self.main_ui = main_ui
        self.message = message
        self.menu_font = font.Font(None, 50)
        self.red_color = Color(255, 0, 0)
    def draw(self):
        """Render the message centered on the main surface."""
        text = self.menu_font.render(self.message, False, self.red_color)
        text_width = text.get_width()
        text_height = text.get_height()
        surface_width = self.main_ui.surface.get_width()
        surface_height = self.main_ui.surface.get_height()
        position = (surface_width / 2 - text_width / 2,
                    surface_height / 2 - text_height / 2)
        self.main_ui.surface.blit(text, position)
    def handle_events(self):
        """Allow the window to be closed while the error is displayed.

        The old code iterated ``event.get()`` -- ``event`` was never
        imported, so closing the window raised NameError instead.
        """
        for current_event in pygame.event.get():
            if current_event.type == QUIT:
                pygame.quit()
                sys.exit()
class SnakeUI:
    """In-game UI state: advances the snake game and renders it each frame."""
    def __init__(self, main_ui, game=None):
        """Create the game screen.

        main_ui -- owning MainUI instance
        game    -- optional pre-loaded Game (e.g. from a save file);
                   a fresh Game is created when omitted
        """
        if game is not None:
            self.snake_game = game
        else:
            level_manager = LevelManager(LEVELS_DIRECTORY)
            self.snake_game = Game(level_manager)
        self.last_move = GameMoves.PASS
        self.main_ui = main_ui
        level = self.snake_game.current_level
        # maze_size is stored as (height, width) -- see draw().
        self.maze_size = (level.maze_height, level.maze_width)
        self.game_surface = Surface(transform(self.maze_size, 2, 2))
        # colors and images
        self.green_color = Color(151, 255, 148)
        self.white_color = Color(255, 255, 255)
        self.black_color = Color(0, 0, 0)
        self.apple = image.load('images/apple.png')
        self.block = image.load('images/block.png')
        self.brick = image.load('images/brick.jpg')
        # fonts
        self.info_font = font.Font(None, 23)
        self.is_running = True
    def draw(self):
        """Advance the game (when running, every GUI_GAME_SPEED frames) and
        render one frame centered on the window."""
        if self.is_running and self.main_ui.frame % GUI_GAME_SPEED == 0:
            self.snake_game.move(self.last_move)
            self.last_move = GameMoves.PASS
        level = self.snake_game.current_level
        # Recreate the surface only when the maze really changed size.
        # The old comparison tested against (width, height) while maze_size
        # holds (height, width), so every non-square maze re-allocated the
        # surface on every single frame.
        if self.maze_size != (level.maze_height, level.maze_width):
            self.maze_size = (level.maze_height, level.maze_width)
            self.game_surface = Surface(transform(self.maze_size, 2, 2))
        self.game_surface.fill(self.green_color)
        self.__draw_apple()
        self.__draw_snake()
        self.__draw_barrier()
        self.__draw_level_info()
        surface_width = self.main_ui.surface.get_width()
        surface_height = self.main_ui.surface.get_height()
        game_width = self.game_surface.get_width()
        game_height = self.game_surface.get_height()
        x_pos = surface_width / 2 - game_width / 2
        y_pos = surface_height / 2 - game_height / 2
        self.main_ui.surface.blit(self.game_surface, (x_pos, y_pos))
    def __draw_apple(self):
        """Blit the apple sprite at its maze position (offset by the border)."""
        apple_position = transform(self.snake_game.current_level.apple, 1, 1)
        self.game_surface.blit(self.apple, apple_position)
    def __draw_snake(self):
        """Blit one body sprite per snake segment."""
        level = self.snake_game.current_level
        for block in level.snake:
            self.game_surface.blit(self.block, transform(block, 1, 1))
    def __draw_barrier(self):
        """Blit the level's bricks plus a brick frame around the maze."""
        level = self.snake_game.current_level
        for brick in level.barrier:
            self.game_surface.blit(self.brick, transform(brick, 1, 1))
        brick_height = self.brick.get_height()
        brick_width = self.brick.get_width()
        maze_height = self.game_surface.get_height()
        maze_width = self.game_surface.get_width()
        for x in range(0, maze_width, brick_width):
            self.game_surface.blit(self.brick, (x, 0))
        for x in range(0, maze_width, brick_width):
            self.game_surface.blit(self.brick, (x, maze_height - brick_height))
        for y in range(0, maze_height, brick_height):
            self.game_surface.blit(self.brick, (0, y))
        for y in range(0, maze_height, brick_height):
            self.game_surface.blit(self.brick, (maze_width - brick_width, y))
    def __draw_level_info(self):
        """Render "Level / Snake Length" in the window's top-left corner."""
        level = self.snake_game.current_level
        info = 'Level: {0} Snake Length: {1}/{2}'\
            .format(level.level, level.snake_length, level.snake_max_length)
        info_surface = self.info_font.render(info, False, self.black_color)
        self.main_ui.surface.blit(info_surface, (10, 10))
    def handle_events(self):
        """Map arrow keys to snake moves; Esc opens the in-game menu."""
        for current_event in pygame.event.get():
            if current_event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif current_event.type == KEYDOWN:
                if current_event.key == K_LEFT:
                    self.last_move = GameMoves.LEFT
                elif current_event.key == K_RIGHT:
                    self.last_move = GameMoves.RIGHT
                elif current_event.key == K_UP:
                    self.last_move = GameMoves.UP
                elif current_event.key == K_DOWN:
                    self.last_move = GameMoves.DOWN
                elif current_event.key == K_ESCAPE:
                    self.main_ui.state = GameMenuUI(self.main_ui, self)
def transform(coordinates, x_translation=0, y_translation=0):
    """Map (row, col) coordinates used by the game logic to the (x, y)
    pixel coordinates used by pyGame, applying an optional cell offset."""
    row, col = coordinates[0], coordinates[1]
    return ((col + x_translation) * PIXEL,
            (row + y_translation) * PIXEL)
def show_error(mainUI, text):
    """Display *text* as a fatal error screen and run the UI loop.

    mainUI -- the MainUI object
    text   -- error description
    """
    mainUI.state = ErrorUI(mainUI, text)
    mainUI.start()
def main():
    """Boot the game and show any startup/runtime error on screen.

    Fixes in this version: every handler referenced an undefined name
    ``mainUI`` (NameError masked the real error), and the format-error
    branch stringified the exception *class* instead of the instance.
    """
    main_ui = None
    try:
        main_ui = MainUI()
        menu_ui = MainMenuUI(main_ui)
        main_ui.state = menu_ui
        main_ui.start()
    except LastLevelError:
        show_error(main_ui, 'There are no levels into the level directory')
    except LevelIOError:
        show_error(main_ui, 'Unexpected error while loading level information')
    except LevelFormatError as ex:
        show_error(main_ui, 'Incorrect level syntax: "{0}"'.format(str(ex)))
    except pygame.error as ex:
        show_error(main_ui, str(ex))
if __name__ == '__main__':
    # Run the game only when executed directly, not when imported.
    main()
| ngkolev/snake | snakegui.py | snakegui.py | py | 14,192 | python | en | code | 0 | github-code | 13 |
4623050367 | import csv
from math import ceil
from tqdm import tqdm
from data_converter import DataConverter
class BKT_converter(DataConverter):
    """Converts raw response logs into per-skill BKT input files.

    Reads the source CSV, groups each student's correctness sequence by
    (sequence_id, user_id), then writes one tab-separated file per
    sequence_id ('nnn' in the output path is replaced by the id).
    """

    def convert(self):
        """Build the grouping and write one output file per sequence.

        Returns:
            dict: {sequence_id: {user_id: [correct, ...]}}
        """
        input_file = open(self.input_file_path, 'rb')
        csv_reader = csv.reader(input_file)
        header = next(csv_reader)  # skip header (next() works on Py2 and Py3)
        user_col = self.col_mapping['user_id']
        seq_col = self.col_mapping['sequence_id']
        correct_col = self.col_mapping['correct']
        seq_user_dict = {}
        for row in csv_reader:
            user = row[user_col]
            seq = row[seq_col]
            # Partial credit is rounded up to a binary "correct".
            correct = ceil(float(row[correct_col]))
            # setdefault replaces the Py2-only dict.has_key() checks.
            seq_user_dict.setdefault(seq, {}).setdefault(user, []).append(correct)
        input_file.close()
        for seq_id in tqdm(seq_user_dict.keys()):
            users = seq_user_dict[seq_id]
            output_file_path = self.output_file_path.replace('nnn', str(seq_id))
            output_file = open(output_file_path, 'wb')
            csv_writer = csv.writer(output_file, delimiter='\t')
            for user_id in users.keys():
                # One row per response: user, correctness, 1-based attempt count.
                for n, d in enumerate(users[user_id], start=1):
                    csv_writer.writerow([user_id, int(d), n])
            output_file.close()
        return seq_user_dict
if __name__ == "__main__":
    # Ad-hoc driver: column indices of the 2014-15 export; output files land
    # in ../data/bkt/ with 'nnn' replaced by each sequence id.
    col_mapping = {'user_id': 1, 'sequence_id' : 2, 'problem_id' : 3, 'correct': 4}
    converter = BKT_converter('../data/1415_full.csv', '../data/bkt/nnn.txt', col_mapping)
    converter.convert()
| beardeer/assistments_workbench | data_converters/BKT_converter.py | BKT_converter.py | py | 1,612 | python | en | code | 1 | github-code | 13 |
38330060396 | """
Модуль c командами, вызывающимися из интерфейса
"""
import tkinter.messagebox as box
from py.c_commands import create_fib_arr
from py.c_commands import delete_repeated
MAX_INT_FIB_NUM = 47  # largest Fibonacci index accepted by the C backend
MAX_INT = 32767  # upper bound for array elements (16-bit signed max)
MAX_LEN = MAX_INT  # maximum accepted array length
MIN_INT = -(MAX_INT - 1)  # lower bound (-32766) -- asymmetric, per the C library's contract
def generate(size_entry, res_entry):
    """Fill *res_entry* with a Fibonacci array of the size typed into
    *size_entry*; pops an error dialog on invalid input."""
    try:
        size = int(size_entry.get())
        assert 0 < size <= MAX_INT_FIB_NUM
        fib = create_fib_arr(size)
        clear(res_entry)
        res_entry.insert(0, fib)
    except (ValueError, AssertionError):
        box.showerror("Неверный размер", "Размер -- целое k, где 0 < k < 48")
def delete(enter_arr, res_arr):
    """Remove repeated elements from the array typed into *enter_arr* and
    show the result in *res_arr*; pops an error dialog on invalid input."""
    try:
        arr = list(map(int, enter_arr.get().split()))
        for element in arr:
            assert MIN_INT <= element <= MAX_INT
        assert 0 < len(arr) < MAX_LEN
        result = delete_repeated(arr)
        clear(res_arr)
        res_arr.insert(0, result)
    except ValueError:
        box.showerror("Неверные данные массива",
                      "Элементы -- целые числа, разделенные пробелом!")
    except AssertionError:
        # The old message printed the bounds in reversed order
        # ("от 32767 до -32766"); MIN/MAX are now passed the right way round.
        box.showerror("Неверные данные массива",
                      "Длина от 1 до %d,\n" % MAX_LEN +
                      "элементы от %d до %d!" % (MIN_INT, MAX_INT))
def close(window):
    """Shut the application down by destroying its main window."""
    window.destroy()
def author():
    """Pop up the "about the author" information dialog."""
    box.showinfo('Об авторе', 'Маслова Марина.\nИУ7-33Б.')
def clear(entry):
    """Wipe the full contents of a single input field."""
    entry.delete(0, 'end')
def clear_all(entries):
    """Wipe the contents of several input fields at once."""
    for field in entries:
        clear(field)
| MyMiDiII/bmstu-c | lab_12_02_2/py/commands.py | commands.py | py | 2,259 | python | ru | code | 3 | github-code | 13 |
219853923 | # n, k = map(int, input().split())
#
# cnt = 0
# for i in range(1, n + 1):
# if(n % i == 0):
# cnt += 1
# if (cnt == k):
# print(i)
# break
# else:
# if(cnt < k):
# print(0)
# Read n and k, then print the k-th smallest divisor of n (0 if n has
# fewer than k divisors).
n, k = map(int, input().split())
divisors = [d for d in range(1, n + 1) if n % d == 0]
if len(divisors) >= k:
    print(divisors[k - 1])
else:
    print(0)
class Solution(object):
    def fractionToDecimal(self, numerator, denominator):
        """Return numerator/denominator as a decimal string, wrapping any
        repeating part in parentheses (e.g. 2/3 -> "0.(6)").

        :type numerator: int
        :type denominator: int
        :rtype: str
        """
        n, remainder = divmod(abs(numerator), abs(denominator))
        sign = '-' if denominator * numerator < 0 else ''
        if remainder == 0:
            return sign + str(n)
        res = [sign + str(n), '.']
        # Map each remainder to the index in res where its digit begins; a
        # repeated remainder marks the start of the repeating cycle.  The
        # original used list `in` / list.index, i.e. O(n) per digit, plus a
        # "(0)" string-replace hack for terminating decimals.
        seen = {}
        while remainder and remainder not in seen:
            seen[remainder] = len(res)
            n, remainder = divmod(remainder * 10, abs(denominator))
            res.append(str(n))
        if remainder:
            res.insert(seen[remainder], '(')
            res.append(')')
        return ''.join(res)
# Smoke test.  The original called s.longestValidParentheses(...), a method
# from a different problem that Solution does not define (AttributeError).
s = Solution()
a = s.fractionToDecimal(2, 3)
print(a)
| littleliona/leetcode | medium/166.Fraction to recurring decimal.py | 166.Fraction to recurring decimal.py | py | 819 | python | en | code | 0 | github-code | 13 |
22018553976 | # softphone class that uses simple_pj
import random
import re
from time import time, sleep
import lib.logging_esi as logging_esi
from lib.wrappers import Trace
import lib.softphone.simple_pj as pj
from lib.softphone.wav_audio import create_wav_file
from lib.user_exception import UserException as Ux, UserTimeoutException as Tx
log = logging_esi.get_logger('esi.softphone2')
class Softphone:
    """pjsua-based softphone wrapper: one SIP account per instance.

    A single shared PjsuaLib is lazily created by the first instance and
    reused by every later one (class attribute ``lib``).
    """
    # Shared pjsua library instance, created once in __init__.
    lib = None
    # Path of the wav file played into calls, if any.
    pbfile = None
    # URI of the far end of the current/last outgoing call.
    dst_uri = None
    # Recorder id / conference slot used by connect_media().
    rec_id = None
    rec_slot = None
    @Trace(log)
    def __init__(self, uri, proxy, password, null_snd=True, dns_list=None, tcp=False,
                 pbfile=None, rec=True, quiet=True):
        """Register account *uri* ('sip:user@host') through *proxy*.

        Starts the shared pjsua library on first use and, when *pbfile* is
        given, pre-creates the playback wav file.
        NOTE(review): *rec* is accepted but never used in this method.
        """
        self.uri = uri
        self.pbfile = pbfile
        if not self.lib:
            Softphone.lib = pj.PjsuaLib()
            self.lib.start(null_snd=null_snd, dns_list=dns_list, tcp=tcp)
        if self.pbfile:
            create_wav_file(self.pbfile, quiet)
        m = re.match('sip:([^@]+)@(.+)', self.uri)
        if m:
            self.lib.add_account(m.group(1), m.group(2), proxy, password)
            self.account_info = pj.account_infos[self.uri]
        # NOTE(review): when uri does not match the pattern, account_info is
        # never set and later methods will raise AttributeError.
    @Trace(log)
    def wait_for_call_status(self, desired_status, timeout=30):
        """Poll until the account's call status equals *desired_status*;
        return the elapsed seconds, or raise Tx on timeout (Ux when waiting
        for 'early' but the call already connected)."""
        # possible desired_status values: 'call', 'idle', 'early', 'hold'
        start = time()
        while time() - start < timeout:
            if self.account_info.call_status == desired_status:
                return time() - start
            sleep(0.1)
        if self.account_info.call_status == 'call' and desired_status == 'early':
            self.teardown_call()
            raise Ux('wait for call status "early" terminated call because status was "call"')
        else:
            raise Tx('wait for call status "%s" timed out after %s seconds' % (desired_status, timeout))
    @Trace(log)
    def make_call(self, dst_uri):
        """Place an outgoing call to *dst_uri*; requires a 200 registration."""
        self.dst_uri = dst_uri
        if self.account_info.reg_status != 200:
            raise Ux("Can't set up call, registration status (src) %s" % self.account_info.reg_status)
        log.debug("%s calling %s" % (self.uri, self.dst_uri))
        # print self.dst_uri
        self.account_info.call = self.account_info.account.make_call_to_softphone(self.dst_uri)
        self.account_info.call.set_callback(pj.MyCallCallback(self.account_info))
    @Trace(log)
    def end_call(self):
        """Hang up the active call; raise Ux when no call is in progress."""
        if not self.account_info.call:
            raise Ux("end_call(): %s not in call" % self.uri)
        log.debug("%s ending call to %s" % (self.uri, self.dst_uri))
        self.account_info.call.hangup()
    @Trace(log)
    def leave_msg(self, length=None):
        """Press '2' after the greeting and stay in-call for *length* seconds
        (random 10-30 s when omitted) to simulate leaving a voicemail."""
        if not self.account_info.call:
            raise Ux("leave_msg(): %s not in call" % self.uri)
        sleep(10)
        self.account_info.call.dial_dtmf('2')
        if length is None:
            random.seed(time())
            length = random.randrange(10, 30, 1)
        sleep(length)
    def teardown_call(self):
        """Hang up (if in a call) and wait for the disconnect to complete."""
        if self.account_info.call:
            self.account_info.call.hangup()
            log.debug("%s hanging up" % self.uri)
            log.debug("calling wait_for_call_status(%s, 'end', 15)" % self.uri)
            self.wait_for_call_status('disconnected', 15)
    @Trace(log)
    def dial_dtmf(self, dtmf_string):
        """Send each character of *dtmf_string* as a DTMF digit, 0.3 s apart."""
        if self.account_info.call:
            for c in list(dtmf_string):
                log.debug('%s:send dtmf %s' % (self.uri, c))
                self.account_info.call.dial_dtmf(c)
                sleep(0.3)
    @Trace(log)
    def set_monitor_on(self):
        # Placeholder -- not implemented.
        pass
    @Trace(log)
    def set_monitor_off(self):
        # Placeholder -- not implemented.
        pass
    @Trace(log)
    def connect_media(self):
        """Connect the current call's conference slot to the recorder and,
        when present, the player.

        NOTE(review): this method references the bare name ``lib`` and the
        attributes ``self.call`` / ``self.media_call_slot`` /
        ``self.player_id`` / ``self.pb_slot``, none of which are defined in
        this class -- it appears pasted from an older Softphone
        implementation and will raise NameError/AttributeError as written.
        """
        if self.rec_id is None:
            raise Ux("connect_media: no media exists")
        self.rec_slot = self.lib.recorder_get_slot(self.rec_id)
        my_uri = self.call.info().account.info().uri
        # self.media_call_slot is set to the call's conference slot when connecting media,
        # and set to None when disconnecting, so if it is not None, this is a reconnect
        if self.media_call_slot is not None:
            # if self.media_call_slot is not None but is not the current call's conference slot,
            # it isn't a reconnect, it's a structural program error
            if self.media_call_slot != self.call.info().conf_slot:
                raise Ux("connect_media: call at slot %d media already connected to call slot %d"
                         % (self.call.info().conf_slot, self.media_call_slot))
            log.debug("%s: disconnecting call slot %d from recorder %s at slot %d"
                      % (my_uri, self.media_call_slot, self.rec_id, self.rec_slot))
            lib.conf_disconnect(self.media_call_slot, self.rec_slot)
            if self.player_id is not None:
                self.pb_slot = lib.player_get_slot(self.player_id)
                log.debug("%s: disconnecting player %s at slot %d to call slot %d"
                          % (my_uri, self.player_id, self.pb_slot, self.media_call_slot))
                lib.conf_disconnect(self.pb_slot, self.media_call_slot)
            self.media_call_slot = None
        log.debug("%s: connecting call slot %d to recorder %s at slot %d"
                  % (my_uri, self.call.info().conf_slot, self.rec_id, self.rec_slot))
        lib.conf_connect(self.call.info().conf_slot, self.rec_slot)
        # if there is a player ID then the player was created during create_media and we can connect it, too
        if self.player_id is not None:
            self.pb_slot = lib.player_get_slot(self.player_id)
            log.debug("%s: connecting player %s at slot %d to call slot %d"
                      % (my_uri, self.player_id, self.pb_slot, self.call.info().conf_slot))
            lib.conf_connect(self.pb_slot, self.call.info().conf_slot)
        self.media_call_slot = self.call.info().conf_slot
| mccrorey48/mtaf_private | lib/deprecated/softphone2.py | softphone2.py | py | 5,833 | python | en | code | 0 | github-code | 13 |
13092878510 | from __future__ import annotations
from enum import Enum, auto
from typing import TypeVar, Callable, Any, TYPE_CHECKING
from weakref import ReferenceType
# A callable (any signature, or exactly one argument) -- the callback types
# that can be stored behind a weak Reference.
CallbackType = TypeVar("CallbackType", Callable[..., Any], Callable[[Any], Any])
if TYPE_CHECKING:
    # For type checkers, Reference is generic over the callback it refers to.
    Reference = ReferenceType[CallbackType]  # pylint: disable=invalid-name
else:
    # The ReferenceType that is available at runtime does not inherit from
    # Generic.
    Reference = ReferenceType  # pylint: disable=invalid-name
# Callback taking no arguments (signal notification).
SignalCallback = Callable[[], None]
ValueType = TypeVar("ValueType")
# Callback receiving the new value (value notification).
ValueCallback = Callable[[ValueType], None]
InterfaceType = TypeVar('InterfaceType')
ModelType = TypeVar('ModelType')
class NodeType(Enum):
    # Distinguishes the two sides of a node: the model (source of truth)
    # and the interface (view-facing counterpart).
    model = auto()
    interface = auto()
6005051227 | import requests
import os
from hashlib import md5
from requests.exceptions import RequestException
def download_image(url):
    """Fetch *url* and, on HTTP 200, persist the body via save_image.

    Always returns None; request failures are reported and swallowed.
    """
    print('正在下载', url)
    try:
        resp = requests.get(url)
        if resp.status_code == 200:
            save_image(resp.content)
        return None
    except RequestException:
        print('请求图片出错', url)
        return None
def save_image(content):
    """Write *content* to <cwd>/picture/<md5-of-content>.jpg unless a file
    with that name already exists.

    The original built the directory part as ``os.getcwd() + '\\picture'``,
    which only works on Windows; it also called ``f.close()`` inside the
    ``with`` block, which is redundant.
    """
    file_path = os.path.join(os.getcwd(), 'picture',
                             '{0}.{1}'.format(md5(content).hexdigest(), 'jpg'))
    print(file_path)
    if not os.path.exists(file_path):
        with open(file_path, 'wb') as f:
            f.write(content)
def main():
    # Demo: fetch one sample product image from Alibaba's image CDN.
    url = 'http://g-search3.alicdn.com/img/bao/uploaded/i4/i4/TB1zUfNRFXXXXaMXVXXXXXXXXXX_!!0-item_pic.jpg'
    download_image(url)
if __name__ == '__main__':
    main()
38832538851 | # Import
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.utils import ChromeType
from selenium.webdriver.chrome.options import Options
from difflib import get_close_matches
import numpy as np
import pandas as pd
import time
import re
import random
class SolveMotus():
    """Selenium bot that plays and solves the online Motus word game."""
    def __init__(self):
        """
        Initialization of the Solver :
        - create driver, start the game and scrap the first letter of the word
        - create letter lists
        """
        chrome_options = Options()
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_experimental_option("detach", True)
        chrome_options.add_argument("--start-maximized")
        chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
        self.driver = webdriver.Chrome('chromedriver.exe', options=chrome_options)
        self.driver.get("https://motus.absolu-puzzle.com/index.php")
        # NOTE(review): 'zoom %' looks like a leftover placeholder value.
        self.driver.execute_script("document.body.style.zoom='zoom %'")
        self.path_letter = "/html/body/div[1]/div[1]/div[2]/div[1]/table/tbody/"
        self.first_letter = self.driver.find_element(By.XPATH, f'{self.path_letter}tr[1]/td[1]').text.lower()
        self.enter = self.driver.find_element(By.CLASS_NAME, 'form-control')
        self.letters_out = []  # letters proven absent from the word
        self.letters_misplaced = []  # letters present but at a wrong position
        self.well_placed = []  # letters confirmed at their position
        time.sleep(0.5)
        #We have to accept cookies in order to play
        # (busy-waits until the consent button appears; bare except is
        # intentional best-effort here)
        while True :
            try :
                cookies = self.driver.find_element(By.XPATH, "/html/body/div[3]/div[2]/div[1]/div[2]/div[2]/button[1]")
                cookies.click()
                break
            except :
                pass
    def send_first_word(self, words8):
        """
        Function which sends a first word based on most used letter in French
        """
        # One candidate per starting letter, each packed with frequent letters.
        best_first_word = ['azulejos', 'byzantin', 'cypriote', 'dynastie', 'exutoire', 'fuyantes', 'gypseuse', 'hysterie', 'ivrognes', 'juvenile', 'kystique', 'lyonnais', 'mystifie',
                           'nympheas', 'oxygenes', 'pyromane', 'quotient', 'rythmera', 'systemes', 'tziganes', 'utopiste', 'vulvaire', 'wishbone', 'xiphoide', 'yearling', 'zestames']
        first_word = [word for word in best_first_word if word[0]==self.first_letter][0]
        # Fall back to the 5 closest dictionary words in case the candidate
        # itself is rejected by the site.
        new_words = get_close_matches(first_word, words8, n=5, cutoff=0)
        # for let in common_letter:
        #     if len(first_word) < 8 or self.first_letter!=let:
        #         first_word += let
        #print(f"Words selected for the first step are : {new_words}")
        self.send_word(new_words[0], )
        i=1
        while "n'est pas accepté comme un mot valide" in self.driver.page_source:
            self.send_word(new_words[i])
            i+=1
    def get_result_pred(self, it):
        """
        Function which scraps result of the try n° it and updates letter lists
        INPUT :
            - it : iteration rank (int)
        OUTPUT :
            - result : list of info concerning try it [[letter, color of the letter], ...]
        """
        self.well_placed = []
        self.letters_misplaced = []
        result = []
        path_letter = "/html/body/div[1]/div[1]/div[2]/div[1]/table/tbody/"
        # Cell colors: '#36c' = absent, '#f60' = misplaced, '#008a05' = placed.
        for i in range(1,9):
            try :
                result.append( [self.driver.find_element(By.XPATH, f'{path_letter}tr[{it}]/td[{i}]').text.lower(),
                                self.driver.find_element(By.XPATH, f'{path_letter}tr[{it}]/td[{i}]').get_attribute("bgcolor")] )
            except :
                print(f"An error occured due to {self.driver.find_element(By.XPATH, f'{path_letter}tr[{it}]/td[{i}]').text}")
                break
            #If letter not in word, we append it to unwanted letters
            if result[i-1][1] == '#36c':
                self.letters_out.append(result[i-1][0])
                if result[i-1][0] in self.well_placed or result[i-1][0] in self.letters_misplaced :
                    self.letters_out.pop(self.letters_out.index((result[i-1][0])))
            #It it is misplaced, we update letters missplaced
            elif result[i-1][1] == '#f60':
                self.letters_misplaced.append(result[i-1][0])
                if result[i-1][0] in self.letters_out:
                    self.letters_out.pop(self.letters_out.index((result[i-1][0])))
            #If the letter is well-placed, we try to pop it out from missplaced
            elif result[i-1][1] == '#008a05':
                self.well_placed.append(result[i-1][0])
                if result[i-1][0] in self.letters_out:
                    self.letters_out.pop(self.letters_out.index((result[i-1][0])))
                # try :
                #     self.letters_misplaced.pop(self.letters_misplaced.index(result[i-1][0]))
                # except:
                #     pass
        self.letters_out = list(set(self.letters_out))
        print(self.letters_out, self.letters_misplaced, self.well_placed)
        return result
    def new_prediction(self, previous_result, words8):
        """
        Function which uses result of the try n° it-1 to update the list of possible words
        INPUT :
            - previous_result : list of info concerning try it-1 [[letter, color of the letter], ...]
            - words8 : list of available words to solve the Motus
        OUTPUT :
            - words8 updated
        """
        # The filter is one regex built from stacked lookahead conditions.
        architecture = '(?='
        #Add first condition concerning letters well-placed
        for idx, info in enumerate(previous_result) :
            if info[1] == '#008a05':
                architecture += info[0]
            else:
                architecture += '.'
        architecture += ')'
        #Add second condition about missplaced letters
        for letter in self.letters_misplaced:
            architecture += f"(?=[a-zA-Z]*{letter}[a-zA-Z]*)"
        #Add third condition about eliminated letters
        for letter in self.letters_out:
            architecture += f'(?=^((?!{letter}).)*$)'
        #print(architecture)
        #Use RegEx condition
        r = re.compile(architecture)
        words8 = list(filter(r.match, words8))
        return words8
    def send_word(self, word):
        """
        Function which sends a word in the online game
        INPUT :
            - word : string chosen to be sent
        """
        #To prevent selenium.common.exceptions.StaleElementReferenceException
        # It may happen because the element to which I have referred is removed from the DOM structure. [https://stackoverflow.com/a/18226139]
        try :
            self.enter.click()
            self.enter.send_keys(word)
            self.enter.send_keys(Keys.RETURN)
        except :
            # Re-locate the input box after a DOM refresh and retry once.
            self.enter = self.driver.find_element(By.CLASS_NAME, 'form-control')
            self.enter.click()
            self.enter.send_keys(word)
            self.enter.send_keys(Keys.RETURN)
        # try :
        #     self.driver.find_element(By.XPATH, '/html/body/div[1]/div[1]/div[2]/form/div/button').click()
        # except :
        #     self.driver.find_element(By.XPATH, '/html/body/div[1]/div[1]/div[2]/form/div/button').click()
| luceien/MotusSolver | utils.py | utils.py | py | 7,438 | python | en | code | 0 | github-code | 13 |
30615801942 | import os, re, configparser, requests
import random
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher, FSMContext
from aiogram.utils import executor
from aiogram.utils.helper import Helper, HelperMode, ListItem
from aiogram.dispatcher.filters.state import State, StatesGroup
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.types import ReplyKeyboardRemove, ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardMarkup, \
InlineKeyboardButton
import urllib.request
from tiktok_downloader import snaptik
import sys
import urllib
import requests
from bs4 import BeautifulSoup
# Bot token is taken from the command line: python <script> <TOKEN>
bot = Bot(token=sys.argv[1])
# In-memory FSM storage: conversation state is lost on restart.
dp = Dispatcher(bot, storage=MemoryStorage())
def download_video(video_url, name):
    """Download *video_url* and save it as ./videos/video<name>.mp4.

    Only responses whose Content-Type is video/mp4 are written; anything
    else is silently ignored.
    """
    r = requests.get(video_url, allow_redirects=True)
    if r.headers.get('content-type') == 'video/mp4':
        # 'with' guarantees the handle is closed (the old code leaked it).
        with open(f'./videos/video{name}.mp4', 'wb') as fh:
            fh.write(r.content)
# Ensure the download target directory exists (idempotent and race-free,
# unlike the old exists()-then-makedirs() pair).
os.makedirs('videos', exist_ok=True)
@dp.message_handler(commands=['bibometr'])
async def start_command(message: types.Message):
    """Joke command: reply with a random "size" and a matching ASCII picture.

    (The original also drew a second random number that was never used.)
    """
    size1 = random.randint(-20, 20)
    biba = "8"
    # One '=' per 2 cm of magnitude.
    for _ in range(abs(int(size1 / 2))):
        biba = biba + "="
    biba += "D"
    if size1 < 1:
        await message.reply(str(abs(size1)) + " см в жопе 😳" + "\n" + biba)
    else:
        await message.reply("Твой стручок: " + str(size1) + " см 😎" + "\n" + biba)
@dp.message_handler(commands=['all'])
async def ping(m):
    """Reply with an @-mention of every user recorded for this chat.

    Unknown senders are appended to the chat's roster file instead.
    Fixes: the final reply was missing its ``await`` (the coroutine was
    never executed), and the read handle leaked on the mention path.
    """
    path = str(m.chat.id).strip().replace('-', '') + '.txt'
    if not os.path.exists(path):
        with open(path, 'w') as f:
            f.writelines(m.from_user.username)
        await m.reply('Добавлены в список')
        return
    with open(path, 'r') as f:
        content = f.read()
    if m.from_user.username in content:
        msg = ''
        for line in content.splitlines():
            msg = msg + ' @' + line
        await m.reply(msg)
    else:
        with open(path, 'a+') as f:
            f.writelines('\n' + m.from_user.username)
        await m.reply('Добавлены в список')
@dp.message_handler(commands=['weather'])
async def getWeather(message: types.Message):
    """/weather: scrape weather.com and answer with Izhevsk's current conditions."""
    try:
        # NOTE(review): the key 'user agent' (with a space) is not a valid
        # header name and requests.get(url, ua) passes it positionally as
        # params, not headers= — confirm this works as intended.
        ua = {
            'user agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/98.0.4758.102 Safari/537.36"}
        # hard-coded location hash = Izhevsk on weather.com
        url = "https://weather.com/ru-RU/weather/today/l/f2312a9747951a5ddc2e2678f4d7519282e4448dc9bea0157e8f805abb4e4043"
        page = requests.get(url, ua)
        soup = BeautifulSoup(page.content, 'html.parser')
        temp_span = soup.findAll("span", {"class": "CurrentConditions--tempValue--3a50n"})[0]
        temp = str(temp_span).replace(
            "<span class=\"CurrentConditions--tempValue--3a50n\" data-testid=\"TemperatureValue\">", "").replace("</span>", "")
        phrase_div = soup.findAll("div", {"class": "CurrentConditions--phraseValue--2Z18W"})[0]
        phrase = str(phrase_div).replace(
            "<div class=\"CurrentConditions--phraseValue--2Z18W\" data-testid=\"wxPhrase\">", "").replace("</div>", "")
        await message.answer("Ижевск\n" + temp + ", " + phrase)
    except Exception as e:
        await bot.send_message(chat_id=message.chat.id, text='Неверные данные, попробуйте еще раз')
@dp.message_handler(commands=['roll'])
async def roll(message: types.Message):
    """/roll: reply with a random yes/no video clip (videos/True.mp4 or videos/False.mp4)."""
    try:
        verdict = random.choice([True, False])
        with open(f'./videos/{str(verdict)}.mp4', 'rb') as clip:
            await bot.send_video(
                chat_id=message.chat.id,
                video=clip,
                reply_to_message_id=message.message_id
            )
    except Exception as e:
        await bot.send_message(chat_id=message.chat.id, text='Неверные данные, попробуйте еще раз')
def _bcs_rub_quote(full_url, headers):
    """Scrape a RUB-quoted instrument on bcs-express.ru: price + daily change.

    Shared by the $USD and $EUR branches, which were duplicated verbatim in
    the original handler.
    """
    html = requests.get(full_url, headers)
    soup = BeautifulSoup(html.content, 'html.parser').decode()
    pricePattern = re.compile(
        "<div class=\"quote-head__price-value js-quote-head-price js-price-close\">([\d,.]+)</div>")
    price = re.findall(pricePattern, soup)
    profitPattern = re.compile("js-profit-percent\">([-+\w,%]+)</div>")
    profit = str(re.findall(profitPattern, soup))
    result = "Цена за штуку: " + str(price[0]) + " RUB" + "\n"
    result += "Движение цены за день: " + str(
        profit.replace("['", "").replace(",", ".").replace("']", "")) + "\n"
    return result


@dp.message_handler(content_types=['text'])
async def text(message: types.Message):
    """Quote lookup: messages starting with '$' are treated as '$TICKER' requests.

    Supported: $BTC (rbc.ru), $USD / $EUR (bcs-express RUB pairs) and any
    other bcs-express ticker.  Any scraping/parsing error yields a generic
    'try again' reply.  (Removed dead locals from the original: an unused
    URL regex, its match list, and a shadowed outer headers dict.)
    """
    if message.text[0] == '$':
        try:
            resultMessage = ""
            headers = {
                'user agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                              "Chrome/98.0.4758.102 Safari/537.35"}  # request headers
            user = message.text.replace('$', '')
            ticker = user.upper()  # upper-case for convenient comparison
            if ticker == "BTC":
                fullUrl = "https://www.rbc.ru/crypto/currency/btcusd"
                html = requests.get(fullUrl, headers)
                soup = BeautifulSoup(html.content, 'html.parser').decode()
                pricePattern = re.compile(
                    "<div class=\"chart__subtitle js-chart-value\">([\n \d,]+)<span class=\"chart__change chart__change")
                price = re.findall(pricePattern, soup)
                resultMessage += "Цена за штуку: " + str(price[0]).replace(" ", "").replace("\n", "") + " USD\n"
            elif ticker == "USD":
                # same scrape as EUR, only the instrument URL differs
                resultMessage += _bcs_rub_quote("https://bcs-express.ru/kotirovki-i-grafiki/usd000utstom", headers)
            elif ticker == "EUR":
                resultMessage += _bcs_rub_quote("https://bcs-express.ru/kotirovki-i-grafiki/eur_rub__tom", headers)
            elif ticker != "":
                fullUrl = "https://bcs-express.ru/kotirovki-i-grafiki/" + ticker
                html = requests.get(fullUrl, headers)
                soup = BeautifulSoup(html.content, 'html.parser').decode()
                pricePattern = re.compile("js-price-close\">([\d,]+)</div>")      # price
                symbolPattern = re.compile("js-currency-code\">(\w+)</div>")      # currency code
                profitPattern = re.compile("js-profit-percent\">([-+\w,%]+)</div>")  # daily change
                profit = str(re.findall(profitPattern, soup))
                symbol = str(re.findall(symbolPattern, soup)[0])
                price = str(re.findall(pricePattern, soup)[0]).replace(",", ".") + " " + symbol
                resultMessage += "Цена за штуку: " + price + "\n"
                resultMessage += "Движение цены за день: " + str(
                    profit.replace("['", "").replace(",", ".").replace("']", "")) + "\n"
            else:
                resultMessage += "Неверные данные, попробуйте еще раз" + "\n"
            await bot.send_message(chat_id=message.chat.id, text=resultMessage)
        except Exception as e:
            await bot.send_message(chat_id=message.chat.id, text='Неверные данные, попробуйте еще раз')
@dp.message_handler(commands=['set'])
async def set_default_commands(dp):
    """Register the bot's command menu (the list Telegram clients show).

    NOTE(review): this is registered as a '/set' message handler, so the
    parameter named `dp` actually receives the incoming message object —
    confirm whether this was meant to be called once at startup instead.
    """
    await dp.bot.set_my_commands([
        types.BotCommand("all", "Пингануть всех"),
        types.BotCommand("bibometr", "Узнать размер агрегата"),
        types.BotCommand("weather", "Узнать погоду"),
        types.BotCommand("roll", "Да или нет")
    ])
if __name__ == "__main__":
    # Start long-polling; blocks until the bot process is stopped
    executor.start_polling(dp, skip_updates=True)
| grasth/tikTokBot | main.py | main.py | py | 11,137 | python | ru | code | 2 | github-code | 13 |
46286695634 | from diva import Diva
from diva.widgets import *
from diva.utilities import register_simple_util, register_widget_util, file_response
import pandas as pd
import tempfile
# if your utility has options that depend on the currently displayed value,
# of the figure, then use register_widget_util
def my_util_widgets(val):
    """
    Allow the user to select which of the table's columns to export
    """
    # iterating a DataFrame yields its column labels; stringify for the widget
    column_names = [str(name) for name in list(val)]
    return [SelectSubset('select the columns you want', column_names)]
def my_util_apply(val, chosen_columns):
    """
    Export only the selected columns of the DataFrame *val* to csv.

    BUG FIX: DataFrame.to_csv expects *column labels* in `columns`, not a
    boolean mask — the original passed a list of bools, which raises a
    KeyError for any frame not literally containing True/False columns.
    """
    # preserve the frame's own column order, keeping only the chosen names
    all_col_names = [str(name) for name in list(val)]
    keep = [name for name in all_col_names if name in chosen_columns]
    my_file = tempfile.NamedTemporaryFile()
    val.to_csv(my_file.name, columns=keep)
    return file_response('your_file.csv', my_file.name)
register_widget_util('export columns', pd.DataFrame, my_util_widgets, my_util_apply)
# if, on the other hand, your utility does not depend on the currently displayed
# value, you can use register_simple_util, which is a wrapper around the above method
@register_simple_util('export with separator', pd.DataFrame, [String('enter a separator', ',')])
def another_util_apply(val, sep):
    """Export *val* to csv using the user-chosen field separator."""
    my_file = tempfile.NamedTemporaryFile()
    val.to_csv(my_file.name, sep=sep)
    return file_response('your_file.csv', my_file.name)
app = Diva()
@app.view('my sample view')
def foo():
    """Sample view: a tiny 2x3 DataFrame used to demo the export utilities."""
    return pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})
app.run()
| mgriley/diva | examples/custom_utility.py | custom_utility.py | py | 1,642 | python | en | code | 45 | github-code | 13 |
30858813959 | from django.urls import path
from movies.views import *
from .views import *
# URL table for the user app: auth, profile and account-deletion endpoints.
urlpatterns = [
    path("login/", giris, name="login"),
    path("register/", register, name="register"),
    path("profile/", profile, name="user"),
    path("logout/", cikis, name="logout"),
    path("hesapSil/", hesapSilme, name="hesapSil"),  # "hesapSil" = delete account (Turkish)
]
12871055545 | """
Problem Statement:
You have an UNDIRECTED, connected graph of n nodes labeled from 0 to n-1. You are given an array
graph where graph[i] is a list of all the nodes connected with node i by an edge.
Return the length of the shortest path that visits every node. You may start and stop at any node,
you may revisit nodes multiple times and you may reuse edges
Example 1:
Input: graph = [[1,2,3],[0],[0],[0]]
Output: 4
Explanation: One possible path is [1,0,2,0,3]
Example 2:
Input: graph = [[1],[0,2,4],[1,3,4],[2],[1,2]]
Output: 4
Explanation: One possible path is [0,1,4,2,3]
Constraints:
* n == graph.length
* 1 <= n <= 12
* 0 <= graph[i].length < n
* graph[i] does not contain i
* If graph[a] contains b, then graph[b] contains [a]
* The input graph is always connected
Idea:
Use BFS with mask
"""
from typing import List
class Solution:
    def shortestPathLength(self, graph: List[List[int]]) -> int:
        """Length of the shortest walk that visits every node of a connected graph.

        BFS over (node, visited-bitmask) states, started simultaneously from
        every node; nodes may be revisited, so a state is final when its mask
        has all n bits set.  O(n * 2^n) states.

        Changes vs. the original: debug prints removed, O(n) list.pop(0)
        replaced with deque.popleft(), and start states are marked visited.
        """
        n = len(graph)
        # bitmask with all n nodes marked visited
        finalMask = (1 << n) - 1
        # visited[node][mask] guards against re-expanding a (node, mask) state
        visited = [[False] * (finalMask + 1) for _ in range(n)]
        q = deque()
        for i in range(n):
            q.append((i, 1 << i))
            visited[i][1 << i] = True
        timeCount = 0
        while q:
            # process one BFS level per outer iteration
            for _ in range(len(q)):
                node, mask = q.popleft()
                if mask == finalMask:
                    return timeCount
                for child in graph[node]:
                    newMask = mask | (1 << child)
                    if not visited[child][newMask]:
                        visited[child][newMask] = True
                        q.append((child, newMask))
            timeCount += 1
        # unreachable for a connected input graph
        return -1
if __name__ == '__main__':
    s = Solution()
    # both examples from the problem statement; expected output: 4 and 4
    print(s.shortestPathLength(graph=[[1,2,3],[0],[0],[0]]))
    print(s.shortestPathLength(graph=[[1],[0,2,4],[1,3,4],[2],[1,2]]))
| Nacriema/Leet-Code | daily_challenges/shortest-path-visiting-all-nodes.py | shortest-path-visiting-all-nodes.py | py | 3,456 | python | en | code | 0 | github-code | 13 |
2927731667 | import numpy as np
from copy import deepcopy as dc
import re
def read_input(filename):
    """Parse an AoC day-4 batch file into a list of passport dicts.

    Records are separated by blank lines; each record is whitespace-separated
    "key:value" fields, possibly spread over several lines.
    """
    with open(filename) as f:
        lines = f.readlines()
    lines = [l.replace("\n", "").strip() for l in lines]
    data = []
    pp = dict()
    for line in lines:
        if line == "":
            # blank line closes the current record
            data.append(dc(pp))
            pp = dict()
            continue
        for field in line.split(" "):
            key = field.split(":")[0]
            pp[key] = field.split(":")[1]
    # BUG FIX: a final record not followed by a blank line was silently lost
    if pp:
        data.append(dc(pp))
    for s in data:
        print(s)
    return data
def validate(key, value):
    """Validate one passport field per the day-4 part-2 rules.

    Returns True iff *value* is valid for *key*; unknown keys are logged and
    rejected.  Uses re.fullmatch so values like "1920x" are rejected instead
    of crashing on int() (the original re.match only anchored the prefix).
    """
    if key == "byr":
        # birth year: four digits, 1920-2002
        return re.fullmatch(r"[0-9]{4}", value) is not None and 1920 <= int(value) <= 2002
    elif key == "iyr":
        # issue year: four digits, 2010-2020
        return re.fullmatch(r"[0-9]{4}", value) is not None and 2010 <= int(value) <= 2020
    elif key == "eyr":
        # expiration year: four digits, 2020-2030
        return re.fullmatch(r"[0-9]{4}", value) is not None and 2020 <= int(value) <= 2030
    elif key == "hgt":
        # height: digits followed by 'cm' (150-193) or 'in' (59-76)
        m = re.fullmatch(r"([0-9]+)(cm|in)", value)
        if m is None:
            return False
        v = int(m.group(1))
        return (150 <= v <= 193) if m.group(2) == "cm" else (59 <= v <= 76)
    elif key == "hcl":
        # hair color: '#' followed by exactly six lowercase hex digits
        return re.fullmatch(r"#[0-9a-f]{6}", value) is not None
    elif key == "ecl":
        return value in ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"]
    elif key == "pid":
        # passport id: exactly nine digits
        return re.fullmatch(r"[0-9]{9}", value) is not None
    elif key == "cid":
        # country id: always optional/valid
        return True
    print("INVALID KEY: ", key)
    return False
def part_1(filename):
    """Count passports that contain every required field (cid is optional)."""
    required = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
    num_good = 0
    for record in read_input(filename):
        missing = required.difference(set(record.keys()))
        num_good += (len(missing) == 0)
        print(">> ", missing)
    print(">> ", num_good)
def part_2(filename):
    """Count passports with all required fields AND valid field values."""
    required = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
    num_good = 0
    for record in read_input(filename):
        # start from field completeness, then AND in each field's validity
        ok = len(required.difference(set(record.keys()))) == 0
        for key, value in record.items():
            ok = ok and validate(key, value)
        num_good += ok
    print(">> ", num_good)
if __name__ == '__main__':
    #part_1("data_test")
    #part_1("data")
    # run part 2 against both sample inputs and the real puzzle input
    part_2("data_test_invalid")
    part_2("data_test_valid")
    part_2("data")
part_2("data") | N40/advendofcode_2022 | legacy_2020/day_4/run.py | run.py | py | 2,368 | python | en | code | 0 | github-code | 13 |
72682824977 | from ..modeling.pairwise import ChainCosine, TargetEncoder, ContextEncoderConcat
from ..modeling.aux import mySentenceTransformer
from sklearn.cluster import AgglomerativeClustering
import numpy as np
import torch
from tqdm import tqdm
def _load_pairwise_cat(ckpt_path, device):
    """Build the pairwise ChainCosine model and load its checkpoint weights.

    The architecture is hard-coded to match the training run: one shared
    'aws-ai/dse-bert-large' sentence encoder feeding both a target encoder
    and a 3-utterance concatenating context encoder, projected to 256 dims.
    Returns the model in eval mode, mapped onto *device*.
    """
    context_size = 3
    encoder_name = 'aws-ai/dse-bert-large'
    _encoder = mySentenceTransformer(encoder_name)
    _target_encoder = TargetEncoder(_encoder)
    _context_encoder = ContextEncoderConcat(_encoder, context_size=context_size)
    _model = ChainCosine(
        target_encoder=_target_encoder,
        context_encoder=_context_encoder,
        projection_size=256,
        context_size=context_size,
    )
    return ChainCosine.from_checkpoint(
        path_to_ckpt=ckpt_path,
        model=_model,
        map_location=device
    ).eval()
class Pruner:
    """Shortens dialogues by keeping only their dominant topical cluster.

    Dialogues whose best pruned variant scores below `thresh` (per the
    pairwise model) map to None in the output.
    """
    def __init__(
            self,
            ckpt_path='/home/alekseev_ilya/dialogue-augmentation/nup/logs/training/pairwise-cat-speaker-issue/checkpoints/last.ckpt',
            device='cpu',
            thresh=-np.inf
        ):
        # NOTE(review): the default ckpt_path is a machine-specific absolute
        # path — callers on other machines must pass their own.
        self.thresh = thresh
        self.model = _load_pairwise_cat(ckpt_path, device)

    def __call__(self, dialogues):
        # Returns a list parallel to *dialogues*: pruned dialogue or None.
        res = []
        for dia in tqdm(dialogues, desc='cutting dialogues'):
            aug, score = self._cut(self.model, dia)
            res.append(aug if score >= self.thresh else None)
        return res

    @staticmethod
    def _cut(model, dia):
        """Drop all clusters except the biggest one; tries 2..len(dia)//3
        cluster counts and keeps the best-scoring variant.  Applied only to
        dialogues with at least 6 utterances."""
        if len(dia) < 6:
            return None, -np.inf
        end = len(dia) // 3
        start = 2
        variations = []
        for n_clusters in range(start, end+1):
            clusterwise_uts = _cluster(model, dia, n_clusters)
            # utterance indices of the largest cluster, in original order
            ids = clusterwise_uts[np.argmax([len(clust) for clust in clusterwise_uts])]
            aug = [dia[i] for i in ids]
            score = model.score(aug)
            variations.append((aug, score))
        res, score = max(variations, key=lambda x: x[1])
        return res, score
@torch.no_grad()
def _cluster(model, dia, n_clusters):
    """clusters utterances within dia according to logits (similarities) from pairwise model

    Returns a list of clusters, each a list of utterance indices into *dia*.
    """
    batch = model.make_batch_from_dia(dia)
    similarities = model.get_logits(batch, temperature=1).cpu().numpy()
    # mask out similarities between utterances of same speaker
    # speaker = [item['speaker'] for item in dia]
    # context_speaker = np.array(speaker[:-1])[:, None]
    # target_speaker = np.array(speaker[1:])[None, :]
    # mask = (context_speaker != target_speaker) | np.eye(len(speaker)-1, dtype=np.bool_)
    # similarities[~mask] = -1e3
    # NOTE(review): metric='precomputed' treats the input matrix as
    # *distances*, but get_logits returns similarities — confirm the sign
    # convention is intended.
    labels = AgglomerativeClustering(
        n_clusters=n_clusters,
        linkage='average',
        metric='precomputed'
    ).fit_predict(similarities)
    # logits are pairwise (context, target), i.e. one label per transition;
    # duplicate the first label so labels align 1:1 with utterances
    labels = np.r_[labels[0], labels]
    res = [[] for _ in range(len(np.unique(labels)))]
    for i_ut, lab in enumerate(labels):
        res[lab].append(i_ut)
    return res
| voorhs/practicum-fall-2023 | src/mylib/augmentations/prune.py | prune.py | py | 3,080 | python | en | code | 0 | github-code | 13 |
21457780526 | # -*- coding: utf-8 -*-
# This file is part of Argos.
#
# Argos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Argos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Argos. If not, see <http://www.gnu.org/licenses/>.
""" Miscellaneous routines.
"""
import logging, sys
logger = logging.getLogger(__name__)
class NotSpecified(object):
    """ Class for the NOT_SPECIFIED sentinel constant.

        Used so that a parameter can have a meaningful default other than
        None.  Instances always evaluate to False in a boolean context.
    """
    def __nonzero__(self):
        # Python 2 truth-value hook: always falsy.
        return False

    def __bool__(self):
        # Python 3 truth-value hook: always falsy.
        return False

NOT_SPECIFIED = NotSpecified()
def python_major_version():
    """ Returns 2 or 3 for Python 2.x or 3.x respectively
    """
    return sys.version_info.major
def python2():
    """ Returns True if we are running python 2
    """
    major = sys.version_info[0]
    # sanity check: only CPython 2.x / 3.x are supported
    assert major in (2, 3), "major_version = {!r}".format(major)
    return major == 2
def log_dictionary(dictionary, msg='', logger=None, level='debug', item_prefix='    '):
    """ Writes a log message with key and value for each item in the dictionary.

        :param dictionary: the dictionary to be logged
        :type dictionary: dict
        :param msg: An optional message that is logged before the contents
        :type msg: string
        :param logger: A logging.Logger object or a logger name. If not set, the 'main' logger is used.
        :type logger: logging.Logger or a string
        :param level: log level. String or int as described in the logging module documentation.
            Default: 'debug'.
        :type level: string or int
        :param item_prefix: String that will be prefixed to each line.
        :type item_prefix: string
    """
    # BUG FIX: the original unconditionally called level.upper(), crashing on
    # the int levels the docstring promises to accept.
    if isinstance(level, str):
        level_nr = logging.getLevelName(level.upper())
    else:
        level_nr = level

    # BUG FIX: the docstring allows a logger *name*; resolve it here.
    if logger is None:
        logger = logging.getLogger('main')
    elif isinstance(logger, str):
        logger = logging.getLogger(logger)

    if msg:
        logger.log(level_nr, "Logging dictionary: {}".format(msg))

    if not dictionary:
        logger.log(level_nr, "{}<empty dictionary>".format(item_prefix))
        return

    # pad keys so the '=' signs line up
    max_key_len = max([len(k) for k in dictionary.keys()])
    for key, value in sorted(dictionary.items()):
        logger.log(level_nr, "{0}{1:<{2}s} = {3}".format(item_prefix, key, max_key_len, value))
def prepend_point_to_extension(extension):
    """ Prepends a point to the extension if it doesn't already start with one.
    """
    return extension if extension.startswith('.') else '.' + extension
def is_quoted(s):
    """ Returns True if the string begins and ends with matching quotes
        (single or double).

        :param s: a string
        :return: boolean
    """
    # BUG FIX: require at least two characters so a lone quote (e.g. "'")
    # is not counted as both the opening and the closing quote.
    if len(s) < 2:
        return False
    return (s.startswith("'") and s.endswith("'")) or (s.startswith('"') and s.endswith('"'))
def string_to_identifier(s, white_space_becomes='_'):
""" Takes a string and makes it suitable for use as an identifier
Translates to lower case
Replaces white space by the white_space_becomes character (default=underscore).
Removes and punctuation.
"""
import re
s = s.lower()
s = re.sub(r"\s+", white_space_becomes, s) # replace whitespace with underscores
s = re.sub(r"-", "_", s) # replace hyphens with underscores
s = re.sub(r"[^A-Za-z0-9_]", "", s) # remove everything that's not a character, a digit or a _
return s
if __name__ == "__main__":
    # ad-hoc smoke test for string_to_identifier
    print (string_to_identifier("Pea\nsdf-43q45,.!@#%&@&@@24n pijn Kenter, hallo$"))
| leehawk2001/argos | argos/utils/misc.py | misc.py | py | 4,163 | python | en | code | null | github-code | 13 |
22531518044 | from collections import deque
import gym
from matplotlib import pyplot as plt
import numpy as np
import random
import tensorflow as tf
from tensorflow.keras import models, layers, optimizers, Input
from tensorflow.keras import backend as K
from tensorflow.keras import initializers
import time
from memory import Memory
tf.compat.v1.disable_eager_execution()
class DQN(object):
    """Deep Q-Network agent with optional Double-DQN, a dueling head and
    prioritized experience replay (PER).

    NOTE(review): the constructor parameters `forest` and `bPT` are accepted
    but never used anywhere in this class — confirm whether they belong to a
    removed feature.
    """
    def __init__(
        self,
        n_actions=4,
        n_features=2,
        learning_rate=0.005,
        reward_decay=0.9,
        e_greedy=0.9,
        replace_target_iter=500,
        memory_size=2048,
        batch_size=32,
        e_greedy_increment=None,
        prioritized=False,
        dueling=False,
        double_q=False,
        forest=False,
        bPT=False,
    ):
        self.step = 0
        self.n_actions = n_actions
        self.n_features = n_features
        self.factor = reward_decay  # discount factor gamma
        self.update_freq = replace_target_iter  # the update frequency of model
        self.replay_size = memory_size  # the size of training data
        self.lr = learning_rate
        self.epsilon_max = e_greedy
        self.epsilon_increment = e_greedy_increment  # epsilon increase with training steps
        # start greedy exploration from 0 only when annealing is enabled
        self.epsilon = 0.0 if e_greedy_increment is not None else self.epsilon_max
        self.batch_size = batch_size
        self.prioritized = prioritized
        # importance-sampling weights fed as the model's second input
        self.ISWeights = np.zeros((self.batch_size, 1))
        if self.prioritized:
            print('prioritized experience replay')
            self.replay_queue = Memory(self.replay_size)
        else:
            self.replay_queue = deque(maxlen=self.replay_size)
        self.dueling = dueling
        # online network and a periodically-synced target network
        self.model = self.create_model()
        self.target_model = self.create_model()
        self.double_q = double_q
        self.loss = []  # per-fit training loss history

    def create_model(self):
        """Create the Q-network: one hidden Dense layer (batch_size units)
        followed by a linear Q-value output per action.  The second input
        (inputB) carries the PER importance-sampling weights into the loss.
        """
        if self.dueling:
            # dueling head: Q(s,a) = V(s) + (A(s,a) - mean_a A(s,a))
            inputA = Input(shape=(self.n_features,))
            inputB = Input(shape=(1,))
            x = layers.Dense(self.batch_size, activation='relu')(inputA)
            x1 = layers.Dense(1, activation='linear')(x)
            x2 = layers.Dense(self.n_actions, activation='linear')(x)
            y = x1 + (x2 - tf.reduce_mean(x2, axis=1, keepdims=True))
            model = models.Model(inputs=[inputA, inputB], outputs=y)
        else:
            inputA = Input(shape=(self.n_features,))
            inputB = Input(shape=(1,))
            x = layers.Dense(self.batch_size, activation='relu',
                             kernel_initializer=initializers.RandomNormal(mean=0.0, stddev=0.3, seed=None),
                             bias_initializer=initializers.Constant(value=0.1))(inputA)
            y = layers.Dense(self.n_actions, activation='linear',
                             kernel_initializer=initializers.RandomNormal(mean=0.0, stddev=0.3, seed=None),
                             bias_initializer=initializers.Constant(value=0.1))(x)
            model = models.Model(inputs=[inputA, inputB], outputs=y)

        # Pay attention to this part !!!
        # need to customize the loss function: PER scales the squared TD
        # error by the per-sample importance-sampling weight (inputB)
        def my_loss_wrapper(ISWeights):
            def my_loss(y_true, y_pred):
                return K.mean(ISWeights * K.square(y_pred - y_true), -1)
            return my_loss
        if self.prioritized:
            model.compile(loss=my_loss_wrapper(inputB),
                          optimizer=optimizers.RMSprop(self.lr))
        else:
            model.compile(loss='mean_squared_error',
                          optimizer=optimizers.RMSprop(self.lr))
        return model

    def act(self, s, flag=False):
        """Epsilon-greedy action selection for state *s*.

        Exploits (argmax Q) only when *flag* is True and the epsilon draw
        passes; otherwise returns a uniformly random action.
        """
        data = np.array([s])
        if np.random.uniform() < self.epsilon and flag:
            temp = self.model.predict([data, self.ISWeights])[0]
            a = np.argmax(temp)
        else:
            a = np.random.randint(0, self.n_actions)
        return a

    def save_model(self, file_path='multiCamerasSensing-v0-dqn.h5'):
        """Save the full online model to *file_path*."""
        print('model saved')
        self.model.save(file_path)

    def save_model_weights(self, file_path='dqn_weights.h5'):
        # NOTE(review): despite the name this saves the whole model via
        # model.save(), not just the weights — confirm intent.
        print('model weights saved')
        self.model.save(file_path)

    def remember(self, s, a, next_s, reward):
        """ store the transition (s, a, next_s, reward) in the replay buffer"""
        if self.prioritized:
            self.replay_queue.store((s, a, next_s, reward))
        else:
            self.replay_queue.append((s, a, next_s, reward))

    def train(self):
        """One training step: sample a batch, build TD targets, fit the net."""
        # copy the parameters of the real network to the target network
        if self.step % self.update_freq == 0:
            self.target_model.set_weights(self.model.get_weights())
        # sample from the experience buffer/memory
        if self.prioritized:
            tree_idx, replay_batch, self.ISWeights = self.replay_queue.sample(self.batch_size)
        else:
            replay_batch = random.sample(self.replay_queue, self.batch_size)
        # predict the q value
        s_batch = np.array([replay[0] for replay in replay_batch])
        next_s_batch = np.array([replay[2] for replay in replay_batch])
        Q = self.model.predict([s_batch, self.ISWeights])
        Q_eval4next = self.model.predict([next_s_batch, self.ISWeights])
        Q_next = self.target_model.predict([next_s_batch, self.ISWeights])
        Q_target = Q.copy()
        # update the target q value by formula:
        # Q_target[a] = r + gamma * Q_next (Double-DQN picks the next action
        # with the online net, evaluates it with the target net)
        for i, replay in enumerate(replay_batch):
            _, a, _, reward = replay
            if self.double_q:
                max_act4next = np.argmax(Q_eval4next[i])
                selected_q_next = Q_next[i, max_act4next]
            else:
                selected_q_next = np.amax(Q_next[i])
            Q_target[i][a] = reward + self.factor * selected_q_next
        if self.prioritized:
            abs_errors = np.sum(np.abs(Q - Q_target), axis=1)
            self.replay_queue.batch_update(tree_idx, abs_errors)  # update priority
        # train the network
        history = self.model.fit([s_batch, self.ISWeights], Q_target, batch_size=self.batch_size, verbose=0)
        self.loss.append(history.history['loss'][0])
        # anneal epsilon towards epsilon_max
        self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
        self.step += 1
| ThatDreamer/Multi-camera-perception | DQN/dqn.py | dqn.py | py | 6,339 | python | en | code | 0 | github-code | 13 |
1186039963 | #import shenanigans
import time
import sys
import random
import os
def cls():
    # Clear the terminal: 'cls' on Windows, 'clear' elsewhere
    os.system('cls' if os.name=='nt' else 'clear')
# game rules: each key beats the move it maps to
rules = {
    "rock": "scissors",
    "paper": "rock",
    "scissors": "paper",
}
# bot choices (menu number -> move)
choices_bot = {
    "1": "rock",
    "2": "paper",
    "3": "scissors"
}
# player choices (menu number -> move)
choices_pl = {
    "1": "rock",
    "2": "paper",
    "3": "scissors"
}
# Typewriter-style printing (idea adapted from a Stack Overflow answer).
def delay_print1(s):
    """Print *s* one character at a time with a 0.1 s pause per character."""
    for ch in s:
        print(ch, end="", flush=True)
        time.sleep(0.1)
# Compare the two moves and announce who wins.
def hra(human, bot):  # inputs are move names: "rock" / "paper" / "scissors"
    if bot == rules[human]:  # did the player's move beat the bot's?
        # rules[human] is the move that `human` beats
        print (f"The Bot chose {bot}, you chose {human}. {human} wins.")
        delay_print1("Bot: NOOOOO. I Lost!\n")
    elif human == rules[bot]:
        print (f"The Bot chose {bot}, you chose {human}. {bot} wins.")
        delay_print1("Bot: HAHA you fool! You thought you would outsmart me!\n")
    else:
        print (f"The Bot chose {bot}, you chose {human}. You both chose the same. It's a tie!")
        delay_print1("Bot: Well. That's awkward...\n")
# interactive part
def delay_print(s):
    """Print *s* one character at a time with a 0.25 s pause per character."""
    for ch in s:
        print(ch, end="", flush=True)
        time.sleep(0.25)
def delay_printch(s):
    """Print *s* one character at a time with a 0.15 s pause per character."""
    for ch in s:
        print(ch, end="", flush=True)
        time.sleep(0.15)
while True:
    # NOTE(review): hard-codes 'cls', so clearing fails on non-Windows
    # terminals even though cls() above handles both cases
    clear = lambda: os.system('cls')  # either clears the screen or errors, depending on the terminal (works in vscode)
    print("Hi player!")
    delay_print("WELCOME\n")
    print("to the awesome")
    time.sleep(0.5)
    delay_print("ROCK PAPER SCISSORS game©\n")  # bold/highlighted text was planned but dropped as too much work
    time.sleep(1)
    print("Now. let's begin. The rules are simple.")
    print("rock beats scissors, scissors beat paper, paper beats rock")
    print("Choices: ")
    delay_printch("1 = rock, 2 = paper, 3 = scissors (or type -quit- to quit)\n")
    time.sleep(0.5)
    choice = input("Now the fate of yours, lies in your hand. What will you choose?: ")
    if choice == ("quit"):
        print("Oke see ya later..")
        break
    else:
        try:
            a = int(choice)  # raises ValueError for non-numeric input
            b = str(choice)
            rnum = random.randint(1, 3)  # bot's move as a menu number
            rstrg = str(rnum)
            c = choices_pl[b]  # raises KeyError for numbers outside 1-3
            d = choices_bot[rstrg]
            print (f"Bot: So you chose {c}.")
            delay_print1(f"Bot: Well... I chose: {d}\n")
            hra(choices_pl[b], choices_bot[rstrg])
            time.sleep(5)
            clear()
        except ValueError:
            print("That was not a number!!! I am mad. Bye...")
            time.sleep(5)
            clear()
            break
        except KeyError:
            print(f"ey. 1 to 3 (or quit)... not {b}... You tried to fool me.... Bye... ")
            time.sleep(5)
            clear()
            break
| Thasimo1/Programming-files | RockPaperScissors-hw.py | RockPaperScissors-hw.py | py | 3,140 | python | en | code | 0 | github-code | 13 |
20236786830 | from django.shortcuts import render, redirect
from django.views.generic.base import TemplateView
def error_403(request, exception):
    """Custom handler for HTTP 403 (permission denied)."""
    return render(request, 'webpage/403.html', {})
def error_404(request, exception):
    """Custom handler for HTTP 404 (page not found)."""
    return render(request, 'webpage/404.html', {})
def error_500(request, exception=None):
    """Custom handler for HTTP 500 (server error); Django passes no exception here."""
    return render(request, 'webpage/500.html', {})
class IndexView(TemplateView):
    """Public landing page."""
    template_name = 'webpage/index.html'
class IndexManagerView(TemplateView):
    """Dispatch an authenticated user to their department's dashboard."""

    def get(self, request, *args, **kargs):
        """
        After user login, redirect for respective dashboard,
        depending on the department
        """
        # maps the two-letter User.department code to a URL-name suffix
        department = {
            'ad': 'administracao',
            'fi': 'financeiro',
            'se': 'secretaria',
            'pr': 'professor',
            're': 'responsavel',
            'al': 'aluno',
        }
        if request.user.is_authenticated:
            # NOTE(review): raises KeyError for an unexpected department
            # code — confirm all users carry one of the six codes above
            template = department[request.user.department]
            return redirect('index-{}'.format(template))
        else:
            return redirect('login')
        # return render(request, 'webpage/index.html')
| Antonio-Neves/Gestao-Escolar | webpage/views.py | views.py | py | 1,027 | python | en | code | 15 | github-code | 13 |
14210129572 | import json
import requests
# Command payload understood by the receiver's /execute_command endpoint.
command = {"command": "move_forward", "distance": 10}
json_command = json.dumps(command)
# NOTE(review): the JSON string is sent as a raw form body without a
# Content-Type header — confirm the server parses it as JSON.
response = requests.post("http://192.168.197.58:8000/execute_command", data=json_command)
if response.status_code == 200:
    print("Command executed successfully")
else:
    print("Command execution failed")
4616995088 | import requests
import pandas
from io import BytesIO
# DART (Korean corporate disclosure system) Excel export for one filing.
url= "https://dart.fss.or.kr/pdf/download/excel.do?rcp_no=20210817001883&dcm_no=8182806&lang=ko"
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36'}
resp = requests.get(url,headers = headers)
# open the downloaded workbook from memory
table = BytesIO(resp.content)
# sheets to export: consolidated balance sheet, income statement,
# comprehensive income statement (names are Korean)
pocket = ["연결 재무상태표", "연결 손익계산서", "연결 포괄손익계산서"]
for sheet in pocket:
    # the first 5 rows of each sheet are title/boilerplate rows
    data = pandas.read_excel(table, sheet_name=sheet,skiprows=5)
    data.to_csv(sheet+".csv", encoding="euc-kr")
| siilver94/Dart-Financial-Statement-Data-Collection-and-Analysis | python/src/etc/save-specific-cell.py | save-specific-cell.py | py | 616 | python | en | code | 1 | github-code | 13 |
43351717423 | import sys
from collections import defaultdict
# Read n shoe sizes from stdin and print the most frequent size;
# ties are broken in favour of the larger size.
n = int(sys.stdin.readline())
size_counts = defaultdict(int)
for _ in range(n):
    size_counts[int(sys.stdin.readline())] += 1

best_size, best_count = 0, 0
for size, count in size_counts.items():
    if count > best_count or (count == best_count and size > best_size):
        best_size, best_count = size, count

print(best_size)
7954884169 | '''Load all transcripts from the genome file,
select only for ncRNA that are confirmed
and store them in a FASTA file.'''
from Bio.SeqIO import parse
from collections import Counter

genome_filename = '/home/jlflores/Documents/Aptamer21/Genome/GCF_000001405.38_GRCh38.p12_rna.fna'
all_transcripts = list(parse(genome_filename, 'fasta'))

# RefSeq accession prefixes:
#   NM_ = protein-coding transcript        XM_ = predicted protein-coding
#   NR_ = non-protein-coding transcript    XR_ = predicted non-coding
# Counter replaces four dead counter variables (their int values were
# copied into a dict and never updated) and tolerates unexpected
# prefixes instead of raising KeyError.
prefix_counts = Counter(rna.id[:2] for rna in all_transcripts)
print(f'{len(all_transcripts)}\tTotal transcripts')
print(f'{prefix_counts["NM"]}\tmRNA')
print(f'{prefix_counts["XM"]}\tmRNA - Predicted')
print(f'{prefix_counts["NR"]}\tncRNA')
print(f'{prefix_counts["XR"]}\tncRNA - Predicted')
print('-------------------------------------------------------')

# Tally confirmed ncRNAs (NR_) by the transcript type given in the last
# comma-separated field of the FASTA description line.
ncrna_types = Counter(
    rna.description.rsplit(', ', 1)[-1]
    for rna in all_transcripts
    if rna.id.startswith('NR')
)
for rna_type in sorted(ncrna_types):
    print(f'{ncrna_types[rna_type]}\t{rna_type}')

# Write the confirmed ncRNAs out as FASTA.
# NOTE(review): the output filename says "mRNA" but the records written
# are NR_ (ncRNA); the name looks wrong — confirm before relying on it.
with open('GCF_all_mRNA.fa', 'w') as fp:
    for rna in all_transcripts:
        if rna.id.startswith('NR'):
            fp.write(f'>{rna.description}\n{str(rna.seq)}\n')
| jl-flores/udem-2018-bioinfo | genome-manip/script2_get_ncRNA.py | script2_get_ncRNA.py | py | 1,762 | python | en | code | 0 | github-code | 13 |
2930470087 | # -*- coding:utf-8 -*-
from luma.core.interface.serial import i2c, spi
from luma.core.render import canvas
from luma.core import lib
from luma.oled.device import sh1106
import RPi.GPIO as GPIO
import time
import subprocess
import os
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont

# Load default font (fixed 8px bitmap font).
font = ImageFont.load_default()

# Create blank image for drawing.
# Mode '1' is required for the 1-bit monochrome OLED.
width = 128
height = 64
image = Image.new('1', (width, height))

# Constants to allow easy resizing of shapes.
padding = -2
top = padding
bottom = height - padding

# Current x position for drawing; text is left-aligned at column 0.
x = 0

# Pin assignments (BCM numbering).
RST = 25
CS = 8
DC = 24
USER_I2C = 0  # set to 1 to drive the display over I2C instead of SPI

if USER_I2C == 1:
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(RST, GPIO.OUT)
    GPIO.output(RST, GPIO.HIGH)
    serial = i2c(port=1, address=0x3c)
else:
    serial = spi(device=0, port=0, bus_speed_hz=8000000, transfer_size=4096, gpio_DC=24, gpio_RST=25)

device = sh1106(serial, rotate=2)  # sh1106

HEX_DIR = '/home/pi/Desktop/HexFiles'
LINE_HEIGHT = 8  # pixels per text row with the default 8px font

try:
    while True:
        with canvas(device) as draw:
            # Redraw the file listing, one name per 8-pixel text row.
            curtop = top
            for entry in os.listdir(HEX_DIR):
                draw.text((x, curtop), entry, font=font, fill=255)
                curtop = curtop + LINE_HEIGHT
except KeyboardInterrupt:
    # Ctrl-C is the normal way to stop this demo — not an error.
    pass
except Exception:
    # The original bare `except:` hid every failure; at least note it.
    print("except")
finally:
    # Always release the GPIO pins, however the loop ended.
    GPIO.cleanup()
| mtulu-argeX/minoroller | bluetoothExample/exampleOled/screenDemo.py | screenDemo.py | py | 1,823 | python | en | code | 4 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.