| seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
292830537 |
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
#### runner technique: no extra buffer, but O(N^2) time
def deleteDuplicates1(head):
if not head:
return head
cur = head
while cur:
runner = cur
while runner.next:
if cur.val == runner.next.val:
runner.next = runner.next.next
else:
runner = runner.next
cur = cur.next
return head
##### single pass with a hash table: O(N) time, O(N) space
def deleteDuplicates2(head):
    if not head:
        return head
    cur = head
    check = {head.val: 1}
    while cur and cur.next:
        if cur.next.val not in check:
            check[cur.next.val] = 1
            cur = cur.next  # advance only when no node was removed
        else:
            cur.next = cur.next.next
    return head
def main():
    head, head.next, head.next.next = ListNode(1), ListNode(1), ListNode(2)
    head.next.next.next, head.next.next.next.next = ListNode(3), ListNode(3)
    res = deleteDuplicates2(head)
    # print every value in the deduplicated list, not just the head
    while res:
        print(res.val)
        res = res.next
if __name__ == '__main__':
main()
| shaniavina/Cracking-the-Coding-Interview_python | remove_dups.py | remove_dups.py | py | 1,060 | python | en | code | 0 | github-code | 90 |
6562507686 | from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from score.models import Score
def get_score(request, pk):
try:
data = Score.objects.get(user=pk)
r = {'success': True, 'id': data.user.pk, 'score': data.score}
except Score.DoesNotExist:
r = {'success': False, 'message': 'user not found'}
return JsonResponse(r)
def get_score_divided(request, pk):
try:
data = Score.objects.get(user=pk)
r = {'success': True, 'id': data.user.pk, 'score': data.score_divided}
except Score.DoesNotExist:
r = {'success': False, 'message': 'user not found'}
return JsonResponse(r)
@csrf_exempt
def set_score(request, pk):
print(request.POST)
try:
data = Score.objects.get(user=pk)
data.score = request.POST['score']
data.score_divided = int(request.POST['score'])/2
data.save()
r = {
'message': f'score for user {data.user.pk} successfully edited.',
'success': True,
'id': data.user.pk,
'score': data.score,
}
    except Score.DoesNotExist:
        r = {'success': False, 'message': 'user not found'}
    except (KeyError, ValueError):
        # the POST body may be missing a usable 'score' value
        r = {'success': False, 'message': 'missing or invalid score value'}
return JsonResponse(r)
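# A minimal urls.py sketch for wiring these views; the route patterns below
# are assumptions for illustration, not taken from this project:
#
#   from django.urls import path
#   from score import views
#
#   urlpatterns = [
#       path('score/<int:pk>/', views.get_score),
#       path('score/<int:pk>/divided/', views.get_score_divided),
#       path('score/<int:pk>/set/', views.set_score),
#   ]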
| juwaini/pacer | score/views.py | views.py | py | 1,231 | python | en | code | 0 | github-code | 90 |
1399130545 |
import os
import warnings

import numpy as np
from matplotlib import pyplot as plt
from sklearn import metrics

import tensorflow as tf
from tensorflow import keras

warnings.simplefilter(action='ignore', category=FutureWarning)
################ PARAMETERS ########################
path = 'G7_dataset/'
print (path)
x_gesture= []
x_no_gesture= []
sweeps=50
samples=207
##############
# Load each gesture class: reshape every sweep file to (sweeps, samples)
# and max-normalise it per sample
def load_class(folder):
    data = []
    for fname in os.listdir(path + folder + '/'):
        if fname.endswith('.npy'):
            gesture = np.load(path + folder + '/' + fname)
            gesture = gesture.reshape((sweeps, samples))
            gesture = gesture / np.max(gesture)
            data.append(np.array(gesture))
    return data

for folder in ['button_press', 'finger_slide', 'hand_away',
               'hand_closer', 'swipe_left', 'swipe_right']:
    x_gesture.extend(load_class(folder))
x_no_gesture.extend(load_class('no_gesture'))
###################### npy array ################
x_gesture = np.array(x_gesture)
x_no_gesture = np.array(x_no_gesture)
print(x_gesture.shape)
print(x_no_gesture.shape)
############## split into train and test ###############
from sklearn.model_selection import train_test_split
x_gesture_train, x_gesture_test = train_test_split(
x_gesture, test_size=0.20, random_state=42)
encoder_input = keras.Input(shape=(sweeps,samples), name='ges')
print(encoder_input)
x = keras.layers.Flatten()(encoder_input)
print(x)
encoder_output = keras.layers.Dense(64, activation="relu")(x)
print(encoder_output)
encoder = keras.Model(encoder_input, encoder_output, name='encoder')
print(encoder)
decoder_input = keras.layers.Dense(64, activation="relu")(encoder_output)
print(decoder_input)
x = keras.layers.Dense((sweeps*samples), activation="relu")(decoder_input)
print(x)
decoder_output = keras.layers.Reshape((sweeps, samples))(x)
print(decoder_output)
opt = tf.keras.optimizers.Adam(learning_rate=0.001)  # the `lr=`/`decay=` kwargs are deprecated
print(opt)
autoencoder = keras.Model(encoder_input, decoder_output, name='autoencoder')
print(autoencoder )
autoencoder.summary()
autoencoder.compile(opt, loss='mse')
history = autoencoder.fit(x_gesture_train,
x_gesture_train,
epochs=100,
batch_size=32, validation_split=0.10
)
autoencoder.save("ae.h5")
plt.plot(history.history['loss'], label='Training loss')
plt.plot(history.history['val_loss'], label='Validation loss')
plt.legend()
plt.show()
# Reconstruction error on a no-gesture sample: pair the prediction and the
# target from the same array (expected to be high, since the autoencoder
# was trained on gestures only)
pred1 = autoencoder.predict(x_no_gesture)[30]
score1 = np.sqrt(metrics.mean_squared_error(pred1, x_no_gesture[30]))
print(score1)
# Reconstruction error on a gesture sample (expected to be low)
pred2 = autoencoder.predict(x_gesture)[30]
score2 = np.sqrt(metrics.mean_squared_error(pred2, x_gesture[30]))
print(score2)
trainPredict = autoencoder.predict(x_no_gesture)
trainMAE = np.mean(np.abs(trainPredict - x_no_gesture), axis=1)
plt.hist(trainMAE, bins=30)
plt.show()
testPredict = autoencoder.predict(x_gesture_test)
testMAE = np.mean(np.abs(testPredict - x_gesture_test), axis=1)
plt.hist(testMAE, bins=30)
plt.show()
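# A minimal sketch of turning the reconstruction errors above into an anomaly
# flag; the threshold rule (mean + 3*std of the gesture-test MAE) is an
# assumed convention for illustration, not something this script prescribes.
threshold = testMAE.mean() + 3 * testMAE.std()   # typical gesture error level
no_gesture_mae = trainMAE.mean(axis=1)           # one score per no-gesture window
anomalies = no_gesture_mae > threshold
print("threshold: %.4f" % threshold)
print("no-gesture windows flagged as anomalous: %d / %d" % (anomalies.sum(), len(anomalies)))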
#######################
| fromwaseem/mmWave | Python/002-auto_encoder/gestures_classification_autoencoder.py | gestures_classification_autoencoder.py | py | 6,773 | python | en | code | 0 | github-code | 90 |
43042358873 | import discord
from random import randint, shuffle
from scenes import scenes, images
from discord.ext import commands
import config
bot = commands.Bot(command_prefix=config.prefix)
players_list = []
gameinfo = {"started":False, "map":None, 'spy':None}
@bot.command()
async def ping(ctx):
await ctx.send('Pong! {0}'.format(round(bot.latency, 1)))
@bot.command()
async def join(ctx):
if ctx.channel.type is discord.ChannelType.private:
return
if gameinfo["started"]:
await ctx.send('Game not started!')
return
if len(players_list) >= 8:
await ctx.send('Maximum players reached!')
return
for x in players_list:
if x.id == ctx.author.id:
await ctx.send(f'You already joined the match!')
return
players_list.append(ctx.author)
await ctx.send(f'Now have {len(players_list)} players')
@bot.command()
async def start(ctx):
if ctx.channel.type is discord.ChannelType.private:
return
if gameinfo["started"]:
await ctx.send('Game not started!')
return
if len(players_list) < 3:
await ctx.send(f'Only have {len(players_list)} players, minimum of 3 players!')
return
message = "lista de jogadores:\n"
for y, x in enumerate(players_list):
message+= f"{y+1} - <@{x.id}>\n"
await ctx.send(message)
random_map = randint(0, (len(scenes)-1))
funcoes = []
players_ingame = players_list.copy()
    for x, item in enumerate(scenes.items()):
        y, z = item
        if x == random_map:
            gameinfo["map"] = y
            funcoes = z.copy()
shuffle(players_ingame)
shuffle(funcoes)
funcoes = iter(funcoes)
gameinfo["started"] = True
for n,x in enumerate(players_ingame):
if n == 0:
if images["Spy"]:
await x.send(f"Spy, find out where you are!", file=discord.File(images["Spy"]))
else:
await x.send(f"Spy, find out where you are!")
gameinfo["spy"] = x.id
continue
funcao = next(funcoes)
if images[gameinfo["map"]]:
await x.send(f'Place: {gameinfo["map"]}!\nFunction: {funcao}!',file=discord.File(images[gameinfo["map"]]))
else:
await x.send(f'Place: {gameinfo["map"]}!\nFunction: {funcao}!')
@bot.command()
async def players(ctx):
if ctx.channel.type is discord.ChannelType.private:
return
message = "Players list:\n"
for y, x in enumerate(players_list):
message+= f"{y+1} - <@{x.id}>\n"
await ctx.send(message)
@bot.command()
async def stop(ctx):
global players_list
global gameinfo
if ctx.channel.type is discord.ChannelType.private:
return
if not gameinfo["started"]:
await ctx.send('Game not started!')
return
await ctx.send(f'Place: {gameinfo["map"]}\n Spy: <@{gameinfo["spy"]}>')
gameinfo["started"] = False
gameinfo["map"] = None
gameinfo["spy"] = None
players_list = []
@bot.command()
async def places(ctx):
message = "Places:\n"
counter = 0
for x,y in scenes.items():
counter += 1
message += f'{counter} - {x}\n'
await ctx.send(message)
# Events
@bot.event
async def on_ready():
await bot.change_presence(activity=discord.Streaming(name="Github:", url="https://github.com/LeonardoSola"))
print('Ready!')
bot.run(config.token)
| LeonardoSola/SpyFall-discord | index.py | index.py | py | 3,432 | python | en | code | 1 | github-code | 90 |
9972254848 | import os
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
def main():
""" Machine Learning Dataset Explorer"""
st.title("Machine Learning Dataset Explorer")
st.subheader("Simple Data Science Explorer with Streamlit")
html_temp = """
<div style="background-color:tomato;">
    <p style="color:white; font-size: 50px">Random phrase</p>
<div>
"""
st.markdown(html_temp,unsafe_allow_html=True)
def file_selector(folder_path='.'):
filenames = os.listdir(folder_path)
        selected_filename = st.selectbox("Choose a file", filenames)
return os.path.join(folder_path,selected_filename)
filename = file_selector()
    st.info("You chose {}".format(filename))
    # Read the data
    df = pd.read_csv(filename)

    # Show the dataset
    if st.checkbox("Show dataset"):
        number = st.number_input("Number of rows to display", 5, 10)
        st.dataframe(df.head(number))

    # Show column names
    if st.button("Column names"):
        st.write(df.columns)

    # Show the dataset shape
    if st.checkbox("Dataset shape"):
        st.write(df.shape)
        data_dim = st.radio("Show Dimension By", ("Rows", "Columns"))
        if data_dim == 'Columns':
            st.text("Number of columns")
            st.write(df.shape[1])
        elif data_dim == "Rows":
            st.text("Number of rows")
            st.write(df.shape[0])
        else:
            st.write(df.shape)

    # Select columns
    if st.checkbox("Select the desired columns"):
        all_columns = df.columns.tolist()
        selected_columns = st.multiselect("Choose", all_columns)
        new_df = df[selected_columns]
        st.dataframe(new_df)

    # Show value counts
    if st.button("Value counts"):
        st.text("Values per class")
        st.write(df.iloc[:, 0].value_counts())   # residents
        st.write(df.iloc[:, 1].value_counts())   # elderly
        st.write(df.iloc[:, -1].value_counts())  # children
        st.write(df.iloc[:, -2].value_counts())  # families

    # Show dtypes
    if st.button("DataTypes"):
        st.write(df.dtypes)

    # Show a summary
    if st.checkbox("Summary"):
        st.write(df.describe().T)

    # Data visualisation
    st.subheader("Data visualisation")

    # Correlation heatmap (Seaborn)
    if st.checkbox("Seaborn Plot"):
        st.write(sns.heatmap(df.corr(), annot=True))
        st.pyplot()
#Count plot
if st.checkbox("Plot of Value Counts"):
st.text("Value Counts By Target")
all_columns_names = df.columns.tolist()
primary_col = st.selectbox("Primary Columm to GroupBy",all_columns_names)
selected_columns_names = st.multiselect("Select Columns",all_columns_names)
if st.button("Plot"):
st.text("Generate Plot")
if selected_columns_names:
vc_plot = df.groupby(primary_col)[selected_columns_names].count()
else:
vc_plot = df.iloc[:,-1].value_counts()
st.write(vc_plot.plot(kind="bar"))
st.pyplot()
    # Pie chart
    if st.checkbox("Pie Plot"):
        all_columns_names = df.columns.tolist()
        selected_column = st.selectbox("Select the desired column", all_columns_names)
        if st.button("Generate Pie Plot"):
            st.success("Generating a Pie Plot")
            st.write(df[selected_column].value_counts().plot.pie(autopct="%1.1f%%"))
            st.pyplot()

    # Custom plot
    st.subheader("Custom Plot")
    all_columns_names = df.columns.tolist()
    type_of_plot = st.selectbox("Select the plot type", ['area', 'bar', 'line', 'hist', 'box', 'kde'])
    selected_columns_names = st.multiselect("Select columns", all_columns_names)
    if st.button("Generate Plot"):
        st.success("Generating a {} plot for {}".format(type_of_plot, selected_columns_names))
if type_of_plot == 'area':
cust_data = df[selected_columns_names]
st.area_chart(cust_data)
elif type_of_plot == 'bar':
cust_data = df[selected_columns_names]
st.bar_chart(cust_data)
elif type_of_plot == 'line':
cust_data = df[selected_columns_names]
st.line_chart(cust_data)
elif type_of_plot:
cust_plot = df[selected_columns_names].plot(kind=type_of_plot)
st.write(cust_plot)
st.pyplot()
if __name__ == '__main__':
main()
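# Usage note (assumed, not part of the original script): launch the app with
#   streamlit run data_explorer.py
# from a folder containing CSV files; file_selector() lists the current
# working directory by default.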
| GabrielaDS11/Domotica-Assistiva | Streamlit/data_explorer.py | data_explorer.py | py | 4,670 | python | en | code | 0 | github-code | 90 |
70508400618 | import pickle
import socket
HOST = '127.0.0.1'
PORT = 65433
def run_receiver():
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((HOST, PORT))
        s.listen()
        conn, addr = s.accept()
        with conn:
            # A single recv() works here only because the payload is small;
            # a robust receiver would loop until the peer closes the socket.
            data = conn.recv(2**20)
            # Only unpickle data received from a trusted peer.
            return pickle.loads(data)
def run_sender(obj):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((HOST, PORT))
        s.sendall(pickle.dumps(obj))  # sendall() retries until every byte is written
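# A minimal sketch of exercising the pair above from one process; running the
# receiver in a thread (and the sleep) are illustration-only assumptions --
# the two functions would normally live in separate processes.
if __name__ == '__main__':
    import threading
    import time

    result = {}
    t = threading.Thread(target=lambda: result.setdefault('obj', run_receiver()))
    t.start()
    time.sleep(0.2)  # crude way to let the receiver bind and listen first
    run_sender({'answer': 42})
    t.join()
    print(result['obj'])  # -> {'answer': 42}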
| Inexpediency/itmo-pt-assignments | 04-files/practice/transfer/transfer_object.py | transfer_object.py | py | 475 | python | en | code | 1 | github-code | 90 |
29327657424 | import json
import sys
from math import sqrt
def load_data(file_path):
with open(file_path, 'r', encoding='utf-8') as open_file:
return json.load(open_file)
def get_biggest_bar(bars):
biggest_bar = max(
bars,
key=lambda bar: bar['properties']['Attributes']['SeatsCount'],
)
return biggest_bar
def get_smallest_bar(bars):
smallest_bar = min(
bars,
key=lambda bar: bar['properties']['Attributes']['SeatsCount'],
)
return smallest_bar
def calc_distance(x1, y1, x2, y2):
distance = sqrt(((x2 - x1) ** 2) + ((y2 - y1) ** 2))
return distance
def get_closest_bar(bars, longitude, latitude):
nearest_bar = min(
bars,
key=lambda restaurant:
calc_distance(
longitude,
latitude,
restaurant['geometry']['coordinates'][0],
restaurant['geometry']['coordinates'][1],
),
)
return nearest_bar
if __name__ == '__main__':
    try:
        user_file_path = sys.argv[1]
        bars = load_data(user_file_path)['features']
        custom_coordinates = [float(point) for point in input(
            '\nEnter the coordinates of your current '
            'location, separated by a space: \n'
        ).split(' ')]
        user_longitude, user_latitude = custom_coordinates
    except (json.decoder.JSONDecodeError, FileNotFoundError, IndexError):
        exit('Invalid JSON file or missing file path argument')
    except (ValueError, TypeError):
        exit('\nInvalid coordinate format.'
             '\nExample of correct input: '
             '"37.635709999610896 55.805575000158512" ')
    print(
        'The biggest bar:',
        get_biggest_bar(bars)['properties']['Attributes']['Name'],
    )
    print(
        'The smallest bar:',
        get_smallest_bar(bars)['properties']['Attributes']['Name'],
    )
    the_closest_bar = get_closest_bar(
        bars,
        user_longitude,
        user_latitude,
    )['properties']['Attributes']['Name']
    print('The closest bar:', the_closest_bar)
| AlekseyLeskin/3_bars | bars.py | bars.py | py | 2,190 | python | en | code | null | github-code | 90 |
18527183839 | def main():
N, C = (int(i) for i in input().split())
D = [[int(i) for i in input().split()] for j in range(C)]
c = [[int(i) for i in input().split()] for j in range(N)]
    # cell (i, j) belongs to residue class ((i+1)+(j+1)) % 3
    base = [[((i+1)+(j+1)) % 3 for j in range(N)] for i in range(N)]
    # d[r][c] = number of cells of residue class r currently painted colour c
    d = [{i: 0 for i in range(1, C + 1)} for j in range(3)]
for i in range(N):
for j in range(N):
d[base[i][j]][c[i][j]] += 1
ans = 1 << 60
from itertools import combinations, permutations
for comb in combinations(range(1, C+1), 3):
for p in permutations(comb):
cur = 0
for i in range(3):
for k, v in d[i].items():
cur += D[k-1][p[i]-1] * v
ans = min(ans, cur)
print(ans)
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p03330/s668850972.py | s668850972.py | py | 783 | python | en | code | 0 | github-code | 90 |
18069227479 | n,m = map(int,input().split())
x_li = [0]*m
y_li = [0]*m
for i in range(m):
x_li[i],y_li[i]=map(int,input().split())
map_li = [1]*n
red = [False]*n
red[0]=True
for i in range(m):
x,y = x_li[i],y_li[i]
x,y = x-1,y-1
if red[x] and map_li[x]>1:
red[y]=True
elif red[x]:
red[y]=True
red[x]=False
map_li[x]-=1
map_li[y]+=1
#print(map_li)
print(sum(red))
| Aasthaengg/IBMdataset | Python_codes/p04034/s264615963.py | s264615963.py | py | 402 | python | en | code | 0 | github-code | 90 |
6127722990 | import numpy as np
import sys
def load_binary_data(filename, dtype=np.float32):
"""
We assume that the data was written
with write_binary_data() (little endian).
"""
f = open(filename, "rb")
data = f.read()
f.close()
_data = np.frombuffer(data, dtype)
if sys.byteorder == 'big':
_data = _data.byteswap()
return _data
def calc_FT_cube(incube, x, y, z, invert=False):
"""
Function to FT cube and calculate k axes.
Args:
incube: 3D input cube in image space.
x: 1D array of x-coordinates. Assumed to be in Mpc and evenly spaced.
y: 1D array of y-coordinates. Assumed to be in Mpc and evenly spaced.
z: 1D array of z-coordinates. Assumed to be in Mpc and evenly spaced.
invert: Invert FT (go back to image space). Default False.
Returns:
FT_cube: 3D cube, the fourier transform of incube. No jacobian is applied.
kx: 1D array of kx coordinates. Units Mpc^-1.
ky: 1D array of ky coordinates. Units Mpc^-1.
kz: 1D array of kz coordinates. Units Mpc^-1.
"""
if invert:
FT_cube = np.fft.ifftn(np.fft.ifftshift(incube))
else:
FT_cube = np.fft.fftshift(np.fft.fftn(incube))
# Get k-axes
dkx = 2 * np.pi / (x.max() - x.min())
dky = 2 * np.pi / (y.max() - y.min())
dkz = 2 * np.pi / (z.max() - z.min())
kx = dkx * (np.arange(len(x)) - len(x) / 2)
ky = dky * (np.arange(len(y)) - len(y) / 2)
kz = dkz * (np.arange(len(z)) - len(z) / 2)
return FT_cube, kx, ky, kz
def calc_PS_3d(incube, x, y, z):
"""
Function to calculate 3D power spectrum from an input cube
Args:
incube: 3D input cube in image space.
x: 1D array of x-coordinates. Assumed to be in Mpc and evenly spaced.
y: 1D array of y-coordinates. Assumed to be in Mpc and evenly spaced.
z: 1D array of z-coordinates. Assumed to be in Mpc and evenly spaced.
Returns:
PS: 3D power spectrum. If inputcube is in mK, PS is in mK^2 Mpc^3
kx: 1D array of kx coordinates. Units Mpc^-1.
ky: 1D array of ky coordinates. Units Mpc^-1.
kz: 1D array of kz coordinates. Units Mpc^-1.
"""
# Get 3D PS
PS, kx, ky, kz = calc_FT_cube(incube, x, y, z)
jacobian = np.mean(np.diff(x)) * np.mean(np.diff(y)) * np.mean(np.diff(z))
PS = np.abs(jacobian * PS)**2. / (x.max() - x.min()) / (y.max() - y.min()) / (z.max() - z.min())
return PS, kx, ky, kz
def calc_PS_1d(incube, x, y, z, k_bin=1):
"""
Function to calculate 1D power spectrum from an input cube
Args:
incube: 3D input cube in image space.
x: 1D array of x-coordinates. Assumed to be in Mpc and evenly spaced.
y: 1D array of y-coordinates. Assumed to be in Mpc and evenly spaced.
z: 1D array of z-coordinates. Assumed to be in Mpc and evenly spaced.
k_bin: Factor by which to bin up k. Default 1.
Returns:
PS: 1D power spectrum. If inputcube is in mK, PS is in mK^2 Mpc^3
k: 1D array of k coordinates. Units Mpc^-1.
"""
# Get 3D PS
PS_3d, kx, ky, kz = calc_PS_3d(incube, x, y, z)
# Get k matrix
kxmat, kymat, kzmat = np.meshgrid(kx, ky, kz, indexing='ij')
kmat = np.sqrt(kxmat**2 + kymat**2 + kzmat**2)
# Form output axis
dk = np.mean([np.mean(np.diff(kx)), np.mean(np.diff(ky)), np.mean(np.diff(kz))]) * k_bin
k = np.arange(0, kmat.max(), dk)
k_inds = np.digitize(kmat, k - 0.5 * dk)
    # Bin the PS (digitize() indices run from 1 to len(k), so loop over that range)
    PS = np.zeros(len(k))
    for i in range(1, len(k) + 1):
        ind = np.where(k_inds == i)
        if len(ind[0]) == 0:
            continue
        PS[i - 1] = np.mean(PS_3d[ind])
    return PS, k
def calc_PS_2d(incube, x, y, z, kperp_bin=1, kpar_bin=1):
"""
Function to calculate 2D power spectrum from an input cube
Args:
incube: 3D input cube in image space.
x: 1D array of x-coordinates. Assumed to be in Mpc and evenly spaced.
y: 1D array of y-coordinates. Assumed to be in Mpc and evenly spaced.
z: 1D array of z-coordinates. Assumed to be in Mpc and evenly spaced.
kperp_bin: Factor by which to bin up kperp. Default 1.
kpar_bin: Factor by which to bin up kpar. Default 1.
Returns:
PS: 2D power spectrum. If inputcube is in mK, PS is in mK^2 Mpc^3
kperp: 1D array of kperp coordinates. Units Mpc^-1.
kpar: 1D array of kpar coordinates. Units Mpc^-1.
"""
# Get 3D PS
PS_3d, kx, ky, kz = calc_PS_3d(incube, x, y, z)
# Get kperp matrix
kxmat, kymat = np.meshgrid(kx, ky, indexing='ij')
kperpmat = np.sqrt(kxmat**2 + kymat**2)
# Form output axes
dkperp = np.mean([np.mean(np.diff(kx)), np.mean(np.diff(ky))]) * kperp_bin
kperp = np.arange(0, kperpmat.max(), dkperp)
dkpar = np.mean(np.diff(kz)) * kpar_bin
kz = np.abs(kz) # Fold over
kpar = np.arange(0, kz.max(), dkpar)
kperpinds = np.digitize(kperpmat, kperp - 0.5 * dkperp)
kparinds = np.digitize(kz, kpar - 0.5 * dkpar)
    kpar_ind_lookup = []
    for j in range(1, len(kpar) + 1):  # digitize() indices run from 1 upward
        ind_par = np.where(kparinds == j)[0]
        kpar_ind_lookup.append(ind_par)
    # Bin the PS
    PS = np.zeros((len(kperp), len(kpar)))
    for i in range(1, len(kperp) + 1):
        ind_perp = np.where(kperpinds == i)
        if len(ind_perp[0]) == 0:
            continue
        for j in range(1, len(kpar) + 1):
            ind_par = kpar_ind_lookup[j - 1]
            if len(ind_par) == 0:
                continue
            # average over every matching (kx, ky) pixel, not just the first one
            PS[i - 1, j - 1] = np.mean(PS_3d[ind_perp][:, ind_par])
    return PS, kperp, kpar
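# A minimal usage sketch (assumed, not part of the original module): white
# noise on a 100 Mpc box reduced to a 1D power spectrum.
if __name__ == '__main__':
    n = 32
    axis = np.linspace(0.0, 100.0, n)        # Mpc
    cube = np.random.normal(size=(n, n, n))  # mK
    ps, k = calc_PS_1d(cube, axis, axis, axis, k_bin=2)
    print(k[:5])   # Mpc^-1
    print(ps[:5])  # mK^2 Mpc^3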
| tyler-a-cox/cross-correlation | twentyonecmFAST.py | twentyonecmFAST.py | py | 5,608 | python | en | code | 0 | github-code | 90 |
18289121529 | from collections import deque
from copy import deepcopy
h,w = map(int,input().split())
s = [list(input()) for i in range(h)]
t = ((0,1),(1,0),(-1,0),(0,-1))
m = 0
for sy in range(h):
for sx in range(w):
if s[sy][sx] == "#":
continue
ss = deepcopy(s)
ss[sy][sx] = "#"
q = deque([(0,sy,sx)])
max_cost = 0
my,mx = 0,0
while(q):
cost,y,x = q.popleft()
max_cost = max(max_cost,cost)
cost += 1
for i,j in t:
ny = y+i
nx = x+j
if 0 <= ny < h and 0 <= nx < w:
if ss[ny][nx] == ".":
q.append((cost,ny,nx))
ss[ny][nx] = "#"
m = max(m,max_cost)
print(m)
| Aasthaengg/IBMdataset | Python_codes/p02803/s665606996.py | s665606996.py | py | 787 | python | en | code | 0 | github-code | 90 |
22666407738 | """
File containing all the code related to
database management.
**************
Unless stated otherwise, all the code
written in this file was written by
Romain Gascoin.
"""
import random
import sqlite3
# Create the connection to the database
conn = sqlite3.connect("locale/sudoku.db")
c = conn.cursor()
def commit():
"""
    Commit through a single helper function.
    By Guilian Celin-Davanture
"""
conn.commit()
def creer_table_grilles():
"""
    Create the table that will hold the different grids.
    By Romain Gascoin
"""
c.executescript("""
CREATE TABLE IF NOT EXISTS grilles(
id_grille INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
grille TEXT NOT NULL UNIQUE,
difficulte TEXT NOT NULL
)""")
commit()
def creer_table_joueurs():
"""
    Create the table that will hold information about the players.
    By Romain Gascoin
"""
c.executescript("""
CREATE TABLE IF NOT EXISTS joueurs(
id_joueur INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
pseudo TEXT NOT NULL UNIQUE,
mot_de_passe TEXT
)""")
commit()
def creer_table_grilles_resolues():
"""
    Create the grilles_resolues relation table between grids and
    players, holding a given player's information about a given grid.
    By Romain Gascoin
"""
    # Moved the FOREIGN KEY declarations to the end so that this works (Guilian Celin-Davanture)
c.executescript("""
CREATE TABLE IF NOT EXISTS grilles_resolues(
id_relation INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
id_grille_resolue INTEGER,
id_grille_joueur INTEGER,
reussite TEXT,
FOREIGN KEY(id_grille_resolue) REFERENCES grilles(id_grille),
FOREIGN KEY(id_grille_joueur) REFERENCES joueurs(id_joueur)
)""")
commit()
def ajouter_grille():
"""
    Add a grid manually.
    By Romain Gascoin
"""
    inp_grille = input("Enter a series of 81 digits (preceded by a '$' if immutable (i.e. cannot be changed while playing)) or a '&' meaning an empty cell")
    inp_diff = input("Enter a difficulty")
c.execute(f"""
INSERT INTO grilles
(grille, difficulte)
VALUES(?, ?)
""", [inp_grille, inp_diff])
commit()
def ajouter_joueur(username, password):
c.execute(f"""
INSERT INTO joueurs
(pseudo, mot_de_passe)
VALUES(?, ?)
""", [username, password])
commit()
def ajouter_grilles_resolues(id_grille, id_joueur, reussite):
c.execute(f"""
INSERT INTO grilles_resolues
(id_grille_resolue, id_grille_joueur, reussite)
VALUES(?, ?, ?)
""", [id_grille, id_joueur, reussite])
commit()
def fetch_player_id(username):
return c.execute(f"""
SELECT id_joueur FROM joueurs WHERE pseudo = ?
""", [username,]).fetchone()[0]
def fetch_all_grids_from_player(user_id):
"""
    Return all the grids done by a player.
    By Guilian Celin-Davanture
"""
return c.execute(f"""
SELECT * FROM grilles WHERE id_grille in (SELECT id_grille_resolue FROM grilles_resolues WHERE id_grille_joueur = {user_id})
""").fetchall()
def fetch_password_from_username(username):
"""
    Return the password associated with the username,
    or None if no user with that name exists.
    By Guilian Celin-Davanture
"""
return c.execute(f"""
SELECT mot_de_passe FROM joueurs WHERE pseudo = ?
""", [username,]).fetchone()[0]
def create_relation_username_grille_id(username, grille_id):
"""
    Create a relation between a user and the grid
    designated by the given id.
    By Guilian Celin-Davanture
"""
userid = c.execute(f"""
SELECT id_joueur FROM joueurs WHERE pseudo = ?
""", [username,]).fetchone()[0]
    # The "reussite" column will hold a character string
    # corresponding to a specific version.
ajouter_grilles_resolues(grille_id, userid, 'vxb1reussie')
def fetch_all_grids():
"""
    Return a list of all available, playable grids
    (whose difficulty differs from 'debug', that 'difficulty'
    marking an impossible grid used only for debugging).
    By Guilian Celin-Davanture
"""
return c.execute(f"""
SELECT id_grille, grille, difficulte FROM grilles WHERE difficulte <> 'debug'
""").fetchall()
def has_grid_been_done_by(grille_id, username):
"""
    Return True if the grid has been done by the player.
    By Guilian Celin-Davanture
"""
userid = c.execute(f"""
SELECT id_joueur FROM joueurs WHERE pseudo = ?
""", [username,]).fetchone()[0]
grids = fetch_all_grids_from_player(userid)
for grid in grids:
id, _, _ = grid
if id == grille_id:
return True
return False
def fetch_random_grid_with_difficulty(diff):
"""
    Return a random grid with the chosen difficulty.
    By Guilian Celin-Davanture
"""
grids = c.execute(f"""
SELECT * FROM grilles WHERE difficulte = ?
""", [diff,]).fetchall()
grid = random.choice(grids)
return grid
def fetch_all_grids_with_difficulty(diff):
"""
    Return all the grids with the chosen difficulty.
    By Guilian Celin-Davanture
"""
grids = c.execute(f"""
SELECT * FROM grilles WHERE difficulte = ?
""", [diff,]).fetchall()
return grids
if __name__ == '__main__':
curseur = c
inp = input('Drop table ? (grilles/joueurs/grilles_resolues)')
if inp != '':
curseur.execute(f'DROP TABLE {inp}')
commit()
    # Create the tables if they don't exist
creer_table_grilles()
creer_table_joueurs()
creer_table_grilles_resolues()
allrows = [
curseur.execute('SELECT * FROM grilles').fetchall(),
curseur.execute('SELECT * FROM joueurs').fetchall(),
curseur.execute('SELECT * FROM grilles_resolues').fetchall()
]
for rows in allrows:
print(len(rows))
for row in rows:
print(row)
print()
try:
while True:
            inp = input('Press <Enter> to insert, otherwise eval():')
if inp == '':
ajouter_grille()
else:
eval(inp)
commit()
except KeyboardInterrupt:
commit()
        curseur.close()
| GuilianCD/nsi-sudoku | database.py | database.py | py | 5,844 | python | fr | code | 1 | github-code | 90 |
4364269282 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import torch
from transformers import BertTokenizer
def precition(text):
    PRETRAINED_MODEL_NAME = "bert-base-cased"  # pretrained BERT-base model (English, cased)
    # Get the tokenizer used by this pretrained model
    tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)
    tokens = tokenizer.tokenize(text)
    ids = tokenizer.convert_tokens_to_ids(tokens)
    # locate [MASK] via the tokenizer instead of hard-coding its id (103)
    position = ids.index(tokenizer.mask_token_id)
"""
    Load the trained masked language model and predict the token at the [MASK] position.
"""
from transformers import BertForMaskedLM
    # Besides the tokens we also need the sentence's segment ids
tokens_tensor = torch.tensor([ids]) # (1, seq_len)
segments_tensors = torch.zeros_like(tokens_tensor) # (1, seq_len)
maskedLM_model = BertForMaskedLM.from_pretrained(PRETRAINED_MODEL_NAME)
    # Use the masked LM to estimate the actual token at the [MASK] position
maskedLM_model.eval()
with torch.no_grad():
outputs = maskedLM_model(tokens_tensor, segments_tensors)
predictions = outputs[0]
# (1, seq_len, num_hidden_units)
del maskedLM_model
    # Take the top-k most likely tokens from the distribution at the [MASK] position
masked_index = position
k = 5
probs, indices = torch.topk(torch.softmax(predictions[0, masked_index], -1), k)
predicted_tokens = tokenizer.convert_ids_to_tokens(indices.tolist())
result = {
'input' : tokens,
'predictions' : predicted_tokens,
'Accuracy' : []
}
for p in probs:
result['Accuracy'].append(p.item()*100)
return result
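# A minimal usage sketch (assumed): the input must contain a literal [MASK]
# token for the ids.index(...) lookup above to find a position.
if __name__ == '__main__':
    out = precition("The capital of France is [MASK] .")
    for token, acc in zip(out['predictions'], out['Accuracy']):
        print("%s\t%.2f%%" % (token, acc))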
| qwe8989785/Nkust_EnglishProject | EPWeb/index/code/bertTest.py | bertTest.py | py | 1,563 | python | en | code | 0 | github-code | 90 |
41613067651 | __author__ = 'anthonymace'
import datetime
def get_hours_from_each_day():
hours_list = []
for week in range(1, 3):
number_of_days = int(input("How many days did you work week {}? ".format(week)))
week_hours = []
for day in range(1, number_of_days + 1):
hours = input("Enter your hours from day #{}: ".format(day))
week_hours.append(hours)
hours_list.append(week_hours)
return hours_list
def get_hours_from_week():
week_one_hours = input("Enter your hours from week 1: ")
week_two_hours = input("Enter your hours from week 2: ")
return week_one_hours, week_two_hours
def split_hours_and_add(hours_list):
hours_total = 0
minutes_total = 0
for weeks in hours_list:
for day_hours in weeks:
hours_total += add_hours_from_list(day_hours)
minutes_total += add_minutes_from_list(day_hours)
converted_minutes_to_hours = convert_minutes(minutes_total)
hours_total += converted_minutes_to_hours[0]
minutes_total = converted_minutes_to_hours[1]
return hours_total, minutes_total
def add_hours_from_list(hour_string):
hour = hour_string.split(':')
return int(hour[0])
def add_minutes_from_list(hour_string):
minutes = hour_string.split(':')
return int(minutes[1])
def convert_minutes(mins):
    # always return an (hours, minutes) tuple so callers can index it safely
    additional_hours, mins = divmod(mins, 60)
    return additional_hours, mins
def hours_to_decimal(hours, minutes):
total = hours + (minutes / 60.0)
return total
def write_hours_to_file(hours, minutes, total_hours):
    today = datetime.date.today()
    with open('hourLog.txt', 'a') as file:
        file.write(str(today) + "\n")
        # zero-pad single-digit minutes so 7:05 is not written as 7:5
        file.write("Your time for 2 weeks is {}:{:02d}\n".format(hours, minutes))
        file.write("Your hours in decimal: {:.2f}\n\n".format(total_hours))
def main():
hours_list = get_hours_from_each_day()
print(hours_list)
final_total = split_hours_and_add(hours_list)
total_hours = hours_to_decimal(int(final_total[0]), int(final_total[1]))
write_hours_to_file(int(final_total[0]), int(final_total[1]), total_hours)
print("Wrote hours to file")
main()
| amaceing/PythonProjects | HourTracker/HourTracker.py | HourTracker.py | py | 2,377 | python | en | code | 0 | github-code | 90 |
# The controller function is written in this file
from database import *
from view import *
def main():
while True:
num = input_num()
if num == 1:
res = input_name()
write_name(res)
print("Успешно записано\n")
if num == 2:
char = input_char()
search_data(char)
print("Успешно найдено\n")
main()
| CeEcobot/PYTHON_2023 | Lesson_8/telefone/controller.py | controller.py | py | 433 | python | ru | code | 0 | github-code | 90 |
73567349096 | import os
from flask import Flask, jsonify, request, abort, Response
from typing import Union, Optional, List, Any
from dataclasses import asdict
from utils import make_query_response, get_greetings, Greetings
from greet import greetings as g
app = Flask(__name__)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, "data")
@app.route('/')
def main_page() -> Response:
greetings: Greetings = get_greetings(g)
return jsonify(asdict(greetings))
@app.route('/perform_query', methods=["POST"])
def query() -> Response:
data: Optional[Any] = request.json
if not data:
return jsonify({"message": "Data is empty"})
cmd1: Optional[str] = data.get("cmd1")
cmd2: Optional[str] = data.get("cmd2")
value1: Optional[str] = data.get("value1")
value2: Optional[str] = data.get("value2")
filename: Optional[str] = data.get("filename")
if not (cmd1 and value1 and filename):
abort(400, 'You need input the minimum amount of commands (cmd1, value1, filename)')
file_path: str = os.path.join(DATA_DIR, filename)
if not os.path.exists(file_path):
abort(400, 'File not found')
with open(file_path) as file:
result: Union[str, List] = make_query_response(cmd1, value1, file)
if cmd2 and value2:
result2: Union[str, List] = make_query_response(cmd2, value2, result)
return jsonify(result2)
return jsonify(result)
if __name__ == '__main__':
app.run(debug=True)
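# Example request (illustrative only -- the command semantics depend on
# utils.make_query_response, which is not shown in this file):
#
#   curl -X POST http://127.0.0.1:5000/perform_query \
#        -H "Content-Type: application/json" \
#        -d '{"cmd1": "filter", "value1": "GET", "filename": "apache_logs.txt"}'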
| cestxvcdim/SkyPro_24.2_HW | main.py | main.py | py | 1,513 | python | en | code | 0 | github-code | 90 |
40093709920 | from torch import nn
from torchvision import models
from src.utils.registry import REGISTRY
from timm import create_model
@REGISTRY.register('resnet')
class ResNetModel(nn.Module):
    def __init__(self, num_classes=5, classify=True):
        super(ResNetModel, self).__init__()
        # Despite the class name, the backbone is a timm ConvNeXt-Tiny
        pytorch_model = create_model('convnext_tiny', pretrained=True).to('cuda')
        self.ConvXnet = pytorch_model
        if classify:
            # classifier2 (1000 -> num_classes) matches the ConvNeXt logits used in
            # forward(); the 512-wide classifier is a leftover from the ResNet-18 backbone
            self.classifier = nn.Linear(in_features=512, out_features=num_classes)
            self.classifier2 = nn.Linear(in_features=1000, out_features=num_classes)
        else:
            # the old branch indexed a removed self.resnet attribute; strip the
            # timm head instead so the backbone returns features only
            self.ConvXnet.reset_classifier(0)
    def forward(self, inputs):
        vec = self.ConvXnet(inputs)  # (B, 1000) logits when classify=True
        classify = self.classifier2(vec) if hasattr(self, 'classifier2') else None
        return vec, classify
def get_layer_groups(self):
linear_layers = [elem[1] for elem in
filter(lambda param_tuple: 'fc' in param_tuple[0], self.ConvXnet.named_parameters())]
other_layers = [elem[1] for elem in
filter(lambda param_tuple: 'fc' not in param_tuple[0], self.ConvXnet.named_parameters())]
param_groups = {
'classifier': linear_layers,
'feature_extractor': other_layers
}
return param_groups
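# A minimal sketch (assumed usage, not from this repo) of feeding the layer
# groups into an optimizer with a smaller learning rate for the backbone:
#
#   import torch
#   groups = model.get_layer_groups()
#   optimizer = torch.optim.AdamW([
#       {'params': groups['classifier'], 'lr': 1e-3},
#       {'params': groups['feature_extractor'], 'lr': 1e-5},
#   ])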
| Tracker1701/Smart_Contracts_Vulnerability_Detection | smart-contracts-vulnerabilities - 2D - convxnet - CNN(1)/src/modeling/network/backbone/resnet.py | resnet.py | py | 1,737 | python | en | code | 1 | github-code | 90 |
72763998697 | # -*- coding: utf-8 -*-
# Adapted from lstm_text_generation.py in keras/examples
from __future__ import print_function
from keras.layers.recurrent import SimpleRNN
from keras.models import Sequential
from keras.layers import Dense, Activation
import numpy as np
INPUT_FILE = "../data/alice_in_wonderland.txt"
# extract the input as a stream of characters
print("Extracting text from input...")
fin = open(INPUT_FILE, 'rb')
lines = []
for line in fin:
line = line.strip().lower()
line = line.decode("ascii", "ignore")
if len(line) == 0:
continue
lines.append(line)
fin.close()
text = " ".join(lines)
# creating lookup tables
# Here chars is the number of features in our character "vocabulary"
chars = set([c for c in text])
nb_chars = len(chars)
char2index = dict((c, i) for i, c in enumerate(chars))
index2char = dict((i, c) for i, c in enumerate(chars))
# create inputs and labels from the text. We do this by stepping
# through the text ${step} character at a time, and extracting a
# sequence of size ${seqlen} and the next output char. For example,
# assuming an input text "The sky was falling", we would get the
# following sequence of input_chars and label_chars (first 5 only)
# The sky wa -> s
# he sky was ->
# e sky was -> f
# sky was f -> a
# sky was fa -> l
print("Creating input and label text...")
SEQLEN = 10
STEP = 1
input_chars = []
label_chars = []
for i in range(0, len(text) - SEQLEN, STEP):
input_chars.append(text[i:i + SEQLEN])
label_chars.append(text[i + SEQLEN])
# vectorize the input and label chars
# Each row of the input is represented by seqlen characters, each
# represented as a 1-hot encoding of size len(char). There are
# len(input_chars) such rows, so shape(X) is (len(input_chars),
# seqlen, nb_chars).
# Each row of output is a single character, also represented as a
# dense encoding of size len(char). Hence shape(y) is (len(input_chars),
# nb_chars).
print("Vectorizing input and label text...")
X = np.zeros((len(input_chars), SEQLEN, nb_chars), dtype=bool)  # np.bool was removed in NumPy 1.24
y = np.zeros((len(input_chars), nb_chars), dtype=bool)
for i, input_char in enumerate(input_chars):
for j, ch in enumerate(input_char):
X[i, j, char2index[ch]] = 1
y[i, char2index[label_chars[i]]] = 1
# Build the model. We use a single RNN with a fully connected layer
# to compute the most likely predicted output char
HIDDEN_SIZE = 128
BATCH_SIZE = 128
NUM_ITERATIONS = 25
NUM_EPOCHS_PER_ITERATION = 1
NUM_PREDS_PER_EPOCH = 100
model = Sequential()
model.add(SimpleRNN(HIDDEN_SIZE, return_sequences=False,
input_shape=(SEQLEN, nb_chars),
unroll=True))
model.add(Dense(nb_chars))
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
# We train the model in batches and test output generated at each step
for iteration in range(NUM_ITERATIONS):
print("=" * 50)
print("Iteration #: %d" % (iteration))
model.fit(X, y, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS_PER_ITERATION)
# testing model
# randomly choose a row from input_chars, then use it to
# generate text from model for next 100 chars
test_idx = np.random.randint(len(input_chars))
test_chars = input_chars[test_idx]
print("Generating from seed: %s" % (test_chars))
print(test_chars, end="")
for i in range(NUM_PREDS_PER_EPOCH):
Xtest = np.zeros((1, SEQLEN, nb_chars))
for i, ch in enumerate(test_chars):
Xtest[0, i, char2index[ch]] = 1
pred = model.predict(Xtest, verbose=0)[0]
ypred = index2char[np.argmax(pred)]
print(ypred, end="")
# move forward with test_chars + ypred
test_chars = test_chars[1:] + ypred
print()
| PacktPublishing/Deep-Learning-with-Keras | Chapter06/alice_chargen_rnn.py | alice_chargen_rnn.py | py | 3,769 | python | en | code | 1,049 | github-code | 90 |
5011171155 | import os
import re
import typing
def find_default_filename(existing_names: typing.List[str]) -> typing.Optional[str]:
    """Return the first numeric name ('1', '2', ...) not already in use."""
    other_names = [split_filename(n)['name'] for n in existing_names]
    for index in range(1, 1001):
        name = '{}'.format(index)
        if name not in other_names:
            return name
    return None
def split_filename(name: str) -> dict:
"""
:param name:
:return:
"""
filename = os.path.basename(name)
parts = filename.rsplit('.', 1)
return dict(
index=None,
name=parts[0],
extension=parts[1] if len(parts) > 1 else None
)
def explode_filename(name: str, scheme: str) -> dict:
"""
Removes any path components from the input filename and returns a
dictionary containing the name of the file without extension and the
extension (if an extension exists)
:param name:
:param scheme:
:return:
"""
if not scheme:
return split_filename(name)
replacements = {
'name': '(?P<name>.*)',
'ext': '(?P<extension>.+)$',
'index': '(?P<index>[0-9]{{{length}}})'
}
scheme_pattern = '^'
empty_scheme_pattern = ''
offset = 0
while offset < len(scheme):
char = scheme[offset]
next_char = scheme[offset + 1] if (offset + 1) < len(scheme) else None
if char in r'.()^$?*+\[]|':
addition = '\\{}'.format(char)
scheme_pattern += addition
empty_scheme_pattern += addition
offset += 1
continue
if char != '{':
scheme_pattern += char
empty_scheme_pattern += char
offset += 1
continue
if next_char != '{':
scheme_pattern += char
empty_scheme_pattern += char
offset += 1
continue
end_index = scheme.find('}}', offset)
contents = scheme[offset:end_index].strip('{}').lower()
if contents in replacements:
scheme_pattern += replacements[contents]
elif contents == ('#' * len(contents)):
addition = replacements['index'].format(length=len(contents))
scheme_pattern += addition
empty_scheme_pattern += addition
else:
addition = '{{{}}}'.format(contents)
scheme_pattern += addition
empty_scheme_pattern += addition
offset = end_index + 2
match = re.compile(scheme_pattern).match(name)
if not match:
parts = split_filename(name)
comparison = re.compile(empty_scheme_pattern.rstrip('-_: .\\'))
match = comparison.match(parts['name'])
if not match:
return parts
    parts = match.groupdict()
    index = parts.get('index')
    index = int(index) if index else None
    return dict(
        # guard against schemes without an index group (index would be None)
        index=index - 1 if index is not None else None,
        name=parts.get('name', ''),
        extension=parts.get('extension', 'py')
    )
def assemble_filename(
name: str,
scheme: str,
extension: str = None,
index: int = None
) -> str:
"""
:param name:
:param scheme:
:param extension:
:param index:
:return:
"""
if not name:
name = ''
if not extension:
extension = 'py'
if index is None:
index = 0
if not scheme:
return '{}.{}'.format(name, extension)
out = scheme
pattern = re.compile('{{(?P<count>[#]+)}}')
match = pattern.search(scheme)
if match:
out = '{before}{replace}{after}'.format(
before=out[:match.start()],
replace='{}'.format(index + 1).zfill(len(match.group('count'))),
after=out[match.end():]
)
replacements = {
'{{name}}': name,
'{{ext}}': extension
}
for pattern, value in replacements.items():
out = out.replace(pattern, value)
parts = split_filename(out)
if not name:
parts['name'] = parts['name'].rstrip('-_: .')
return '{}.{}'.format(parts['name'].strip(), parts['extension'])
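# A round-trip sketch (assumed usage, not part of the original module):
if __name__ == '__main__':
    scheme = '{{name}}-{{##}}.{{ext}}'
    built = assemble_filename('analysis', scheme, extension='py', index=4)
    print(built)                            # -> analysis-05.py
    print(explode_filename(built, scheme))  # -> {'index': 4, 'name': 'analysis', 'extension': 'py'}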
| sernst/cauldron | cauldron/session/naming.py | naming.py | py | 4,062 | python | en | code | 78 | github-code | 90 |
24384473077 | #! python3
import sys
import gzip
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE,SIG_DFL)
import string
# Get the file name from the commandline.
if len(sys.argv) == 3:
patient_list = sys.argv[2]
gene_list = sys.argv[1]
output_name = gene_list + "." + patient_list + ".generes"
output_name2 = gene_list + "." + patient_list + ".res"
output_name3 = gene_list + "." + patient_list + ".pats"
else:
print("\n\n"
"Usage: python3 gene_list patient_age\n\nResult in gene_list.patient_age.res\n")
exit()
##########################
try:
out = open( output_name, 'w' )
except IOError as e:
print ("Error: Can\'t write data to output file", output_name, "\n", e)
sys.exit()
except IsADirectoryError as e:
print ("Error: Can\'t write data to output file", output_name, "\n", e)
sys.exit()
try:
out2 = open( output_name2, 'w' )
except IOError as e:
print ("Error: Can\'t write data to output file", output_name2, "\n", e)
sys.exit()
except IsADirectoryError as e:
print ("Error: Can\'t write data to output file", output_name2, "\n", e)
sys.exit()
try:
out3 = open( output_name3, 'w' )
except IOError as e:
print ("Error: Can\'t write data to output file", output_name3, "\n", e)
sys.exit()
except IsADirectoryError as e:
print ("Error: Can\'t write data to output file", output_name3, "\n", e)
sys.exit()
######################
#print("Start parse")
PatDict={}
PatSet=set()
fh = gzip.open('CosmicSample.tsv.gz', 'r')
print("Age","SAMPLE_ID","Patient_ID",file=out3)
for line in fh:
line=str(line).strip()
val=line.split('\\t')
can=0
try:
i=int(val[28])
can=1
#print("try",i,val[3],val[0])
except:
#print("ex",val[28],val[3],val[0])
pass
#print(can)
if can > 0:
#print("if",val[28],val[3],val[0])
if (i < int(patient_list)):
val[0]=val[0].replace('b\'','')
val[3]=val[3].replace('b\'','')
print(i,val[3],val[0],file=out3)
PatDict[val[0]]=val[3]
PatSet.add(val[0])
else:
pass
#print("else",int(patient_list),i,val[3],val[0])
else:
pass
#print("else",val[28],val[3],val[0])
try:
#fh2 = open( gene_list, 'r' )
GeneSet = set(line.strip() for line in open(gene_list,'r'))
except IOError:
print ("Error: Can\'t find file or read data from gene file", gene_list)
try:
fh3 = gzip.open('CosmicCompleteCNA.tsv.gz', 'r')
#with ZipFile('CosmicCompleteCNA.tsv.gz, 'r') as fh3:
# fh3.open('CosmicCompleteCNA.tsv.gz, 'r') as fh4
except IOError:
print ("Error: Can\'t find file or read data from gene file CosmicCompleteCNA.tsv.gz")
#print("End parse")
##########################
#print(GeneSet)
# For each CNA, keep line only if it is the right patient and gene
#for ent in GeneSet:
#ent=ent.replace('b\'','')
#print(type(ent),ent)
#pass
print("ID_GENE","ID_SAMPLE","ID_Patient","Hist","Hist1","Hist2","Hist3","TOTAL_CN","MUT_TYPE",sep='\t' ,file=out)
print("ID_SAMPLE","ID_Patient","Hist","Hist1","Hist2","Hist3",sep='\t' ,file=out2)
for line in fh3:
line = str(line.strip())
#print("no1" + line)
fi = line.split('\\t')
#print("no2" + fields[0])
if fi[3] in PatDict.keys():
#print("Match",fi[3])
if fi[1] in GeneSet:
print(fi[1],fi[3],PatDict[fi[3]],fi[9],fi[10],fi[11],fi[12],fi[14],fi[16],sep='\t',file=out)
else:
print(fi[3],PatDict[fi[3]],fi[9],fi[10],fi[11],fi[12],sep='\t',file=out2 )
else:
pass
#print("No match",type(fi[3]),fi[3])
exit()
| MagdalenaZZ/Python_ditties | cosmic_parser.py | cosmic_parser.py | py | 3,483 | python | en | code | 0 | github-code | 90 |
7368773164 | """
Input files:
papers.json
methods.json
and API (https://paperswithcode.com/api/v1/)
Output files:
papers.nt
tasks.nt
"""
from rdflib import Graph
from rdflib import URIRef, BNode, Literal
from rdflib.namespace import DCTERMS, RDF, RDFS, XSD, OWL, FOAF
import json
import re
import html2text
import markdown
from paperswithcode import PapersWithCodeClient
#Path to methods.json and papers.json input files
file_path_methods = '.../methods.json'
file_path_papers = '.../papers.json'
#Path to tasks.nt and papers.nt output files
ntriple_tasks_output_file_path = ".../tasks.nt"
ntriple_papers_output_file_path = ".../papers.nt"
def preprocess_pwc_proceeding_string(proceeding):
pattern = re.compile(r'(\w+)( \d{4})?( \d+)?')
match = pattern.match(proceeding)
try:
return "".join(match.group(1, 2)).strip()
except TypeError:
return match.group(1)
except AttributeError:
return proceeding
def convert_markdown_to_plain_text(md_string):
html = markdown.markdown(md_string)
text_maker = html2text.HTML2Text()
text_maker.ignore_links = True
text_maker.ignore_images = True
text_maker.ignore_emphasis = True
plain_text = text_maker.handle(html)
return plain_text
replacements = [
{
"search": re.compile(r'"'),
"replace": '', # "
"comment": "Unescaped quotation marks"
}, {
"search": re.compile(r'\\'),
"replace": '', # \
"comment": "Unescaped backslash"
}, {
"search": re.compile(r'\n'),
"replace": '',
"comment": "Newline string"
}, {
"search": re.compile(r'\b'),
"replace": '',
"comment": "Newline string"
}, {
"search": re.compile(r'\t'),
"replace": '',
"comment": "Newline string"
}, {
"search": re.compile(r'\r'),
"replace": '',
"comment": "Newline string"
}, {
"search": re.compile(r'\f'),
"replace": '',
"comment": "Newline string"
}
]
replacements_url = [
{
"search": re.compile(r'"'),
"replace": '%22',
"comment": "Unescaped quotation mark in URI"
}, {
"search": re.compile(r'\\'),
"replace": '%5c',
"comment": "Unescaped backslash in URI"
}, {
"search": re.compile(r'\n'),
"replace": '',
"comment": "Newline string"
}, {
"search": re.compile(r'\r'),
"replace": '',
"comment": "Newline string"
}, {
"search": re.compile(r'\t'),
"replace": '',
"comment": "Newline string"
},
]
def clean(nameStr):
cleaned_str = nameStr
for r in replacements:
if re.search(r["search"], nameStr):
cleaned_str = re.sub(r["search"], r["replace"], cleaned_str)
return cleaned_str
def clean_url(nameStr):
cleaned_str = nameStr
for r in replacements_url:
if re.search(r["search"], nameStr):
cleaned_str = re.sub(r["search"], r["replace"], cleaned_str)
return cleaned_str
#Info for namespaces used in LinkedPapersWithCode
lpwc_namespace = "https://linkedpaperswithcode.com"
lpwc_namespace_class = "https://linkedpaperswithcode.com/class"
lpwc_paper_class = URIRef(lpwc_namespace_class + "/paper")
lpwc_task_class = URIRef(lpwc_namespace_class + "/task")
lpwc_conference_class = URIRef(lpwc_namespace_class + "/conference")
lpwc_method_class = URIRef(lpwc_namespace_class + "/method")
lpwc_category_class = URIRef(lpwc_namespace_class + "/category")
#LinkedPapersWithCode classes used in this file
lpwc_paper = URIRef(lpwc_namespace + "/paper/")
lpwc_task = URIRef(lpwc_namespace + "/task/")
lpwc_conference = URIRef(lpwc_namespace + "/conference/")
lpwc_method = URIRef(lpwc_namespace + "/method/")
lpwc_category = URIRef(lpwc_namespace + "/methods/category/")
#LinkedPapersWithCode predicates used in this file
has_arxiv_id = URIRef("http://purl.org/spar/fabio/hasArXivId")
has_url = URIRef("http://purl.org/spar/fabio/hasURL")
has_url_abs = URIRef("https://linkedpaperswithcode.com/property/hasURLAbstract")
has_dblp_url = URIRef("https://linkedpaperswithcode.com/property/dblpURL")
has_acronym = URIRef("https://dbpedia.org/property/acronym")
has_conference = URIRef("https://linkedpaperswithcode.com/property/hasConference")
has_task = URIRef("https://linkedpaperswithcode.com/property/hasTask")
has_method = URIRef("https://linkedpaperswithcode.com/property/hasMethod")
has_main_category = URIRef("https://linkedpaperswithcode.com/property/mainCategory")
has_parent = URIRef("https://linkedpaperswithcode.com/property/hasParent")
#tasks via api
client = PapersWithCodeClient()
def get_all_tasks(client):
    page = 1  # start with page 1
    all_tasks = []
    while True:
        tasks = client.task_list(page=page)
        all_tasks.extend(tasks.results)
        if tasks.next_page is None:  # no next page left: stop the loop
            break
        page = tasks.next_page  # otherwise continue with the next page
    return all_tasks
task_list = get_all_tasks(client)
task_list.pop(0) #remove the "task" task with no id and description
#create a mapping from task-name to task-id
lpwc_graph = Graph()
task_mapping = {}
i = 0
with open(ntriple_tasks_output_file_path, "w", encoding="utf-8") as g:
for task in task_list:
task_mapping[task.name] = task.id
#task-id
task_uri = URIRef(lpwc_task + task.id)
lpwc_graph.add((task_uri, RDF.type, lpwc_task_class))
#task-name
task_name = clean(task.name)
lpwc_graph.add((task_uri, FOAF.name, Literal(task_name, datatype=XSD.string)))
#task-description
if task.description != "":
task_description = convert_markdown_to_plain_text(task.description)
task_description = clean(task_description)
lpwc_graph.add((task_uri, DCTERMS.description, Literal(task_description, datatype=XSD.string)))
i += 1
if i % 1000 == 0:
print('Processed {} task entities'.format(i))
if i % 100 == 0:
g.write(lpwc_graph.serialize(format='nt'))
lpwc_graph = Graph()
#write the last part
if not i % 100 == 0:
g.write(lpwc_graph.serialize(format='nt'))
lpwc_graph = Graph()
g.close()
#methods.json
#creates a mapping from method-name to method-id
method_mapping = {}
with open(file_path_methods, 'r') as file:
methods = json.load(file)
for method in methods:
method_id = method["url"].replace("https://paperswithcode.com/method/", "")
method_name = method["name"]
method_mapping[method_name] = method_id
#papers.json
#transform papers.json file
lpwc_graph = Graph()
i = 0
with open(ntriple_papers_output_file_path, "w", encoding="utf-8") as g:
with open(file_path_papers, 'r') as file:
papers = json.load(file)
for paper in papers:
#paper-id
paper_id = paper["paper_url"].replace("https://paperswithcode.com/paper/", "")
paper_uri = URIRef(lpwc_paper + paper_id)
lpwc_graph.add((paper_uri, RDF.type, lpwc_paper_class))
#arxiv_id
if paper["arxiv_id"] != "" and paper["arxiv_id"] is not None:
arxiv_id = clean(paper["arxiv_id"])
lpwc_graph.add((paper_uri, has_arxiv_id, Literal(arxiv_id, datatype=XSD.string)))
#title
if paper["title"] is not None:
paper_title = clean(paper["title"])
lpwc_graph.add((paper_uri, DCTERMS.title , Literal(paper_title, datatype=XSD.string)))
#abstract
if paper["abstract"] != "" and paper["abstract"] is not None:
paper_abstract = convert_markdown_to_plain_text(paper["abstract"])
paper_abstract = clean(paper_abstract)
lpwc_graph.add((paper_uri, DCTERMS.abstract, Literal(paper_abstract, datatype=XSD.string)))
#url_abs
if paper["url_abs"] != "" and paper["url_abs"] is not None:
paper_url_abs = clean_url(paper["url_abs"])
lpwc_graph.add((paper_uri, has_url_abs, Literal(paper_url_abs, datatype=XSD.anyURI)))
#url_pdf
if paper["url_pdf"] != "" and paper["url_pdf"] is not None:
paper_url_pdf = clean_url(paper["url_pdf"])
lpwc_graph.add((paper_uri, has_url, Literal(paper_url_pdf, datatype=XSD.anyURI)))
#date
if paper["date"] != "":
paper_date = paper["date"]
lpwc_graph.add((paper_uri, DCTERMS.date, Literal(paper_date, datatype=XSD.date)))
#proceeding
if paper["proceeding"] is not None:
proceeding_name = paper["proceeding"]
proceeding_name = preprocess_pwc_proceeding_string(proceeding_name)
proceeding_name = clean_url(proceeding_name)
proceeding_uri = URIRef(lpwc_conference + proceeding_name.replace(" ", "-").lower())
lpwc_graph.add((paper_uri, has_conference, proceeding_uri))
#tasks
if paper["tasks"] != []:
for task in paper["tasks"]:
if task in task_mapping:
task_id = clean_url(task_mapping[task])
task_uri = URIRef(lpwc_task + task_id)
lpwc_graph.add((paper_uri, has_task, task_uri))
else:
task_uri = URIRef(lpwc_task + clean_url(task.replace(" ", "-").lower()))
lpwc_graph.add((task_uri, RDF.type, lpwc_task_class))
lpwc_graph.add((paper_uri, has_task, task_uri))
#methods
if paper["methods"] != []:
for method in paper["methods"]:
method_name = method["name"]
if method_name in method_mapping:
method_id = clean_url(method_mapping[method_name])
method_uri = URIRef(lpwc_method + method_id)
lpwc_graph.add((paper_uri, has_method, method_uri))
else:
method_id = clean_url(method["name"].replace(" ", "-").lower())
method_uri = URIRef(lpwc_method + method_id)
lpwc_graph.add((paper_uri, has_method, method_uri))
if method["main_collection"] is not None:
main_collection_name = clean_url(method["main_collection"]["name"].replace(" ", "-").lower())
main_collection_uri = URIRef(lpwc_category + main_collection_name)
lpwc_graph.add((method_uri, has_main_category, main_collection_uri))
if method["main_collection"]["parent"] is not None:
main_collection_parent_name = clean_url(method["main_collection"]["parent"].replace(" ", "-").lower())
main_collection_parent_uri = URIRef(lpwc_category + main_collection_parent_name)
lpwc_graph.add((main_collection_uri, has_parent, main_collection_parent_uri))
i += 1
if i % 1000 == 0:
print('Processed {} Paper entities'.format(i))
if i % 100 == 0:
g.write(lpwc_graph.serialize(format='nt'))
lpwc_graph = Graph()
#write the last part
if not i % 100 == 0:
g.write(lpwc_graph.serialize(format='nt'))
lpwc_graph = Graph()
g.close()
print("Done") | davidlamprecht/linkedpaperswithcode | transformation-scripts/01_papers.py | 01_papers.py | py | 11,898 | python | en | code | 1 | github-code | 90 |
30609536728 | # Given a string s, return true if it is a palindrome, or false otherwise.
# https://leetcode.com/problems/valid-palindrome/
class Solution:
def isPalindrome(self, s: str) -> bool:
s = s.lower()
# List comprehension to remove special chars
s = ''.join([i for i in s if i.isalnum()])
# reverse the string by reading the same string backwards
r = s[::-1]
if r == s:
return True
return False | aykhazanchi/leetcode | 04_valid_palindrome.py | 04_valid_palindrome.py | py | 461 | python | en | code | 0 | github-code | 90 |
32153357160 | from threading import Thread
import time
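# Toy pub/sub: each Consumer runs on its own thread and polls a shared text list.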
class Consumer:
def __init__(self, text_list_obj, consumer_number, **kwargs):
self.override_fn = kwargs.pop('override_fn', None)
self.text_list_obj = text_list_obj
self.is_killed = False
self.consumer_number = consumer_number
t = Thread(
target=self.consume_text,
args=[]
)
t.start()
def kill(self):
print(f"{self.consumer_number}---consumer getting deleted---")
self.is_killed = True
def consume(self, text, *args, **kwargs):
if self.override_fn is not None:
self.override_fn(text, *args, **kwargs)
else:
print(f"{self.consumer_number}---Received message {text}---")
    def consume_text(self):
        # Poll the shared list until this consumer is killed, then let the thread exit.
        while not self.is_killed:
            if len(self.text_list_obj.text_list) > 0:
                text = self.text_list_obj.text_list.pop(0)
                self.consume(text)
            else:
                # Yield the CPU instead of busy-waiting on an empty list.
                time.sleep(0.01)
class ConsumerManager:
def __init__(self, no_of_consumers, text_list_obj, **kwargs):
self.override_fn = kwargs.pop('override_fn', None)
self.consumer_counter = 0
self.consumer_list = []
self.text_list_obj = text_list_obj
self.add_consumers(no_of_consumers)
def add_consumers(self, no_of_consumers):
for i in range(0, no_of_consumers):
self.consumer_list.append(
Consumer(
self.text_list_obj,
self.consumer_counter,
override_fn=self.override_fn
)
)
self.consumer_counter += 1
def remove_consumers(self):
time.sleep(5)
print('--------Removing consumers--------')
        while self.consumer_list:
            self.consumer_list[0].kill()
            del self.consumer_list[0]
| cosmos-sajal/low_level_design | publisher_subscriber/consumer.py | consumer.py | py | 1,941 | python | en | code | 3 | github-code | 90 |
from Tkinter import *
from tkMessageBox import *
import os
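# Minimal staff panel (Python 2 / Tkinter): writes the chosen store status to state.txt and exits.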
class GUI(Frame):
def __init__(self, parent = None):
        Frame.__init__(self, parent)
self.pack()
self.master.title("Staff GUI")
self.master.minsize(width=100,height=70)
        # pack() returns None, so create each widget first and pack it separately
        radioframe = LabelFrame(self, text="Radio")
        radioframe.pack(anchor=CENTER, expand=TRUE, fill=BOTH)
        global var
        var = IntVar()
        Label(radioframe, text="Store status:").pack(padx=15)
        Radiobutton(radioframe, text="Enable", variable=var, value=1).pack(padx=15, anchor=W)
        Radiobutton(radioframe, text="Disable", variable=var, value=2).pack(padx=15, anchor=W)
        Button(radioframe, text="Apply", command=saveshopstatus).pack(padx=15, pady=10)
def saveshopstatus():
    global var
    shopvar = var.get()
    # Persist the selected store status and exit; do nothing if no option was chosen.
    if shopvar == 1:
        text = "Enabled"
    elif shopvar == 2:
        text = "Disabled"
    else:
        return
    with open("state.txt", "w") as f:
        f.write(text)
    os._exit(0)
def main():
root = Tk()
entry = GUI(root)
entry.pack()
root.mainloop()
if __name__ == "__main__":
main()
| edmund02/Gym-Fitness-Planner | StaffGUI.py | StaffGUI.py | py | 1,343 | python | en | code | 0 | github-code | 90 |
72208168298 | '''
给你一个树,请你 按中序遍历 重新排列树,使树中最左边的结点现在是树的根,并且每个结点没有左子结点,只有一个右子结点。
示例 :
输入:[5,3,6,2,4,null,8,1,null,null,null,7,9]
5
/ \
3 6
/ \ \
2 4 8
/ / \
1 7 9
输出:[1,null,2,null,3,null,4,null,5,null,6,null,7,null,8,null,9]
1
\
2
\
3
\
4
\
5
\
6
\
7
\
8
\
9
提示:
给定树中的结点数介于 1 和 100 之间。
每个结点都有一个从 0 到 1000 范围内的唯一整数值。
'''
from Tree import stringToTreeNode, TreeNode
class Solution:
def increasingBST(self, root: TreeNode) -> TreeNode:
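        # In-order traversal visits values in ascending order; re-link each visited node onto a growing right spine.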
def inorder(node: TreeNode):
if node:
inorder(node.left)
node.left = None
self.curr.right = node
self.curr = self.curr.right
inorder(node.right)
ans = self.curr = TreeNode(0)
inorder(root)
return ans.right
if __name__ == '__main__':
nums = "5,3,6,2,4,null,8,1,null,null,null,7,9"
root = stringToTreeNode(nums)
sol = Solution()
sol.increasingBST(root)
| Asunqingwen/LeetCode | 简单/递增顺序查找树.py | 递增顺序查找树.py | py | 1,357 | python | zh | code | 0 | github-code | 90 |
from pynput.keyboard import Key, Listener
import snake
snake = snake.Snake(3)
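# The listener below maps arrow-key presses onto snake move directions.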
def main():
with Listener(on_press=event_listener) as listener:
listener.join()
def event_listener(key):
    # Special keys arrive as members of pynput's Key enum
    switch = {
        Key.up: 'U',
        Key.down: 'D',
        Key.left: 'L',
        Key.right: 'R'
    }
snake.move_snake(switch.get(key, "None"), 1)
if __name__ == '__main__':
main()
| OmerElmaliach/SnakeGame | main.py | main.py | py | 396 | python | en | code | 0 | github-code | 90 |
31736006426 | """
The helper functions for our YOLO model.
"""
def get_model_from_config(name):
"""
    :param name: The file path for the configuration file.
Get the parameters of each layer of the neural network from the config file based on the given path name.
"""
module_params = []
    # Use a context manager so the file handle is closed promptly.
    with open(name, 'r') as module:
        lines = module.read().split('\n')
for line in lines:
line = line.strip()
if not line or line[0] == '#':
continue
elif line[0] == '[':
layer = dict()
layer['type'] = line[1:-1]
module_params.append(layer)
else:
key, value = line.split("=")
module_params[-1][key.strip()] = value.strip()
return module_params
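# Example (hypothetical Darknet-style cfg path):
#   blocks = get_model_from_config("cfg/yolov3.cfg")
#   blocks[0] might be {'type': 'net', ...}; later entries describe layers such as 'convolutional'.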
| CSMYang/YOLOLOLOLOL | util.py | util.py | py | 743 | python | en | code | 0 | github-code | 90 |
35619153316 | from item import Item
from errors import InvalidStateError
class NPC(Item):
def __init__(self, names, item_description, description, has_task, gives_task, speeches, printer, events, game):
super(NPC, self).__init__(False, False, True, names, item_description, description, printer)
self.speeches = speeches
self.has_task = has_task
self.gives_task = gives_task
self.state = NPCState.before_task
self.printer = printer
self.game = game
# Events might cause another NPC to change state, and/or change the players karma
self.events = None
if events:
self.events = events
def accept_response(self, event):
response = self.game.app.command_text.get().upper()
self.game.app.command_text["state"] = "disabled"
        self.game.app.command_text.bind("<Return>", self.game.printer.skipText)
self.game.app.insert(response, True)
if not (response == "Y" or response == "N"):
self.printer.pprint("\n\nWill you help the " + self.noun_names[0]+ "? Type 'Y' for Yes or 'N' for No.")
self.game.app.command_text.bind("<Return>", self.accept_response)
return
if response == "Y":
self.printer.pprint('"' + self.speeches[1]["Yes"] + '"')
self.state = NPCState.during_task
self.game.app.command_text["state"] = "normal"
self.game.app.command_text.delete(0, "end")
self.game.app.command_text.bind("<Return>", self.game.app.execute_command)
return self.events
else:
self.printer.pprint('"' + self.speeches[1]["No"] + '"')
self.state = NPCState.task_complete
self.game.app.command_text["state"] = "normal"
self.game.app.command_text.delete(0, "end")
self.game.app.command_text.bind("<Return>", self.game.app.execute_command)
return self.events
def be_talked_to(self):
self.printer.pprint("The " + self.noun_names[0]+ " talks to you:")
if not self.has_task:
self.printer.pprint('"' + self.speeches[0] + '"')
return (self.events, True)
else:
if self.state == NPCState.before_task:
self.printer.pprint('"' + self.speeches[0] + '"')
if self.gives_task:
self.printer.pprint("\n\nWill you help the " + self.noun_names[0]+ "? Type 'Y' for Yes or 'N' for No.")
self.game.app.command_text.bind("<Return>", self.accept_response)
return (self.events, False)
else:
return (self.events, True)
elif self.state == NPCState.during_task:
if self.gives_task:
self.printer.pprint('"' + self.speeches[1]["Yes"] + '"')
else:
self.printer.pprint('"' + self.speeches[1] + '"')
return (self.events, True)
elif self.state == NPCState.task_complete:
self.printer.pprint('"' + self.speeches[2] + '"')
self.state = NPCState.after_task
return ([], True)
elif self.state == NPCState.after_task:
self.printer.pprint('"' + self.speeches[3] + '"')
return ([], True)
else:
raise InvalidStateError("NPC does not have correct state")
class NPCState(object):
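    # Lightweight enum substitute: tracks where the NPC is in its task lifecycle.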
before_task = 0
during_task = 1
task_complete = 2
after_task = 3
| rjmcf/HailTraveller | npc.py | npc.py | py | 2,951 | python | en | code | 0 | github-code | 90 |
70639103016 | #!/usr/bin/env python3
from bs4 import BeautifulSoup
import sys
import json
import collections
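# Convert the rows of an HTML table into a JSON file keyed by the input file's base name.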
def transformHtmlTableToJson(inputPath, fileName):
inputFile = open(inputPath,"r")
table_data = [[cell.text for cell in row("td")] for row in BeautifulSoup(inputFile.read(), 'html.parser')("tr")]
inputFile.close()
dataName = fileName.split(".")[0]
data = collections.OrderedDict()
data[dataName] = []
table_data[0][0] = table_data[0][0].replace(".", "") # changing the key name for MongoDB
# from "Slno." to "Slno"
for row in table_data[1:]:
context = collections.OrderedDict()
for idx in range(len(table_data[0])):
context[table_data[0][idx]] = row[idx]
data[dataName].append(context)
outputFileName = dataName+'.json'
with open(outputFileName, 'w') as outfile:
json.dump(data,outfile,indent=4)
def main():
if len(sys.argv) < 2:
        print('Usage: one argument is required: inputFilePath.')
return
inputFilePath = sys.argv[1]
outputFileName = inputFilePath.split("/")[-1]
transformHtmlTableToJson(inputFilePath, outputFileName)
print(f"{inputFilePath} is Done!")
if __name__ == '__main__':
main() | ChianHuei/centriqe_pipeline | scripts for QMS/convertTableToJson.py | convertTableToJson.py | py | 1,288 | python | en | code | 0 | github-code | 90 |
12356695058 | import string
import time
from urllib.parse import quote
from urllib.request import urlopen, urlretrieve
from bs4 import BeautifulSoup
from csvFunc import csvFunc
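# Comic search: try the CSV cache first, fall back to scraping the site's search page, then record the newest volume per title.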
class searchFunc():
gUrl = ""
gBbsUrl = ""
gSearchUrl = ""
gComicTitle = None
gResultFromCvs = None
gCharSet = "utf-8"
def __init__( self, aUrl, aBbsUrl, aSearchUrl, aComicTitle, aResultFromCsv, aCharSet ):
self.gUrl = aUrl
self.gBbsUrl = aBbsUrl
self.gSearchUrl = aSearchUrl
self.gComicTitle = aComicTitle
self.gResultFromCsv = aResultFromCsv
self.gCharSet = aCharSet
    # Find and strip the punctuation characters !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ and whitespace
def getOnlyText( self, aStr ):
sStr = aStr.replace( ' ', '' ).strip( '\n' )
for pun in string.punctuation:
sStr = sStr.replace( pun, '' )
return sStr
def searchComic( self, aTitle ):
        if aTitle is None or aTitle == '':
return None
sList = []
for comicList in self.gResultFromCsv:
sTitle = self.getOnlyText( comicList[0].upper() )
if aTitle in sTitle:
sList.append( comicList )
if len( sList ) > 0:
return sList
else:
return None
def searchComicsFromCsv( self, aTitle ):
        if aTitle is None or aTitle == '':
return None
if self.gResultFromCsv is None:
return None
sComicTitle = self.getOnlyText( aTitle.upper() )
sList = self.searchComic( sComicTitle )
if sList is None:
return None
elif len( sList ) > 0:
return sList
else:
return None
def searchComicsFromWeb( self, aTitle ):
        if aTitle is None or aTitle == '':
return None
sKeyword = ''
sResultList = []
for title in aTitle.split( ' ' ):
sKeyword = sKeyword + ' ' + quote( self.getOnlyText( title ).encode( self.gCharSet ) )
        # keyword to be assembled into the URL
sSearchKeyword = sKeyword.strip( ' ' ).replace( ' ', '+' ).upper()
        # keyword to compare against the actual search results
sComicTitle = self.getOnlyText( aTitle.upper() )
        # Append the assembled keyword to the base search URL.
try:
sSearchUrl = self.gSearchUrl + sSearchKeyword
sHtml = urlopen( sSearchUrl )
sBsObj = BeautifulSoup( sHtml, "html.parser" )
sResult = sBsObj.findAll( "a", { "class":"sch_res_title" } )
sHtml.close()
except:
return None
for res in sResult:
sTitle = self.getOnlyText( res.get_text().upper() )
if sComicTitle in sTitle:
sVolUrl = self.gBbsUrl + res['href'].lstrip( '.' )
sList = ( sComicTitle, sVolUrl, '' )
if sList is not None:
sResultList.append( sList )
if sResultList is None:
return None
elif len( sResultList ) > 0:
return sResultList
else:
return None
def searchLastVol( self, aUrl ):
try:
sHtml = urlopen( aUrl )
sBsObj = BeautifulSoup( sHtml, "html.parser" )
sResult = sBsObj.findAll( "a", { "class":"gal_subject" } )
sHtml.close()
except:
return None
if sResult is None:
return None
        # findAll returns a list; keep only the last (newest) entry
        res = sResult[-1]
        sUrl = self.gUrl + res['href'].lstrip( '.' )
        sResultList = ( res.get_text(), aUrl, sUrl )
if sResultList is None:
return None
elif len( sResultList ) > 0:
return sResultList
else:
return None
def getNPrintLastVol( self, aTitle, aComicUrlList, aIsFromCsv ):
if aComicUrlList is None:
print( "[" + aTitle + "] 검색 결과가 없습니다." )
return False
sIsModify = False
sResultList = []
for res in aComicUrlList:
sRes = self.searchLastVol( res[1] )
if sRes is not None:
sResultList.append( sRes )
                # A new release: the data differs from the CSV record, or it came straight from the web.
if aIsFromCsv == True and sRes[0] != res[0] \
or aIsFromCsv == False:
sIsModify = True
print( "[" + aTitle + "] 신작 : " + sRes[0] + "\n\tURL : " + sRes[1] +
"\n\tLast Vol URL : " + sRes[2] )
else:
print( "[" + aTitle + "] 신작이 없습니다." )
print( "가장 최근 신간 : " + sRes[0] + "\n\tURL : " + sRes[1] + \
"\n\tLast Vol URL : " + sRes[2] )
if sIsModify == True:
return sResultList
else:
return None
def searchNew( self ):
sResultList = []
sCount = 0
for title in self.gComicTitle:
sTitle = title.strip( '\n' )
if sTitle == '':
continue
sIsFromCsv = False
sResult = self.searchComicsFromCsv( sTitle )
if sResult is None:
sResult = self.searchComicsFromWeb( sTitle )
else:
sIsFromCsv = True
sTempList = self.getNPrintLastVol( sTitle, sResult, sIsFromCsv )
if sTempList is not None:
sResultList.append( sTempList )
            # To ease server load, sleep one second after every third title.
sCount += 1
if sCount % 3 == 0:
time.sleep( 1 )
if sResultList is None:
return None
elif len( sResultList ) > 0:
sCsv = csvFunc( self.gCharSet )
sCsv.writeCsv( sResultList )
else:
return None
| martinkang/Study | Python/Web_Scraping/miniToonNotifier/searchFunc.py | searchFunc.py | py | 4,826 | python | en | code | 5 | github-code | 90 |
7438099997 | import numpy
from Project import functions, functions2
import matplotlib.pyplot as plt
def save_ApplicationWorkingPoint_DCFs(scores, L, costs, path, applicationWorkingPoints):
DCFsNormalized = []
DCFsNormalized2 = []
DCFsNormalizedMin = []
applicationWorkingPointsPrint = ['0_1', '0_5', '0_9']
for i, applicationWorkingPoint in enumerate(applicationWorkingPoints):
optimalBayesDecisionPredictions = functions2.compute_optimal_bayes_decision(scores,
applicationWorkingPoint, costs)
confusionMatrix = functions2.compute_binary_confusion_matrix(optimalBayesDecisionPredictions, L)
DCFsNormalized.append(
functions2.compute_normalized_detection_cost_function(confusionMatrix, applicationWorkingPoint, costs))
thresholds = [i for i in numpy.arange(-30, 30, 0.15)]
for threshold in thresholds:
optimalBayesDecisionPredictions = functions2.compute_optimal_bayes_decision_given_threshold(scores,
threshold)
confusionMatrix = functions2.compute_binary_confusion_matrix(optimalBayesDecisionPredictions, L)
DCFsNormalized2.append(
functions2.compute_normalized_detection_cost_function(confusionMatrix, applicationWorkingPoint, costs))
DCFsNormalizedMin.append(min(DCFsNormalized2))
DCFsNormalized2 = []
with open("./DCFs/minDCF_" + applicationWorkingPointsPrint[i] + "_" + path , "w") as fp:
fp.write(str(min(DCFsNormalizedMin)))
with open("./DCFs/actDCF_" + applicationWorkingPointsPrint[i] + "_" + path, "w") as fp:
fp.write(str(min(DCFsNormalized)))
def train_LRKFold(D, L, lambd, classPriorProbabilities, path, applicationWorkingPoints):
num_samples = int(D.shape[1] / numFold)
numpy.random.seed(27)
perm = numpy.random.permutation(D.shape[1])
D = D[:, perm]
L = L[perm]
scores = numpy.zeros(L.shape[0])
for i in range(numFold):
(DTR, LTR), (DTE, LTE) = functions.K_fold_generate_Training_and_Testing_samples(D, L, i, numFold, num_samples)
scores[i * num_samples: (i + 1) * num_samples] = functions.compute_logistic_regression_binary_llr(DTR, LTR, DTE,
lambd, classPriorProbabilities)
save_bayes_error_plot(scores, L, costs, path)
save_ApplicationWorkingPoint_DCFs(scores, L, costs, path, applicationWorkingPoints)
def save_bayes_error_plot(scores, L, costs, path):
x = []
y = []
DCFsNormalized = []
DCFsNormalized2 = []
DCFsNormalizedMin = []
effPriorLogOdds = numpy.linspace(-4, 4, 25)
for effPriorLogOdd in effPriorLogOdds:
print(str(effPriorLogOdd))
x.append(effPriorLogOdd)
effPrior = 1 / (1 + (numpy.exp(-effPriorLogOdd)))
classPriorProbability = numpy.array([1 - effPrior, effPrior], dtype=float)
optimalBayesDecisionPredictions = functions2.compute_optimal_bayes_decision(scores,
classPriorProbability, costs)
confusionMatrix = functions2.compute_binary_confusion_matrix(optimalBayesDecisionPredictions, L)
DCFsNormalized.append(
functions2.compute_normalized_detection_cost_function(confusionMatrix, classPriorProbability, costs))
thresholds = [i for i in numpy.arange(-30, 30, 0.1)]
for threshold in thresholds:
optimalBayesDecisionPredictions = functions2.compute_optimal_bayes_decision_given_threshold(scores,
threshold)
confusionMatrix = functions2.compute_binary_confusion_matrix(optimalBayesDecisionPredictions, L)
DCFsNormalized2.append(
functions2.compute_normalized_detection_cost_function(confusionMatrix, classPriorProbability, costs))
DCFsNormalizedMin.append(min(DCFsNormalized2))
DCFsNormalized2 = []
mass = max(max(DCFsNormalizedMin), max(DCFsNormalized))
plt.figure()
plt.plot(effPriorLogOdds, DCFsNormalized, label='actDCF', color='b')
plt.plot(effPriorLogOdds, DCFsNormalizedMin,':', label='minDCF', color='r')
plt.xlabel("$log \\frac{ \\tilde{\pi}}{1-\\tilde{\pi}}$")
plt.legend()
plt.ylim([0, mass])
plt.xlim([-3, 3.0])
plt.savefig(("./figures/"+path))
plt.clf()
plt.close()
if __name__=="__main__":
DTRSplitted, LTR, DTROriginal = functions2.read_file("../Train.txt")
DTESplitted, LTE, DTEOriginal = functions2.read_file("../Test.txt")
applicationWorkingPoint1 = numpy.array([9 / 10, 1 / 10], dtype=float)
applicationWorkingPoint2 = numpy.array([5 / 10, 5 / 10], dtype=float)
applicationWorkingPoint3 = numpy.array([1 / 10, 9 / 10], dtype=float)
classPriorProbabilities1 = numpy.array([9 / 10, 1 / 10], dtype=float)
classPriorProbabilities2 = numpy.array([5 / 10, 5 / 10], dtype=float)
classPriorProbabilities3 = numpy.array([1 / 10, 9 / 10], dtype=float)
applicationWorkingPoints = [[9 / 10, 1 / 10], [5 / 10, 5 / 10], [1 / 10, 9 / 10]]
costs = numpy.array([1.0, 1.0], dtype=float)
labels = [i for i in range(0, numpy.amax(LTR) + 1)]
numFold = 5
thresholds = [i for i in numpy.arange(-30, 30, 0.1)]
DTROriginalNormalized = (DTROriginal - functions2.compute_mean(DTROriginal)) / functions2.to_column(DTROriginal.std(axis=1))
DTEOriginalNormalized = (DTEOriginal - functions2.compute_mean(DTEOriginal)) / functions2.to_column(DTEOriginal.std(axis=1))
with open("Scores/mvg_score", "r") as fp:
scores_mvg = fp.read()
scores_mvg = [float(score) for score in scores_mvg.rstrip().split(",")]
with open("Scores/qlr_score", "r") as fp:
scores_qlr = fp.read()
scores_qlr = [float(score) for score in scores_qlr.rstrip().split(",")]
with open("Scores/qsvm_score", "r") as fp:
scores_qsvm = fp.read()
scores_qsvm = [float(score) for score in scores_qsvm.rstrip().split(",")]
###QLR
# DTROriginalNormalized = functions.quadratic_expansion(DTROriginalNormalized)
# DTEOriginalNormalized = functions.quadratic_expansion(DTEOriginalNormalized)
# DCFsNormalized= []
# DCFsNormalized1 = []
# DCFsNormalized2 = []
# DCFsNormalized3 = []
# effPriorLogOdds = numpy.linspace(-4, 4, 25)
# llr_QLR = functions.compute_logistic_regression_binary_quadratic_llr(DTROriginalNormalized, LTR, DTEOriginalNormalized, 10**-5, classPriorProbabilities2)
# print(list(llr_QLR))
# for threshold in thresholds:
# optimalBayesDecisionPredictions = functions.compute_optimal_bayes_decision_given_threshold(llr_QLR,
# threshold)
# confusionMatrix = functions.compute_binary_confusion_matrix(optimalBayesDecisionPredictions, LTE)
# DCFsNormalized1.append(
# functions.compute_normalized_detection_cost_function(confusionMatrix, applicationWorkingPoint1,
# costs))
# DCFsNormalized2.append(
# functions.compute_normalized_detection_cost_function(confusionMatrix, applicationWorkingPoint2,
# costs))
# DCFsNormalized3.append(
# functions.compute_normalized_detection_cost_function(confusionMatrix, applicationWorkingPoint3,
# costs))
# minDCF1 = min(DCFsNormalized1)
# minDCF2 = min(DCFsNormalized2)
# minDCF3 = min(DCFsNormalized3)
save_bayes_error_plot(scores_qlr, LTE, costs, "QLR_pt_0_5")
save_ApplicationWorkingPoint_DCFs(scores_qlr, LTE, costs, "QLR_pt_0_5", applicationWorkingPoints)
#QSVM
# DCFsNormalized1 = []
# DCFsNormalized2 = []
# DCFsNormalized3 = []
# effPriorLogOdds = numpy.linspace(-4, 4, 25)
# llr_QSVM = list(functions.compute_support_vector_machine_kernel_llr(DTROriginalNormalized, LTR,
# DTEOriginalNormalized, LTE, 1, 10**2, 'p',
# classPriorProbabilities2, c=1,d=2))
#
# print(llr_QSVM)
# for threshold in thresholds:
# optimalBayesDecisionPredictions = functions.compute_optimal_bayes_decision_given_threshold(llr_QSVM,
# threshold)
# confusionMatrix = functions.compute_binary_confusion_matrix(optimalBayesDecisionPredictions, LTE)
# DCFsNormalized1.append(
# functions.compute_normalized_detection_cost_function(confusionMatrix, applicationWorkingPoint1,
# costs))
# DCFsNormalized2.append(
# functions.compute_normalized_detection_cost_function(confusionMatrix, applicationWorkingPoint2,
# costs))
# DCFsNormalized3.append(
# functions.compute_normalized_detection_cost_function(confusionMatrix, applicationWorkingPoint3,
# costs))
# minDCF1 = min(DCFsNormalized1)
# minDCF2 = min(DCFsNormalized2)
# minDCF3 = min(DCFsNormalized3)
save_bayes_error_plot(scores_qsvm, LTE, costs, "QSVM_pt_0_5")
save_ApplicationWorkingPoint_DCFs(scores_qsvm, LTE, costs, "QSVM_pt_0_5", applicationWorkingPoints)
##MVG
# DCFsNormalized = []
# DCFsNormalized1 = []
# DCFsNormalized2 = []
# DCFsNormalized3 = []
# effPriorLogOdds = numpy.linspace(-4, 4, 25)
# llr_MVG = list(functions.compute_MVG_llrs(DTROriginalNormalized, LTR, DTEOriginalNormalized, labels))
# print(llr_MVG)
# for threshold in thresholds:
# optimalBayesDecisionPredictions = functions.compute_optimal_bayes_decision_given_threshold(llr_MVG,
# threshold)
# confusionMatrix = functions.compute_binary_confusion_matrix(optimalBayesDecisionPredictions, LTE)
# DCFsNormalized1.append(
# functions.compute_normalized_detection_cost_function(confusionMatrix, applicationWorkingPoint1,
# costs))
# DCFsNormalized2.append(
# functions.compute_normalized_detection_cost_function(confusionMatrix, applicationWorkingPoint2,
# costs))
# DCFsNormalized3.append(
# functions.compute_normalized_detection_cost_function(confusionMatrix, applicationWorkingPoint3,
# costs))
# minDCF1 = min(DCFsNormalized1)
# minDCF2 = min(DCFsNormalized2)
# minDCF3 = min(DCFsNormalized3)
save_bayes_error_plot(scores_mvg, LTE, costs, "MVG_pt_0_5")
save_ApplicationWorkingPoint_DCFs(scores_mvg, LTE, costs, "MVG_pt_0_5", applicationWorkingPoints)
# with open("Scores/mvg_score", "r") as fp:
# scores_mvg = fp.read()
# scores_mvg = [float(score) for score in scores_mvg.rstrip().split(",")]
# with open("Scores/qlr_score", "r") as fp:
# scores_qlr = fp.read()
# scores_qlr = [float(score) for score in scores_qlr.rstrip().split(",")]
# with open("Scores/qsvm_score", "r") as fp:
# scores_qsvm = fp.read()
# scores_qsvm = [float(score) for score in scores_qsvm.rstrip().split(",")]
#
# scores1 = [scores_mvg, scores_qlr, scores_qsvm]
# scores = numpy.array(numpy.vstack(scores1))
# train_LRKFold(scores, LTE, 10 ** -5, applicationWorkingPoints[1],
# "MVG_QLR_QSVM_Zscore_pt_0_5", applicationWorkingPoints)
#
# scores2 = [scores_mvg, scores_qlr]
# scores = numpy.array(numpy.vstack(scores2))
# train_LRKFold(scores, LTE, 10 ** -5, applicationWorkingPoints[1],
# "MVG_QLR_Zscore_pt_0_5", applicationWorkingPoints)
# scores3 = [scores_qlr, scores_qsvm]
# scores = numpy.array(numpy.vstack(scores3))
# train_LRKFold(scores, LTE, 10 ** -5, applicationWorkingPoints[1],
# "QSVM_QLR_Zscore_pt_0_5", applicationWorkingPoints)
#
# scores4 = [scores_mvg, scores_qsvm]
# scores = numpy.array(numpy.vstack(scores4))
# train_LRKFold(scores, LTE, 10 ** -5, applicationWorkingPoints[1],
# "MVG_QSVM_Zscore_pt_0_5", applicationWorkingPoints)
| c0st0la/MachineLearning | Project/Evaluation/evaluation.py | evaluation.py | py | 12,938 | python | en | code | 0 | github-code | 90 |
30463095817 | import sys
from collections import deque
n = int(sys.stdin.readline())
deq = deque()
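# Baekjoon 10866: execute deque commands read from stdin; collections.deque gives O(1) pushes and pops at both ends.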
for i in range(n):
text = sys.stdin.readline().strip()
if text.split()[0] == 'push_front':
deq.appendleft(text.split()[1])
elif text.split()[0] == 'push_back':
deq.append(text.split()[1])
elif text == 'pop_front':
print( deq.popleft() if deq else -1)
elif text == 'pop_back':
print( deq.pop() if deq else -1)
elif text == 'size':
print(len(deq))
elif text == 'empty':
print( 0 if deq else 1)
elif text == 'front':
print( deq[0] if deq else -1)
elif text == 'back':
print( deq[-1] if deq else -1) | laagom/Algorithm | 백준/Silver/10866. 덱/덱.py | 덱.py | py | 723 | python | en | code | 0 | github-code | 90 |
9832654609 |
import cv2
import numpy as np
import os
# Shared I/O state used across functions
fp = {"gray":[], "load":[], "dump":[], "dir":"", "sample":[]}
# Generate digit templates (for the 0102 layout)
def get_templates0102():
temp0 = cv2.imread("tmpnum.png")
temp0g = cv2.cvtColor(temp0, cv2.COLOR_BGR2GRAY)
return [
temp0g[
(0 if (i<5) else 33):(33 if (i<5) else 64),
(i%5)*17:(i%5)*17+17
] for i in range(10)
]
# Generate digit templates (for the 0603 layout)
def get_templates(rate = 1):
temp0 = cv2.imread("tmpnum.png")
temp0g = cv2.cvtColor(temp0, cv2.COLOR_BGR2GRAY)
temp0g = cv2.resize(temp0g, None, None, rate, rate)
_, tempbw = cv2.threshold(temp0g, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(tempbw, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Get the bounding box of each digit
ps = [cv2.boundingRect(contour) for contour in contours]
ps = sorted(ps, key=lambda p:(p[1] < 10 and p[0] or 200 + p[0]))
wav = sum([p[2] for p in ps]) / len(ps)
print(ps,wav)
    # Build the templates
ret = []
for p in ps:
x,y,w,h = p
if (w < wav):
w = int(wav)
x = x - int((wav-w)/2)
ret.append(temp0g[y-1:y+h+1, x-1:x+w+1])
ret.insert(0,ret[9])
"""
gray = cv2.cvtColor(fp["dump"], cv2.COLOR_BGR2GRAY)
cv2.rectangle(gray, (0,0), (250,30), (255,255,255), thickness=-1)
for i,sample in enumerate(ret):
if (i==1):
gray[1:1+len(sample), 2 + i*25:i*25+len(sample[0])] = sample[0:,:-2]
else:
gray[1:1+len(sample), 2 + i*25: 2+i*25+len(sample[0])] = sample
cv2.imwrite("result00.png", gray[0:30, 0:25*10])
exit()
"""
return ret[:-1]
# Generate digit templates (using numfont); note that this redefinition shadows the version above
def get_templates(rate = 1):
    # Load the 15x25 digit font
temp0 = cv2.imread("numfont00.png")
temp0g = cv2.cvtColor(temp0, cv2.COLOR_BGR2GRAY)
temp0g = cv2.resize(temp0g, None, None, rate, rate)
_, tempbw = cv2.threshold(temp0g, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(tempbw, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# 座標位置を取得
ps = [cv2.boundingRect(contour) for contour in contours]
ps = sorted(ps)
wav = sum([p[2] for p in ps]) / len(ps)
print(ps,wav)
    # Build the templates
ret = []
for p in ps:
x,y,w,h = p
if (w < wav):
w = int(wav)
x = x - int((wav-w)/2)
ret.append(temp0g[y-1:y+h+1, x-1:x+w+1])
#return [cv2.resize(s, None, None, rate, rate) for s in ret]
return ret
# Scan the image for elements sized roughly like digits
def profilenum():
img = fp["gray"]
    # Binarize, then detect contours
_, tempbw = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(tempbw, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
ret = []
    # Collect elements with w = 5-20px and h = 10-30px
for contour in contours:
x, y, w, h = cv2.boundingRect(contour)
if (5 < w and w < 20 and 10 < h and h < 30):
ret.append([x,y,w,h])
cv2.rectangle(fp["dump"], (x, y), (x+w, y+h), (0,255,0), 2)
ret = sorted(ret, key=lambda p: p[1])
    # Treat elements within 2px in y as the same row
for i,p in enumerate(ret):
if (i == 0):
p.append(p[1])
else:
bef = ret[i - 1]
p.append(p[1] if (2 < p[1] - bef[1]) else bef[4])
#print(p)
    # Pick the row with the most elements
row = []
for n in set([p[4] for p in ret]):
ps = list(filter(lambda p: p[4] == n, ret))
if (len(row) < len(ps)): row = ps
x0 = min([p[0] for p in row])
y0 = min([p[1] for p in row])
x1 = max([p[0]+p[2] for p in row])
y1 = max([p[1]+p[3] for p in row])
w0 = max([p[2] for p in row])
h0 = max([p[3] for p in row])
print("w=", set([p[2] for p in row]), "h=",set([p[3] for p in row]))
    # Scale the template samples down to the element size of that row
samples = get_templates()
print("resize", w0/15, h0/24)
samples = [cv2.resize(sample, None, None, h0/24, h0/24) for sample in samples]
return samples, (h0/24)
# 画像内でdkw番号を拾う
def find_dkwnum():
samples, rate = profilenum()
temp0 = cv2.imread("numfont00.png")
temp0g = cv2.cvtColor(temp0, cv2.COLOR_BGR2GRAY)
w = 16
for dkw in range(21683,21822):
# dkw = 21737
digit = len(str(dkw))
sample = np.ones((50, w * (digit + 2)), np.uint8) * 255
for i in range(digit):
n = int(str(dkw)[i])
fig = temp0g[0:,n*25+1:n*25+w+1]
if (n == 1): fig = temp0g[0:,n*25-3:n*25+w-3]
sample[5:35, (i+1)*w:(i+2)*w] = fig
#rate = 0.7
print("rate=",rate)
sample = cv2.resize(sample, None, None, rate, rate)
cv2.imwrite("b.png", sample)
result = cv2.matchTemplate(fp["gray"], sample, cv2.TM_CCOEFF_NORMED)
img = fp["load"]
if (True):
loc = np.where(result >= 0.8)
for pt in zip(*loc[::-1]):
cv2.rectangle(img, (pt[0],pt[1]), (pt[0]+len(sample[0]),pt[1]+len(sample)), (255,0,0), 2)
print(pt, result[pt[1],pt[0]])
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
pt = maxLoc
cv2.rectangle(img, (pt[0],pt[1]), (pt[0]+len(sample[0]),pt[1]+len(sample)), (0,0,255), 2)
print(dkw,maxVal, maxLoc)
cv2.imwrite("a.png", img)
# Template-match the digit samples against the image
# (note: the fp defaults below are captured at definition time; callers always pass arguments)
def find_numbers(img = fp["gray"], samples = fp["sample"], threshold = 0.9):
ret = []
for i,sample in enumerate(samples):
result = cv2.matchTemplate(img, sample, cv2.TM_CCOEFF_NORMED)
loc = np.where(result >= threshold)
for pt in zip(*loc[::-1]):
ret.append([pt[0], pt[1], (i + 0) % 10])
        # For testing: render the samples onto the debug image
if (fp["dump"].any):
h0,w0 = sample.shape[:2]
fp["dump"][0 : h0, 20*i : 20*i+w0] = cv2.cvtColor(sample, cv2.COLOR_GRAY2BGR)
return ret
def draw_nump(nump):
print(nump)
for pt in nump:
#print(pt)
w = 11
h = 18
col = [(0,0,127),(0,0,255),(0,127,255),(0,255,255),(0,127,0),
(255,0,0),(255,0,255),(127,127,127),
(127,0,0),(0,127,127),
][pt[2]]
cv2.rectangle(fp["dump"], (pt[0], pt[1]), (pt[0] + w, pt[1] + h), col, 2)
cv2.imwrite("result.png", fp["dump"])
def draw_ngs(ngs):
for g in ngs:
print(g)
pts = g[1]
y = max([pt[1] for pt in pts])
x = max([pt[0] for pt in pts])
cv2.rectangle(fp["dump"], (pts[0][0], pts[0][1]), (x + 11, y + 18), (255,0,0), 2)
cv2.putText(fp["dump"], g[0], (pts[0][0], pts[0][1] + 40), cv2.FONT_HERSHEY_PLAIN, 0.8, (0,0,255), 1, cv2.LINE_AA)
cv2.imwrite("result.png", fp["dump"])
# Group digit hits and return each group's string together with its positions
def group_numbers(nump):
    # Sort by x coordinate
nump = sorted(nump)
for i,a in enumerate(nump):
        # Seed each element's neighborhood mark with its own index
if (len(a) == 3): a.append(i)
        # Look for neighbors to the right of this element
for b in nump[i+1:]:
if ((b[0] <= a[0] + 25) and
(a[1] - 8 <= b[1]) and
(b[1] <= a[1] + 8)):
                # When found, propagate the neighborhood mark
if (len(b) == 3): b.append(a[3])
b[3] = a[3]
ret = []
for n in set(list(map(lambda x: x[3], nump))):
if (n < 0): continue
# 近所マーク同士で統合
s0 = list(filter(lambda x: x[3] == n, nump))
s = []
for i,si in enumerate(s0):
            # Ignore identical digits that are too close together
if (i == 0 or si[2] != s0[i-1][2] or 7 < si[0] - s0[i-1][0]):
s.append(si[0:3])
key = "".join([str(s0[2]) for s0 in s])
pos = [s0[0:2] for s0 in s]
ret.append([key,pos])
#ret.append(s)
return ret
# Return the white margins (left, right, top, bottom) of a crop; left,right,top,bottom = 1,1,1,1 means the content just fits the frame
def find_whitespace(fig):
x0,y0,x1,y1 = -1,-1,-1,-1
#print("y", len(fig), "x",len(fig[0]))
for y,row in enumerate(fig):
for x,p in enumerate(row):
if (p == 255):
if (x0 < 0 or x < x0): x0 = x
if (y0 < 0): y0 = y
if (x1 < 0 or x1 < x): x1 = x
y1 = y
return x0,len(fig[0])-1-x1,y0,len(fig)-1-y1
# group = ["filename", [position]]
def cropbox(group, rate = 1, digits = 5):
fname = group[0]
hits = group[1]
hit = hits[0]
width = len(hits)
# グループの上端-100:上端+40、左端-30 左端+17*文字数+30の位置を切り出す
y0 = hit[1] - int(100 * rate)
y1 = hit[1] + int(40 * rate)
x0 = hit[0] - int(10 * rate)
x1 = hit[0] + int((17 * width + 10) * rate)
    # Require an all-white 5px inner border; if not satisfied, grow the crop and retry
_, nega = cv2.threshold(fp["gray"], 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
for i in range(10):
if (y0 < 0): y0 = 0
if (x0 < 0): x0 = 0
crop = nega[y0:y1, x0:x1]
xd0,xd1,yd0,yd1 = find_whitespace(crop)
#print(xd0,xd1,yd0,yd1)
if (yd0 < 5): y0 -= int(10 * rate)
#if (yd1 < 5): y1 += int(10 * rate)
if (xd0 < 5): x0 -= int(17 * rate)
if (xd1 < 5): x1 += int(17 * rate)
if (xd0 < 5 or xd1 < 5 or yd0 < 5 or yd1 < 5):
if (i == 9): fname = "m" + fname
continue
break
    # Todo: fine-tune the left/right whitespace
    # Re-derive the file name if too few digits were recognized
rename = False
if (not("m" in fname) and (len(fname) < digits)):
rename = True
nump = find_numbers(fp["gray"][y1-40:y1,x0:x1], fp["sample"], 0.8)
ngs = group_numbers(nump)
if (len(ngs) > 0 and digits <= len(ngs[0][0])): fname = ngs[0][0]
if (20 < (x1 - x0) - (y1 - y0)): fname = "md" + fname
print("*", end="", flush=True)
#print([fname, x0, x1, y0, y1], rename)
return [fname, x0, x1, y0, y1];
path = fp["dir"] + fname + ".png"
i=0
while (os.path.exists(path)):
i+=1
path = fp["dir"] + fname + "-" + str(i) + ".png"
print("dump " + path, y1-y0, x1-x0)
#cv2.imwrite(path, fp["load"][y0:y1,x0:x1])
def main(args):
print("argv=", args)
fpath = args[0]
rate = args[1] if 1 < len(args) else 1.0
digits = int(args[2]) if 2 < len(args) else 5
    # input image
fp["load"] = cv2.imread(fpath)
    # debug/output canvas
fp["dump"] = fp["load"].copy()
    # Locate the digit positions
fp["gray"] = cv2.cvtColor(fp["load"], cv2.COLOR_BGR2GRAY)
find_dkwnum()
exit()
if (rate == "auto"):
fp["sample"], rate = profilenum()
else:
rate = float(rate)/64
fp["sample"] = get_templates(rate)
nump = find_numbers(fp["gray"], fp["sample"])
#exit()
#draw_nump(nump)
    # Group the hits into numbers with positions
ngs = group_numbers(nump)
ngs = sorted(ngs, key=lambda p: p[1][0][1])
#draw_ngs(ngs)
    # Crop each group
    # Todo: skip g if it is already covered by a cropbox result
crops = [cropbox(g, rate, 5) for g in ngs]
    # Treat crops within 10px in y as the same row
for i,p in enumerate(crops):
if (i == 0):
p.append(p[4])
else:
bef = crops[i - 1]
p.append(p[4] if (10 < p[4] - bef[4]) else bef[5])
#print(p)
crops = sorted(crops, key=lambda p: (p[5], -p[1], p[0]))
    # Drop crops that sit too close to a neighbor
for i,p in enumerate(crops):
if (i != 0 and abs(p[1] - crops[i - 1][1]) < 10): p[5] = -1
    # Tile the remaining crops onto one output sheet
tiles = list(filter(lambda c: (not "m" in c[0]) and 0 <= c[5], crops))
untiles = list(filter(lambda c: ( "m" in c[0]) or c[5] < 0, crops))
w0 = max([(c[2]-c[1]) for c in tiles])
h0 = max([(c[4]-c[3]) for c in tiles])
row = 20
fp["dump"] = np.ones((int(1 + len(tiles) / row) * h0, row * w0), np.uint8) * 255
fname = fpath.split("/")[-1].split(".")[0] + "crop" + str(w0) + "x" + str(h0) + ".png"
print("<<concat>>", fname)
for i,c in enumerate(tiles):
x = (i % row) * w0
y = int(i / row) * h0
name,x0,x1,y0,y1 = c[:5]
w = x1 - x0
h = y1 - y0
print(name, end=" ")
if (w0 < w or h0 < h): continue
fp["dump"][y:y+h, x:x+w] = fp["gray"][y0:y1,x0:x1]
print([c[0] for c in untiles])
    # Write the output image
cv2.imwrite(fname, fp["dump"])
def renamer_test():
img = cv2.cvtColor(cv2.imread("./dump0603/4.png"), cv2.COLOR_BGR2GRAY)
print([[len(cs), len(cs[0])] for cs in get_templates()])
nump = find_numbers(img[-35:,0:], get_templates(), 0.8)
ngs = group_numbers(nump)
print(ngs)
cv2.imwrite("tmp.png", img[-35:])
exit()
def help() :
#for i, n in enumerate(filter(lambda c: c %2, [1,2,3])): print(i,n)
print("argv = [input png file, rate, digit],")
exit()
#renamer_test()
import sys
if (len(sys.argv) < 2): help()
main(sys.argv[1:])
| symtkhr/gwreg | crop/match.py | match.py | py | 13,323 | python | en | code | 0 | github-code | 90 |
40327336495 | #!/usr/bin/python3
# coding: utf-8
import sys
import os
import zhconv
from tqdm import tqdm
# pip3 install zhconv
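# Convert Traditional Chinese text files to Simplified Chinese in place; accepts a single file or a directory.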
def convert_hans(file_path):
if os.path.isfile(file_path):
input_files = [file_path]
elif os.path.isdir(file_path):
input_files = [os.path.join(file_path, f) for f in os.listdir(file_path)]
else:
        raise ValueError('Invalid argument: expected a file path or a directory')
for input_file in input_files:
try:
with open(input_file, encoding='utf-8')as f:
datalines = f.readlines()
with open(input_file, 'w', encoding='utf-8')as f:
for line in tqdm(datalines):
line = line.strip()
if not line:
continue
f.write(zhconv.convert(line, 'zh-cn')+'\n')
except Exception as e:
            print('Failed to convert `{}`: {}'.format(input_file, e))
def main():
# input_file = '/home/gswyhq/下载/测试文件.txt'
file_path = sys.argv[1]
convert_hans(file_path)
if __name__ == '__main__':
main()
| gswyhq/hello-world | file相关/繁体转简体.py | 繁体转简体.py | py | 1,125 | python | en | code | 9 | github-code | 90 |
29702215872 | """
Python Pygal常见数据图(折线图、柱状图、饼图、点图、仪表图和雷达图)详解
Pygal 同样支持各种不同的数据图,比如饼图、折线图等。Pygal 的设计很好,不管是创建哪种数据图,Pygal 的创建方式基本是一样的,都是先创建对应的数据图对象,然后添加数据,最后对数据图进行配置。因此,使用 Pygal 生成数据图是比较简单的。
折线图
折线图与柱状图很像,它们只是表现数据的方式不同,柱状图使用条柱代表数据,而折线图则使用折线点来代表数据。因此,生成折线图的方式与生成柱状图的方式基本相同。
使用 pygal.Line 类来表示折线图,程序创建 pygal.Line 对象就是创建折线图。下面程序示范了利用折线图来展示两套教程销量统计数据的方法。
"""
import pygal
x_data = ['2011', '2012', '2013', '2014', '2015', '2016', '2017']
# Build the data
y_data = [58000, 60200, 63000, 71000, 84000, 90500, 107000]
y_data2 = [52000, 54200, 51500, 58300, 56800, 59500, 62700]
# Create a pygal.Line object (line chart)
line = pygal.Line()
# Add the two series, one per line
line.add("C语言教程", y_data)
line.add("Python语言教程", y_data2)
# Set the X-axis tick labels
line.x_labels = x_data
# Override the Y-axis tick values
line.y_labels = [20000, 40000, 60000, 80000, 100000]
line.title = "编程教程的历年销量"
# Set the X and Y axis titles
line.x_title = "年份"
line.y_title = "销量"
# Place the legend at the bottom
line.legend_at_bottom = True
# Render the chart to an SVG file
line.render_to_file("fk_books1.svg")
| Bngzifei/PythonNotes | Python数据可视化/Python Pygal常见数据图(折线图、柱状图、饼图、点图、仪表图和雷达图)详解.py | Python Pygal常见数据图(折线图、柱状图、饼图、点图、仪表图和雷达图)详解.py | py | 1,653 | python | zh | code | 1 | github-code | 90 |
34350274856 | from __future__ import absolute_import
import botocore.session
import cachetools
import requests
from datetime import datetime
from cloudperf.providers import aws_helpers
# link to the newest endpoints.json in case the installed botocore doesn't
# yet have a region
ENDPOINTS_URL = "https://raw.githubusercontent.com/boto/botocore/develop/botocore/data/endpoints.json"
# botocore's endpoints contain some locations with a different name, so provide a base here
region_map = {
"AWS GovCloud (US)": "us-gov-west-1",
"AWS GovCloud (US-East)": "us-gov-east-1",
"AWS GovCloud (US-West)": "us-gov-west-1",
"Africa (Cape Town)": "af-south-1",
"Asia Pacific (Hong Kong)": "ap-east-1",
"Asia Pacific (Hyderabad)": "ap-south-2",
"Asia Pacific (Mumbai)": "ap-south-1",
"Asia Pacific (Osaka)": "ap-northeast-3",
"Asia Pacific (Seoul)": "ap-northeast-2",
"Asia Pacific (Singapore)": "ap-southeast-1",
"Asia Pacific (Sydney)": "ap-southeast-2",
"Asia Pacific (Tokyo)": "ap-northeast-1",
"Canada (Central)": "ca-central-1",
"EU (Frankfurt)": "eu-central-1",
"EU (Ireland)": "eu-west-1",
"EU (London)": "eu-west-2",
"EU (Milan)": "eu-south-1",
"EU (Paris)": "eu-west-3",
"EU (Stockholm)": "eu-north-1",
"Europe (Spain)": "eu-south-2",
"Europe (Zurich)": "eu-central-2",
"Israel (Tel Aviv)": "il-central-1",
"Middle East (Bahrain)": "me-south-1",
"Middle East (UAE)": "me-central-1",
"South America (Sao Paulo)": "sa-east-1",
"US East (N. Virginia)": "us-east-1",
"US East (Ohio)": "us-east-2",
"US West (Los Angeles)": "us-west-2-lax-1",
"US West (N. California)": "us-west-1",
"US West (Oregon)": "us-west-2",
}
location_map = {v: k for k, v in region_map.items()}
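# Reverse map: human-readable location description -> region code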
@cachetools.cached(cache={})
def get_endpoints_json():
return requests.get(ENDPOINTS_URL).json()
@cachetools.cached(cache={})
def resolve_endpoint(region=None, location=None):
if region in location_map:
return location_map[region]
if location in region_map:
return region_map[location]
session = botocore.session.get_session()
endpoint_data = session.get_data("endpoints")
for _ in range(2):
for p in endpoint_data.get("partitions", []):
for r, data in p["regions"].items():
# the pricing API returns EU, while endpoints contain Europe
loc = data["description"].replace("Europe", "EU")
if region == r:
return loc
if location == loc:
return r
# fall back to the latest JSON
endpoint_data = get_endpoints_json()
def region_to_location(region):
return resolve_endpoint(region=region)
def location_to_region(location):
return resolve_endpoint(location=location)
class CloudProvider(object):
provider = 'aws'
filters = {'operatingSystem': 'Linux', 'preInstalledSw': 'NA',
'licenseModel': 'No License required', 'capacitystatus': 'Used',
'tenancy': 'Shared'
}
def get_prices(self, fail_on_missing_regions=False, **filters):
if not filters:
filters = self.filters
instances = aws_helpers.get_ec2_prices(fail_on_missing_regions=fail_on_missing_regions, **filters)
# add a provider column
instances['provider'] = self.provider
return instances
def get_performance(self, prices_df, perf_df=None, update=None, expire=None, tags=[], **filters):
if not filters:
filters = self.filters
# only pass our records
prices_df = prices_df[prices_df['provider'] == self.provider]
if perf_df is not None:
perf_df = perf_df[perf_df['provider'] == self.provider]
instances = aws_helpers.get_ec2_performance(prices_df, perf_df, update, expire, tags, **filters)
if instances.empty:
return instances
# add a provider column
instances['provider'] = self.provider
return instances
def terminate_instances(self):
aws_helpers.terminate_instances()
| bra-fsn/cloudperf | cloudperf/providers/aws.py | aws.py | py | 4,127 | python | en | code | 7 | github-code | 90 |
27129949095 | #! /usr/bin/env python3
import argparse
import logging
import pathlib
import pandas as pd
from src import estimator, formatter, preprocessor
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s line:%(lineno)-3d %(levelname)-8s ] %(message)s",
datefmt="%Y/%m/%d %H:%M:%S",
)
def main(argv):
workloads = [pd.read_csv(file) for file in pathlib.Path(argv.directory[0]).glob("**/*.csv")]
# print(workloads, "\n")
cores = sorted(set(workloads[0]["setup core"]))
# print(cores, "\n")
frequencies = []
for anchor in range(len(cores)):
step = len(workloads[0]) // len(cores)
frequencies.append(sorted(set(workloads[0]["frequency"][anchor * step:anchor * step + step])))
# print(frequencies, "\n")
pmus = sorted(set(workloads[0]["event"][0:len(workloads[0]) // len(cores) // len(frequencies[0])]))
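    # pmus: the distinct performance-counter events recorded for each (core, frequency) combination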
# print(pmus, "\n")
for workload in workloads:
for i in range(len(cores)):
for j in range(len(frequencies[i])):
for k in range(len(pmus)):
idx = i * len(workload) // len(cores) + j * len(pmus) + k
workload.loc[idx, "count"] = int(workload["count"][idx].replace(",", ""))
workload.loc[idx, "time"] = float(workload["time"][idx])
# print(workloads, "\n")
workloads = preprocessor.Preprocessor(workloads).preprocess()
# print(workloads, "\n")
getPerFreqError(workloads, cores, frequencies)
getPerCoreError(workloads, cores, frequencies)
def getPerFreqError(workloads, cores, frequencies):
dataframes = formatter.PerFreqFormatter(workloads).format()
print("getPerFreqError: \n")
for i in range(len(cores)):
for j in range(len(frequencies[i])):
result = estimator.Estimator().estimate(dataframes[i][j])
print(pd.concat([pd.DataFrame({
"core": [cores[i] for _ in range(len(result))],
"frequency": [frequencies[i][j] for _ in range(len(result))]
}), result], axis=1), "\n")
def getPerCoreError(workloads, cores, frequencies):
dataframes = formatter.PerCoreFormatter(workloads).format()
print("getPerCoreError: \n")
for i in range(len(cores)):
result = estimator.Estimator().estimate(dataframes[i])
print(pd.concat([pd.DataFrame({"core": [cores[i] for _ in range(len(result))], "frequency": ["*********" for _ in range(len(result))]}), result], axis=1), "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
group = parser.add_argument_group()
group.add_argument("-d", "--directory", nargs=1, type=str, help="profiled directory")
argv = parser.parse_args()
if not argv.directory:
parser.error("Should have profiled directory!")
main(argv)
| misakisuna705/AOSP | main.py | main.py | py | 2,811 | python | en | code | 0 | github-code | 90 |
18550379709 | s=input()
ans=""
if len(s)<26:
cnt_alphabet=[0]*26
for i in range(len(s)):
cnt_alphabet[ord(s[i])-97]+=1
moji=""
for i in range(26):
if cnt_alphabet[i]<1:
moji=chr(i+97)
break
ans=s+moji
else:
cnt=25
for i in range(24,-1,-1):
if ord(s[i])>ord(s[i+1]):
cnt=i
else:
break
if cnt==0:
ans='-1'
else:
min_chr=ord('{')
for m in s[cnt:]:
if ord(m)>ord(s[cnt-1]):
min_chr=min(min_chr,ord(m))
ans=s[:cnt-1]+chr(min_chr)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03393/s443822450.py | s443822450.py | py | 630 | python | en | code | 0 | github-code | 90 |
21916476162 | # -*- coding: utf8 -*-
from __future__ import division
import sys
import math
from collections import defaultdict
VOCAB_NUMBER = 10**6
UNKNOWN_PROB = 0.05
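# Unigram LM evaluation: interpolate the trained probabilities with a uniform unknown-word distribution over a 10^6-word vocabulary.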
def load_trained_model(train_file):
trained_model = defaultdict(float)
with open(train_file, 'r') as f:
for line in f:
word, prob = line.rstrip().split('\t')
trained_model[word] = float(prob)
return trained_model
def test_unigram(train_file, test_file):
total_words, unknown_words, h = 0, 0, 0
trained_dic = load_trained_model(train_file)
with open(test_file, 'r') as f:
for line in f:
line = line.rstrip().split(' ')
line.append('</s>')
for word in line:
total_words += 1
p = UNKNOWN_PROB / VOCAB_NUMBER
if word in trained_dic:
p += (1. - UNKNOWN_PROB) * trained_dic[word]
else:
unknown_words += 1
h -= math.log(p, 2)
print('Entropy: {}'.format(h/total_words))
print('Coverage: {}'.format((total_words-unknown_words)/total_words))
if __name__=='__main__':
    if len(sys.argv) < 3:
        print("Please set uni-gram files.\npython [trained file] [test file]")
        sys.exit(1)
train_file = sys.argv[1]
test_file = sys.argv[2]
test_unigram(train_file, test_file)
| ochiaierika/nlptutorial | 01_unigramlm/confirm-test-unigram.py | confirm-test-unigram.py | py | 1,348 | python | en | code | 0 | github-code | 90 |
18004056849 | # coding: utf-8
import sys
#from operator import itemgetter
sysread = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
#from heapq import heappop, heappush
#from collections import defaultdict
sys.setrecursionlimit(10**7)
#import math
#from itertools import product, accumulate, combinations, product
#import bisect
import numpy as np
#from copy import deepcopy
#from collections import deque
#from decimal import Decimal
#from numba import jit
INF = 1 << 50
EPS = 1e-8
def run():
n, *A = map(int, read().split())
v = 0
acum = []
for a in A:
v += a
acum.append(v)
# greedy
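    # Force each prefix sum to alternate in sign; V tracks the sign required at the current step.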
ans = INF
for V in (-1, 1):
cums = 0
count = 0
for a in acum:
#print(a, '---------')
V *= -1
if (a + cums) * V > 0:
continue
else:
update = abs(a + cums) + 1
cums += (update) * V
count += update
#print(V, cums, count)
ans = min(ans, count)
print(ans)
if __name__ == "__main__":
run()
| Aasthaengg/IBMdataset | Python_codes/p03739/s557767856.py | s557767856.py | py | 1,082 | python | en | code | 0 | github-code | 90 |
18216593749 | MOD = 998244353
n, m, k = map(int, input().split())
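# Sum over x (number of adjacent equal pairs): C(n-1, x) * m * (m-1)^(n-1-x), with the binomial coefficient maintained via modular inverses.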
c = 1
cnt = 0
for x in range(k + 1):
cnt += c * m * pow(m - 1, n - 1 - x, MOD)
cnt %= MOD
c *= (n - x - 1) * pow(x + 1, MOD - 2, MOD)
c %= MOD
print(cnt) | Aasthaengg/IBMdataset | Python_codes/p02685/s804312814.py | s804312814.py | py | 222 | python | en | code | 0 | github-code | 90 |
73531841578 | from torch.utils.data import DataLoader
import time
import torch
from torch import nn
# import wandb
from transformers import get_linear_schedule_with_warmup
from torch.optim.lr_scheduler import CosineAnnealingLR, CyclicLR
# from model.load_model import load_model_from_path
# import logging
# from utils import *
from evaluate import evaluate
from data import collate_fn, CustomBatch, collate_wrapper
from train_utils import seed_worker
# from torch_geometric.data import DataLoader
import gc
import numpy as np
from torch.nn.functional import one_hot
import random
# import numpy
# from torch.utils.tensorboard import SummaryWriter
from utils import dump_file, mkdir
from IPython import embed
import os
from utils import load_file, dump_file, visualize_plot
import gc
from tqdm import tqdm
# from torch.optim.lr_scheduler import _LRScheduler
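# Training loop with optional AMP, gradient accumulation, linear warmup scheduling, and early stopping on the validation score.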
def train(args, model, optimizer, data):
train_data, val_data, test_data = data
if args.debug:
torch.autograd.set_detect_anomaly(True)
num_samples_test=20
train_data.instances = train_data.instances[:num_samples_test]
val_data.instances = val_data.instances[:num_samples_test]
test_data.instances = test_data.instances[:num_samples_test]
# embed()
if args.exp == "mol_pred":
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=False, collate_fn=collate_fn,
drop_last=False, num_workers=args.num_workers, worker_init_fn=seed_worker)
else:
print('args.exp', args.exp)
# train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=False, collate_fn=collate_fn_re,
# drop_last=False)
        # shuffle is now True for the training loader
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True, collate_fn=collate_wrapper,
drop_last=False)
# model = args.model
# if args.n_gpu > 1:
# model = nn.DataParallel(model)
# optimizer = args.optimizer
# get logger
logger = args.logger
writer = args.writer
train_iterator = range(args.start_epoch, int(args.num_epochs) + args.start_epoch)
total_steps = int(len(train_loader) * args.num_epochs)
warmup_steps = int(total_steps * args.warmup_ratio)
scheduler = None
if args.scheduler:
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
num_training_steps=total_steps)
# scheduler = STLR(optimizer, num_warmup_steps=warmup_steps,
# num_training_steps=total_steps)
# scheduler = CosineAnnealingLR(optimizer, T_max=(int(args.num_epochs) // 4) + 1, eta_min=0)
logger.debug(f"Total steps: {total_steps}")
logger.debug(f"Warmup steps: {warmup_steps}")
scaler = torch.cuda.amp.GradScaler() if args.use_amp else None
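    # GradScaler applies dynamic loss scaling when AMP is enabled; it stays None otherwise.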
bad_counter = 0
best_val_score = -float("inf")
best_epoch = 0
t_total = time.time()
num_steps = 0
logger.debug(f"{len(train_loader)} steps for each epoch")
# print("train_iterator",train_iterator)
for epoch in train_iterator:
gc.collect()
# logger.debug(f"Epoch {epoch}")
t = time.time()
# torch.autograd.set_detect_anomaly(True)
total_loss = 0
for step, batch in enumerate(tqdm(train_loader)):
# logger.debug(f"Step {step}")
# gc.collect()
num_steps += 1
if args.exp == "mol_pred":
encoded_input = batch[0] # tokenizer(batch[0])
# print("encoded_input", encoded_input)
inputs = {'input_ids': {key: encoded_input[key].to(args.device) for key in encoded_input},
'batch_graph_data': batch[1].to(args.device),
'ids': batch[2],
'in_train': True,
}
else:
inputs = batch.to(args.device)
# model learning
model.train()
if args.use_amp:
with torch.cuda.amp.autocast():
loss = model(inputs, args)
scaler.scale(loss).backward()
if (step + 1) % args.grad_accumulation_steps == 0 or step == len(train_loader) - 1:
if args.max_grad_norm > 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
scaler.step(optimizer)
scaler.update()
if args.scheduler:
scheduler.step()
optimizer.zero_grad()
else:
loss = model(inputs, args)
loss.backward()
                # match the AMP branch: update every grad_accumulation_steps (step is 0-based)
                if (step + 1) % args.grad_accumulation_steps == 0 or step == len(train_loader) - 1:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
if args.scheduler:
scheduler.step()
optimizer.zero_grad()
total_loss += loss.item()
val_score, output = evaluate(args, model, val_data)
if epoch > args.burn_in:
if val_score >= best_val_score:
best_val_score, best_epoch, bad_counter = val_score, epoch, 0
torch.save({
'epoch': epoch + 1,
'num_steps': num_steps + 1,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'best_val_score': best_val_score,
}, args.model_path)
else:
bad_counter += 1
if bad_counter == args.patience:
break
logger.debug(f'Epoch {epoch} | Train Loss {total_loss:.8f} | Val Score {val_score:.4f} | '
f'Time Passed {time.time() - t:.4f}s')
# embed()
writer.add_scalar('train', total_loss, epoch)
writer.add_scalar('val', val_score, epoch)
# wandb.log({'loss_train': loss.data.item(),
# 'val_score': val_score,
# }, step=num_steps)
# print('Time passed', (time.time() - t))
logger.debug("Optimization Finished!")
logger.debug("Total time elapsed: {:.4f}s".format(time.time() - t_total))
logger.debug('Loading {}th epoch'.format(best_epoch))
gc.collect()
model.load_state_dict(torch.load(args.model_path)['model_state_dict'])
test_score, output = evaluate(args, model, test_data)
# mkdir("analyze")
# dump_file(output, "analyze/output.json")
logger.debug(f"Test Score {test_score}")
# writer.add_scalar('test', test_score, 0)
# writer.add_hparams(
# {'batch_size': args.batch_size, 'num_epochs': args.num_epochs,
# 'plm_lr': args.plm_lr, 'lr': args.lr, 'g_dim': args.g_dim, 'max_grad_norm': args.max_grad_norm,
# 'mult_mask': args.mult_mask, 'g_mult_mask': args.g_mult_mask, 'dropout': args.dropout, 'model_type': args.model_type,
# 'g_global_pooling': args.g_global_pooling},
# {'hparam/test': test_score, 'hparam/val': best_val_score})
# writer.close()
sr_file = args.experiment_path + args.exp + "_result.json"
sr = load_file(sr_file) if os.path.exists(sr_file) else []
hparam = vars(args)
# print("e2")
# serialize params
for key in hparam:
item=hparam[key]
if not isinstance(item, (float, str, int, complex, list, dict, set, frozenset, bool)):
hparam[key]=str(item)
hparam["val_score"] = best_val_score
hparam["test_score"] = test_score
sr.append(hparam)
# print("e0")
# Plot lines
visualize_plot(y=[[hparam["val_score"] for hparam in sr],
[hparam["test_score"] for hparam in sr]],
name=["val", "test"],
path=args.experiment_path + args.exp + "_result.png")
# print("e1")
dump_file(sr, sr_file)
# class STLR(torch.optim.lr_scheduler._LRScheduler):
# def __init__(self, optimizer, max_mul, ratio, steps_per_cycle, decay=1, last_epoch=-1):
# self.max_mul = max_mul - 1
# self.turning_point = steps_per_cycle // (ratio + 1)
# self.steps_per_cycle = steps_per_cycle
# self.decay = decay
# super().__init__(optimizer, last_epoch)
#
# def get_lr(self):
# residual = self.last_epoch % self.steps_per_cycle
# multiplier = self.decay ** (self.last_epoch // self.steps_per_cycle)
# if residual <= self.turning_point:
# multiplier *= self.max_mul * (residual / self.turning_point)
# else:
# multiplier *= self.max_mul * (
# (self.steps_per_cycle - residual) /
# (self.steps_per_cycle - self.turning_point))
# return [lr * (1 + multiplier) for lr in self.base_lrs]
#
# class NoamLR(_LRScheduler):
# """
# Implements the Noam Learning rate schedule. This corresponds to increasing the learning rate
# linearly for the first ``warmup_steps`` training steps, and decreasing it thereafter proportionally
# to the inverse square root of the step number, scaled by the inverse square root of the
# dimensionality of the model. Time will tell if this is just madness or it's actually important.
# Parameters
# ----------
# warmup_steps: ``int``, required.
# The number of steps to linearly increase the learning rate.
# """
# def __init__(self, optimizer, warmup_steps):
# self.warmup_steps = warmup_steps
# super().__init__(optimizer)
#
# def get_lr(self):
# last_epoch = max(1, self.last_epoch)
# scale = self.warmup_steps ** 0.5 * min(last_epoch ** (-0.5), last_epoch * self.warmup_steps ** (-1.5))
# return [base_lr * scale for base_lr in self.base_lrs]
| chenkaisun/MMLI1 | code/train.py | train.py | py | 10,037 | python | en | code | 1 | github-code | 90 |
37838319925 | import json
from django.conf import settings
from django.test import TestCase
class ServerUpdateTests(TestCase):
def setUp(self) -> None:
settings.DEBUG = False
# def test_server_release_illegal_action(self):
# head = {
# "User-Agent": "GitHub-Hookshot/12dd831",
# "X-GitHub-Delivery": "ed768cd0-c8c7-11ed-8872-a6054ecd29cb",
# "X-GitHub-Event": "release",
# "X-GitHub-Hook-ID": "405847292",
# "X-GitHub-Hook-Installation-Target-ID": "118770247",
# "X-GitHub-Hook-Installation-Target-Type": "repository",
# "X-Hub-Signature": "sha1=9eacaced69ec7306ad0ed61dc3390423f78748a3",
# "X-Hub-Signature-256": "sha256=b7f78f2072b414e3039513b9240756f057385c1e7789e1899531a092e5cb0ca0",
# }
# with open('./tests/data/github_release_notpublished.json', 'r') as file:
# data = json.loads(file.read())
# req_url = settings.DEPLOY_URL.replace('<str:srvtype>/', 'server/')
# rsp = self.client.post(req_url, data=data, content_type='application/json', **head)
# print(rsp.content.decode('utf-8'))
# self.assertEqual(200, rsp.status_code)
def test_server_release_ok(self):
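        # Replay a recorded GitHub 'release published' webhook (headers + JSON payload) against the deploy endpoint.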
head = {
"HTTP_USER_AGENT": "GitHub-Hookshot/12dd831",
"X-GitHub-Delivery": "ed7d1c80-c8c7-11ed-8984-7b8a39e5852f",
"HTTP_X_GITHUB_EVENT": "release",
"HTTP_X_GITHUB_HOOK_ID": "405847292",
"HTTP_X_GITHUB_HOOK_INSTALLATION_TARGET_ID": "118770247",
"HTTP_X_GITHUB_HOOK_INSTALLATION_TARGET_TYPE": "repository",
"X-Hub-Signature": "sha1=a9c32dd9e04f850c0d3ca3e5399484f1d11cee37",
"HTTP_X_HUB_SIGNATURE_256": "sha256=40ef019805b7adcd8e98566d78e658805b33ef5d0ef3449e418a8926c8253e53",
}
with open("./tests/data/github_release_published.json", "r") as file:
a = file.read()
print(type(a), len(a))
data = json.loads(a)
req_url = settings.DEPLOY_URL.replace("<str:srvtype>/", "server/")
print(req_url)
rsp = self.client.post(req_url, data=data, content_type="application/json", **head)
print(rsp.content.decode("utf-8"))
self.assertEqual(200, rsp.status_code)
| HEYsir/blog_system | tests/test_depoly.py | test_depoly.py | py | 2,359 | python | en | code | 1 | github-code | 90 |
1814835146 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 16 17:24:40 2020
@author: Rajesh
"""
salary = '$876,001'
# x=salary[1:4]+salary[5:]
x=salary[1:4]+salary[5:8]
x=int(x)
print('Salary :', x)
sal=input('Enter the salary in $ (it will be converted to an integer): ')
x=sal[1:4]+sal[5:]
x=int(x)
print('Salary :', x)
# sample input: '$777,009' | Rajesh-sharma92/FTSP_2020 | Python_CD3/Salary_Integer.py | Salary_Integer.py | py | 325 | python | en | code | 3 | github-code | 90 |
36318173945 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
from setuptools import setup, find_packages
import codecs
import os
import re
def read(*parts):
path = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(path, encoding='utf-8') as fobj:
return fobj.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
install_requires = [
]
tests_require = [
'nose',
]
setup(
name='trigger',
version=find_version("trigger", "__init__.py"),
url='https://github.com/weiwongfaye/python_cli_template',
license='MIT',
author='jackw',
author_email='weiwongfaye@hotmail.com',
description='PYTHON cli template',
scripts=['bin/trigger'],
classifiers=[
"Programming Language :: Python",
],
platforms='any',
keywords='splunk clustering docker',
packages=find_packages(exclude=['tests']),
include_package_data=True,
test_suite='nose.collector',
install_requires=install_requires,
tests_require=tests_require,
) | weiwongfaye/python_cli_template | setup.py | setup.py | py | 1,345 | python | en | code | 0 | github-code | 90 |
26903671523 | # input
d = float(input('What was the initial population density: '))
r = float(input('What is the fertility parameter: '))
s = int(input('Over how many time steps should the population density be simulated: '))
# program
print(d)
for i in range(s - 1):
    d = r * d * (1 - d)  # logistic-map update
print(d)
| xander27481/informatica5 | 07a - iteraties Fortus/+/Chaos.py | Chaos.py | py | 296 | python | nl | code | 0 | github-code | 90 |
20070194738 | import pandas as pd
import numpy as np
import requests
import bs4 as bs
from six.moves import urllib
from tmdbv3api import TMDb
import json
from tmdbv3api import Movie
link = "https://en.wikipedia.org/wiki/List_of_American_films_of_2020"
source = urllib.request.urlopen(link).read()
soup = bs.BeautifulSoup(source,'lxml')
tables = soup.find_all('table',class_='wikitable sortable')
df1 = pd.read_html(str(tables[0]))[0]
df2 = pd.read_html(str(tables[1]))[0]
df3 = pd.read_html(str(tables[2]))[0]
df4 = pd.read_html(str(tables[3]).replace("'1\"\'",'"1"'))[0]
df = df1.append(df2.append(df3.append(df4,ignore_index=True),ignore_index=True),ignore_index=True)
print(df)
df_2020 = df[['Title','Cast and crew']]
tmdb = TMDb()
tmdb.api_key = '05134cb28bbd34919fadc768fc18d0f3'
tmdb_movie = Movie()
def get_genre(x):
genres = []
result = tmdb_movie.search(x)
if not result:
return np.NaN
else:
movie_id = result[0].id
response = requests.get('https://api.themoviedb.org/3/movie/{}?api_key={}'.format(movie_id,tmdb.api_key))
data_json = response.json()
if data_json['genres']:
genre_str = " "
for i in range(0,len(data_json['genres'])):
genres.append(data_json['genres'][i]['name'])
return genre_str.join(genres)
else:
return np.NaN
df_2020['genres'] = df_2020['Title'].map(lambda x: get_genre(str(x)))
def get_director(x):
if " (director)" in x:
return x.split(" (director)")[0]
elif " (directors)" in x:
return x.split(" (directors)")[0]
else:
return x.split(" (director/screenplay)")[0]
def get_first_actor(x):
return ((x.split("screenplay); ")[-1]).split(", ")[0])
def get_second_actor(x):
if len((x.split("screenplay); ")[-1]).split(", ")) < 2:
return np.NaN
else:
return ((x.split("screenplay); ")[-1]).split(", ")[1])
def get_third_actor(x):
if len((x.split("screenplay); ")[-1]).split(", ")) < 3:
return np.NaN
else:
return ((x.split("screenplay); ")[-1]).split(", ")[2])
df_2020['director_name'] = df_2020['Cast and crew'].map(lambda x: get_director(str(x)))
df_2020['actor_1_name'] = df_2020['Cast and crew'].map(lambda x: get_first_actor(str(x)))
df_2020['actor_2_name'] = df_2020['Cast and crew'].map(lambda x: get_second_actor(str(x)))
df_2020['actor_3_name'] = df_2020['Cast and crew'].map(lambda x: get_third_actor(str(x)))
df_2020 = df_2020.rename(columns={'Title':'movie_title'})
new_df20 = df_2020.loc[:,['director_name','actor_1_name','actor_2_name','actor_3_name','genres','movie_title']]
new_df20['combination'] = new_df20['actor_1_name'] + ' ' + new_df20['actor_2_name'] + ' '+ new_df20['actor_3_name'] + ' '+ new_df20['director_name'] +' ' + new_df20['genres']
new_df20.isna().sum()
new_df20 = new_df20.dropna(how='any')
new_df20.isna().sum()
new_df20['movie_title'] = new_df20['movie_title'].str.lower()
old_df = pd.read_csv('/content/preprocessed_3.csv')
final_df = old_df.append(new_df20,ignore_index=True)
print(final_df)
final_df.to_csv('/content/main_data.csv',index=False) | adityanandgaokar/movie_recommendation_system_heroku | data_preprocessing_4.py | data_preprocessing_4.py | py | 3,229 | python | en | code | 0 | github-code | 90 |
2502167892 | import random
def get_score(name):
read_file = open("rating.txt", "r")
for record in read_file:
if name in record:
score = record.split(" ")[1].strip("\n")
print(f"Your rating: {score}")
read_file.close()
return
else:
read_file.close()
score = 0
write_file = open("rating.txt", "a")
write_file.write(f"{name} {score}\n")
print(f"Your rating: 0")
write_file.close()
def update_score(name_, score_):
d = {}
read_file = open("rating.txt", "r")
for line in read_file:
(name, score) = line.split()
d[str(name)] = int(score)
if name_ in d:
d[name_] += int(score_)
else:
d[name_] = score_
read_file.close()
write_file = open("rating.txt", "w")
for name_, score_ in d.items():
write_file.write("%s %s\n" % (name_, score_))
write_file.close()
def play():
default_options = ["scissors", "paper", "rock"]
player_name = input("Enter your name: ")
print(f"Hello, {player_name}")
player_options = input().split(',')
is_default = True
if len(player_options) > 1:
print("Okay, let's start")
is_default = False
default_options = player_options
while True:
computer_choice = random.choice(default_options)
player_choice = input()
if player_choice in default_options:
            if not is_default:
                # Rotate the custom options so the player's choice lands in the
                # middle; the win/lose check below is then an index comparison.
                total_length = len(default_options)
                half_length = total_length // 2
if default_options[default_options.index(player_choice)] != default_options[half_length]:
if default_options[default_options.index(player_choice)] < default_options[half_length]:
move_right = half_length - (default_options.index(player_choice))
default_options = default_options[-move_right:] + default_options[:-move_right]
else:
move_left = (default_options.index(player_choice)) - half_length
default_options = default_options[move_left:] + default_options[:move_left]
if player_choice == computer_choice:
print(f"There is a draw ({computer_choice})")
update_score(player_name, "50")
else:
if is_default:
if player_choice == default_options[0] and computer_choice == default_options[2] \
or player_choice == default_options[1] and computer_choice == default_options[0] \
or player_choice == default_options[2] and computer_choice == default_options[1]:
print(f"Sorry, but computer chose {computer_choice}")
else:
print(f"Well done. Computer chose {computer_choice} and failed")
update_score(player_name, "100")
else:
if default_options.index(player_choice) > default_options.index(computer_choice):
print(f"Well done. Computer chose {computer_choice} and failed")
update_score(player_name, "100")
else:
print(f"Sorry, but computer chose {computer_choice}")
elif player_choice == "!exit":
print("Bye!")
break
elif player_choice == "!rating":
get_score(player_name)
else:
print("Invalid input")
play()
# Test values
# rock,gun,lightning,devil,dragon,water,air,paper,sponge,wolf,tree,human,snake,scissors,fire
| vladyslavmakartet/-rock_paper_scissors | Rock-Paper-Scissors/game.py | game.py | py | 3,679 | python | en | code | 0 | github-code | 90 |
18216421589 | n,m,k = map(int,input().split())
mod = 998244353
def comb(n,k):
if n < k: return 0
if n < 0 or k < 0: return 0
return fac[n]*finv[k]%mod*finv[n-k]%mod
fac = [0]*(n+1)
finv = [0]*(n+1)
fac[0] = finv[0] = 1
for i in range(1,n+1):
fac[i] = fac[i-1]*i%mod
finv[i] = pow(fac[i],mod-2,mod)
ans = 0
for i in range(k+1):
    # exactly i of the n-1 adjacent pairs repeat a color:
    # m * C(n-1, i) * (m-1)^(n-1-i) colorings
    ans = (ans + m*comb(n-1,i)%mod*pow(m-1,n-1-i,mod)%mod)%mod
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02685/s603835671.py | s603835671.py | py | 407 | python | en | code | 0 | github-code | 90 |
5360443372 | """
h: number of sound-level checks per second
b: number of bits stored per check
c: number of channels per track
s: recording time in seconds
e.g. h=44100, b=16, c=2, s=1 gives 1,411,200 bits = 176,400 bytes ~ 0.2 MB
"""
h, b, c, s = map(int, input().split())
result = (h * b * c * s) / 8
result /= 1024**2
print("%.1f" %result +" MB") | Damnun/CodeUp_100Q | Question_84.py | Question_84.py | py | 237 | python | ko | code | 0 | github-code | 90 |
# this file moves all pdf reflection files into a folder called "_reflections", which will be made if necessary
import os
# https://stackoverflow.com/questions/8858008/how-to-move-a-file,
# answered by Peter Vlaar
import os, shutil, pathlib, fnmatch
def move_dir(src: str, dst: str, pattern: str = '*'):
if not os.path.isdir(dst):
pathlib.Path(dst).mkdir(parents=True, exist_ok=True)
for f in fnmatch.filter(os.listdir(src), pattern):
shutil.move(os.path.join(src, f), os.path.join(dst, f))
move_dir('./','./_reflections/','*reflection*.pdf')
move_dir('./','./_reflections/','*Reflection*.pdf')
| ofloveandhate/python_autograder | move_reflections.py | move_reflections.py | py | 615 | python | en | code | 0 | github-code | 90 |
p = int(input("enter p (2<p<=10):"))
for x in range (1,p):
a=[]
for y in range (1,p):
        z = (x*y//p)*10 + (x*y) % p  # x*y written as two base-p digits packed into a decimal number
a.append(z)
print(a)
| nmt132/132-NOVIKOV | табличбка.py | табличбка.py | py | 189 | python | en | code | 2 | github-code | 90 |
18458777139 | N = int(input())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
K = 0
S = 0
T = []
if sum(A) < sum(B):
K = -1
else:
for i in range(N):
if A[i] < B[i]:
K += 1
S = S + B[i] - A[i]
elif A[i] > B[i]:
T.append(A[i] - B[i])
else:
continue
T.sort()
L = 0
while S > L:
L += T.pop()
K += 1
print(K)
| Aasthaengg/IBMdataset | Python_codes/p03151/s905709903.py | s905709903.py | py | 403 | python | en | code | 0 | github-code | 90 |
28300553712 | import pygame
import sys
pygame.init()
display = pygame.display.set_mode((800,800))
pygame.display.set_caption('Instruction Screen')
def Instruction():
    width = display.get_width()
    height = display.get_height()
    color_white = (225,255,255)
    color_light = (180,180,180)
    color_dark = (110,110,110)
    smallfont = pygame.font.SysFont("Timesnewroman", 20)
    titlefont = pygame.font.SysFont('arial',50)
    font = pygame.font.SysFont('arial',25)
    buttonText = smallfont.render("Start", True, color_white)
    while True:
        # Handle input first, and exit cleanly so the loop never runs after pygame.quit().
        for i in pygame.event.get():
            if i.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if i.type == pygame.MOUSEBUTTONDOWN:
                mouse = pygame.mouse.get_pos()
                if width/2 <= mouse[0] <= width/2+140 and height/2 <= mouse[1] <= height/2+40:
                    pygame.quit()
                    sys.exit()
        # Redraw every frame, not only when an event arrives.
        display.fill((0,0,0))
        title = titlefont.render("How to Play",True,(0,225,0))
        display.blit(title,(275,25))
        line1 = font.render("Your mission is to defuse the bomb before time runs out.",True,(0,225,0))
        line2 = font.render("You will have 60 seconds to answer 10 questions correctly.",True,(0,225,0))
        line3 = font.render("If you either answer incorrectly or misspell your answer,",True,(0,225,0))
        line4 = font.render("the bomb will automatically detonate.",True,(0,225,0))
        line5 = font.render("Both speed and accuracy are needed if you are to succeed",True,(0,225,0))
        line6 = font.render("in defusing the bomb.",True,(0,225,0))
        line7 = font.render("Each time you successfully defuse the bomb,",True,(0,225,0))
        line8 = font.render("you will receive a point.",True,(0,225,0))
        line9 = font.render("Compete to see who can score the most points.",True,(0,225,0))
        line10 = font.render("Good luck.",True,(0,225,0))
        display.blit(line1,(100,100))
        display.blit(line2,(100,150))
        display.blit(line3,(100,200))
        display.blit(line4,(100,250))
        display.blit(line5,(100,300))
        display.blit(line6,(100,350))
        display.blit(line7,(100,400))
        display.blit(line8,(100,450))
        display.blit(line9,(100,500))
        display.blit(line10,(100,550))
        mouse = pygame.mouse.get_pos()
        if width/2 <= mouse[0] <= width/2+140 and height/2 <= mouse[1] <= height/2+40:
            pygame.draw.rect(display,color_light,[width/2,height/2,140,40])
        else:
            pygame.draw.rect(display,color_dark,[width/2,height/2,140,40])
        display.blit(buttonText,(width/2+50,height/2))
        pygame.display.update()
Instruction()
| ColeHeilman/Preposal-and-Github-lab | Instructions.py | Instructions.py | py | 2,893 | python | en | code | 0 | github-code | 90 |
40549560016 | from .predictorproxy import PredictorProxy
from .predictor import _SAMPLE_VIDEO_FRAMES
import time
import cv2
import os
import datetime
_DEFAULT_IMG = cv2.imread(os.path.join(
os.path.dirname(__file__), 'templates', 'Roboy_chef.jpg'))
if _DEFAULT_IMG is None:
raise ValueError("Default image was not found")
class HARController:
def __init__(self, camera, view):
self.cam = camera
self.view = view
self.predictor = PredictorProxy(self.cam.width, self.cam.height)
self.predictor.on_prediction = self._on_prediction
def _on_prediction(self, prediction):
self.view.current_text = ""
self.on_prediction(prediction)
    def on_prediction(self, prediction):
        # Callback hook: callers override or reassign this to consume predictions.
        pass
def record(self, fps=25):
self.view.current_text = 'RECORDING...'
self.view.progressStart()
delay = 1 / fps # in seconds
counter = 0
# loop_start = time.time()
while counter < _SAMPLE_VIDEO_FRAMES:
ts = time.time()
_, frame = self.cam.read()
self.predictor.process_and_predict(frame)
counter += 1
ts = time.time() - ts
try:
time.sleep(delay - ts)
except:
break
# rgb_time = time.time() - loop_start
# self.cam.stop()
self.cam.stopDisplay()
self.view.current_text = 'Thinking'
self.view.showImg(_DEFAULT_IMG)
| mfedoseeva/roboy-activity-recognition | app_code/inference/HARController.py | HARController.py | py | 1,448 | python | en | code | 0 | github-code | 90 |
19379098686 | from Functions.Tests import *
from Functions.Visualizations import *
def run_tests():
# first analysis
master_results = {}
for i in range(0, 5):
test_field(master_results, i)
index = ['standard', 'food_heavy', 'middle_food', 'middle_shelter', 'shelter_heavy']
master_results = pd.DataFrame(master_results).T
# print(master_results)
master_results.index = index
print("The best-performing field was {}".format(master_results[0].idxmin()))
# field stats
field = MiddleShelterWindbreakTest(34)
food = np.count_nonzero(field.array == 2)
shelter = np.count_nonzero(field.array == 3)
crops = np.count_nonzero(field.array == 1)
total = food + shelter + crops
print('Percent food: {:.2f}%'.format(math.ceil(100 * food/total)))
print("Percent shelter: {:.2f}%".format(math.ceil(100 * shelter/total)))
print("Percent crops: {:.2f}%".format(math.floor(100 * crops/total)))
# Testing a higher crop percentage variant of the middle row_len
start_time = time.time()
field_test = MiddleShelterWindbreakTest2(34)
results = []
for j in range(100):
monarch1 = Monarch(field_test)
monarch1.move_one_day()
results.append(monarch1.status)
print("Dead percentage = {:.2f}%".format(100 * results.count('dead') / len(results)))
print("Exit percentage = {:.2f}%".format(100 * results.count('exit') / len(results)))
print("--- %s seconds ---" % (time.time() - start_time))
food = np.count_nonzero(field_test.array == 2)
shelter = np.count_nonzero(field_test.array == 3)
crops = np.count_nonzero(field_test.array == 1)
total = food + shelter + crops
print('Percent food: {:.2f}%'.format(math.ceil(100 * food/total)))
print("Percent shelter: {:.2f}%".format(math.ceil(100 * shelter/total)))
print("Percent crops: {:.2f}%".format(math.floor(100 * crops/total))) | joshfactorial/pollinator_simulation | Functions/run_tests.py | run_tests.py | py | 1,898 | python | en | code | 1 | github-code | 90 |
18381367009 | N = int(input())
tasks = [None] * N
for i in range(N):
tasks[i] = tuple(map(int,input().split()))
tasks = sorted(tasks, key = lambda x:x[1])
time = 0
for t in tasks:
if t[1] - t[0] < time:
print("No")
break
time += t[0]
else:
print("Yes") | Aasthaengg/IBMdataset | Python_codes/p02996/s977400855.py | s977400855.py | py | 257 | python | en | code | 0 | github-code | 90 |
18429507249 | import sys
n = int(input())
b = list(map(int, input().split()))
ans = []
for i in range(n):
for j in range(len(b), 0, -1):
if b[j - 1] == j:
ans.append(j)
del b[j - 1]
break
else:
print(-1)
sys.exit()
ans.reverse()
for i in ans:
print(i) | Aasthaengg/IBMdataset | Python_codes/p03089/s690781028.py | s690781028.py | py | 310 | python | en | code | 0 | github-code | 90 |
10889633535 | import numpy as np
import numpy.linalg as la
import scipy.sparse as sp
import scipy.sparse.linalg as spla
import sys
from functools import reduce
# As of 2022/10, only the square lattice is supported
# ===============================================================================
# 1次元の tight-binding model
def _D1d(N, t, bc):
if N==1:
return sp.csr_matrix((N,N))
D1d = sp.diags([t,0,t], [-1,0,1], shape=(N,N), format='lil')
if bc == 'periodic':
D1d[0,-1] = D1d[-1,0] = t
else:
pass
return D1d.tocsr()
# hopping in the x -> x+1 direction only
def _D1d_p(N, t, bc):
if N==1:
return sp.csr_matrix((N,N))
D1d = sp.diags([0,t], [0,1], shape=(N,N), format='lil')
if bc == 'periodic':
D1d[-1,0] = t
else:
pass
return D1d.tocsr()
# hopping in the x -> x-1 direction only
def _D1d_m(N, t, bc):
if N==1:
return sp.csr_matrix((N,N))
D1d = sp.diags([t,0], [-1,0], shape=(N,N), format='lil')
if bc == 'periodic':
D1d[0,-1] = t
else:
pass
return D1d.tocsr()
# ===============================================================================
# Orthogonal class
# d-dimensional tight-binding model
def get_tight_binding_Hamiltonian(shape, t=-1., bc='periodic'):
    # Supports both periodic and fixed (open) boundary conditions.
assert bc in ['periodic', 'fixed'], "bc should be 'periodic', 'fixed'!"
    # shape may also be an int instead of a list/tuple (an int denotes a 1D system).
if not hasattr(shape, "__iter__"):
shape = (shape,)
H = sp.csr_matrix( (np.prod(shape),np.prod(shape)) )
for i in range(len(shape)):
D = [
_D1d(N,t,bc) if i==j else sp.eye(N) for j, N in enumerate(shape)
]
H += reduce(sp.kron, D)
return H
# Symplectic class
# d-dimensional Ando model
def get_Ando_Hamiltonian(shape, t1, t2, t=-1., bc='periodic'):
def _Tx_p(T1,T2):
T = np.array([
[ T1, T2],
[-T2, T1]
])
return sp.csr_matrix(T)
def _Tx_m(T1,T2):
T = np.array([
[ T1,-T2],
[ T2, T1]
])
return sp.csr_matrix(T)
def _Ty_p(T1,T2):
T = np.array([
[T1, -1j*T2],
[-1j*T2, T1]
])
return sp.csr_matrix(T)
def _Ty_m(T1,T2):
T = np.array([
[T1, 1j*T2],
[1j*T2, T1]
])
return sp.csr_matrix(T)
assert bc in ['periodic', 'fixed'], "bc should be 'periodic', 'fixed'!"
    # shape may also be an int instead of a list/tuple (an int denotes a 1D system).
if not hasattr(shape, "__iter__"):
shape = (shape,1)
if len(shape) == 1:
shape = (shape[0],1)
if not np.allclose(t1**2 + t2**2, 1., atol=1e-4):
print('Warning: t1^2 + t2^2 is not unity. value={:.6g}'.format(t1**2 + t2**2))
Nx, Ny = shape[:2]
Eyes = [sp.eye(N) for N in shape]
Dx_p = [_Tx_p(t1,t2)] + Eyes
Dx_p[1] = _D1d_p(Nx, t, bc)
Dx_m = [_Tx_m(t1,t2)] + Eyes
Dx_m[1] = _D1d_m(Nx, t, bc)
Dy_p = [_Ty_p(t1,t2)] + Eyes
Dy_p[2] = _D1d_p(Ny, t, bc)
Dy_m = [_Ty_m(t1,t2)] + Eyes
Dy_m[2] = _D1d_m(Ny, t, bc)
H = np.sum([
reduce(sp.kron, D) for D in [Dx_p, Dx_m, Dy_p, Dy_m]
])
for i in range(2,len(shape)):
D = [
_D1d(N,t,bc) if i==j else sp.eye(N) for j, N in enumerate(shape)
]
D = [sp.eye(2)] + D
H += reduce(sp.kron, D)
return H
# Unitary class
# d-dimensional Hofstadter model
# The magnetic field always points along z; the Hamiltonian is written in a fixed gauge (not gauge-invariant).
def get_Hofstadter_Hamiltonian(shape, phi, x=None, t=-1., bc='periodic'):
assert bc in ['periodic', 'fixed'], "bc should be 'periodic', 'fixed'!"
    # shape may also be an int instead of a list/tuple (an int denotes a 1D system).
if not hasattr(shape, "__iter__"):
print('Warning: Normal tight-binding Hamiltonian is returned.')
shape = (shape,1)
if len(shape) == 1:
print('Warning: Normal tight-binding Hamiltonian is returned.')
shape = (shape[0],1)
assert len(shape) in [2,3], "dimension must be 2 or 3!"
    # phi is a real number in [-1, 1].
assert np.abs(phi) <= 1., 'phi must be between [-1,1]!'
    # If the site x-coordinates are not given, the lattice constant defaults to 1.
if x is None:
x = np.arange(shape[0])
else:
x = np.array(x).ravel()
assert len(x) == shape[0], 'size of x must be {}!'.format(shape[0])
H = sp.csr_matrix( (np.prod(shape),np.prod(shape)) )
for i in range(len(shape)):
D = [
_D1d(N,t,bc) if i==j else sp.eye(N) for j, N in enumerate(shape)
]
if i == 1:
peierls = np.exp( -2.j*np.pi*phi*x )
D[0] = sp.diags(peierls)
D[1] = _D1d_p(shape[1],t,bc)
Hy = reduce(sp.kron, D)
H += Hy + Hy.getH()
else:
H += reduce(sp.kron, D)
return H
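# Usage sketch (illustrative, not part of the original module): a 12x12
# periodic Hofstadter model at flux phi = 1/3, solved for its lowest levels.
#   H = get_Hofstadter_Hamiltonian((12, 12), phi=1.0/3.0)
#   E = spla.eigsh(H, k=6, which='SA', return_eigenvectors=False)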
# On-site potential
def onsite_potential(H, V):
assert len(V.ravel()) == H.shape[0], 'size of V must be {}!'.format(H.shape[0])
return H + sp.diags(V.ravel()) | masaico/TB_models | tbmodels.py | tbmodels.py | py | 5,237 | python | en | code | 0 | github-code | 90 |
35224038049 | import sklearn.ensemble as skl_ensemble
from Orange.base import RandomForestModel
from Orange.data import Variable, ContinuousVariable
from Orange.preprocess.score import LearnerScorer
from Orange.regression import SklLearner, SklModel
from Orange.regression.tree import SklTreeRegressor
__all__ = ["RandomForestRegressionLearner"]
class _FeatureScorerMixin(LearnerScorer):
feature_type = Variable
class_type = ContinuousVariable
def score(self, data):
model = self(data)
return model.skl_model.feature_importances_, model.domain.attributes
class RandomForestRegressor(SklModel, RandomForestModel):
@property
def trees(self):
def wrap(tree, i):
t = SklTreeRegressor(tree)
t.domain = self.domain
t.supports_multiclass = self.supports_multiclass
t.name = "{} - tree {}".format(self.name, i)
t.original_domain = self.original_domain
if hasattr(self, 'instances'):
t.instances = self.instances
return t
return [wrap(tree, i)
for i, tree in enumerate(self.skl_model.estimators_)]
class RandomForestRegressionLearner(SklLearner, _FeatureScorerMixin):
__wraps__ = skl_ensemble.RandomForestRegressor
__returns__ = RandomForestRegressor
supports_weights = True
def __init__(self,
n_estimators=10,
criterion="squared_error",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=1.0,
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.params = vars()
| biolab/orange3 | Orange/regression/random_forest.py | random_forest.py | py | 1,950 | python | en | code | 4,360 | github-code | 90 |
72021774698 | from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.urls import reverse
from django import forms
from decimal import Decimal
from .models import User, Listing, Bid, Comment, Category
"""FORMS"""
class NewListingForm(forms.Form):
title = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Title', 'class': 'form-control'}))
description = forms.CharField(widget=forms.Textarea(attrs={"rows":"5", 'class': 'form-control'}))
price = forms.DecimalField(max_digits=6, decimal_places=2, label="Starting Bid €", widget=forms.NumberInput(attrs={'class': 'form-control'}))
image = forms.ImageField(widget=forms.FileInput(attrs={'class': 'form-control'}))
category = forms.ModelChoiceField(queryset=Category.objects.all(), to_field_name='name', required=True, widget=forms.Select(attrs={'class': 'form-control'}))
class BidForm(forms.Form):
bid = forms.DecimalField(max_digits=6, decimal_places=2, label="Bid €")
class CommentForm(forms.Form):
title = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Title'}))
body = forms.CharField(widget=forms.Textarea(attrs={"rows":"5"}))
@login_required
def index(request):
# Get current user and all existing active listings from db
user = request.user
active_listings = Listing.objects.filter(active=True)
return render(request, "auctions/index.html", {
"active_listings": active_listings,
"user": user
})
def login_view(request):
if request.method == "POST":
# Attempt to sign user in
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(request, username=username, password=password)
# Check if authentication successful
if user is not None:
login(request, user)
return HttpResponseRedirect(reverse("auctions:index"))
else:
return render(request, "auctions/login.html", {
"message": "Invalid username and/or password."
})
else:
return render(request, "auctions/login.html")
def logout_view(request):
logout(request)
return HttpResponseRedirect(reverse("auctions:login"))
def register(request):
if request.method == "POST":
# Get credentials entered by user
username = request.POST["username"]
email = request.POST["email"]
# Ensure password matches confirmation
password = request.POST["password"]
confirmation = request.POST["confirmation"]
if password != confirmation:
return render(request, "auctions/register.html", {
"message": "Passwords must match."
})
# Attempt to create new user or return error if username already exists
try:
user = User.objects.create_user(username, email, password)
user.save()
except IntegrityError:
return render(request, "auctions/register.html", {
"message": "Username already taken."
})
login(request, user)
return HttpResponseRedirect(reverse("auctions:index"))
else:
return render(request, "auctions/register.html")
@login_required
def newlisting(request):
if request.method == "POST":
# request info from form
form = NewListingForm(request.POST, request.FILES)
        if form.is_valid():
listing = Listing(owner = request.user,
title=request.POST["title"],
description = request.POST["description"],
price = request.POST["price"],
category = Category.objects.get(name=request.POST['category']),
image = request.FILES['image']
)
listing.save()
return HttpResponseRedirect(reverse("auctions:index"))
else:
# Get all categories to display on New Listing Form options
categories = Category.objects.all()
return render(request, "auctions/newlisting.html", {
"form": NewListingForm,
"categories":categories
})
@login_required
def listing_view(request, title):
user = request.user
listing = Listing.objects.get(title=title)
comments = listing.listing_comments.all()
# Get all current users watching this listing
users_watching = listing.users_watching.all()
on_watchlist = False
# Check if this listing is on user_watchlist
for user_watching in users_watching:
if user_watching == user:
on_watchlist = True
if request.method == "POST":
# Handles adding/removing listing from Watchlist
if request.POST["formtype"] == "add":
user.watchlist.add(listing)
return redirect("auctions:listing", title)
elif request.POST["formtype"] == "remove":
user.watchlist.remove(listing)
return redirect("auctions:listing", title)
# Handles bids
elif request.POST["formtype"] == "bid":
bid = Decimal(request.POST["bid"])
if listing.price >= bid:
return render(request, "auctions/listing.html", {
"listing": listing,
"user": user,
"on_watchlist": on_watchlist,
"bid_form": BidForm,
"message" : f"Your bid must be higher than €{listing.price}"
})
else:
listing.price = request.POST["bid"]
listing.bid_count+=1
listing.current_winner = user
listing.save()
# Record bid
new_bid = Bid(bidder=user, listing=listing, amount=bid)
new_bid.save()
return redirect("auctions:listing", title)
# Handles closing the listing if the user is the owner
elif request.POST["formtype"] == "close":
listing.active = False
listing.save()
return redirect("auctions:listing", title)
# Handles Comments
elif request.POST["formtype"] == "comment":
comment_title = request.POST["title"]
comment_body = request.POST["body"]
new_comment = Comment(commenter=user, listing=listing, title=comment_title, body=comment_body)
new_comment.save()
return redirect("auctions:listing", title)
else:
# Get all current users watching this listing
users_watching = listing.users_watching.all()
on_watchlist = False
# Check if this listing is on user_watchlist
for user_watching in users_watching:
if user_watching == user:
on_watchlist = True
return render(request, "auctions/listing.html", {
"listing": listing,
"user": user,
"comments": comments,
"on_watchlist": on_watchlist,
"bid_form": BidForm,
"comment_form": CommentForm
})
@login_required
def watchlist(request):
# Define user and identify which listings are on users watchlist
user = request.user
user_watchlist = user.watchlist.all()
return render(request, "auctions/watchlist.html", {
"user": user,
"user_watchlist": user_watchlist
})
@login_required
def categories(request):
categories = Category.objects.all()
return render(request, "auctions/categories.html", {
"categories": categories
})
@login_required
def category_view(request, name):
# Get selected category
category = Category.objects.get(name=name)
# Get all active listings with selected category
active_listings = Listing.objects.filter(category=category, active=True)
return render(request, "auctions/category.html", {
"category": category,
"active_listings": active_listings
})
| jeparalta/web50 | commerce/auctions/views.py | views.py | py | 8,232 | python | en | code | 0 | github-code | 90 |
1791370275 | import os
import urllib.request
from flask import Flask, flash, request, redirect, url_for, render_template
from werkzeug.utils import secure_filename
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from matplotlib.image import imread
from tensorflow.keras.models import load_model
# App section
UPLOAD_FOLDER = 'static/uploads/'
app = Flask(__name__)
app.secret_key = "secret key"
app.config['UPLOAD_FOLDER']=UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16*1024*1024
# X-Ray Functions
image_size=180
from skimage import color
def resize_image(image_file):
image_path = UPLOAD_FOLDER + image_file # Works for heroku
image = imread(image_path)
image_shape = image.shape
if (len(image.shape)==3):
image = color.rgb2gray(image)
image_adjusted = tf.image.resize(image.reshape(image_shape[0],image_shape[1],1),(image_size,image_size))
return image_adjusted
model = load_model('xray_pneumonia_imaging_version4_92percent.h5')
def model_predict(image_data):
labels = ['PNEUMONIA','NORMAL']
image_data = np.array(image_data)/255
image_data = image_data.reshape(-1,180,180,1)
    return labels[int(float(model.predict(image_data)) > 0.5)]  # threshold the model's probability at 0.5
# Configuration section
ALLOWED_EXTENSIONS = set(['png','jpg','jpeg'])
def allowed_file(filename):
return '.' in filename and filename.rsplit('.',1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/')
def upload_form():
return render_template('xray-home.html')
@app.route('/',methods=['POST'])
def upload_image():
labels = ['PNEUMONIA','NORMAL']
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No image selected for uploading')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'],filename))
image_adjusted = resize_image(filename)
print("\n=======\n")
result = model_predict(image_adjusted)
print(result)
print("\n=======\n")
#print('upload_image filename: ' + filename)
flash('Image successfuly uploaded and displayed below:')
return render_template('xray-home.html',filename=filename,result=result)
else:
flash('Allowed image types are -> png, jpg, jpeg')
return redirect(request.url)
@app.route('/display/<filename>')
def display_image(filename):
#print('display_image filename: ' + filename)
return redirect(url_for('static',filename='uploads/' + filename),code=301)
@app.route('/about')
def about_page():
return render_template('xray-about.html')
if __name__ == "__main__":
app.run(debug=True)
| danish-islam/Pneumonia-Detector | xray-flask-model.py | xray-flask-model.py | py | 2,956 | python | en | code | 1 | github-code | 90 |
16749634444 | import cv2
import numpy as np
import torch
class LaMa:
"""Class is designed for image inpainting, which is the process of filling in damaged parts of an image."""
pad_mod = 8
def __init__(self, model_path: str, device: str) -> None:
self.device = device
self.model = torch.jit.load(model_path, map_location=device).to(device)
self.model.eval()
@staticmethod
def _normalize_img(np_img: np.ndarray) -> np.ndarray:
"""Normalizes the input image by converting it to a float32 array and scaling the pixel values to be between 0 and 1.
Args:
np_img (np.ndarray): image to normalize.
Returns:
np.ndarray: normalized image.
"""
if len(np_img.shape) == 2:
np_img = np_img[:, :, np.newaxis]
np_img = np.transpose(np_img, (2, 0, 1))
np_img = np_img.astype("float32") / 255
return np_img
def _forward(self, image: np.ndarray, mask: np.ndarray) -> np.ndarray:
"""Performs the forward pass through the PyTorch model using the input image and mask.
Args:
image (np.ndarray): image to process.
mask (np.ndarray): mask with the damaged areas.
Returns:
np.ndarray: the resulting image with the missing parts filled in.
"""
image = self._normalize_img(image)
mask = self._normalize_img(mask)
mask = (mask > 0) * 1
image = torch.from_numpy(image).unsqueeze(0).to(self.device)
mask = torch.from_numpy(mask).unsqueeze(0).to(self.device)
inpainted_image = self.model(image, mask)
result = inpainted_image[0].permute(1, 2, 0).detach().cpu().numpy()
result = np.clip(result * 255, 0, 255).astype("uint8")
return cv2.cvtColor(result, cv2.COLOR_RGB2BGR)
@staticmethod
def _ceil_size(x: int, mod: int) -> int:
if x % mod == 0:
return x
return (x // mod + 1) * mod
@staticmethod
def _pad_img(img: np.ndarray, mod: int) -> np.ndarray:
if len(img.shape) == 2:
img = img[:, :, np.newaxis]
height, width = img.shape[:2]
out_height = LaMa._ceil_size(height, mod)
out_width = LaMa._ceil_size(width, mod)
return np.pad(
img,
((0, out_height - height), (0, out_width - width), (0, 0)),
mode="symmetric",
)
@torch.no_grad()
def __call__(self, image: np.ndarray, mask: np.ndarray) -> np.ndarray:
"""Performs the inpainting process by padding the input image and mask, passing them through the PyTorch model.
Args:
image (np.ndarray): image to process
mask (np.ndarray): mask with the damaged areas.
Returns:
np.ndarray: the resulting image with the missing parts filled in.
"""
origin_height, origin_width = image.shape[:2]
pad_image = self._pad_img(image, mod=self.pad_mod)
pad_mask = self._pad_img(mask, mod=self.pad_mod)
result = self._forward(pad_image, pad_mask)
result = result[0:origin_height, 0:origin_width, :]
mask = mask[:, :, np.newaxis]
result = result * (mask / 255) + image[:, :, ::-1] * (1 - (mask / 255))
return result.astype("float32")
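# Usage sketch (illustrative only -- the checkpoint and file names below are
# hypothetical): mask pixels > 0 mark the regions to be filled in.
if __name__ == "__main__":
    image = cv2.imread("damaged.jpg")                    # H x W x 3 image
    mask = cv2.imread("mask.png", cv2.IMREAD_GRAYSCALE)  # H x W, 255 = hole
    lama = LaMa(model_path="big-lama.pt", device="cpu")
    result = lama(image, mask)                           # float32 inpainted image
    cv2.imwrite("restored.png", result)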
| logo-wizard/logo-wizard-ml-scripts | eraser/eraser.py | eraser.py | py | 3,306 | python | en | code | 1 | github-code | 90 |
18414072029 | import random
from operator import itemgetter
import sys
sys.setrecursionlimit(1000000)
ans=0
def gcd(x,y):
x,y=max(abs(x),abs(y)),min(abs(x),abs(y))
if x%y==0:
return y
while x%y!=0:
x,y=y,x%y
return y
def gcd_all(l):
rt=l.pop()
for i in l:
rt=gcd(i,rt)
return rt
n=int(input())
g=[]
a=[int(i) for i in input().split()]
ans=0
d=a[-1]
m=a[-1]
mi=-1
k1=[]
flg=0
for i in range(n):
gg=gcd(a[i],d)
if gg<m:
m=gg
mi=i
ans=max(ans,gcd_all(a[:mi]+(a[mi+1:] if mi+1<n else [])))
d=a[0]
m=a[0]
mi=0
k1=[]
flg=0
for i in range(n):
gg=gcd(a[i],d)
if gg<m:
m=gg
mi=i
ans=max(ans,gcd_all(a[:mi]+(a[mi+1:] if mi+1<n else [])))
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03061/s493423004.py | s493423004.py | py | 737 | python | en | code | 0 | github-code | 90 |
29474157511 | import pandas as pd
import torch
from tqdm.auto import tqdm
from transformers import AutoTokenizer
class CustomDataset(torch.utils.data.Dataset):
def __init__(
self,
data_file,
state,
text_columns,
target_columns,
max_length=256,
model_name="klue/roberta-small",
):
self.state = state
self.data = data_file
self.text_columns = text_columns
self.max_length = max_length
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
if self.state == "test":
self.inputs = self.preprocessing(self.data)
else:
self.target_columns = target_columns if target_columns is not None else []
self.inputs, self.targets = self.preprocessing(self.data)
def __getitem__(self, idx):
if self.state == "test":
return {"input_ids": torch.tensor(self.inputs[idx], dtype=torch.long)}
else:
return {
"input_ids": torch.tensor(self.inputs[idx], dtype=torch.long),
"labels": torch.tensor(self.targets[idx], dtype=torch.long),
}
def __len__(self):
return len(self.inputs)
def tokenizing(self, dataframe: pd.DataFrame) -> list:
"""
        Tokenize the input texts.
        Args :
            dataframe (DataFrame): the data to tokenize
        Return :
            data (list) : list of token-id sequences used for training
"""
data = []
for item in tqdm(
dataframe["question"], desc="Tokenizing", total=len(dataframe["question"])
):
text = item
# text = [item for text_column in self.text_columns]
outputs = self.tokenizer(
text,
add_special_tokens=True,
padding="max_length",
truncation=True,
max_length=self.max_length,
)
data.append(outputs["input_ids"])
return data
def preprocessing(self, data):
inputs = self.tokenizing(data)
if self.state == "test":
return inputs
else:
try:
targets = data[self.target_columns]
except:
targets = []
return inputs, targets
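# Usage sketch (the column names here are hypothetical): the loader tokenizes
# the "question" column, and target_columns names the label column.
if __name__ == "__main__":
    sample = pd.DataFrame({"question": ["a sample question"], "label": [1]})
    dataset = CustomDataset(sample, state="train", text_columns=["question"],
                            target_columns="label")
    print(dataset[0]["input_ids"].shape, dataset[0]["labels"])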
| boostcampaitech5/level3_nlp_finalproject-nlp-08 | model/Filter/dataloader.py | dataloader.py | py | 2,313 | python | en | code | 29 | github-code | 90 |
14478284531 | """Tests for class :class:`ncdata.NcDimension`."""
from unittest import mock
import numpy as np
import pytest
from ncdata import NcDimension
class Test_NcDimension__init__:
def test_simple(self):
name = "dimname"
# Use an array object for test size, just so we can check "is".
# N.B. assumes the constructor copies the reference - which could change.
size = np.array(4)
sample = NcDimension(name, size)
# No data, no dtype. Variables don't have 'shape' anyway
assert sample.name is name
assert sample.size is size
@pytest.mark.parametrize("size", [0, 2])
@pytest.mark.parametrize(
"unlim",
[0, 1, False, True, "x"],
ids=["unlim_0", "unlim_1", "unlim_F", "unlim_T", "unlim_x"],
)
def test_unlimited(self, size, unlim):
name = "dimname"
sample = NcDimension(name, size, unlimited=unlim)
expected_unlim = bool(unlim) or size == 0
assert isinstance(sample.unlimited, bool)
assert sample.unlimited == expected_unlim
# assert sample.isunlimited() == sample.unlimited
class Test_NcDimension__str_repr:
@pytest.fixture(params=(0, 23), autouse=True)
def size(self, request):
return request.param
@pytest.fixture(params=("fixed", "unlimited"), autouse=True)
def unlim_type(self, request):
return request.param
def test_repr_sized(self, size, unlim_type):
kwargs = {}
unlimited = unlim_type == "unlimited"
if unlimited:
kwargs["unlimited"] = True
sample = NcDimension("this", size, **kwargs)
result = repr(sample)
expected = f"NcDimension('this', {size})"
if size == 0 or unlimited:
expected = expected[:-1] + ", unlimited=True)"
assert result == expected
def test_str_repr_same(self, size, unlim_type):
# In all cases, str == repr
sample = NcDimension("this", size)
result = str(sample)
assert result == repr(sample)
| pp-mo/ncdata | tests/unit/core/test_NcDimension.py | test_NcDimension.py | py | 2,034 | python | en | code | 5 | github-code | 90 |
29022786430 | #!/usr/bin/env python
import json
import gzip
import warnings
import uproot
import numexpr
import numpy as np
from coffea import hist, lookup_tools
from coffea.util import load, save
from coffea.hist import plot
corrections = {}
extractor = lookup_tools.extractor()
extractor.add_weight_sets(["2016_n2ddt_ * correction_files/n2ddt_transform_2016MC.root"])
extractor.add_weight_sets(["2017_n2ddt_ * correction_files/n2ddt_transform_2017MC.root"])
extractor.add_weight_sets(["2018_n2ddt_ * correction_files/n2ddt_transform_2018MC.root"])
extractor.add_weight_sets(["2016BF_mutrigger_ * correction_files/Muon2016_TriggerEfficienciesAndSF_RunBtoF.root"])
extractor.add_weight_sets(["2016GH_mutrigger_ * correction_files/Muon2016_TriggerEfficienciesAndSF_RunGH.root"])
extractor.add_weight_sets(["2017_mutrigger_ * correction_files/EfficienciesAndSF_RunBtoF_Nov17Nov2017.root"])
extractor.add_weight_sets(["2018_mutrigger_ * correction_files/Muon2018_RunABCD_AfterHLTUpdate_SF_trig.root"])
extractor.add_weight_sets(["2016BF_muid_ * correction_files/Muon2016_EfficienciesAndSF_RunBtoF.root"])
extractor.add_weight_sets(["2016GH_muid_ * correction_files/Muon2016_EfficienciesAndSF_RunGH.root"])
extractor.add_weight_sets(["2017_muid_ * correction_files/Muon2017_RunBCDEF_SF_ID.json"])
extractor.add_weight_sets(["2018_muid_ * correction_files/Muon2018_RunABCD_SF_ID.json"])
extractor.add_weight_sets(["2016BF_muiso_ * correction_files/Muon2016_IsoEfficienciesAndSF_RunBtoF.root"])
extractor.add_weight_sets(["2016GH_muiso_ * correction_files/Muon2016_IsoEfficienciesAndSF_RunGH.root"])
extractor.add_weight_sets(["2017_muiso_ * correction_files/Muon2017_RunBCDEF_SF_ISO.json"])
extractor.add_weight_sets(["2018_muiso_ * correction_files/Muon2018_RunABCD_SF_ISO.json"])
extractor.add_weight_sets(["W_qcd_kfactors * correction_files/WJets_QCD_NLO.root"])
extractor.add_weight_sets(["Z_qcd_kfactors * correction_files/ZJets_QCD_NLO.root"])
extractor.finalize()
evaluator = extractor.make_evaluator()
corrections['2016_n2ddt_rho_pt'] = evaluator['2016_n2ddt_Rho2D']
corrections['2017_n2ddt_rho_pt'] = evaluator['2017_n2ddt_Rho2D']
corrections['2018_n2ddt_rho_pt'] = evaluator['2018_n2ddt_Rho2D']
lumiw2016_GH = 16.146 / (16.146 + 19.721)
lumiw2016_BCDEF = 19.721 / (16.146 + 19.721)
corrections['2016_mutrigweight_pt_abseta'] = evaluator['2016BF_mutrigger_Mu50_OR_TkMu50_PtEtaBins/efficienciesDATA/pt_abseta_DATA']
corrections['2016_mutrigweight_pt_abseta']._values *= lumiw2016_BCDEF
corrections['2016_mutrigweight_pt_abseta']._values += evaluator['2016GH_mutrigger_Mu50_OR_TkMu50_PtEtaBins/efficienciesDATA/pt_abseta_DATA']._values * lumiw2016_GH
corrections['2016_mutrigweight_pt_abseta_mutrigweightShift'] = evaluator['2016BF_mutrigger_Mu50_OR_TkMu50_PtEtaBins/efficienciesDATA/pt_abseta_DATA_error']
bf = evaluator['2016BF_mutrigger_Mu50_OR_TkMu50_PtEtaBins/efficienciesDATA/pt_abseta_DATA_error']._values
gh = evaluator['2016GH_mutrigger_Mu50_OR_TkMu50_PtEtaBins/efficienciesDATA/pt_abseta_DATA_error']._values
corrections['2016_mutrigweight_pt_abseta_mutrigweightShift']._values = np.hypot(bf*lumiw2016_BCDEF, gh*lumiw2016_GH)
corrections['2017_mutrigweight_pt_abseta'] = evaluator['2017_mutrigger_Mu50_PtEtaBins/efficienciesDATA/pt_abseta_DATA']
corrections['2017_mutrigweight_pt_abseta_mutrigweightShift'] = evaluator['2017_mutrigger_Mu50_PtEtaBins/efficienciesDATA/pt_abseta_DATA_error']
corrections['2018_mutrigweight_pt_abseta'] = evaluator['2018_mutrigger_Mu50_OR_OldMu100_OR_TkMu100_PtEtaBins/efficienciesDATA/pt_abseta_DATA']
corrections['2018_mutrigweight_pt_abseta_mutrigweightShift'] = evaluator['2018_mutrigger_Mu50_OR_OldMu100_OR_TkMu100_PtEtaBins/efficienciesDATA/pt_abseta_DATA_error']
corrections['2016_muidweight_abseta_pt'] = evaluator['2016BF_muid_MC_NUM_LooseID_DEN_genTracks_PAR_pt_eta/efficienciesDATA/abseta_pt_DATA']
corrections['2016_muidweight_abseta_pt']._values *= lumiw2016_BCDEF
corrections['2016_muidweight_abseta_pt']._values += evaluator['2016GH_muid_MC_NUM_LooseID_DEN_genTracks_PAR_pt_eta/efficienciesDATA/abseta_pt_DATA']._values * lumiw2016_GH
corrections['2016_muidweight_abseta_pt_muidweightShift'] = evaluator['2016BF_muid_MC_NUM_LooseID_DEN_genTracks_PAR_pt_eta/efficienciesDATA/abseta_pt_DATA_error']
bf = evaluator['2016BF_muid_MC_NUM_LooseID_DEN_genTracks_PAR_pt_eta/efficienciesDATA/abseta_pt_DATA_error']._values
gh = evaluator['2016GH_muid_MC_NUM_LooseID_DEN_genTracks_PAR_pt_eta/efficienciesDATA/abseta_pt_DATA_error']._values
corrections['2016_muidweight_abseta_pt_muidweightShift']._values = np.hypot(bf*lumiw2016_BCDEF, gh*lumiw2016_GH)
corrections['2017_muidweight_abseta_pt'] = evaluator['2017_muid_NUM_LooseID_DEN_genTracks/abseta_pt_value']
corrections['2017_muidweight_abseta_pt_muidweightShift'] = evaluator['2017_muid_NUM_LooseID_DEN_genTracks/abseta_pt_error']
corrections['2018_muidweight_abseta_pt'] = evaluator['2018_muid_NUM_LooseID_DEN_genTracks/abseta_pt_value']
corrections['2018_muidweight_abseta_pt_muidweightShift'] = evaluator['2018_muid_NUM_LooseID_DEN_genTracks/abseta_pt_error']
corrections['2016_muisoweight_abseta_pt'] = evaluator['2016BF_muiso_LooseISO_LooseID_pt_eta/efficienciesDATA/abseta_pt_DATA']
corrections['2016_muisoweight_abseta_pt']._values *= lumiw2016_BCDEF
corrections['2016_muisoweight_abseta_pt']._values += evaluator['2016GH_muiso_LooseISO_LooseID_pt_eta/efficienciesDATA/abseta_pt_DATA']._values * lumiw2016_GH
corrections['2016_muisoweight_abseta_pt_muisoweightShift'] = evaluator['2016BF_muiso_LooseISO_LooseID_pt_eta/efficienciesDATA/abseta_pt_DATA_error']
bf = evaluator['2016BF_muiso_LooseISO_LooseID_pt_eta/efficienciesDATA/abseta_pt_DATA_error']._values
gh = evaluator['2016GH_muiso_LooseISO_LooseID_pt_eta/efficienciesDATA/abseta_pt_DATA_error']._values
corrections['2016_muisoweight_abseta_pt_muisoweightShift']._values = np.hypot(bf*lumiw2016_BCDEF, gh*lumiw2016_GH)
corrections['2017_muisoweight_abseta_pt'] = evaluator['2017_muiso_NUM_LooseRelIso_DEN_LooseID/abseta_pt_value']
corrections['2017_muisoweight_abseta_pt_muisoweightShift'] = evaluator['2017_muiso_NUM_LooseRelIso_DEN_LooseID/abseta_pt_error']
corrections['2018_muisoweight_abseta_pt'] = evaluator['2018_muiso_NUM_LooseRelIso_DEN_LooseID/abseta_pt_value']
corrections['2018_muisoweight_abseta_pt_muisoweightShift'] = evaluator['2018_muiso_NUM_LooseRelIso_DEN_LooseID/abseta_pt_error']
gpar = np.array([1.00626, -1.06161, 0.0799900, 1.20454])
cpar = np.array([1.09302, -0.000150068, 3.44866e-07, -2.68100e-10, 8.67440e-14, -1.00114e-17])
fpar = np.array([1.27212, -0.000571640, 8.37289e-07, -5.20433e-10, 1.45375e-13, -1.50389e-17])
def msd_weight(pt, eta):
genw = gpar[0] + gpar[1]*np.power(pt*gpar[2], -gpar[3])
ptpow = np.power.outer(pt, np.arange(cpar.size))
cenweight = np.dot(ptpow, cpar)
forweight = np.dot(ptpow, fpar)
weight = np.where(np.abs(eta)<1.3, cenweight, forweight)
return genw*weight
corrections['msdweight'] = msd_weight
with uproot.open("correction_files/kfactors.root") as kfactors:
ewkW_num = kfactors['EWKcorr/W']
ewkZ_num = kfactors['EWKcorr/Z']
# ewkW_denom = kfactors['WJets_LO/inv_pt']
# ewkZ_denom = kfactors['ZJets_LO/inv_pt']
ewkW_denom = kfactors['WJets_012j_NLO/nominal']
ewkZ_denom = kfactors['ZJets_012j_NLO/nominal']
edges = ewkW_num.edges
assert(all(np.array_equal(edges, h.edges) for h in [ewkW_denom, ewkZ_num, ewkZ_denom]))
ptrange = slice(np.searchsorted(edges, 250.), np.searchsorted(edges, 1000.) + 1)
corrections['W_nlo_over_lo_ewk'] = lookup_tools.dense_lookup.dense_lookup(ewkW_num.values[ptrange] / ewkW_denom.values[ptrange], edges[ptrange])
corrections['Z_nlo_over_lo_ewk'] = lookup_tools.dense_lookup.dense_lookup(ewkZ_num.values[ptrange] / ewkZ_denom.values[ptrange], edges[ptrange])
with uproot.open("correction_files/WJets_QCD_NLO.root") as kfactors:
qcdW_2016_nlo = kfactors['W_NLO_QCD_2016']
qcdW_2017_nlo = kfactors['W_NLO_QCD_2017']
with uproot.open("correction_files/ZJets_QCD_NLO.root") as kfactors:
qcdZ_2016_nlo = kfactors['Z_NLO_QCD_2016']
qcdZ_2017_nlo = kfactors['Z_NLO_QCD_2017']
edges = qcdW_2016_nlo.edges
assert(all(np.array_equal(edges, h.edges) for h in [qcdW_2017_nlo, qcdZ_2016_nlo, qcdZ_2017_nlo]))
ptrange = slice(np.searchsorted(edges, 250.), np.searchsorted(edges, 1000.) + 1)
corrections['2016_W_nlo_qcd'] = lookup_tools.dense_lookup.dense_lookup(qcdW_2016_nlo.values[ptrange], edges[ptrange])
corrections['2017_W_nlo_qcd'] = lookup_tools.dense_lookup.dense_lookup(qcdW_2017_nlo.values[ptrange], edges[ptrange])
corrections['2016_Z_nlo_qcd'] = lookup_tools.dense_lookup.dense_lookup(qcdZ_2016_nlo.values[ptrange], edges[ptrange])
corrections['2017_Z_nlo_qcd'] = lookup_tools.dense_lookup.dense_lookup(qcdZ_2017_nlo.values[ptrange], edges[ptrange])
with uproot.open("correction_files/pileUp_Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON.root") as fin_pileup:
norm = lambda x: x / x.sum()
data_pu = norm(fin_pileup["pileup"].values)
data_pu_puUp = norm(fin_pileup["pileup_plus"].values)
data_pu_puDown = norm(fin_pileup["pileup_minus"].values)
# https://github.com/cms-sw/cmssw/blob/master/SimGeneral/MixingModule/python/mix_2016_25ns_Moriond17MC_PoissonOOTPU_cfi.py
mc_pu = np.array([
1.78653e-05 ,2.56602e-05 ,5.27857e-05 ,8.88954e-05 ,0.000109362 ,0.000140973 ,0.000240998 ,0.00071209 ,
0.00130121 ,0.00245255 ,0.00502589 ,0.00919534 ,0.0146697 ,0.0204126 ,0.0267586 ,0.0337697 ,0.0401478 ,
0.0450159 ,0.0490577 ,0.0524855 ,0.0548159 ,0.0559937 ,0.0554468 ,0.0537687 ,0.0512055 ,0.0476713 ,
0.0435312 ,0.0393107 ,0.0349812 ,0.0307413 ,0.0272425 ,0.0237115 ,0.0208329 ,0.0182459 ,0.0160712 ,
0.0142498 ,0.012804 ,0.011571 ,0.010547 ,0.00959489 ,0.00891718 ,0.00829292 ,0.0076195 ,0.0069806 ,
0.0062025 ,0.00546581 ,0.00484127 ,0.00407168 ,0.00337681 ,0.00269893 ,0.00212473 ,0.00160208 ,
0.00117884 ,0.000859662 ,0.000569085 ,0.000365431 ,0.000243565 ,0.00015688 ,9.88128e-05 ,
6.53783e-05 ,3.73924e-05 ,2.61382e-05 ,2.0307e-05 ,1.73032e-05 ,1.435e-05 ,1.36486e-05 ,1.35555e-05 ,
1.37491e-05 ,1.34255e-05 ,1.33987e-05 ,1.34061e-05 ,1.34211e-05 ,1.34177e-05 ,1.32959e-05 ,1.33287e-05
])
mc_pu = np.r_[mc_pu, np.zeros(25)]
# per-bin pileup weight = normalized data profile / normalized MC profile
mask = mc_pu > 0.
corr = data_pu.copy()
corr_puUp = data_pu_puUp.copy()
corr_puDown = data_pu_puDown.copy()
corr[mask] /= mc_pu[mask]
corr_puUp[mask] /= mc_pu[mask]
corr_puDown[mask] /= mc_pu[mask]
pileup_corr = lookup_tools.dense_lookup.dense_lookup(corr, fin_pileup["pileup"].edges)
pileup_corr_puUp = lookup_tools.dense_lookup.dense_lookup(corr_puUp, fin_pileup["pileup"].edges)
pileup_corr_puDown = lookup_tools.dense_lookup.dense_lookup(corr_puDown, fin_pileup["pileup"].edges)
corrections['2016_pileupweight'] = pileup_corr
corrections['2016_pileupweight_puUp'] = pileup_corr_puUp
corrections['2016_pileupweight_puDown'] = pileup_corr_puDown
pileup_corr = load('correction_files/pileup_mc.coffea')
del pileup_corr['data_obs_jet']
del pileup_corr['data_obs_mu']
with uproot.open("correction_files/pileup_Cert_294927-306462_13TeV_PromptReco_Collisions17_withVar.root") as fin_pileup:
norm = lambda x: x / x.sum()
data_pu = norm(fin_pileup["pileup"].values)
data_pu_puUp = norm(fin_pileup["pileup_plus"].values)
data_pu_puDown = norm(fin_pileup["pileup_minus"].values)
pileup_corr_puUp = {}
pileup_corr_puDown = {}
for k in pileup_corr.keys():
if pileup_corr[k].value.sum() == 0:
print("sample has no MC pileup:", k)
continue
mc_pu = norm(pileup_corr[k].value)
mask = mc_pu > 0.
corr = data_pu.copy()
corr_puUp = data_pu_puUp.copy()
corr_puDown = data_pu_puDown.copy()
corr[mask] /= mc_pu[mask]
corr_puUp[mask] /= mc_pu[mask]
corr_puDown[mask] /= mc_pu[mask]
pileup_corr[k] = lookup_tools.dense_lookup.dense_lookup(corr, fin_pileup["pileup"].edges)
pileup_corr_puUp[k] = lookup_tools.dense_lookup.dense_lookup(corr_puUp, fin_pileup["pileup"].edges)
pileup_corr_puDown[k] = lookup_tools.dense_lookup.dense_lookup(corr_puDown, fin_pileup["pileup"].edges)
corrections['2017_pileupweight_dataset'] = pileup_corr
corrections['2017_pileupweight_dataset_puUp'] = pileup_corr_puUp
corrections['2017_pileupweight_dataset_puDown'] = pileup_corr_puDown
with uproot.open("correction_files/pileUp_Cert_314472-325175_13TeV_PromptReco_Collisions18_JSON.root") as fin_pileup:
norm = lambda x: x / x.sum()
data_pu = norm(fin_pileup["pileup"].values)
data_pu_puUp = norm(fin_pileup["pileup_plus"].values)
data_pu_puDown = norm(fin_pileup["pileup_minus"].values)
# https://github.com/cms-sw/cmssw/blob/master/SimGeneral/MixingModule/python/mix_2018_25ns_JuneProjectionFull18_PoissonOOTPU_cfi.py
mc_pu = np.array([
4.695341e-10, 1.206213e-06, 1.162593e-06, 6.118058e-06, 1.626767e-05,
3.508135e-05, 7.12608e-05, 0.0001400641, 0.0002663403, 0.0004867473,
0.0008469, 0.001394142, 0.002169081, 0.003198514, 0.004491138,
0.006036423, 0.007806509, 0.00976048, 0.0118498, 0.01402411,
0.01623639, 0.01844593, 0.02061956, 0.02273221, 0.02476554,
0.02670494, 0.02853662, 0.03024538, 0.03181323, 0.03321895,
0.03443884, 0.035448, 0.03622242, 0.03674106, 0.0369877,
0.03695224, 0.03663157, 0.03602986, 0.03515857, 0.03403612,
0.0326868, 0.03113936, 0.02942582, 0.02757999, 0.02563551,
0.02362497, 0.02158003, 0.01953143, 0.01750863, 0.01553934,
0.01364905, 0.01186035, 0.01019246, 0.008660705, 0.007275915,
0.006043917, 0.004965276, 0.004035611, 0.003246373, 0.002585932,
0.002040746, 0.001596402, 0.001238498, 0.0009533139, 0.0007282885,
0.000552306, 0.0004158005, 0.0003107302, 0.0002304612, 0.0001696012,
0.0001238161, 8.96531e-05, 6.438087e-05, 4.585302e-05, 3.23949e-05,
2.271048e-05, 1.580622e-05, 1.09286e-05, 7.512748e-06, 5.140304e-06,
3.505254e-06, 2.386437e-06, 1.625859e-06, 1.111865e-06, 7.663272e-07,
5.350694e-07, 3.808318e-07, 2.781785e-07, 2.098661e-07, 1.642811e-07,
1.312835e-07, 1.081326e-07, 9.141993e-08, 7.890983e-08, 6.91468e-08,
6.119019e-08, 5.443693e-08, 4.85036e-08, 4.31486e-08, 3.822112e-08
])
mask = mc_pu > 0.
corr = data_pu.copy()
corr_puUp = data_pu_puUp.copy()
corr_puDown = data_pu_puDown.copy()
corr[mask] /= mc_pu[mask]
corr_puUp[mask] /= mc_pu[mask]
corr_puDown[mask] /= mc_pu[mask]
pileup_corr = lookup_tools.dense_lookup.dense_lookup(corr, fin_pileup["pileup"].edges)
pileup_corr_puUp = lookup_tools.dense_lookup.dense_lookup(corr_puUp, fin_pileup["pileup"].edges)
pileup_corr_puDown = lookup_tools.dense_lookup.dense_lookup(corr_puDown, fin_pileup["pileup"].edges)
corrections['2018_pileupweight'] = pileup_corr
corrections['2018_pileupweight_puUp'] = pileup_corr_puUp
corrections['2018_pileupweight_puDown'] = pileup_corr_puDown
with uproot.open("correction_files/RUNTriggerEfficiencies_SingleMuon_Run2016_V2p1_v03.root") as fin:
denom = fin["DijetTriggerEfficiencySeveralTriggers/jet1SoftDropMassjet1PtDenom_cutJet"]
num = fin["DijetTriggerEfficiencySeveralTriggers/jet1SoftDropMassjet1PtPassing_cutJet"]
eff = num.values/np.maximum(denom.values, 1)
efferr = plot.clopper_pearson_interval(num.values, denom.values)
msd_bins, pt_bins = num.edges
# Cut pt < 200
cutpt = pt_bins >= 200
pt_bins = pt_bins[cutpt]
cutpt = cutpt[:-1]
eff = eff[:,cutpt]
eff_trigweightDown = efferr[0,:,cutpt]
eff_trigweightUp = efferr[1,:,cutpt]
corrections['2016_trigweight_msd_pt'] = lookup_tools.dense_lookup.dense_lookup(eff, (msd_bins, pt_bins))
corrections['2016_trigweight_msd_pt_trigweightDown'] = lookup_tools.dense_lookup.dense_lookup(eff_trigweightDown, (msd_bins, pt_bins))
corrections['2016_trigweight_msd_pt_trigweightUp'] = lookup_tools.dense_lookup.dense_lookup(eff_trigweightUp, (msd_bins, pt_bins))
with uproot.open("correction_files/TrigEff_2017BtoF_noPS_Feb21.root") as fin:
denom = fin["h_denom"]
num = fin["h_numer"]
eff = num.values/np.maximum(denom.values, 1)
efferr = plot.clopper_pearson_interval(num.values, denom.values)
msd_bins, pt_bins = num.edges
# Cut pt < 200
pt_bins = pt_bins[8:]
eff = eff[:,8:]
eff_trigweightDown = efferr[0,:,8:]
eff_trigweightUp = efferr[1,:,8:]
corrections['2017_trigweight_msd_pt'] = lookup_tools.dense_lookup.dense_lookup(eff, (msd_bins, pt_bins))
corrections['2017_trigweight_msd_pt_trigweightDown'] = lookup_tools.dense_lookup.dense_lookup(eff_trigweightDown, (msd_bins, pt_bins))
corrections['2017_trigweight_msd_pt_trigweightUp'] = lookup_tools.dense_lookup.dense_lookup(eff_trigweightUp, (msd_bins, pt_bins))
with uproot.open("correction_files/TrigEff_2018_Feb21.root") as fin:
denom = fin["h_denom"]
num = fin["h_numer"]
eff = num.values/np.maximum(denom.values, 1)
efferr = plot.clopper_pearson_interval(num.values, denom.values)
msd_bins, pt_bins = num.edges
# Cut pt < 200
pt_bins = pt_bins[8:]
eff = eff[:,8:]
eff_trigweightDown = efferr[0,:,8:]
eff_trigweightUp = efferr[1,:,8:]
corrections['2018_trigweight_msd_pt'] = lookup_tools.dense_lookup.dense_lookup(eff, (msd_bins, pt_bins))
corrections['2018_trigweight_msd_pt_trigweightDown'] = lookup_tools.dense_lookup.dense_lookup(eff_trigweightDown, (msd_bins, pt_bins))
corrections['2018_trigweight_msd_pt_trigweightUp'] = lookup_tools.dense_lookup.dense_lookup(eff_trigweightUp, (msd_bins, pt_bins))
with open("correction_files/TriggerBitMap.json") as fin:
trigger_bitmap = json.load(fin)
def triggermask(names, triggerMap):
version = names['version']
hltNames = names['names']
branchName = names['branchName']
if version in triggerMap:
bits = triggerMap[version]
else:
raise ValueError("Cannot find triggerbit map of the requested bit version =%s. Possible versions are: %s" % (version, ",".join(triggerMap.keys())))
tCuts = []
mask = np.array(0, dtype='uint64')
for hltName in hltNames:
if hltName not in bits:
raise ValueError("Cannot find the TriggerBit for %s" % hltName)
mask |= np.array(1<<int(bits[hltName]), dtype=mask.dtype)
return mask
triggerNames_2016 = {
"version": "zprimebit-15.01",
"branchName":"triggerBits",
"names": [
"HLT_PFHT800_v*",
"HLT_PFHT900_v*",
"HLT_AK8PFJet360_TrimMass30_v*",
'HLT_AK8PFHT700_TrimR0p1PT0p03Mass50_v*',
"HLT_PFHT650_WideJetMJJ950DEtaJJ1p5_v*",
"HLT_PFHT650_WideJetMJJ900DEtaJJ1p5_v*",
"HLT_AK8DiPFJet280_200_TrimMass30_BTagCSV_p20_v*" ,
"HLT_PFJet450_v*",
]
}
corrections['2016_triggerMask'] = triggermask(triggerNames_2016, trigger_bitmap)
triggerNames_2017 = {
"version": "zprimebit-15.01",
"branchName":"triggerBits",
"names": [
"HLT_AK8PFJet330_PFAK8BTagCSV_p17_v*",
"HLT_PFHT1050_v*",
"HLT_AK8PFJet400_TrimMass30_v*",
"HLT_AK8PFJet420_TrimMass30_v*",
"HLT_AK8PFHT800_TrimMass50_v*",
"HLT_PFJet500_v*",
"HLT_AK8PFJet500_v*"
]
}
corrections['2017_triggerMask'] = triggermask(triggerNames_2017, trigger_bitmap)
triggerNames_2018 = {
"version": "zprimebit-15.01",
"branchName": "triggerBits",
"names": [
'HLT_AK8PFJet400_TrimMass30_v*',
'HLT_AK8PFJet420_TrimMass30_v*',
'HLT_AK8PFHT800_TrimMass50_v*',
'HLT_PFHT1050_v*',
'HLT_PFJet500_v*',
'HLT_AK8PFJet500_v*',
'HLT_AK8PFJet330_PFAK8BTagCSV_p17_v*',
"HLT_AK8PFJet330_TrimMass30_PFAK8BoostedDoubleB_np4_v*",
],
}
corrections['2018_triggerMask'] = triggermask(triggerNames_2018, trigger_bitmap)
def read_xsections(filename):
out = {}
with open(filename) as fin:
for line in fin:
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
dataset, xsexpr, *_ = line.split()
try:
xs = float(numexpr.evaluate(xsexpr))
except:
print("numexpr evaluation failed for line: %s" % line)
raise
if xs <= 0:
warnings.warn("Cross section is <= 0 in line: %s" % line, RuntimeWarning)
out[dataset] = xs
return out
# curl -O https://raw.githubusercontent.com/kakwok/ZPrimePlusJet/newTF/analysis/ggH/xSections.dat
corrections['xsections'] = read_xsections("metadata/xSections.dat")
save(corrections, 'corrections.coffea')
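# Sketch of downstream use (assumed API: coffea.util.load, and that the
# dense_lookup objects built above are callable on the binned variable;
# the nPU array name is hypothetical):
#   from coffea.util import load
#   corrections = load('corrections.coffea')
#   weights = corrections['2018_pileupweight'](nPU_array)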
| nsmith-/coffeandbacon | analysis/compile_corrections.py | compile_corrections.py | py | 20,687 | python | en | code | 1 | github-code | 90 |
40239787688 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 29 12:20:40 2020
@author: SethHarden
"""
import math
# Add any extra import statements you may need here
"""
We receive 2 arrays (A,B)
We are told their lengths with N.
Determine if there is a way to make A == B by reversing any sub arrays from array B (any number of times)
what do we need to know about the arrays.
what can we do with the arrays?
how can we compare it?
Return a boolean value (true, false)
Let's setup our array scanning system.
QUESTIONS?
The arrays have length N, N should always be equal
If they are not the same length can we automatically return a false?
----------
We can go through each index of A and see if that exists in any of B's indexes.
This is like
O(N**2)
Potential libraries
from bisect import bisect_left
"""
# Add any helper functions you may need here
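# Observation (added note): reversing a length-2 sub-array swaps adjacent
# elements, so repeated reversals can realize any permutation of B; the
# question therefore reduces to whether A and B are equal as multisets,
# which sorted(A) == sorted(B) checks in O(n log n).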
def are_they_equal(array_a, array_b):
  # NOTE: the original loop only kept the membership result for the last
  # element of array_a, so earlier mismatches were overwritten; multiset
  # equality is the correct (and sufficient) check here.
  return sorted(array_a) == sorted(array_b)
# These are the tests we use to determine if the solution is correct.
# You can add your own at the bottom, but they are otherwise not editable!
def printString(string):
print('[\"', string, '\"]', sep='', end='')
test_case_number = 1
def check(expected, output):
global test_case_number
result = False
if expected == output:
result = True
rightTick = '\u2713'
wrongTick = '\u2717'
if result:
print(rightTick, 'YES Test #', test_case_number, sep='')
else:
print(wrongTick, 'NO Test #', test_case_number, ': Expected ', sep='', end='')
printString(expected)
print(' Your output: ', end='')
printString(output)
print()
test_case_number += 1
if __name__ == "__main__":
n_1 = 4
a_1 = [1, 2, 3, 4]
b_1 = [1, 4, 3, 2]
expected_1 = True
output_1 = are_they_equal(a_1, b_1)
check(expected_1, output_1)
n_2 = 4
a_2 = [1, 2, 3, 4]
b_2 = [1, 2, 3, 5]
expected_2 = False
output_2 = are_they_equal(a_2, b_2)
check(expected_2, output_2)
# Add your own test cases here
| sethmh82/SethDevelopment | Python/Array-Reverse-to-Make-Equal.py | Array-Reverse-to-Make-Equal.py | py | 2,127 | python | en | code | 1 | github-code | 90 |
18323451309 | def main():
n = int(input())
d = list(map(int,input().split()))
mod = 998244353
if d[0]!=0:
print(0)
return
D = {0:1}
for i in range(1,n):
if d[i]==0:
print(0)
return
        if D.get(d[i]) is None:
D[d[i]] = 1
else:
D[d[i]] += 1
ans = 1
if sorted(D.keys())[-1]!=len(D.keys())-1:
print(0)
return
for k in range(1,len(D.keys())):
ans *= pow(D[k-1],D[k],mod)
ans = ans % mod
print(ans)
if __name__ == "__main__":
main()
| Aasthaengg/IBMdataset | Python_codes/p02866/s221872293.py | s221872293.py | py | 575 | python | en | code | 0 | github-code | 90 |
4539893840 | import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import convolve2d
from scipy.ndimage import gaussian_filter  # scipy.ndimage.filters is deprecated
import skimage.io as io
from nonmaxsuppts import nonmaxsuppts
K = 0.04
def detect_features(image):
"""
Computer Vision 600.461/661 Assignment 2
Args:
image (numpy.ndarray): The input image to detect features on. Note: this is NOT the image name or image path.
Returns:
pixel_coords (list of tuples): A list of (row,col) tuples of detected feature locations in the image
"""
harris_threshold = 0.5
sobel_vertical_kernel = [[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]
]
sobel_horizontal_kernel = np.rot90(sobel_vertical_kernel)
I_x = convolve2d(image, sobel_vertical_kernel, mode='same', boundary='symm')
I_y = convolve2d(image, sobel_horizontal_kernel, mode='same', boundary='symm')
I_xx = I_x * I_x
I_yy = I_y * I_y
I_xy = I_x * I_y
I_xx = gaussian_filter(I_xx, 3)
I_yy = gaussian_filter(I_yy, 3)
I_xy = gaussian_filter(I_xy, 3)
R = (I_xx * I_yy - I_xy**2) - K*(I_xx + I_yy)**2
corners = nonmaxsuppts(R, 5, harris_threshold)
return corners
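
# Minimal usage sketch (assumes an image file "input.png" exists and that
# nonmaxsuppts returns (row, col) corner locations, as used above):
if __name__ == '__main__':
    img = io.imread('input.png', as_gray=True)
    corners = detect_features(img)
    rows = [r for r, c in corners]
    cols = [c for r, c in corners]
    plt.imshow(img, cmap='gray')
    plt.scatter(cols, rows, s=10, c='r')  # mark detected corners in red
    plt.show()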
| warmspringwinds/jhu_cv_homeworks | hw_2/detect_features.py | detect_features.py | py | 1,188 | python | en | code | 0 | github-code | 90 |
15756421891 | from collections import deque
import pickle
string_f = '''75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23'''
rows = []
lines = string_f.split('\n')
for string in lines:
rows.append(string.split(' '))
rows_main = []
i = 0
for num_str_list in rows:
rows_main.append([])
for x in num_str_list:
rows_main[i].append(int(x))
i+=1
graph = {}
for x in rows_main:
index_x = rows_main.index(x)
for y in x:
index_y = x.index(y)
if index_x+1 < len(rows_main):
try:
graph[y, index_x, index_y] += [ (rows_main[index_x + 1][index_y],index_x+1, index_y), (rows_main[index_x + 1][index_y + 1],index_x +1, index_y+1) ]
except KeyError:
graph[y, index_x, index_y] = [ (rows_main[index_x + 1][index_y],index_x+1, index_y), (rows_main[index_x + 1][index_y + 1],index_x+1, index_y+1) ]
else:
pass
print(graph)
def find_all_paths(graph, start, end, path=[]):
input()
print('start ={} end={}'.format(start,end))
path = path + [start]
if start == end:
print('{}=={} -> paths={}'.format(start, end, path))
return [path]
    if start not in graph:
        print('{} is not in the graph'.format(start))
return []
paths = []
print('paths={}'.format(paths))
for node in graph[start]:
#input()
#print('node=',node)
if node not in path:
print('{} node not in path {}'.format(node,path))
newpaths = find_all_paths(graph, node, end, path)
for newpath in newpaths:
paths.append(newpath)
return paths
s = 0
i = 0
summ = 0
for x in rows_main[-1]:
print((x,14,i))
paths = find_all_paths(graph,(75,0,0),(x,14,i))
k = 0
for z in paths:
print(k," = ",z)
k+=1
s += len(paths)
i += 1
for zs in paths:
temp_summ = 0
for xs in zs:
temp_summ += xs[0]
if temp_summ > summ:
summ = temp_summ
#input()
print(s)
print(summ)
'''
paths = []
temp = []
for x,y in graph.items():
print('selected={}->{} selected={}->{}'.format(x,y[0],x,y[1]))
input()
for z in paths:
print('x={} paths={} z={}'.format(x,paths,z))
input()
if z[-1] == x:
print('{} removed'.format(z))
temp = z
paths.remove(z)
if len(temp)>0:
paths.append(temp + [y[0]])
paths.append(temp + [y[1]])
else:
paths.append(temp + [x, y[0]])
paths.append(temp + [x, y[1]])
print('paths={}'.format(paths))
f=open('paths.data','wb')
pickle.dump(paths, f)  # write the object to the file
f.close()
print('Paths len = ',len(paths))
sum = 0
for x in paths:
temp_sum = 0
for y in x:
temp_sum += y[0]
if temp_sum>sum:
sum = temp_sum
print(sum)
'''
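
# The exhaustive path enumeration above grows exponentially with the number
# of rows. A bottom-up dynamic program over the same rows_main triangle
# finds the maximum path sum in O(n^2) (a sketch):
best = [row[:] for row in rows_main]
for r in range(len(best) - 2, -1, -1):
    for c in range(len(best[r])):
        best[r][c] += max(best[r + 1][c], best[r + 1][c + 1])
print('DP maximum path sum:', best[0][0])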
| dzinrai/my_euler_proj | e18.py | e18.py | py | 3,344 | python | en | code | 0 | github-code | 90 |
18407712869 | from collections import deque
m, k = [int(i) for i in input().split()]
if k >= 2 ** m:
print(-1)
exit()
if k == 0:
q = deque([])
for i in range(2 ** m):
q.append(i)
q.appendleft(i)
print(*q)
exit()
if m == 1 and k == 1:
print(-1)
exit()
S = set(i for i in range(2 ** m))
q = deque([0, k])
S.remove(0)
S.remove(k)
for s in S:
q.appendleft(s)
q.append(s)
q.appendleft(0)
q.append(k)
print(*q)
| Aasthaengg/IBMdataset | Python_codes/p03046/s624134655.py | s624134655.py | py | 456 | python | en | code | 0 | github-code | 90 |
14316115270 | from django.apps import apps
from rest_framework import serializers
from rest_flex_fields import FlexFieldsModelSerializer
from drf_extra_fields.fields import Base64ImageField
__all__ = [
'OrderExtensionSerializer',
]
Order = apps.get_model(*'order.Order'.split())
OrderExtension = apps.get_model(*'order.OrderExtension'.split())
class OrderExtensionSerializer(FlexFieldsModelSerializer):
"""Serializer for :model:`order.OrderExtension`:
`**Fields:**`
01. `id` : `AutoField`
02. `order` : `ForeignKey` [:model:`order.Order`]
03. `payment_proof` : `FileField`
`**Reverse Fields:**`
"""
order = serializers.SlugRelatedField(
queryset=Order.objects.all(),
slug_field='token',
allow_null=False,
required=True,
many=False,
)
payment_proof = Base64ImageField(required=False)
class Meta:
model = OrderExtension
fields = [
# Fields
'id',
'order',
'payment_proof',
# Reverse Fields
]
read_only_fields = []
# def create(self, validated_data):
# return super().create(validated_data)
# def update(self, instance, validated_data):
# return super().update(instance, validated_data)
| Chaoslecion123/Diver | saleor/rest/serializers/order_extension.py | order_extension.py | py | 1,357 | python | en | code | 0 | github-code | 90 |
14981771755 | def merge_sort(arr, left, right):
if left < right:
mid = (left + right) // 2
# Recursively sort first half and second half
merge_sort(arr, left, mid)
merge_sort(arr, mid + 1, right)
# Merge the sorted halves
merge(arr, left, mid, right)
# Wrapper function to initiate the sort
def initiate_merge_sort(arr):
merge_sort(arr, 0, len(arr) - 1)
# Define a merge function
def merge(arr, left, mid, right):
temp = []
i, j = left, mid + 1
while i <= mid and j <= right:
if arr[i] < arr[j]:
temp.append(arr[i])
i += 1
else:
temp.append(arr[j])
j += 1
temp.extend(arr[i:mid+1])
temp.extend(arr[j:right+1])
arr[left:right+1] = temp
# Display the entire array after each merge operation
print(" ".join(map(str, arr)))
# Loop to read multiple sets of test data
while True:
try:
n = int(input())
unsorted_arr = list(map(int, input().split()))
initiate_merge_sort(unsorted_arr)
except EOFError:
break
| CQUer-nanzhong/ClassContents-Hw-and-Lab-of-Data-Structure-and-Algorithm | Homework-by-python/Homework-Week-9/Hw 9_1 二路归并排序.py | Hw 9_1 二路归并排序.py | py | 1,159 | python | en | code | 1 | github-code | 90 |
20667393565 | def solution(dartResult):
answer = 0
temp = []
import re
    # Star (*) bonus: doubles this throw's score and the previous throw's
    # score; it can stack with other effects (star, acha)
    # Acha (#) bonus: negates this throw's score
    # Three throws per game
num_list = re.split(r'[^0-9]', dartResult)[:-1]
bonus_option_list = re.findall(r'[\D]', dartResult)
for num, bonus_option in zip(num_list, bonus_option_list):
if num == '':
if bonus_option == '*':
if len(temp) > 1:
temp[-1] *= 2
temp[-2] *= 2
else:
temp[-1] *= 2
else:
temp[-1] *= -1
else:
num = int(num)
if bonus_option == 'D':
num **= 2
elif bonus_option == 'T':
num **= 3
temp.append(num)
answer = sum(temp)
return answer | jjjk84/code_study | 프로그래머스/lv1/17682. [1차] 다트 게임/[1차] 다트 게임.py | [1차] 다트 게임.py | py | 950 | python | ko | code | 0 | github-code | 90 |
18354324439 | from bisect import bisect_left
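# Greedy subsequence matching: alf[c] stores the (sorted) positions of
# character c in s; bisect_left finds the next usable occurrence of each
# character of t, and every wrap-around past the end of s starts another
# full copy of s (counted in `sets`).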
s = list(input())
t = list(input())
alf=[[] for _ in range(26)]
for i in range(len(s)):
alf[ord(s[i])-97].append(i)
now_alf = -1
sets = 0
i = 0
while i < len(t):
next_alf = ord(t[i])-97
if len(alf[next_alf]) == 0:
print(-1)
exit()
if now_alf > alf[next_alf][-1]:
now_alf = -1
sets += 1
else:
serch_next_alf = bisect_left(alf[next_alf], now_alf)
now_alf = alf[next_alf][serch_next_alf]+1
i += 1
print(sets*len(s)+now_alf) | Aasthaengg/IBMdataset | Python_codes/p02937/s484666916.py | s484666916.py | py | 536 | python | en | code | 0 | github-code | 90 |
72682869417 | import streamlit as st
def main():
st.title("Streamlit Session State Tutorial")
st.subheader("Counter Example")
# Streamlit runs from top to bottom on every iteration so
# we check if 'count' has already been initialized in st.session_state
# if no, the initialize count to 0
# if count is already initialized, don't do anything
if 'count' not in st.session_state:
st.session_state.count = 0
# Create a button which will increment the counter
increment = st.button('Increment')
if increment:
st.session_state.count += 1
# A button to decrement the counter
decrement = st.button('Decrement')
if decrement:
st.session_state.count -= 1
st.write("Count =", st.session_state.count )
st.subheader("Callback Example - Mirrored Widgets")
def update_first():
st.session_state.second = st.session_state.first
def update_second():
st.session_state.first = st.session_state.second
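    # The on_change callbacks run before the script reruns, so editing either
    # textbox copies its value into the other widget's key and both stay in
    # sync without triggering an endless update loop.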
st.text_input(label="Textbox 1", key="first", on_change=update_first)
st.text_input(label="Textbox 2", key="second", on_change=update_second)
if __name__ == '__main__':
main()
| PacktPublishing/Web-App-Development-Made-Simple-with-Streamlit | Chapter15/Chapter15-session_state.py | Chapter15-session_state.py | py | 1,096 | python | en | code | 5 | github-code | 90 |
37136471876 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : game_stats.py
@Time : 2019/03/31 01:11:49
@Author : leacoder
@Version : 1.0
@Contact : leacock1991@gmail.com
@License :
@Desc : None
'''
# here put the import lib
# During a run of the game we create only one GameStats instance, but some
# statistics need to be reset whenever the player starts a new game.
# We therefore initialize most statistics in reset_stats() rather than
# directly in __init__().
class GameStats():
    """Track the game's statistics."""
    def __init__(self, ai_settings):
        """Initialize the statistics."""
        self.ai_settings = ai_settings
        self.reset_stats()
        # Start the game in an inactive state; clicking the Play button starts it
        self.game_active = False
        # The high score should never be reset
        self.high_score = 0
    def reset_stats(self):
        """Initialize the statistics that can change during the game."""
self.ships_left = self.ai_settings.ship_limit
self.score = 0
self.level = 1 | lichangke/Python3_Project | Python编程从入门到实践/alien_invasion/game_stats.py | game_stats.py | py | 1,109 | python | zh | code | 3 | github-code | 90 |
8868862502 | from Word.Syllabizer import WordSyllabizer
class Cursor(object):
def __init__(self, character, characters):
super(Cursor, self).__init__()
self.character = character
self.characters = characters
self.position = 0
self.syllable_break = False
self.prev_symbols = []
self.next_symbols = []
self.prev_phonemes = []
self.next_phonemes = []
def prev_symbols_set(self):
self.prev_symbols = reversed(self.characters[0:self.position])
def next_symbols_set(self):
self.next_symbols = reversed(
self.characters[self.position:len(self.characters)])
def prev_phonemes_set(self):
self.prev_phonemes = filter(
lambda x: x.as_repr("type") == "PHONEME", self.prev_symbols)
def next_phonemes_set(self):
self.next_phonemes = filter(
lambda x: x.as_repr("type") == "PHONEME", self.next_symbols)
def prev(self):
if self.position > 0:
self.position -= 1
return self.characters[self.position]
def next(self):
if self.position < len(self.characters):
self.position += 1
return self.characters[self.position]
def reset(self):
self.prev_symbols_set()
self.next_symbols_set()
self.prev_phonemes_set()
self.next_phonemes_set()
class WordSyllabizerTrouvain(WordSyllabizer):
def __init__(self):
pass
def _get_positions(self, word):
return self._apply_rules(word.phonetic_spelling().characters())
def _apply_rules(self, characters):
tokens = map(lambda x: Cursor(x, characters), characters)
self._rule2(tokens)
self._rule3(tokens)
return self._create_separation_points(tokens)
def _rule2(self, tokens): # FIXME (only works with osxtts)
for i in range(0, len(tokens)):
if (i + 1) < len(tokens) and \
tokens[i].character.as_repr(
"phoneme_type"
) == "1" and tokens[i+1].character.as_repr(
"phoneme_type"
) == "V":
tokens[i].position = i
tokens[i].reset()
p = 1
while p < len(
tokens[i].next_phonemes
) and tokens[i].next_phonemes[p].character.as_repr(
"TYPE"
) == 'C':
p += 1
if (p > 2 and tokens[i].next_phonemes[p]):
while (tokens[i].character.as_repr("TYPE") != 'C'):
i += 1
tokens[i].syllable_break = True
return tokens
def _rule3(self, tokens):
first_vowel = False
consonant_middle = False
first_vowel_position = None
consonant_middle_position = None
for i in range(0, len(tokens)):
if tokens[i].character.as_repr("phoneme_type") == 'V':
if not first_vowel:
first_vowel = True
first_vowel_position = i
if first_vowel and consonant_middle:
tokens[first_vowel_position].position = first_vowel_position
tokens[first_vowel_position].reset()
if not len(tokens[first_vowel_position].prev_phonemes):
tokens[consonant_middle_position].syllable_break = True
else:
tokens[first_vowel_position].syllable_break = True
first_vowel = True
first_vowel_position = i
consonant_middle = False
if tokens[i].character.as_repr("phoneme_type") == 'C':
if first_vowel:
if tokens[i].syllable_break:
first_vowel = False
consonant_middle = False
else:
consonant_middle = True
if not consonant_middle_position:
consonant_middle_position = i
return tokens
def _create_separation_points(self, tokens):
separation_points = []
position = 1
for i in range(0, len(tokens)):
if tokens[i].syllable_break:
separation_points.append(position)
if tokens[i].character.as_repr('type') == 'PHONEME':
position += 1
return separation_points
| pepperpepperpepper/WordSynth | WordSynth/Word/Syllabizer/Trouvain.py | Trouvain.py | py | 4,566 | python | en | code | 0 | github-code | 90 |
37655136156 | from flask import Flask
from flask import request
import requests
import json
import threading
app = Flask(__name__)
@app.route("/")
def receive_code():
code = request.args.get('code', '')
    if code != "":
print("Code received:" + code)
url = "https://iam.viessmann.com/idp/v2/token"
header = {"Content-Type": "application/x-www-form-urlencoded"}
data = "grant_type=authorization_code&client_id=9ceff2a5f57d345a580142626e3b4a7f&redirect_uri=http://localhost:4200/&code_verifier=2e21faa1-db2c-4d0b-a10f-575fd372bc8c-575fd372bc8c&code="+code
response = requests.post(url=url, headers=header, data=data)
if response.ok:
access_token = json.loads(response.text).get('access_token')
header = {"Authorization": "Bearer " + access_token}
req1 = "https://api.viessmann.com/iot/v1/equipment/installations/952499/gateways/7637415022052208/devices/0/features/heating.sensors.temperature.outside"
response = requests.get(url=req1, headers=header)
return "Außentemperatur: " + str(response.json()["data"]["properties"]["value"]["value"])
        else:
            # a requests.Response object is not a valid Flask return type
            return "Token request failed: " + str(response.status_code)
return "No code received"
if __name__ == "__main__":
args = {'host': '0.0.0.0'}
threading.Thread(target=app.run, kwargs=args).start() | fschw/dashboard | flasktest.py | flasktest.py | py | 1,335 | python | en | code | 0 | github-code | 90 |
28318156910 | # Reversi (Othello)
import random
import sys
def ispiši(ploča):
    # Prints the board. Returns nothing.
vodoravna = ' +---+---+---+---+---+---+---+---+'
uspravne = ' | | | | | | | | |'
print( ' 1 2 3 4 5 6 7 8')
print(vodoravna)
for y in range(8):
print(uspravne)
print(y+1, end=' ')
for x in range(8):
print('| {}'.format(ploča[x][y]), end=' ')
print('|')
print(uspravne)
print(vodoravna)
def isprazni(ploča):
    # Empties the board, except for the original starting position.
for x in range(8):
for y in range(8):
ploča[x][y] = ' '
    # Starting position:
ploča[3][3] = 'X'
ploča[3][4] = 'O'
ploča[4][3] = 'O'
ploča[4][4] = 'X'
def nova_ploča():
    # Creates a new, empty board as a data structure.
ploča = []
for i in range(8):
ploča.append([' '] * 8)
return ploča
def potez_valja(ploča, znak, xpoč, ypoč):
    # Returns False if the player's move at xpoč, ypoč is invalid.
    # Otherwise returns the list of squares the player would capture by playing there.
if not na_ploči(xpoč, ypoč) or ploča[xpoč][ypoč] != ' ':
return False
    ploča[xpoč][ypoč] = znak # temporarily place the tile on the board
if znak == 'X':
protuznak = 'O'
else:
protuznak = 'X'
preokrenuti = []
for xsmjer, ysmjer in [[0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1], [-1, 0], [-1, 1]]:
x, y = xpoč, ypoč
        x += xsmjer # first step in that direction
        y += ysmjer # first step in that direction
if na_ploči(x, y) and ploča[x][y] == protuznak:
            # There is an opponent tile adjacent to our square
x += xsmjer
y += ysmjer
if not na_ploči(x, y):
continue
while ploča[x][y] == protuznak:
x += xsmjer
y += ysmjer
            if not na_ploči(x, y): # exit the while loop, continue the for loop
break
if not na_ploči(x, y):
continue
if ploča[x][y] == znak:
            # Some tiles must be flipped. Walk back to the starting square, remembering every position along the way.
while True:
x -= xsmjer
y -= ysmjer
if (x, y) == (xpoč, ypoč):
break
preokrenuti.append([x, y])
    ploča[xpoč][ypoč] = ' ' # restore the empty square
    if not preokrenuti: # If nothing would be flipped, the move is invalid.
return False
return preokrenuti
def na_ploči(x, y):
    # Returns True if the coordinates lie on the board.
return 0 <= x <= 7 and 0 <= y <= 7
def ploča_s_mogućim_potezima(ploča, znak):
    # Returns a new board with the player's (znak) valid moves marked with a dot.
nova_ploča = kopiraj(ploča)
for x, y in dobri_potezi(nova_ploča, znak):
nova_ploča[x][y] = '.'
return nova_ploča
def dobri_potezi(ploča, znak):
    # Returns a list of [x, y] lists - the valid moves for player (znak) on the board.
popis_dobrih_poteza = []
for x in range(8):
for y in range(8):
if potez_valja(ploča, znak, x, y):
popis_dobrih_poteza.append([x, y])
return popis_dobrih_poteza
def boduj(ploča):
    # Determine the score by counting tiles. Returns a dict with keys 'X' and 'O'.
x_bodovi = 0
o_bodovi = 0
for x in range(8):
for y in range(8):
if ploča[x][y] == 'X':
x_bodovi += 1
if ploča[x][y] == 'O':
o_bodovi += 1
return {'X':x_bodovi, 'O':o_bodovi}
def odabir_znakova():
    # Let the player choose which tile they want to play.
    # Returns a list [player's tile, computer's tile].
znak = ''
while znak not in ('X', 'O'):
print('Želiš li biti X ili O?')
znak = input().upper()
    # the first tile in the list belongs to the player, the second to the computer
if znak == 'X':
return ['X', 'O']
else:
return ['O', 'X']
def tko_će_prvi():
    # Randomly choose who moves first.
if random.randint(0, 1) == 0:
return 'računalo'
else:
return 'igrač'
def ponovo():
    # Returns True or False depending on whether the player wants to play again.
print('Želiš li novu igru? (da ili ne)')
return input().lower().startswith('d')
def učini_potez(ploča, znak, xpoč, ypoč):
    # Place the tile on the board at xpoč, ypoč, and flip the opponent's tiles.
    # Returns True or False depending on whether the move was valid.
preokrenuti = potez_valja(ploča, znak, xpoč, ypoč)
if not preokrenuti:
return False
ploča[xpoč][ypoč] = znak
for x, y in preokrenuti:
ploča[x][y] = znak
return True
def kopiraj(ploča):
    # Make and return a duplicate of the board.
kopija = nova_ploča()
for x in range(8):
for y in range(8):
kopija[x][y] = ploča[x][y]
return kopija
def u_kutu(x, y):
    # Returns True if the position is in one of the 4 corners.
return {x, y} <= {0, 7}
def potez_igrača(ploča, znak_igrača):
    # Let the player enter their move.
    # Returns the move [x, y], or 'savjet' (hint), or 'izlaz' (quit).
znamenke_1_do_8 = set('1 2 3 4 5 6 7 8'.split())
while True:
print('Unesi potez, ili "izlaz" za prekid igre,\n ili "savjet" za uključivanje/isključivanje savjeta.')
potez = input().lower()
if potez == 'izlaz':
return 'izlaz'
if potez == 'savjet':
return 'savjet'
        if len(potez) == 2 and set(potez) <= znamenke_1_do_8:
            x = int(potez[0]) - 1
            y = int(potez[1]) - 1
            if not potez_valja(ploča, znak_igrača, x, y):
                continue
            else:
                break
else:
print('To nije dobar potez. Unesi x znamenku (1-8) pa y znamenku (1-8).')
print('Recimo, 81 će biti gornji desni kut.')
return [x, y]
def potez_računala(ploča, znak_računala):
    # Given the board and the tile the computer plays with, decide where to play
    # and return that move as an [x, y] list.
    mogući_potezi = dobri_potezi(ploča, znak_računala)
    # randomly shuffle the possible moves
    random.shuffle(mogući_potezi)
    # always take a corner if possible
    for x, y in mogući_potezi:
        if u_kutu(x, y):
            return [x, y]
    # go through all possible moves and remember the one that scores the most points
najbolji_rezultat = -1
for x, y in mogući_potezi:
kopija = kopiraj(ploča)
učini_potez(kopija, znak_računala, x, y)
rezultat = boduj(kopija)[znak_računala]
if rezultat > najbolji_rezultat:
najbolji_potez = [x, y]
najbolji_rezultat = rezultat
return najbolji_potez
def rezultat(znak_igrača, znak_računala):
    # Prints the current score
bodovi = boduj(glavna_ploča)
print('Imaš {} bodova. Računalo ima {} bodova.'.format(bodovi[znak_igrača], bodovi[znak_računala]))
print('Dobrodošao u Reversi!')
while True:
    # Reset the board and the game.
glavna_ploča = nova_ploča()
isprazni(glavna_ploča)
if tko_će_prvi() == 'igrač':
na_redu = 'X'
else:
na_redu = 'O'
print(na_redu, 'igra prvi.')
while True:
ispiši(glavna_ploča)
bodovi = boduj(glavna_ploča)
print('X ima {} bodova. O ima {} bodova.'.format(bodovi['X'], bodovi['O']))
input('Pritisni Enter za nastavak.')
if na_redu == 'X':
            # X's turn.
protuznak = 'O'
x, y = potez_računala(glavna_ploča, 'X')
učini_potez(glavna_ploča, 'X', x, y)
else:
            # O's turn.
protuznak = 'X'
x, y = potez_računala(glavna_ploča, 'O')
učini_potez(glavna_ploča, 'O', x, y)
if not dobri_potezi(glavna_ploča, protuznak):
break
else:
na_redu = protuznak
    # Display the final score
ispiši(glavna_ploča)
bodovi = boduj(glavna_ploča)
print('Konačni rezultat: X {} bodova, O {} bodova.'.format(bodovi['X'], bodovi['O']))
if not ponovo():
break
| vedgar/inventwithpython3rded | translations/hr/src/UIsim1.py | UIsim1.py | py | 8,315 | python | hr | code | 0 | github-code | 90 |
70291264618 | __author__ = "Alien"
class Settings():
    '''A class that stores all of the game's settings'''
    def __init__(self):
        # Screen width
        self.screen_width = 1200
        # Screen height
        self.screen_height = 800
        # Screen background color
        self.bg_color = (230,230,230)
        # Pixels the ship moves per step
        self.ship_speed_factor = 3.5
        # Bullet settings
        self.bullet_speed_factor = 1
        self.bullet_width = 3
        self.bullet_height = 15
        self.bullet_color = 60,60,60
        self.bullets_allowed = 100
        # Alien settings
        self.alien_speed_factor = 1
        self.fleet_drop_speed = 10
        # A fleet_direction of 1 means move right; -1 means move left
        self.fleet_direction = 1
| Big-Belphegor/python-stu | Frist_project/settings.py | settings.py | py | 736 | python | en | code | 0 | github-code | 90 |
17974160189 | K = int(input())
N = 50
n = K // N
ans = [49+n] * N
K %= N
for i in range(N):
if i < K:
ans[i] += N - K + 1
else:
ans[i] -= K
print(N)
print(*ans) | Aasthaengg/IBMdataset | Python_codes/p03646/s580709218.py | s580709218.py | py | 160 | python | en | code | 0 | github-code | 90 |
5908490599 | # Problem description:
# https://github.com/HackBulgaria/Programming0-1/tree/master/week3/1-Baby-Steps
def square(x):
return x ** 2
#print(square(5))
def fact(x):
start = 1
product = 1
while start <= x:
product *= start
start += 1
return product
#print(fact(6))
def count_elements(x):
counter = 0
for i in x:
counter += 1
return counter
#print(count_elements(['ivo', 'jeko', 'edi']))
def member(x, xs):
is_member = True
if x not in xs:
is_member = False
return is_member
#print(member('Python', ['Django', 'Rails']))
students = ["Rado", "Ivo", "Maria", "Nina"]
grades = [3, 4.5, 5.5, 6]
def grades_that_pass(students, grades, limit):
result = []
for count, value in enumerate(grades):
if value >= limit:
result += [students[count]]
return result
#print(grades_that_pass(students, grades, 4.0))
| keremidarski/python_playground | Programming 0/week 3/01_begin_functions.py | 01_begin_functions.py | py | 999 | python | en | code | 0 | github-code | 90 |
1865113868 | def leftRotate(arr, d, n):
for i in range(gcd(d, n)):
temp = arr[i]
j = i
while 1:
k = j + d
if k >= n:
k = k - n
if k == i:
break
arr[j] = arr[k]
j = k
arr[j] = temp
def printArray(arr, size):
for i in range(size):
print("%d" % arr[i], end=" ")
def gcd(a, b):
if b == 0:
return a
else:
return gcd(b, a % b)
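
# The juggling algorithm above moves elements in gcd(d, n) cycles, writing
# each element straight into its final slot: O(n) time, O(1) extra space.
# Example: leftRotate([1, 2, 3, 4, 5, 6, 7], 2, 7) leaves the list as
# [3, 4, 5, 6, 7, 1, 2].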
n = int(input())
arr = []
for i in range(n):
t = int(input())
arr.append(t)
nos = int(input())
leftRotate(arr, nos, n)
printArray(arr, n)
| Anjan50/Python | Basic Programs/array_roatation_n_times.py | array_roatation_n_times.py | py | 624 | python | en | code | 17 | github-code | 90 |
376952219 |
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5 import QtCore, QtGui
import os, time, subprocess, sys, traceback
from widget_gui import Ui_WidgetAssist
from user_alert import Ui_AlertWindow
import widget_module as mod
icon_file=os.path.join('Dependencies', 'wInstaller.ico')
thread=mod.threading()
log = mod.logging()
init = mod.init_process()
file = mod.filework()
class ThreadClass1(QtCore.QThread):
disable_sig = QtCore.pyqtSignal()
enable_sig = QtCore.pyqtSignal()
def __init__(self, parent = None):
super(ThreadClass1, self).__init__(parent)
def run(self):
self._stopped = False
connection_class = mod.connections()
status = mod.status_class()
thread = mod.threading()
print(f'Started T1')
global already_processed
already_processed = []
while self._stopped == False:
all_connected = connection_class.find_samsung_modem()
if len(all_connected) == 0:
## check to see if device is in queue list
if len(connection_class.check_queued()) == 0:
## no devices in current process and clear GUI
status.set_status('', '')
already_processed=[]
else:
pass
elif len(all_connected) > 1:
status.set_status('ERROR', f'Too many devices found connected: {len(all_connected)}')
elif len(all_connected) == 1:
if all_connected[0] in already_processed:
pass
else:
self.disable_sig.emit()
## add to list to not re-run more than once
already_processed.append(all_connected[0])
## wait for possible devices that disconnect on first connect
## causing issues forcing technician to restart process (replug)
time.sleep(1.5)
init.step_one(all_connected[0])
time.sleep(1)
self.enable_sig.emit()
def stop(self):
self._stopped = True
class ThreadClass2(QtCore.QThread):
update_info_sig = QtCore.pyqtSignal(str, str)
def __init__(self, parent = None):
super(ThreadClass2, self).__init__(parent)
def run(self):
self._stopped = False
status=mod.status_class()
past_status = ''
self.update_info_sig.emit('', '')
while self._stopped == False:
try:
## [portnum, status]
curr_status = status.read_status()
if curr_status[1] != past_status:
self.update_info_sig.emit(curr_status[1], curr_status[0])
past_status=curr_status[1]
time.sleep(.25)
except Exception as e:
log.log_errors(f'ThreadClass2: {e}{traceback.format_exc()}')
time.sleep(1)
def stop(self):
self._stopped = True
class MainWindow(QMainWindow, Ui_WidgetAssist):
def __init__(self, parent = None):
super(MainWindow, self).__init__(parent)
self.main_win = QMainWindow()
self.ui_widgetapp = Ui_WidgetAssist()
self.ui_widgetapp.setupUi(self.main_win)
self.main_win.setWindowTitle('WidgetAssist: [GoToSearch] (v3.0)')
self.alert_win = QMainWindow()
self.alert_ui = Ui_AlertWindow()
self.alert_win.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
self.alert_ui.setupUi(self.alert_win)
self.dev_processing = ThreadClass1(self)
self.dev_processing.disable_sig.connect(self.disable_buttons)
self.dev_processing.enable_sig.connect(self.enable_buttons)
self.dev_gui_update = ThreadClass2(self)
self.dev_gui_update.update_info_sig.connect(self.update_device_info)
self.dev_gui_update.start()
self.ui_widgetapp.reboot.triggered.connect(lambda: thread.create_thread(f'processing().reboot_device()'))
self.ui_widgetapp.shutdown.triggered.connect(lambda: thread.create_thread(f'processing().poweroff_device()'))
self.ui_widgetapp.retry_proc.triggered.connect(lambda: thread.create_thread(f'processing().reset_device()'))
self.alert_ui.browse_apk.clicked.connect(self.load_apk)
self.ui_widgetapp.n_log.triggered.connect(log.open_normal_log)
self.ui_widgetapp.e_log.triggered.connect(log.open_error_log)
self.ui_widgetapp.fail_log.triggered.connect(log.open_fail_count)
self.ui_widgetapp.pass_log.triggered.connect(log.open_succ_count)
## detect for first init
self.check_apk_loaded()
def check_apk_loaded(self):
"""
This is created to make sure the user
uploads their apk on first init
"""
if file.check_apk_exists() == 1:
self.disable_buttons()
self.alert_win.show()
else:
## apk exists, starts processing thread
self.dev_processing.start()
def update_device_info(self, msg_info, port_num):
try:
if msg_info == '' and port_num == '':
self.ui_widgetapp.PORT_WIN.setText('')
self.ui_widgetapp.TEXT_WIN.setText('')
else:
port_text = f'Detected {port_num}'
self.ui_widgetapp.PORT_WIN.setText(port_text)
self.ui_widgetapp.TEXT_WIN.append("".join(msg_info))
self.ui_widgetapp.TEXT_WIN.verticalScrollBar().setValue(self.ui_widgetapp.TEXT_WIN.verticalScrollBar().maximum())
except Exception as error:
            log.log_errors(f'GUI_Update: {error}')
def enable_buttons(self):
try:
self.ui_widgetapp.reboot.setDisabled(False)
self.ui_widgetapp.shutdown.setDisabled(False)
self.ui_widgetapp.retry_proc.setDisabled(False)
self.ui_widgetapp.n_log.setDisabled(False)
self.ui_widgetapp.e_log.setDisabled(False)
self.ui_widgetapp.pass_log.setDisabled(False)
self.ui_widgetapp.fail_log.setDisabled(False)
except Exception as e:
log.log_errors(f'enable_btns: {e}')
def disable_buttons(self):
try:
self.ui_widgetapp.reboot.setDisabled(True)
self.ui_widgetapp.shutdown.setDisabled(True)
self.ui_widgetapp.retry_proc.setDisabled(True)
self.ui_widgetapp.n_log.setDisabled(True)
self.ui_widgetapp.e_log.setDisabled(True)
self.ui_widgetapp.pass_log.setDisabled(True)
self.ui_widgetapp.fail_log.setDisabled(True)
except Exception as e:
print(f'disable_btns: {e}')
log.log_errors(f'disable_btns: {e}')
def load_apk(self):
imported_apk_file = QFileDialog.getOpenFileName(self, 'Select .apk file given for processing', 'c:\\',"APK File (*.apk)")
try:
if imported_apk_file:
imported_apk_file_path=imported_apk_file[0]
if file.import_apk_file(imported_apk_file_path) == 1:
log.log_normal(f'Imported install.apk for first time use')
self.hide_alert()
## start processing thread
self.dev_processing.start()
## re-enable buttons
self.enable_buttons()
else:
log.log_errors(f'Error while loading install.apk\n{traceback.format_exc()}')
except Exception as e:
print(f'Import_APK: {e}\n{traceback.format_exc()}')
log.log_errors(f'Import_APK: {e}\n{traceback.format_exc()}')
def show_alert(self):
self.alert_win.show()
def hide_alert(self):
self.alert_win.hide()
def hide(self):
self.main_win.hide()
def show(self):
self.main_win.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
app.setWindowIcon(QtGui.QIcon(icon_file))
try:
        ## make sure the user is alerted on every run
        ## if only the demo apk is installed
main_win = MainWindow()
main_win.show()
main_win.activateWindow()
sys.exit(app.exec_())
except Exception as e:
log.log_errors(f'Main: {e}\n{traceback.format_exc()}')
| P3nguin-M/WidgetAssist_GoToSearch | WidgetAssist_GoToSearch.py | WidgetAssist_GoToSearch.py | py | 7,251 | python | en | code | 0 | github-code | 90 |
22309490617 | from urllib import error, parse, request
from configparser import ConfigParser
from pprint import pp
import argparse
import style
import json
import sys
# The line of code below is used for every API call
URL = "http://api.openweathermap.org/data/2.5/weather"
"""
Semelhante aos códigos de resposta HTTP, a API de clima fornece um código de condição climática com cada resposta.
Esse código categoriza as condições climáticas em grupos definidos por um intervalo de números de identificação.
"""
THUNDERSTORM = range(200, 300)
DRIZZLE = range(300, 400)
RAIN = range(500, 600)
SNOW = range(600, 700)
ATMOSPHERE = range(700, 800)
CLEAR = range(800, 801)
CLOUDY = range(801, 900)
def _get_api_key():
config = ConfigParser()
config.read("secrets.ini")
return config["openweather"]["api_key"]
def read_user_cli_args():
parser = argparse.ArgumentParser(description="inform weather for a city")
parser.add_argument("city", nargs="+", type=str, help="Insert the city name")
parser.add_argument("-i", "--imperial", action="store_true", help="Temp. in imperial units")
return parser.parse_args()
"""
Teste no terminal a aplicação, irá aparecer a seguinte mensagem de erro:
usage: weather.py [-h]
weather.py: error: unrecognized arguments: vienna
O Python primeiro imprime informações de uso em seu console. Essas informações sugerem a ajuda interna (-h) fornecida pelo ArgumentParser.
Em seguida, ele informa que seu analisador não reconheceu o argumento que você passou para o programa usando sua CLI.
"""
"""
No entanto, se você executar o script e passar um nome de cidade como entrada, ainda não poderá ver nenhuma saída exibida de volta ao seu console.
Volte para weather.py e edite o código em seu bloco de código condicional na parte inferior do arquivo:
"""
def build_query(city_input, imperial=False):
api_key = _get_api_key()
city_name = " ".join(city_input)
url_encoded_city_name = parse.quote_plus(city_name)
units = "imperial" if imperial else "metric"
url = (
f"{URL}?q={url_encoded_city_name}"
f"&units={units}&appid={api_key}"
)
return url
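
# For example (with a hypothetical key), build_query(["vienna"]) returns:
# http://api.openweathermap.org/data/2.5/weather?q=vienna&units=metric&appid=<API_KEY>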
"""
Você começou adicionando uma nova instrução de importação na linha 5. Você usará uma função do módulo urllib.parse
na linha 24 para ajudar a limpar a entrada do usuário para que a API possa consumi-la com segurança.
"""
def get_data(query_url):
try:
response = request.urlopen(query_url)
except error.HTTPError as http_error:
if http_error.code == 401:
sys.exit("ACCESS DENIED!")
        elif http_error.code == 404:
            sys.exit("Weather data not found!")
        else:
            sys.exit(f"Something went wrong ... ({http_error.code})")
data = response.read()
try:
return json.loads(data)
except json.JSONDecodeError:
sys.exit("Ineligible answer!")
def display_info(weather_data, imperial=False):
city = weather_data["name"]
weather_id = weather_data["weather"][0]["id"]
weather_description = weather_data["weather"][0]["description"]
temperature = weather_data["main"]["temp"]
style.change_color(style.REVERSE)
print(f"{city:^{style.PADDING}}", end="")
style.change_color(style.RESET)
weather_symbol, color = _select_weather_display_params(weather_id)
style.change_color(color)
print(f"\t{weather_symbol}", end=" ")
print(f"{weather_description.capitalize():^{style.PADDING}}", end=" ")
style.change_color(style.RESET)
print(f"({temperature}°{'F' if imperial else 'C'})")
def _select_weather_display_params(weather_id):
if weather_id in THUNDERSTORM:
display_params = ("💥", style.RED)
elif weather_id in DRIZZLE:
display_params = ("💧", style.CYAN)
elif weather_id in RAIN:
display_params = ("💦", style.BLUE)
elif weather_id in SNOW:
display_params = ("⛄️", style.WHITE)
elif weather_id in ATMOSPHERE:
display_params = ("🌀", style.BLUE)
elif weather_id in CLEAR:
display_params = ("🔆", style.YELLOW)
elif weather_id in CLOUDY:
display_params = ("💨", style.WHITE)
else: # In case the API adds new weather codes
display_params = ("🌈", style.RESET)
return display_params
"""
Com esta atualização, você adicionou um emoji a cada ID de clima
e resumiu os dois parâmetros de exibição em uma tupla.
"""
"""
Nota: Com essas adições, você tornou seu aplicativo de previsão do tempo Python
mais fácil de usar para desenvolvedores e não desenvolvedores!
"""
if __name__ == "__main__":
user_args = read_user_cli_args()
query_url = build_query(user_args.city, user_args.imperial)
weather_data = get_data(query_url)
display_info(weather_data, user_args.imperial)
"""
Com esta última alteração configurada em seu aplicativo de clima Python, você terminou de criar sua ferramenta CLI.
Agora você pode acessar seu mecanismo de busca favorito, procurar alguns nomes divertidos de cidades e passar o resto deste dia chuvoso
procurando um lugar onde você possa sonhar em passar suas próximas férias.
"""
| CarlosViniMSouza/Weather-CLI-App | main.py | main.py | py | 5,216 | python | pt | code | 0 | github-code | 90 |
10605529190 | import requests
import json
import time
import pandas as pd
from abc import ABC, abstractmethod
import quandl
import os
from dotenv import load_dotenv
class PricesFetcher(ABC):
"""
Price fetcher abstract class
"""
@abstractmethod
def get_prices(self, symbol: str) -> pd.DataFrame:
pass
class QuandlPricesFetcher(PricesFetcher):
def __init__(self):
"""
Initialises Quandl api by
getting api key from .env
"""
load_dotenv()
self.api_key = os.environ["QUANDL_API_KEY"]
quandl.ApiConfig.api_key = self.api_key
def get_prices(self, symbol: str) -> pd.DataFrame:
"""
Fetches Data from quandl api, cleans data
taking the adjust close as the value. Returns as
pandas dataframe
"""
data = quandl.get(symbol)
df = pd.DataFrame(data)
df = df.reset_index()
df = df.rename(columns={"Date": "Timestamp", "Adj Close": "Adjusted Close"})
df = df.set_index("Timestamp")
df = df.sort_index()
return df
class AlphaVantagePricesFetcher(PricesFetcher):
def __init__(self):
"""
Initialises alpha vantage api by
getting api key from .env
"""
load_dotenv()
self.api_key = os.environ["ALPHA_VANTAGE_API_KEY"]
def get_prices(self, symbol: str) -> pd.DataFrame:
"""
Fetches Data from alpha vantage api, cleans data
taking the close as the value. Returns as
pandas dataframe
"""
os.system("sleep 5")
url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={symbol}&apikey={self.api_key}"
response = requests.get(url)
data = json.loads(response.text)
daily_data = data["Time Series (Daily)"]
df = pd.DataFrame.from_dict(daily_data, orient="index")
df.index = pd.to_datetime(df.index)
df = df.astype(float)
df = df.sort_index()
df = df[["4. close"]] # Change this line to use the "close" column
df = df.rename(columns={"4. close": symbol})
return df
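
# Minimal usage sketch (assumes ALPHA_VANTAGE_API_KEY is set in .env;
# the "IBM" symbol is just an example):
if __name__ == "__main__":
    fetcher = AlphaVantagePricesFetcher()
    prices = fetcher.get_prices("IBM")
    print(prices.tail())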
| PRJM1999/Airline-Stocks | server/api/data_retrieval.py | data_retrieval.py | py | 2,120 | python | en | code | 0 | github-code | 90 |
72292578218 | """
Van Eck's sequence
times :
python3.9: 8.9s
pypy3 : 0.7s
"""
startseq = [9,19,1,6,0,5,4]
# iterations = 2020
iterations = 30_000_000
# array of last positions
pos = [0] * iterations
for idx, v in enumerate(startseq[:-1], start=1):
pos[v] = idx
last = startseq[-1]
for i in range(len(startseq), iterations):
nextval = i - pos[last] if pos[last] else 0
pos[last] = i
last = nextval
print(last)
| ldgeo/adventofcode | 2020/day15.py | day15.py | py | 421 | python | en | code | 1 | github-code | 90 |
12356466256 | import argparse
import os
from sklearn.metrics import accuracy_score, confusion_matrix
import pandas as pd
from tqdm import tqdm
from config.conf import CONFIG
import numpy as np
import json
from rouge import Rouge
from nltk import PorterStemmer
stemmer = PorterStemmer()
baselines = ["single", "ppl", "gltr"]
splits = ["val", "test"]
def rouge_calculation(hypotheses, references):
assert (len(hypotheses) == len(references))
hypoth = [" ".join([stemmer.stem(i) for i in line.split()]) for line in hypotheses]
max_scores = []
for i, hyp in enumerate(hypoth):
refs = [" ".join([stemmer.stem(i) for i in line.split()]) for line in references[i]]
hyps = [hyp] * len(refs)
rouge = Rouge()
scores = rouge.get_scores(hyps, refs, avg=False)
scores_sorted = sorted(scores, key=lambda kv: kv["rouge-l"]["f"], reverse=True)
# print("#" * 8)
# print(hyps)
# print(refs)
# print(scores)
# print(f"{len(scores)}")
# print(scores_sorted)
# break
max_scores.append(scores_sorted[0])
return max_scores
def prepare_args():
parser = argparse.ArgumentParser(description='Save ChatGPT QA data into mongoDB')
parser.add_argument('--data_dir', help='Where to load', default='/data/tsq/CK/data')
parser.add_argument('--source_type', help='open or api', default='open',
choices=['open', 'api'])
parser.add_argument('--time', help='When is the chat', default='before0301')
parser.add_argument('--language', help='en/zh', default='en',
choices=['en', 'zh'])
parser.add_argument('--source_dataset', help='Which dataset', default='HC3')
parser.add_argument('--file_name', help='Which dataset', default='HC3_en.jsonl')
# json
parser.add_argument('--times', help='For Changing', type=str, nargs='+', default=
"2023-01-18"
# "all"
)
parser.add_argument('--pp_suffixes', help='For Changing', type=str, nargs='+', default=
["base"]
# ["base", "para", "prompt", "prompt_para"]
# [""]
)
# detection train
parser.add_argument('--lgb_dir', type=str, default='/data/tsq/CK/model/lgb', )
parser.add_argument('--train_time_setting', type=str, default='only01',
choices=['only01', '01to03', ])
parser.add_argument('--train_feature_settings', help='For Changing features',
type=str, nargs='+', default=['bottom10_single', 'random10_single', 'single', 'ppl', 'gltr'])
parser.add_argument('--seeds', help='For Changing trails', type=int, nargs='+', default=[1, 2, 3, 4, 5])
# task
parser.add_argument('--start_id', help='start id', type=int, default=0)
parser.add_argument('--end_id', help='end id', type=int, default=1000)
parser.add_argument('--task', type=str, default='evaluate',
choices=['evaluate', 'calculate_detect_std'])
args = parser.parse_args()
return args
class Evaluator:
def __init__(self, args):
self.args = args
self.input_josnl_dir = os.path.join(args.data_dir, args.source_type, args.time, args.language,
args.source_dataset)
self.input_jsonl_path = os.path.join(self.input_josnl_dir, args.file_name)
# where to load features
self.eval_dir = os.path.join(self.args.data_dir, "api", "after0301",
self.args.language,
"HC3_eval")
if not os.path.exists(self.eval_dir):
os.makedirs(self.eval_dir)
def load_question_and_refs(self):
questions, refs_lst = [], []
with open("/data/tsq/CK/data/open/before0301/en/HC3/HC3_en.jsonl", 'r') as fin:
lines = fin.readlines()[self.args.start_id:self.args.end_id]
for line in lines:
json_obj = json.loads(line.strip())
question = json_obj["question"]
refs = json_obj["human_answers"]
questions.append(question)
refs_lst.append(refs)
return questions, refs_lst
def evaluate(self, time_qualifier, pp_suffix="base"):
# date
mm_dd = time_qualifier[-5:]
if self.args.source_dataset == "HC3" and self.args.source_type == "open":
input_jsonl_path = self.input_jsonl_path
else:
input_jsonl_path = os.path.join(self.input_josnl_dir, f"data{mm_dd}_{pp_suffix}.jsonl")
# answers
raw_answers = []
res_lst = []
with open(input_jsonl_path, 'r') as fin:
lines = fin.readlines()[self.args.start_id:self.args.end_id]
for line in tqdm(lines, total=len(lines)):
json_obj = json.loads(line.strip())
# get source_task, q and a
if self.args.source_dataset == "HC3" and self.args.source_type == "open":
answer = json_obj["chatgpt_answers"][0]
else:
answer = json_obj["a"]
# answer
raw_answers.append(answer)
res_lst.append(json_obj)
# questions and refs
questions, refs_lst = self.load_question_and_refs()
# calculate score
scores = rouge_calculation(raw_answers, refs_lst)
assert len(scores) == len(raw_answers)
# save
save_path = os.path.join(self.eval_dir, f"feature{mm_dd}_{pp_suffix}.json")
with open(save_path, 'w') as fout:
json.dump(scores, fout)
print(f"scores is output at: {save_path}")
def calculate_detect_std(self):
detect_dir = os.path.join(self.args.lgb_dir, f"train{self.args.train_time_setting}")
# init
sp2model2acc_lst = {}
for sp in splits:
model2sp2acc_lst = {}
for feature_setting in self.args.train_feature_settings:
model2sp2acc_lst[feature_setting] = []
sp2model2acc_lst[sp] = model2sp2acc_lst
# calculate acc for single_ppl_gltr
for seed in self.args.seeds:
single_ppl_gltr_dir = os.path.join(detect_dir, f"single_ppl_gltr_trail{seed}")
for sp in splits:
if sp == "val":
file_name = "val223-01-18.csv"
else:
file_name = "test2000-35days.csv"
single_ppl_gltr_path = os.path.join(single_ppl_gltr_dir, file_name)
# model and its acc
spg_df = pd.read_csv(single_ppl_gltr_path)
model2pred = {}
for index, row in spg_df.iterrows():
for key in baselines:
prob = row[key]
if prob > 50:
pred = 1
else:
pred = 0
try:
model2pred[key].append(pred)
except KeyError:
model2pred[key] = [pred]
# calculate acc
for k, preds in model2pred.items():
acc = accuracy_score(spg_df[["label"]], preds)
tn, fp, fn, tp = confusion_matrix(spg_df[["label"]], preds).ravel()
sp2model2acc_lst[sp][k].append({
"acc": acc,
"tn": tn/len(preds),
"fp": fp/len(preds),
"fn": fn/len(preds),
"tp": tp/len(preds),
})
# load acc for feature
for seed in self.args.seeds:
for key in self.args.train_feature_settings:
if key in baselines:
continue
result_dir = os.path.join(detect_dir, f"{key}_trail{seed}")
for sp in splits:
res_file_csv = os.path.join(result_dir, f"offline_{sp}.csv")
# model's predicts and labels
res_df = pd.read_csv(res_file_csv)
acc = accuracy_score(res_df[["label"]], res_df[["preds"]])
tn, fp, fn, tp = confusion_matrix(res_df[["label"]], res_df[["preds"]]).ravel()
sp2model2acc_lst[sp][key].append({
"acc": acc,
"tn": tn/len(res_df[["preds"]]),
"fp": fp/len(res_df[["preds"]]),
"fn": fn/len(res_df[["preds"]]),
"tp": tp/len(res_df[["preds"]]),
})
# calculate mean and std for each model
keys = ["acc", "tn", "fp", "fn", "tp"]
for sp, model2acc_lst in sp2model2acc_lst.items():
for model, acc_lst in model2acc_lst.items():
for k in keys:
k_acc_lst = [i[k] for i in acc_lst]
if k == "acc":
print(f"split {sp}, metric {k}, model {model}, average {np.mean(k_acc_lst)}, std {np.std(k_acc_lst)}")
# s = """${_avg}_{{\pm{_std}}}$""".format(_avg=f"{np.mean(k_acc_lst):.3f}", _std=f"{np.std(k_acc_lst):.3f}")
# print(s)
else:
print(f"split {sp}, metric {k}, model {model}, average {np.mean(k_acc_lst)}, std {np.std(k_acc_lst)}")
s = """${_avg}_{{\pm{_std}}}$""".format(_avg=f"{np.mean(k_acc_lst)*100:.1f}", _std=f"{np.std(k_acc_lst)*100:.1f}")
print(s)
if __name__ == '__main__':
args = prepare_args()
evaluator = Evaluator(args)
if args.task == 'evaluate':
# set time_qualifier
if args.times == ["all"] or args.times == "all":
files = os.listdir("/data/tsq/CK/data/api/after0301/en/HC3")
names = []
for file in files:
if file.startswith("data"):
names.append(file.split("_")[0])
# filter complicate days
final_names = set(names)
time_qualifiers = list(final_names)
time_qualifiers.sort()
else:
time_qualifiers = list(args.times)
print(f"time_qualifiers {time_qualifiers}")
print(f"pp_suffixes {args.pp_suffixes}")
for _time_qualifier in time_qualifiers:
for pp_suffix in args.pp_suffixes:
evaluator.evaluate(_time_qualifier, pp_suffix)
elif args.task == 'calculate_detect_std':
evaluator.calculate_detect_std()
| THU-KEG/ChatLog | data/evaluation.py | evaluation.py | py | 10,650 | python | en | code | 90 | github-code | 90 |
13221336381 | from django.db import models
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
name = models.CharField(max_length=255)
class Gender(models.Model):
gender = models.CharField(max_length=255)
def __str__(self):
return self.gender
class Subject(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Teacher(models.Model):
gender = models.ForeignKey(Gender, on_delete=models.CASCADE)
name = models.CharField(max_length=255)
subject = models.ForeignKey(Subject, on_delete=models.CASCADE)
activ = models.BooleanField(default=False)
money = models.IntegerField()
group_count = models.IntegerField(default=0)
def __str__(self):
return self.name
class Group(models.Model):
teacher = models.ForeignKey(Teacher, on_delete=models.CASCADE)
name = models.CharField(max_length=255)
student_count = models.PositiveIntegerField(default=0)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
q = Group.objects.filter(teacher=self.teacher)
        if q.count() < self.teacher.group_count:
            return super().save(*args, **kwargs)
        else:
            raise SystemError(f"Adding more groups is not allowed! Maximum is {self.teacher.group_count}!")
class Student(models.Model):
gender = models.ForeignKey(Gender, on_delete=models.CASCADE)
group = models.ForeignKey(Group, on_delete=models.CASCADE)
teacher = models.ForeignKey(Teacher, on_delete=models.CASCADE, blank=True, null=True)
name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
activ = models.BooleanField(default=False)
price = models.IntegerField(default=0)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
q = Student.objects.filter(group=self.group)
        if q.count() < self.group.student_count:
            return super().save(*args, **kwargs)
        else:
            raise SystemError(f"Adding more students is not allowed! Maximum is {self.group.student_count}!")
class Registration(models.Model):
gender = models.ForeignKey(Gender, on_delete=models.CASCADE)
name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
age = models.IntegerField()
subject = models.ForeignKey(Subject, on_delete=models.CASCADE)
register = models.BooleanField(default=False)
| oktamovabdulaziz/studys | main/models.py | models.py | py | 2,563 | python | en | code | 1 | github-code | 90 |
20574749891 | import cv2
import numpy as np
import time
import PoseModule as pm
cap = cv2.VideoCapture("AITrainer/zoom_0.mp4")#"AiTrainer/curls.mp4"
detector = pm.poseDetector()
count = 0
dir = 0
pTime = 0
while True:
success, img = cap.read()
img = cv2.resize(img, (1280, 720))
img = detector.findPose(img, False)
lmList = detector.findPosition(img, False)
if len(lmList) != 0:
# Shoulder, Stomach and knee
angle = detector.findAngle(img, 11, 23, 25)
per = np.interp(angle, (160, 310), (0, 100))
bar = np.interp(angle, (220, 310), (650, 100))
color = (255, 0, 255)
if per == 100:
color = (0, 255, 0)
if dir == 0:
count += 1.0
dir = 1
if per == 0:
color = (0, 255, 0)
if dir == 1:
count += 1.0
dir = 0
#print(count)
cv2.putText(img,str(count),(100,300),cv2.FONT_HERSHEY_SIMPLEX,5,(255,0,0),5)
# Draw Bar
cv2.rectangle(img, (1100, 100), (1175, 650), color, 3)
cv2.rectangle(img, (1100, int(bar)), (1175, 650), color, cv2.FILLED)
cv2.putText(img, f'{int(per)} %', (1100, 75), cv2.FONT_HERSHEY_PLAIN, 4,
color, 4)
        # Draw the rep-count box
cv2.rectangle(img, (0, 450), (250, 720), (0, 255, 0), cv2.FILLED)
cv2.putText(img, str(int(count)), (45, 670), cv2.FONT_HERSHEY_PLAIN, 15,
(255, 0, 0), 25)
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
cv2.putText(img, str(int(fps)), (50, 100), cv2.FONT_HERSHEY_PLAIN, 5,
(255, 0, 0), 5)
cv2.imshow("Image", img)
cv2.waitKey(1)
| Ramasubramanya-MS/Fall-Accident-Detection-Using-CNN-CV | CV - Fall Detection - PoseModule/AITrainer.py | AITrainer.py | py | 1,721 | python | en | code | 6 | github-code | 90 |
30947215038 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 17:17:25 2018
@author: user
"""
test_string=input("Enter string:")
l = test_string.split()
wordfreq=[l.count(p) for p in l]
print(dict(zip(l,wordfreq))) | ranjuinrush/excercise | ex4/untitled8.py | untitled8.py | py | 218 | python | en | code | 0 | github-code | 90 |
5064454679 |
import yaml, os.path
from core.config.fuzzer import FuzzerConfig
from core.config.jsanlyzer import JsAnalyzerConfig
from core.scanner.excluder import Excluder
from core.jsanalyzer.anlysis import ExtractorsLoader
from core.logger import Level
from core.utils import *
from core.config.builder import ConfigBuilder
from core.request import Headers
from core.config.module import *
from core.config.rock import *
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
class OptionsBuilder(ConfigBuilder):
def __init__(self, data) -> None:
ConfigBuilder.__init__(self, data)
def buildSharedData(self, cfghandler: Config):
cfghandler.SetTarget(self.data.target)
cfghandler.SetThreads(self.data.threads)
cfghandler.SetHeaders(self.buildHeaders())
cfghandler.SetVerbosity(self.buildVerbosity())
def buildSharedEnumerationData(self, cfg: EnumerationConfig):
self.buildSharedData(cfg)
cfg.SetTimeout(self.data.timeout)
cfg.SetSources(self.buildSources())
sources = self.buildSources()
if sources:
cfg.SetSources(sources)
if self.data.recursive:
cfg.enableRecursive()

    def buildVerbosity(self):
        v = Verbosity()
        if self.data.verbose:
            v.Enable()
        level = self.data.level
        if level == 1:
            v.SetLevel(Level.CRITICAL)
        elif level == 2:
            v.SetLevel(Level.INFO)
        elif level == 3:
            v.SetLevel(Level.DEBUG)
        else:
            raise ValueError("Invalid verbosity level")
        return v

    def buildSources(self):
        if self.data.sources:
            if ',' in self.data.sources:
                return self.data.sources.split(',')
            else:
                return [self.data.sources]
        return None  # explicit: no sources configured

    def buildModule(self):
        module = ConfigBuilder.buildModule(self)
        endpoint = dict()
        url = module.GetTarget()  # buildSharedData() had set the url as the target here
        endpoint['url'] = url
        endpoint['params'] = list()
        if self.data.post:
            endpoint['m_type'] = "POST"
            for i in self.data.post.split('&'):
                param = dict()
                pname, temp = i.split('=')
                # an optional "|type" suffix marks the parameter type
                pvalue, p_type = temp.split('|') if '|' in temp else [temp, '']
                param['name'] = pname
                param['value'] = pvalue
                param['p_type'] = p_type
                endpoint['params'].append(param)
        else:
            endpoint['m_type'] = "GET"
        module.SetTarget(endpoint)
        return module
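
    # (Illustration, not from the original source: a POST body such as
    #     "user=admin&id=1|int"
    # parses into params [{'name': 'user', 'value': 'admin', 'p_type': ''},
    #                     {'name': 'id', 'value': '1', 'p_type': 'int'}].)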

    def buildList3rConfig(self):
        if self.data.sublist3r:
            list3r = List3rConfig()
            self.buildSharedEnumerationData(list3r)
            return list3r

    def buildFinderConfig(self):
        finder = FinderConfig()
        apisfmt = self.data.subfinder_apis
        self.buildSharedEnumerationData(finder)
        finder.SetMaxEnumerationTime(self.data.maxEnumerationTime)
        if self.data.subfinder_all:
            finder.UseAll()
        if apisfmt:
            apis = {}
            if os.path.isfile(apisfmt):
                # a YAML file mapping provider names to API keys
                try:
                    with open(apisfmt, 'r') as stream:
                        data = yaml.load(stream, Loader=Loader)
                    for key in data.keys():
                        apis[key.capitalize()] = data[key]
                except:
                    raise ValueError("Invalid yaml")
            elif ',' in apisfmt:
                # inline comma-separated "provider:key" pairs
                for fmt in apisfmt.split(','):
                    if ':' in fmt:
                        key, value = fmt.split(':')
                        apis[key.capitalize()] = value.split('+') if '+' in value else [value]
                    else:
                        raise ValueError("Invalid subfinder APIs format")
            else:
                # a single "provider:key" pair
                if ':' in apisfmt:
                    key, value = apisfmt.split(':')
                    apis[key.capitalize()] = value.split('+') if '+' in value else [value]
                else:
                    raise ValueError("Invalid subfinder APIs format")
            finder.SetAPIs(apis)
        return finder
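
    # (Illustration, not from the original source: an inline value such as
    #     "virustotal:KEY1+KEY2,shodan:KEY3"
    # parses to {'Virustotal': ['KEY1', 'KEY2'], 'Shodan': ['KEY3']}; a path to
    # a YAML file with the same provider -> keys mapping is accepted as well.)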

    def buildReverseIPConfig(self):
        if self.data.revip:
            revip = ReverseIPConfig()
            self.buildSharedEnumerationData(revip)
            return revip

    def buildHeaders(self):
        if self.data.headers and os.path.isfile(self.data.headers):
            return parse_headers_from_file(self.data.headers)
        return Headers(Headers.Parser.toDict(self.data.headers) if self.data.headers else dict())

    def buildMode(self):
        mode = self.data.mode.lower()
        modecfg = RockMode()
        mode_parts = set(mode.split('+'))
        # e.g. "s+r" or "scan+recon" selects scan and recon together
        if '+' in mode and mode_parts & {'s', 'scan'} and mode_parts & {'r', 'recon'}:
            modecfg.SetModeToBoth()
        elif mode in ('s', 'scan'):
            modecfg.SetModeToScan()
        elif mode in ('r', 'recon'):
            modecfg.SetModeToRecon()
        elif mode in ('c', 'crawl'):
            modecfg.SetModeToCrawl()
        elif mode in ('a', 'jsanalyze'):
            modecfg.SetModeToJsAnalyze()
        elif mode in ('f', 'fuzz'):
            modecfg.SetModeToFuzz()
        return modecfg

    def buildExcluder(self):
        excluder = Excluder()
        if self.data.included_modules:
            included = self.data.included_modules.split(',') if ',' in self.data.included_modules else [self.data.included_modules]
            exclude_case = False
            excluded = []
            for module in included:
                if module.startswith('-'):
                    # a leading '-' turns the list into an exclusion list
                    exclude_case = True
                    excluded.append(module[1:])
            if exclude_case:
                excluder.excludeL(excluded)
            else:
                excluder.includeL(included)
        return excluder

    def buildModulesOptions(self):
        options = ModulesOptions()
        if self.data.collaborator:  # it's shared between modules
            options.register_common('collaborator', self.data.collaborator)
        if self.data.sqlmap:
            options.register('sqli', 'sqlmap', self.data.sqlmap)
        if self.data.xsshunter:
            options.register('xss', 'xsshunter', self.data.xsshunter)
        return options

    def buildOutput(self):
        if self.data.output:
            output = OutputConfig()
            output.SetFileName(self.data.output)
            output.enableOutput()
            if self.data.format == "text":
                output.SetFormat(Format.Text)
            elif self.data.format == "json":
                output.SetFormat(Format.Json)
            else:
                return None
            return output

    def buildCrawler(self):
        crawler = CrawlerConfig()
        self.buildSharedData(crawler)
        crawler.SetDepth(self.data.depth)
        if self.data.subsInScope:
            crawler.enableSubsInScope()
        if self.data.insecure:
            crawler.enableInsecure()
        if self.data.nocrawl:
            crawler.disable()
        if self.data.sc:
            crawler.enableGetStatusCode()
        if self.data.noOutOfScope:
            crawler.enableNoOutOfScope()
        if self.data.disallowed:
            crawler.SetDisallowed(self.data.disallowed.split(','))
        return crawler

    def buildJsAnalyzer(self):
        jsAnalyzer = JsAnalyzerConfig()
        self.buildSharedData(jsAnalyzer)
        jsAnalyzer.SetCrawlerConfig(self.buildCrawler())
        # Load extractors
        extLoader = ExtractorsLoader()
        if self.data.by_platforms:
            extLoader.LoadByPlatforms(self.data.by_platforms.split(','))
        elif self.data.by_keys:
            extLoader.LoadByKeys(self.data.by_keys.split(','))
        else:
            extLoader.LoadAll()
        # Pass the extractors list to the config
        jsAnalyzer.SetExtractors(extLoader.GetAll())
        return jsAnalyzer

    def buildFuzzer(self):
        if not self.data.wordlists:
            raise Exception("You must pass wordlists using '--wordlists' option")
        fuzzer = FuzzerConfig()
        self.buildSharedData(fuzzer)
        fuzzer.SetWordLists(self.data.wordlists.split(','))
        # Set post params
        if self.data.post:
            if '|' in self.data.post:
                raise ValueError("parameter type not required in the fuzz mode")
            fuzzer.SetData(self.data.post)
        if self.data.method:
            fuzzer.SetMethod(self.data.method.upper())
        if self.data.matchers:
            matchers = {}
            for matcher in self.data.matchers.split('-'):
                matcher = matcher.split(':') if ':' in matcher else [matcher, '']
                matchers[matcher[0]] = matcher[1]
            fuzzer.SetMatchers(matchers)
        if self.data.filters:
            filters = {}
            for filter in self.data.filters.split('-'):
                filter = filter.split(':') if ':' in filter else [filter, '']
                filters[filter[0]] = filter[1]
            fuzzer.SetFilters(filters)
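        # (Illustration, not from the original source: a matchers string such as
        #     "code:200,301-size:1234"
        # splits on '-' into {'code': '200,301', 'size': '1234'}; filters use
        # the same "name:value-name:value" shape.)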
        if self.data.inputMode:
            fuzzer.SetInputMode(self.data.inputMode)
        if self.data.matcherMode:
            fuzzer.SetMatcherMode(self.data.matcherMode)
        if self.data.filterMode:
            fuzzer.SetFilterMode(self.data.filterMode)
        if self.data.strategy:
            fuzzer.SetAutoCalibrationStrategy(self.data.strategy)
        if self.data.frecursion:
            fuzzer.EnableRecursion()
        if bool(self.data.fdepth):
            fuzzer.SetRecursionDepth(self.data.fdepth)
        if self.data.fstrategy:
            fuzzer.SetRecursionStrategy(self.data.fstrategy)
        return fuzzer
| abdallah-elsharif/WRock | ui/cli/builder.py | builder.py | py | 9,934 | python | en | code | 26 | github-code | 90 |
24259597894 | import telebot
import random
from telebot import types
# Load the list of facts
with open('facts.txt', 'r', encoding='UTF-8') as file:
    facts = file.read().split('\n')
# Create the bot
bot = telebot.TeleBot('5681197522:AAG18F0ArwMg2oIKjJB2gm0EyVHlrwhRXJQ')


# /start command
@bot.message_handler(commands=["start"])
def start(message, res=False):
    # Add two keyboard buttons
    markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
    item1 = types.KeyboardButton("Fact")
    item2 = types.KeyboardButton("Contact me")
    markup.add(item1, item2)
    bot.send_message(message.chat.id, 'Hi! Press "Fact" to get an interesting fact about the BMW Group', reply_markup=markup)


@bot.message_handler(content_types=["text"])
def handle_text(message):
    # If the user asked for a fact, reply with a random one
    if message.text.strip() == 'Fact':
        answer = random.choice(facts)
    elif message.text.strip() == 'Contact me':
        answer = 't.me/ksblv'
    else:
        answer = "I don't understand you :("
    # Send the reply to the user's chat
    bot.send_message(message.chat.id, answer)


# Start polling for updates
bot.polling(none_stop=True, interval=0)
| ksblv/Telegram-Bot | bot.py | bot.py | py | 1,493 | python | ru | code | 0 | github-code | 90 |
19737013946 | from selenium import webdriver
import time
#FILEDIR = "C:/Users/inasahu/PycharmProjects/SeleniumPython/"
FILEDIR = "C:/Users/Anurag/PycharmProjects/Python-Selenium/"
driver = webdriver.Chrome(executable_path=FILEDIR + "Drivers/chromedriver.exe")
# implicit wait
driver.implicitly_wait(5)
driver.get("http://demo.guru99.com/test/upload/")
driver.maximize_window()
print(driver.title)
time.sleep(3)
# XPath of the <input type="file"> element; send_keys() with a local path uploads the file
upload_input_xpath = "//*[@id='uploadfile_0']"
driver.find_element_by_xpath(upload_input_xpath).send_keys("C:/Users/Anurag/Downloads/Hair-Loss-Protocol.png")
time.sleep(3)
driver.quit()
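
# (Sketch, assuming Selenium 4+: executable_path is deprecated there in favour
# of a Service object, e.g.
#     from selenium.webdriver.chrome.service import Service
#     driver = webdriver.Chrome(service=Service(FILEDIR + "Drivers/chromedriver.exe"))
# and the find_element_by_* helpers are replaced by driver.find_element(By.XPATH, ...).)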
| 2310anuragsahu/Python-Selenium | File Upload.py | File Upload.py | py | 624 | python | en | code | 1 | github-code | 90 |
18506637619 | N = int(input())
found = False
# exhaustive search: 4 * 25 = 100 and 7 * 14 = 98 cover every N in the problem's range
for x in range(0, 25 + 1):
    for y in range(0, 14 + 1):
        if 4 * x + 7 * y == N:
            found = True
if found:
    print("Yes")
else:
    print("No")
| Aasthaengg/IBMdataset | Python_codes/p03285/s201648764.py | s201648764.py | py | 191 | python | en | code | 0 | github-code | 90 |
20389460079 | from tg.projects.create_sagemaker_routine import SagemakerRoutine
from tg.projects.alternative.alternative_task import AlternativeTrainingTask
from tg.common.delivery.sagemaker import Autonamer, download_and_open_sagemaker_result
from tg.common.ml.batched_training import context as btc


def debug_run(in_docker=False):
    task = AlternativeTrainingTask('tsa-mini', network_type=btc.Dim3NetworkType.AlonAttention)
    task.settings.training_batch_limit = 1
    task.settings.evaluation_batch_limit = 1
    task.settings.epoch_count = 5
    routine = SagemakerRoutine(task)
    if not in_docker:
        result = routine.attached().execute()
        rs_display = [c['roc_auc_score_display'] for c in result['output']['history']]
        print(rs_display)
    else:
        routine.local().execute()


def remote_run(fixed, **kwargs):
    fixed['dataset'] = 'tsa-full'
    aut = Autonamer(AlternativeTrainingTask, 'att', fixed)
    tasks = aut.build_tasks(**kwargs)
    for task in tasks:
        routine = SagemakerRoutine(task)
        routine.remote().execute()
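
# (Assumption from usage only, not verified against the tg library: Autonamer
# appears to expand the keyword-argument lists into a cartesian grid of tasks,
# e.g. build_tasks(epoch_count=[3], batch_size=[10000, 20000]) would yield two
# named tasks, each submitted as its own SageMaker job.)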


if __name__ == '__main__':
    # debug_run(False)
    # remote_run({}, epoch_count=[3], batch_size=[10000, 20000, 50000, 100000, 200000])
    # remote_run({}, hidden_size=[10, 20, 50, 100], learning_rate=[0.05, 0.02, 0.01])
    # remote_run({}, hidden_size=[100], learning_rate=[0.01], context_length=[5, 9, 15, 21], features=['p', None])
    # remote_run({}, hidden_size=[100], learning_rate=[0.01], context_length=[5, 9, 15, 21], features=['p', None], batch_size=50000)
    cond = dict(hidden_size=100, learning_rate=0.01, context_length=15, epoch_count=30, features='p')
    # remote_run(cond, reduction_type=[btc.ReductionType.Pivot], batch_size=50000)
    remote_run(cond, reduction_type=[btc.ReductionType.Dim3, btc.ReductionType.Dim3Folded], network_type=[btc.Dim3NetworkType.AlonAttention, btc.Dim3NetworkType.AlonAttentionWithoutFullyConnected, btc.Dim3NetworkType.LSTM, btc.Dim3NetworkType.SelfAttentionAndLSTM])
| okulovsky/grammar_ru | tg/projects/alternative/run_training.py | run_training.py | py | 2,005 | python | en | code | 11 | github-code | 90 |
28518139587 | from selenium import webdriver
import time
import re
from pandas import DataFrame
from webdriver_manager.chrome import ChromeDriverManager

browser = webdriver.Chrome(ChromeDriverManager().install())


def crawlPaperFromNIPS(page):  # despite the name, this crawls Facebook post comments
    browser.get('https://www.facebook.com/')
    # write email
    browser.find_element_by_id('email').send_keys('')
    # write password
    browser.find_element_by_id('pass').send_keys('')
    browser.find_element_by_name('login').click()
    time.sleep(5)
    browser.get('https://www.facebook.com/BinhLuanVeDangCongSan/photos/a.395284967296203/1912573778900640')
    time.sleep(5)
    # Switch the comment ordering from "Top comments" to "All comments"
    check_break = 0
    post = browser.find_elements_by_xpath(
        "//span[contains(@class,'d2edcug0 hpfvmrgz qv66sw1b c1et5uql lr9zc1uh a8c37x1j keod5gw0 nxhoafnm aigsh9s9')]")
    time.sleep(0.5)
    for p in post:
        try:
            text = str(p.get_attribute('innerHTML'))
            if check_break == 1:
                break
            if "Top Comments" in text or "Top comments" in text or "Most relevant" in text or "Most Relevant" in text:
                # browser.execute_script("arguments[0].scrollIntoView();", p)
                location = p.location
                pageYOffset = location['y']
                browser.execute_script("window.scrollTo(0, " + str(pageYOffset - 150) + ");")
                time.sleep(2)
                p.click()
                time.sleep(2)
                post1 = browser.find_elements_by_xpath(
                    "//span[contains(@class,'d2edcug0 hpfvmrgz qv66sw1b c1et5uql lr9zc1uh a8c37x1j keod5gw0 nxhoafnm aigsh9s9')]")
                for pp in post1:
                    text = str(pp.get_attribute('innerHTML'))
                    if "All comments" in text or "All Comments" in text:
                        pp.click()
                        time.sleep(2)
                        check_break = 1
                        break
        except:
            pass
    # Keep expanding "more comments" / "previous comments" links until none remain
    while True:
        check = 0
        post = browser.find_elements_by_xpath(
            "//span[contains(@class,'d2edcug0 hpfvmrgz qv66sw1b c1et5uql lr9zc1uh a8c37x1j keod5gw0 nxhoafnm aigsh9s9')]")
        time.sleep(0.5)
        try:
            for p in post:
                text = p.get_attribute('innerHTML')
                if "more comments" in text:
                    browser.execute_script("arguments[0].scrollIntoView();", p)
                    time.sleep(1.5)
                    p.click()
                    time.sleep(1.5)
                    check = 1
                    break
                elif "previous comments" in text:
                    browser.execute_script("arguments[0].scrollIntoView();", p)
                    time.sleep(1.5)
                    p.click()
                    time.sleep(1.5)
                    check = 1
                    break
        except:
            pass
        if check == 0:
            break
    # Expand every "View replies" link (skipping "Hide replies" links)
    while True:
        check_reply_cmt = 0
        try:
            post = browser.find_elements_by_xpath(
                "//span[contains(@class,'d2edcug0 hpfvmrgz qv66sw1b c1et5uql lr9zc1uh a8c37x1j')]")
            for p in post:
                text = str(p.get_attribute('innerHTML'))
                if "View" in text or "replies" in text or "reply" in text or "Replies" in text:
                    if "Hide" not in text and "hide" not in text:
                        check_reply_cmt = 1
                        browser.execute_script("arguments[0].scrollIntoView();", p)
                        time.sleep(1.5)
                        p.click()
                        time.sleep(1.5)
        except:
            pass
        time.sleep(0.5)
        if check_reply_cmt == 0:
            break
    # Expand every truncated comment ("See more")
    while True:
        check_see_cmt = 0
        post = browser.find_elements_by_xpath(
            "//div[contains(@class,'oajrlxb2 g5ia77u1 qu0x051f esr5mh6w e9989ue4 r7d6kgcz rq0escxv nhd2j8a9 nc684nl6 p7hjln8o kvgmc6g5 cxmmr5t8 oygrvhab')]")
        time.sleep(0.4)
        for p in post:
            try:
                if "See more" in str(p.get_attribute('innerHTML')) or "See More" in str(p.get_attribute('innerHTML')):
                    browser.execute_script("arguments[0].scrollIntoView();", p)
                    time.sleep(1)
                    check_see_cmt = 1
                    p.click()
                    time.sleep(1.5)
            except:
                pass
        if check_see_cmt == 0:
            break
    # Scrape the comment texts and dump them to CSV with a placeholder label
    list_temp = []
    post = browser.find_elements_by_xpath(
        "//div[@style='text-align: start;']")
    for p in post:
        # drop embedded links so only the comment text remains
        element = p.find_elements_by_tag_name('a')
        if len(element) > 0:
            browser.execute_script("""var element = arguments[0];element.parentNode.removeChild(element);""",
                                   element[0])
        text = re.sub('<[^<]+?>', '', p.get_attribute('innerHTML'))
        message = {"review_text": text, "lable": -1}
        list_temp.append(message)
    df = DataFrame(list_temp, columns=['review_text', 'lable'])
    df.to_csv(r'pandaresult.csv', index=None, header=True)
    print("Done!")

try:
    crawlPaperFromNIPS("")
except:
    pass
# finally:
#     browser.close()
#     browser.quit()
| vantugithub/crawl_cmt_fb | crawl_data_fb_photos.py | crawl_data_fb_photos.py | py | 5,555 | python | en | code | 0 | github-code | 90 |
40315928204 | '''
Input: a List of integers
Returns: a List of integers
'''
def moving_zeroes(arr):
    # sort the array, then move every zero to the end
    arr.sort()
    # mutating arr while indexing into it is fragile; iterate over a snapshot
    # instead: for each zero found, drop the first zero and append one at the back
    for value in list(arr):
        if value == 0:
            arr.remove(0)
            arr.append(0)
    return arr


if __name__ == '__main__':
    # Use the main function here to test out your implementation
    arr = [0, 3, 1, 0, -2]
    print(f"The result of moving_zeroes is: {moving_zeroes(arr)}")
| IanCarreras/first-pass-solution | moving_zeroes/moving_zeroes.py | moving_zeroes.py | py | 602 | python | en | code | 0 | github-code | 90 |