# -*- coding: utf-8 -*-
"""
Created on Mon Aug 31 10:52:39 2020
@author: evely
"""
from lifestore_file import lifestore_products
from lifestore_file import lifestore_sales
from lifestore_file import lifestore_searches
# --- Login -------------------------------------------------------------
# Authenticates against two hard-coded account lists (regular users and
# administrators).  Loops until a valid username/password pair is typed,
# then sets the `user` / `admin` role flags read by the menu loop below.
print("Welcome to LifeStore Inventory")
print("Account Login")
users = ["user1", "user2", "user3"]  # allowed regular users
users_passwords = ["111", "222", "333"]
admins = ["admin1", "admin2", "admin3"]  # allowed administrators
admins_passwords = ["a111", "a222", "a333"]
# Credential table: username -> (password, is_admin).  Replaces the three
# duplicated if/elif chains and keeps each name paired with its password
# by construction, so the lists can never drift out of alignment.
credentials = {name: (pwd, False) for name, pwd in zip(users, users_passwords)}
credentials.update({name: (pwd, True) for name, pwd in zip(admins, admins_passwords)})
access = True   # True while login has not yet succeeded
admin = False   # set True when an administrator logs in
user = False    # set True when a regular user logs in
while access:
    enter_username = input("Enter username: ")
    enter_password = input("Enter password: ")
    if enter_username in credentials:
        expected_password, is_admin = credentials[enter_username]
        if enter_password == expected_password:
            access = False
            if is_admin:
                admin = True
            else:
                user = True
            # Same message the original printed, e.g. "LOGIN as user1".
            print("LOGIN as " + enter_username)
        else:
            print("Invalid username or password. Please try again.")
    else:
        # Fixed typo: original printed "went worng".
        print("Sorry, something went wrong....")
        print("That account does not exist. Please enter a different account.\n(TIP: user or admin)")
        access = True
continuar = input("Next:(yes/no)")
# --- Main menu loop: repeats until the operator stops answering "yes".
# NOTE(review): indentation was lost in this file; the loop body below is
# meant to extend through the rest of the script — confirm before running.
while continuar == "yes":
option_1 = False
option_2 = False
option_3 = False
option_4 = False
option_5 = False
option_6 = False
option_7 = False
option_8 = False
option_9 = False
if admin == True or user == True:
print("You access as administrator\n Options as admin:\n 1)TOP 50 searches\n 2)BOTTOM searches\n"
" 3)PRODUCTS searches by category \n 4)TOTAL SEARCHES AND SALES by category \n 5)PRODUCTS sales by category\n"
" 6)TOP 50 sales\n 7)TOP 20 and BOTTOM 20 Reviews\n 8)ANUAL REVENUE-SALES and AVERAGE REVENUE BY MONTH \n"
" 9)TOTAL SALES by MONTH and MONTHS TOP sales")
select_option =input("Select the option: ")
# Map the typed choice onto one boolean flag per report option.
if select_option == "1":
print("Selected: ---TOP 50 SEARCHES---")
option_1 = True
elif select_option == "2":
print("Selected: Bottom searches")
option_2 = True
elif select_option == "3":
print("Selected: Products searches by category")
option_3 = True
elif select_option == "4":
print("Selected:--- TOTAL SEARCHES AND SALES by category---")
option_4 = True
elif select_option == "5":
print("Selected: Products sales by category")
option_5 = True
elif select_option == "6":
print("Selected: --TOP 50 SALES--")
option_6 = True
elif select_option == "7":
print("Selected:-- TOP 20 and BOTTOM 20 REVIEWS--")
option_7 = True
elif select_option == "8":
print("Selected:--ANUAL REVENUE-SALES and AVERAGE REVENUE BY MONTH--")
option_8 = True
elif select_option == "9":
print("Selected:-- TOTAL SALES by MONTH and MONTHS TOP SALES-- ")
option_9 = True
else:
print(("Sorry, something went worng...."))
print(("Try with 1 ,2, 3, 4 up to 9"))
#print(option_1)
#print(option_2)
#-------- Split the sublists out of the main data lists -------------
frequency_lst = list()
for searches in lifestore_searches:
frequency_lst.append(searches[1])
name_lst = list()
prices = list()
id_lst = list()
category_lst = list()
frequency_category = list()
for products in lifestore_products:
name_lst.append(products[1])
id_lst.append(products[0])
frequency_category.append(products[3])
prices.append(products[2])
if products[3] not in category_lst:
category_lst.append(products[3])
dates = list()  # all sale dates
dates2= list()  # unique sale dates
frequency_sales = list()
scores_frequency = list()
refund = list()
for sales in lifestore_sales:
frequency_sales.append(sales[1])  # product id of each sale (how often each product sells)
scores_frequency.append(sales[2])
refund.append(sales[4])
dates.append(sales[3])  # every day of the year that had a sale
if sales[3] not in dates2:
dates2.append(sales[3])
#---------------------- SEARCHES SECTION --------------------------------
# 1a) MOST-SEARCHED PRODUCTS
#----------- Count how many times each product id was searched ----------
final_lst = list()
reporte_category = list()
for ids in range(len(id_lst)):  # ids that had searches
count = 0
for freq in range(len(frequency_lst)):  # search list
if(frequency_lst[freq] == id_lst[ids]):  # times this id repeats in the searches
count = count + 1
report_search = [count,id_lst[ids],name_lst[ids]]
report_category_search = [count,name_lst[ids],frequency_category[ids]]
final_lst.append(report_search)
reporte_category.append(report_category_search)  # report with total searches
#print(reporte_category)
final_lst.sort(reverse=True)  # sort searches descending
final_lst2 = final_lst[0:50]  # top-searches range
final_lst3 = final_lst2  # final top-searches results
reporte_category.sort()
#print(final_lst)
# TOP-SEARCH RESULTS -- VALIDATED
# Results 1a):
if option_1 == 1:
print("Loading...")
print("Results:")
print("PRODUCTS SEARCHES TOP 50")
for searches_top in final_lst2:
print("|Searches:",searches_top[0],"|"" \n| Product:",searches_top[2], "|id: ",searches_top[1],"|")
print("____________________________________________________________________________________")
print("Completed")
continuar = input("new selection (yes/no): ")
if continuar == "yes":
print("loading...")
elif continuar == "no":
print("logggin out..")
else: print("type: yes or no")
continue
if option_2 == 1:
print("PRODUCTS SEARCHES BOTTOM 50")
print("Loading...")
print("Results:")
# NOTE(review): [-51:-1] skips the very last (least-searched) entry —
# probably should be [-50:]; confirm intent.
for searches_bottom in final_lst[-51:-1]:
print("|Searches: ",searches_bottom[0],"|\n""| Product:",searches_bottom[2], "|id: ",searches_bottom[1],"|")
print("_________________________________________________________________________")
print("Completed")
continuar = input("new selection (yes/no): ")
if continuar == "yes":
print("loading...")
elif continuar == "no":
print("logggin out..")
else: print("type: yes or no")
continue
# 2a) SEARCHES BY CATEGORY:
#-------------- Compute the least-searched products ------------
# 1) Build one pair of lists per category: counts only, and [count, product].
procesadores_search = list()
procesadores_search2 = list()
tarjetas_video_search = list()
tarjetas_video_search2 = list()
tarjetas_madre_search = list()
tarjetas_madre_search2 = list()
discos_duros_search = list()
discos_duros_search2 = list()
memorias_usb_search = list()
memorias_usb_search2 = list()
pantallas_search = list()
pantallas_search2 = list()
bocinas_search = list()
bocinas_search2 = list()
audifonos_search = list()
audifonos_search2 = list()
for category_search in reporte_category:
if category_search[2] == "procesadores":
procesadores_search.append(category_search[0])  # search counts only, per category
procesadores_search2.append(category_search)  # [count, product, category] entries
elif category_search[2] == "tarjetas de video":
tarjetas_video_search.append(category_search[0])
tarjetas_video_search2.append(category_search)
elif category_search[2] == "tarjetas madre":
tarjetas_madre_search.append(category_search[0])
tarjetas_madre_search2.append(category_search)
elif category_search[2] == "discos duros":
discos_duros_search.append(category_search[0])
discos_duros_search2.append(category_search)
elif category_search[2] == "memoria usb":
memorias_usb_search.append(category_search[0])
memorias_usb_search2.append(category_search)
elif category_search[2] == "pantallas":
pantallas_search.append(category_search[0])
pantallas_search2.append(category_search)
elif category_search[2] == "bocinas":
bocinas_search.append(category_search[0])
bocinas_search2.append(category_search)
elif category_search[2] == "audifonos":
audifonos_search.append(category_search[0])
audifonos_search2.append(category_search)
procesadores_search2.sort()
tarjetas_video_search2.sort()
tarjetas_madre_search2.sort()
discos_duros_search2.sort()
memorias_usb_search2.sort()
pantallas_search2.sort()
bocinas_search2.sort()
audifonos_search2.sort()
# SEARCH RESULTS, ASCENDING, PER PRODUCT IN EACH CATEGORY
# Results 2a)
if option_3 == 1:
print('SEARCHES IN: "PROCESADORES"\n'
'Loading...\n'
'Results:\n')
for column in procesadores_search2:
print("|Category: ",column[2],"| Product:",column[1], "|Total SEARCHES: ",column[0],"|\n")
print("_____________________________________________________________________________")
print("Completed")
print('SEARCHES IN: "TARJETAS DE VIDEO"\n'
'Loading...\n'
'Results:\n')
for column in tarjetas_video_search2:
print("|Category: ",column[2],"| Product:",column[1], "|Total SEARCHES: ",column[0],"|\n")
print("_____________________________________________________________________________")
print("Completed")
print('SEARCHES IN: "TARJETAS MADRE"\n'
'Loading...\n'
'Results:\n')
for column in tarjetas_madre_search2:
print("|Category: ",column[2],"| Product:",column[1], "|Total SEARCHES: ",column[0],"|\n")
print("_____________________________________________________________________________")
print("Completed")
print('SEARCHES IN: "DISCOS DUROS"\n'
'Loading...\n'
'Results:\n')
for column in discos_duros_search2:
print("|Category: ",column[2],"| Product:",column[1], "|Total SEARCHES: ",column[0],"|\n")
print("_____________________________________________________________________________")
print("Completed")
print('SEARCHES IN: "MEMORIAS USB"\n'
'Loading...\n'
'Results:\n')
for column in memorias_usb_search2:
print("|Category: ",column[2],"| Product:",column[1], "|Total SEARCHES: ",column[0],"|\n")
print("_____________________________________________________________________________")
print("Completed")
print('SEARCHES IN: "PANTALLAS"\n'
'Loading...\n'
'Results:\n')
for column in pantallas_search2:
print("|Category: ",column[2],"| Product:",column[1], "|Total SEARCHES: ",column[0],"|\n")
print("_____________________________________________________________________________")
print("Completed")
print('SEARCHES IN: "BOCINAS"\n'
'Loading...\n'
'Results:\n')
for column in bocinas_search2:
print("|Category: ",column[2],"| Product:",column[1], "|Total SEARCHES: ",column[0],"|\n")
print("_____________________________________________________________________________")
print("Completed")
print('SEARCHES IN: "AUDIFONOS"\n'
'Loading...\n'
'Results:\n')
for column in audifonos_search2:
print("|Category: ",column[2],"| Product:",column[1], "|Total SEARCHES: ",column[0],"|\n")
print("_____________________________________________________________________________")
print("Completed")
continuar = input("new selection (yes/no): ")
if continuar == "yes":
print("loading...")
elif continuar == "no":
print("logggin out..")
else: print("type: yes or no")
continue
# 1c) TOTAL SEARCHES PER CATEGORY
# 2. ----------- Compute total searches in each category --------------
total_procesadores = 0
for procesador_search in procesadores_search:
total_procesadores = total_procesadores + procesador_search
total_tarjetasv = 0
for tarjetasv_search in tarjetas_video_search:
total_tarjetasv = total_tarjetasv + tarjetasv_search
total_tarjetasm = 0
for tarjetasm_search in tarjetas_madre_search:
total_tarjetasm = total_tarjetasm + tarjetasm_search
total_discod = 0
for dscosd_search in discos_duros_search:
total_discod = total_discod + dscosd_search
total_memorias = 0
for memorias_search in memorias_usb_search:
total_memorias = total_memorias + memorias_search
#print(total_memorias)
total_pantallas = 0
for pantalla_search in pantallas_search:
total_pantallas = total_pantallas + pantalla_search
#print(total_pantallas)
total_bocinas = 0
for bocina_search in bocinas_search:
total_bocinas = total_bocinas + bocina_search
#print(total_bocinas)
total_audifonos = 0
for audifono_search in audifonos_search:
total_audifonos = total_audifonos + audifono_search
#print(total_audifonos)
# 3. Build one list pairing each category's total searches with its name.
# NOTE(review): pairing relies on category_lst having the same order as
# this `categories` list — confirm the product data guarantees that.
categories = ([total_procesadores , total_tarjetasv , total_tarjetasm
, total_discod , total_memorias , total_pantallas
, total_bocinas , total_audifonos])
# Final list of [total searches, category name]
final_category = list()
for i in range(len(categories)):
total_category = [categories[i],category_lst[i]]
final_category.append(total_category)
#print(total_category)
final_category.sort()
#print(final_category)
# RESULTS: TOTAL SEARCHES PER CATEGORY
# Results 1c):  -- validated
if option_4 == 1:
print("Loading...")
print("Results:")
print("TOTAL SEARCHES by CATEGORY:\n")
for scategory in final_category:
print("|Searches:",scategory[0], " Category:",scategory[1])
print("|---------------------------------------------------|")
print("Completed")
#<------------------- END OF SECTION 1: SEARCHES -------------------------->
#--------------------------- SECTION 2: SALES ---------------------------->
# Count each product's sales and extract the highest
# 2b) TOP SALES PER PRODUCT
sales_lst = list()
sales_lst2 = list()
sales_lst3 = list()  # for extracting sales per month
for ids in range(len(id_lst)):
count = 0
for freq in range(len(frequency_sales)):
if (frequency_sales[freq] == id_lst[ids]):
count = count + 1
#print(count)
report_sales = [count,id_lst[ids],name_lst[ids]]
report_sales2 =[count,name_lst[ids],frequency_category[ids]]
sales_lst.append(report_sales)
sales_lst2.append(report_sales2)
#print(sales_lst2)
sales_lst.sort(reverse=True)  # sorted descending
sales_max = sales_lst[0:50]  # top 50 product sales
sales_lst2.sort()
sales_min = sales_lst2[0:50]  # bottom 50 product sales
#print(sales_max)
# RESULTS: PRODUCTS WITH MOST SALES
# Results 1b): validated
if option_6 == 1:
print("Loading...")
print("Results:")
print(" PRODUCTS SALES TOP 50:\n")
for sales_top in sales_max:
print("|Total SALES: ",sales_top[0],"|id: ",sales_top[1],"| Product:",sales_top[2],"|")
print("-----------------------------------------------------------------------------\n")
print("Completed")
continuar = input("new selection (yes/no): ")
if continuar == "yes":
print("loading...")
elif continuar == "no":
print("logggin out..")
else: print("type: yes or no")
continue
# Per-category sales lists: *_sales holds [count, product, category]
# entries, *_sales2 holds the bare counts.
procesadores_sales= list()
procesadores_sales2= list()
tarjetas_video_sales = list()
tarjetas_video_sales2 = list()
tarjetas_madre_sales = list()
tarjetas_madre_sales2 = list()
discos_duros_sales = list()
discos_duros_sales2 = list()
memorias_usb_sales = list()
memorias_usb_sales2 = list()
pantallas_sales = list()
pantallas_sales2 = list()
bocinas_sales = list()
bocinas_sales2 = list()
audifonos_sales = list()
audifonos_sales2 = list()
# TODO: compute total sales per category and list the categories with fewest sales
# NOTE(review): this branch tests "memorias usb" while the search section
# above tests "memoria usb" — one of the two category spellings is likely
# wrong; verify against lifestore_products.
for col in sales_lst2:
if col[2] == "procesadores":
procesadores_sales.append(col)
procesadores_sales2.append(col[0])
elif col[2] == "tarjetas de video":
tarjetas_video_sales.append(col)
tarjetas_video_sales2.append(col[0])
elif col[2] == "tarjetas madre":
tarjetas_madre_sales.append(col)
tarjetas_madre_sales2.append(col[0])
elif col[2] == "discos duros":
discos_duros_sales.append(col)
discos_duros_sales2.append(col[0])
elif col[2] == "memorias usb":
memorias_usb_sales.append(col)
memorias_usb_sales2.append(col[0])
elif col[2] == "pantallas":
pantallas_sales.append(col)
pantallas_sales2.append(col[0])
elif col[2] == "bocinas":
bocinas_sales.append(col)
bocinas_sales2.append(col[0])
elif col[2] == "audifonos":
audifonos_sales.append(col)
audifonos_sales2.append(col[0])
# SALES RESULTS, ASCENDING, PER PRODUCT IN EACH CATEGORY
# Results 2b):
# NOTE(review): the section headers below say "SEARCHES IN" but the data
# printed is SALES — the wording looks copy-pasted from the search report.
if option_5 == 1:
print('SEARCHES IN: "PROCESADORES"\n'
'Loading...\n'
'Results:\n')
for column in procesadores_sales:
print("|Category: ",column[2],"| Product:",column[1], "|Total SALES: ",column[0],"|\n")
print("--------------------------------------------------------------------------------------------------\n")
print("Completed")
print('SEARCHES IN: "TARJETAS DE VIDEO"\n'
'Loading...\n'
'Results:\n')
for column in tarjetas_video_sales:
print("|Category: ",column[2],"| Product:",column[1], "|Total SALES: ",column[0],"|\n")
print("--------------------------------------------------------------------------------------------------\n")
print("Completed")
print('SEARCHES IN: "TARJETAS MADRE"\n'
'Loading...\n'
'Results:\n')
for column in tarjetas_madre_sales:
print("|Category: ",column[2],"| Product:",column[1], "|Total SALES: ",column[0],"|\n")
print("--------------------------------------------------------------------------------------------------\n")
print("Completed")
print('SEARCHES IN: "DISCOS DUROS"\n'
'Loading...\n'
'Results:\n')
for column in discos_duros_sales:
print("|Category: ",column[2],"| Product:",column[1], "|Total SALES: ",column[0],"|\n")
print("--------------------------------------------------------------------------------------------------\n")
print("Completed")
print('SEARCHES IN: "MEMORIAS USB"\n'
'Loading...\n'
'Results:\n')
for column in memorias_usb_sales:
print("|Category: ",column[2],"| Product:",column[1], "|Total SALES: ",column[0],"|\n")
print("--------------------------------------------------------------------------------------------------\n")
print("Completed")
print('SEARCHES IN: "PANTALLAS"\n'
'Loading...\n'
'Results:\n')
for column in pantallas_sales:
print("|Category: ",column[2],"| Product:",column[1], "|Total SALES: ",column[0],"|\n")
print("--------------------------------------------------------------------------------------------------\n")
print("Completed")
print('SEARCHES IN: "BOCINAS"\n'
'Loading...\n'
'Results:\n')
for column in bocinas_sales:
print("|Category: ",column[2],"| Product:",column[1], "|Total SALES: ",column[0],"|\n")
print("--------------------------------------------------------------------------------------------------\n")
print("Completed")
print('SEARCHES IN: "AUDIFONOS"\n'
'Loading...\n'
'Results:\n')
for column in audifonos_sales:
print("|Category: ",column[2],"| Product:",column[1], "|Total SALES: ",column[0],"|\n")
print("--------------------------------------------------------------------------------------------------\n")
print("Completed")
continuar = input("new selection (yes/no): ")
if continuar == "yes":
print("loading...")
elif continuar == "no":
print("logggin out..")
else: print("type: yes or no")
continue
# 1c) TOTAL SALES PER CATEGORY
# 2. Compute total sales in each category
ventas_procesadores = 0
for procesador_sales in procesadores_sales2:
ventas_procesadores = ventas_procesadores + procesador_sales
ventas_tarjetasv = 0
for tarjetasv_sales in tarjetas_video_sales2:
ventas_tarjetasv = ventas_tarjetasv + tarjetasv_sales
ventas_tarjetasm = 0
for tarjetasm_sales in tarjetas_madre_sales2:
ventas_tarjetasm = ventas_tarjetasm + tarjetasm_sales
ventas_discod = 0
for dscosd_sales in discos_duros_sales2:
ventas_discod = ventas_discod + dscosd_sales
ventas_memorias = 0
for memorias_sales in memorias_usb_sales2:
ventas_memorias = ventas_memorias + memorias_sales
#print(total_memorias)
ventas_pantallas = 0
for pantalla_sales in pantallas_sales2:
ventas_pantallas = ventas_pantallas + pantalla_sales
#print(total_pantallas)
ventas_bocinas = 0
for bocina_sales in bocinas_sales2:
ventas_bocinas = ventas_bocinas + bocina_sales
#print(total_bocinas)
ventas_audifonos = 0
for audifono_sales in audifonos_sales2:
ventas_audifonos = ventas_audifonos + audifono_sales
#print(total_audifonos)
# 3. Build one list pairing each category's total sales with its name.
categories_ventas = ([ventas_procesadores , ventas_tarjetasv , ventas_tarjetasm
,ventas_discod , ventas_memorias , ventas_pantallas
, ventas_bocinas , ventas_audifonos])
# Final list of [total sales, category name]
final_ventas = list()
for i in range(len(categories_ventas)):
total_ventas = [categories_ventas[i],category_lst[i]]
final_ventas.append(total_ventas)
#print(total_ventas)
# RESULTS: TOTAL SALES PER CATEGORY
# Results 2c):
#print(final_ventas)
final_ventas.sort()
# RESULTS OF TOTAL SALES IN EACH CATEGORY - validated
if option_4 == 1:
print("Loading...")
print("Results:")
print("TOTAL SALES by CATEGORY:")
for sales_cat in final_ventas:
print("|SALES:",sales_cat[0], " Category:",sales_cat[1])
print("|---------------------------------------------------|")
print("Completed")
continuar = input("new selection (yes/no): ")
if continuar == "yes":
print("loading...")
elif continuar == "no":
print("logggin out..")
else: print("type: yes or no")
continue
#<----------------------- END OF SECTION 2: SALES ---------------------------->
#--------------------------- SECTION 3: REVIEWS ----------------------------->
# Compute average review score and refund status per product.
# 1) Build a list pairing each sale with its product name.
name_reseñas = list()
for ids_reseñas in range(len(id_lst)):
for tscores in range(len(frequency_sales)):
if id_lst[ids_reseñas] == frequency_sales[tscores]:
name_reseñas.append(name_lst[ids_reseñas])
reseñas = list()
for i in range(len(scores_frequency)):
total_scores = [name_reseñas[i],scores_frequency[i], refund[i]]
reseñas.append(total_scores)
#print(reseñas)
reporte_reseña = list()
# NOTE(review): in the loop below, `promedio` and `refund_status` are only
# assigned inside conditional branches; with the original indentation lost
# it is unclear whether a product with no sales could reach the
# `report_scores` line with either name unassigned (NameError) — verify.
for ids in range(len(id_lst)):  # ids that had sales
count = 0
sumas_score = 0
for freqq in range(len(frequency_sales)):  # sales list
if(frequency_sales[freqq] == id_lst[ids]):  # times this id repeats in the sales
count = count + 1
sumas_score = sumas_score + scores_frequency[freqq]
if sumas_score != 0:
promedio = sumas_score/count
else:
promedio = sumas_score
#print(sumas_score)
elif refund[freqq] == 0:
refund_status = "No refund"
else:
refund_status ="With refund"
report_scores = [promedio, name_lst[ids], refund_status]
reporte_reseña.append(report_scores)
#print(reporte_reseña)
reporte_reseña.sort()
reporte_reseña2 = sorted(reporte_reseña, reverse = True)
# BEST AND WORST REVIEW RESULTS - validated
if option_7 == 1:
print("Loading...")
print("Results:")
print("WORST 20 REVIEWS:")
for reviews in reporte_reseña[0:20]:
print("|REVIEWS AVERAGE:",reviews[0]," Status Refund:",reviews[2], " Product:",reviews[1],"|")
print("|------------------------------------------------------------------------------------|")
print("Completed")
print("Loading...")
print("Results:")
print("\n" "BETTER 20 REVIEWS:")
for reviews in reporte_reseña2[0:20]:
print("|REVIEWS AVERAGE:",reviews[0]," Status Refund:",reviews[2], " Product:",reviews[1],"|")
print("|------------------------------------------------------------------------------------|")
print("Completed")
continuar = input("new selection (yes/no): ")
if continuar == "yes":
print("loading...")
elif continuar == "no":
print("logggin out..")
else: print("type: yes or no")
continue
#------------------------------- SECTION 4: final reports ----------------------------------->
# Annual revenue and sales computation ---------------------------------------
reporte_score = list()
# Count sales per product id (same counting pattern as the sections above).
for ids in range(len(id_lst)):
count = 0
for id_sale in range(len(frequency_sales)):
if (frequency_sales[id_sale] == id_lst[ids]):
count = count + 1
reporte_score.append(count)
#print(reporte_score)
ingresos_anuales = list()
ventas_anuales = list()
total_ingreso = 0
total_ventas = 0
for price in range(len(prices)):  # loop over every price in the list
ingresos_anuales.append(prices[price] * reporte_score[price])  # product price times its number of sales
for ingreso in ingresos_anuales:
total_ingreso = total_ingreso + ingreso
#print(total_ingreso)
for ventas in reporte_score:
total_ventas = total_ventas + ventas
if option_8 == 1:
print("\n""TOTAL ANUAL REVENUE AND SALES ")
print("--------------------------------------------------------------------------------------------------\n")
print("| REVENUE : $",total_ingreso, "|SALES: ",total_ventas,"|\n")
print("--------------------------------------------------------------------------------------------------\n")
# Group sales by month and compute each month's totals.
new_dates = list()  # month token extracted from every date in `dates`
new_dates2 = list()
# Dates are sliced positionally: chars 0-2 day ("dd/"), 3-5 month ("mm/"),
# 6+ year — matching the "mm/" entries in `meses` below.
for date in dates:
dia = date[0:3]
mes = date[3:6]
año = date[6:]
new_date = mes #+ dia + año
new_dates.append(new_date)
#new_dates2.append(new_date)
dates_double = list()
dates_double2 = list()
for date2 in dates2:  # same month extraction for the unique-dates list
dia = date2[0:3]
mes = date2[3:6]
año = date2[6:]
date_double = mes #+ dia + año
dates_double.append(date_double)
#dates_double2.append(date_double2)
# Months 1 through 12
meses = ['01/','02/','03/','04/','05/','06/','07/','08/','09/','10/','11/','12/']
#print(dates_double2)
# Compare dates to compute sales per month
reporte_mensual = list()
count_lst = list()
reporte_mensual2 = list()
for mes in range(len(meses)):
count = 0
for date_new in range(len(new_dates)):
if (new_dates[date_new] == meses[mes]):
count = count + 1
report_mes = [meses[mes],count]
report_mes2 = [count,meses[mes]]
reporte_mensual2.append(report_mes2)
reporte_mensual.append(report_mes)
reporte_mensual.sort()
reporte_mensual2.sort(reverse=True)
#print(reporte_mensual2)
if option_9 == 1:
print("MONTH SALES")
print("Loading...")
print("Results:")
print("|-------------------------------|")
print("| TOTAL SALES JANUARY: ",reporte_mensual[0][1]," |")
print("|-------------------------------|")
print("| TOTAL SALES FEBRUARY: ",reporte_mensual[1][1]," |")
print("|-------------------------------|")
print("| TOTAL SALES MARCH: ",reporte_mensual[2][1]," |")
print("|-------------------------------|")
print("| TOTAL SALES APRIL: ",reporte_mensual[3][1]," |")
print("|-------------------------------|")
print("| TOTAL SALES MAY: ",reporte_mensual[4][1]," |")
print("|-------------------------------|")
print("| TOTAL SALES JUNE: ",reporte_mensual[5][1]," |")
print("|-------------------------------|")
print("| TOTAL SALES JULY: ",reporte_mensual[6][1]," |")
print("|-------------------------------|")
print("| TOTAL SALES AUGUST: ",reporte_mensual[7][1]," |")
print("|-------------------------------|")
print("| TOTAL SALES SEPTEMBER: ",reporte_mensual[8][1]," |")
print("|-------------------------------|")
print("| TOTAL SALES OCTUBER ",reporte_mensual[9][1]," |")
print("|-------------------------------|")
print("| TOTAL SALES NOVEMBER: ",reporte_mensual[10][1]," |")
print("|-------------------------------|")
print("| TOTAL SALES DECEMBER: ",reporte_mensual[11][1]," |")
print("|-------------------------------|")
print("Loading...")
print("Results:")
print("TOP SALES by MONTH:")
for ventas2 in reporte_mensual2[0:7]:
print("|------------------------------|")
print("|MES:",ventas2[1],"|Total de ventas:",ventas2[0],)
print("|------------------------------|")
print("Completed")
continuar = input("new selection (yes/no): ")
if continuar == "yes":
print("loading...")
elif continuar == "no":
print("logggin out..")
else: print("type: yes or no")
continue
#------------ Compute how much is sold per month ------------------------------
revenue = list()
# Pair each sale's month token with the product id that was sold.
for date_new in range(len(new_dates)):
count = 0
for mes in range(len(meses)):
if (new_dates[date_new] == meses[mes]):
count = count + 1
ganancias = [new_dates[date_new] , frequency_sales[date_new]]
revenue.append(ganancias)
#print(revenue)
enero = list()
febrero = list()
marzo = list()
abril = list()
mayo = list()
junio = list()
julio = list()
agosto = list()
septiembre = list()
octubre = list()
noviembre = list()
diciembre = list()
# Distribute [month, product_id] pairs into one list per month.
for mes_mes in revenue:
if mes_mes[0] == meses[0]:
enero.append(mes_mes)
elif mes_mes[0] == meses[1]:
febrero.append(mes_mes)
elif mes_mes[0] == meses[2]:
marzo.append(mes_mes)
elif mes_mes[0] == meses[3]:
abril.append(mes_mes)
elif mes_mes[0] == meses[4]:
mayo.append(mes_mes)
elif mes_mes[0] == meses[5]:
junio.append(mes_mes)
elif mes_mes[0] == meses[6]:
julio.append(mes_mes)
elif mes_mes[0] == meses[7]:
agosto.append(mes_mes)
elif mes_mes[0] == meses[8]:
septiembre.append(mes_mes)
elif mes_mes[0] == meses[9]:
octubre.append(mes_mes)
elif mes_mes[0] == meses[10]:
noviembre.append(mes_mes)
elif mes_mes[0] == meses[11]:
diciembre.append(mes_mes)
else:
# NOTE(review): this bare string is a no-op, not an error message —
# an unmatched month is silently dropped.
"no existe"
#print(marzo)
#print(marzo)
#ENERO--->
#Tomo los ids de las ventas de enero para compararlos con los ids y su precio
ids_enero = list()
for veces in enero:
ids_enero.append(veces[1])
#print(ids_enero)
# Creo un reporte que me dice que ids si tuvieron ventas en enero
reporte_enero = list()
for ids in range(len(id_lst)):
count = 0
for id_enero in range(len(ids_enero)):
if (ids_enero[id_enero] == id_lst[ids]):
count = count + 1
reporte_enero.append(count)
#print(reporte_enero)
#Multiplico cada uno de los ids con ventas por su precio correspondiente
ingresos_enero = list()
for price_enero in range(len(prices)):#ciclo para todos los precios de la lista
ingresos_enero.append(prices[price_enero] * reporte_enero[price_enero])#precio del producto por el numero de ventas x producto
#print(ingresos_enero)
#Calculo de las ventas totales de enero de los ids
total_enero = 0
for ingreso_enero in ingresos_enero:
total_enero = total_enero + ingreso_enero
#print(total_enero)
#FEBRERO--->
#Tomo los ids de las ventas de febrero para compararlos con los ids y su precio
ids_febrero = list()
for veces_febrero in febrero:
ids_febrero.append(veces_febrero[1])
#print(ids_febrero)
# Creo un reporte que me dice que ids si tuvieron ventas en febrero
reporte_febrero = list()
for ids in range(len(id_lst)):
count = 0
for id_febrero in range(len(ids_febrero)):
if (ids_febrero[id_febrero] == id_lst[ids]):
count = count + 1
reporte_febrero.append(count)
#print(reporte_febrero)
#Multiplico cada uno de los ids con ventas por su precio correspondiente
ingresos_febrero = list()
for price_febrero in range(len(prices)):#ciclo para todos los precios de la lista
ingresos_febrero.append(prices[price_febrero] * reporte_febrero[price_febrero])#precio del producto por el numero de ventas x producto
#print(ingresos_febrero)
#Calculo de las ventas totales de enero de los ids
total_febrero = 0
for ingreso_febrero in ingresos_febrero:
total_febrero = total_febrero + ingreso_febrero
#MARZO--->
#Tomo los ids de las ventas de marzo para compararlos con los ids y su precio
ids_marzo = list()
for veces_marzo in marzo:
ids_marzo.append(veces_marzo[1])
#print(ids_marzo)
# Creo un reporte que me dice que ids si tuvieron ventas en marzo
reporte_marzo = list()
for ids in range(len(id_lst)):
count = 0
for id_marzo in range(len(ids_marzo)):
if (ids_marzo[id_marzo] == id_lst[ids]):
count = count + 1
reporte_marzo.append(count)
#print(reporte_marzo)
#Multiplico cada uno de los ids con ventas por su precio correspondiente
ingresos_marzo = list()
for price_marzo in range(len(prices)):#ciclo para todos los precios de la lista
ingresos_marzo.append(prices[price_marzo] * reporte_marzo[price_marzo])#precio del producto por el numero de ventas x producto
#print(ingresos_marzo)
#Calculo de las ventas totales de marzo de los ids
total_marzo = 0
for ingreso_marzo in ingresos_marzo:
total_marzo = total_marzo + ingreso_marzo
#ABRIL--->
#Tomo los ids de las ventas de abril para compararlos con los ids y su precio
ids_abril = list()
for veces_abril in abril:
ids_abril.append(veces_abril[1])
#print(ids_abril)
# Creo un reporte que me dice que ids si tuvieron ventas en abril
reporte_abril = list()
for ids in range(len(id_lst)):
count = 0
for id_abril in range(len(ids_abril)):
if (ids_abril[id_abril] == id_lst[ids]):
count = count + 1
reporte_abril.append(count)
#print(reporte_abril)
#Multiplico cada uno de los ids con ventas por su precio correspondiente
ingresos_abril = list()
for price_abril in range(len(prices)):#ciclo para todos los precios de la lista
ingresos_abril.append(prices[price_abril] * reporte_abril[price_abril])#precio del producto por el numero de ventas x producto
#print(ingresos_abril)
#Calculo de las ventas totales de abril de los ids
total_abril = 0
for ingreso_abril in ingresos_abril:
total_abril = total_abril + ingreso_abril
#MAYO--->
#Tomo los ids de las ventas de mayo para compararlos con los ids y su precio
ids_mayo = list()
for veces_mayo in mayo:
ids_mayo.append(veces_mayo[1])
#print(ids_mayo)
# Creo un reporte que me dice que ids si tuvieron ventas en mayo
reporte_mayo = list()
for ids in range(len(id_lst)):
count = 0
for id_mayo in range(len(ids_mayo)):
if (ids_mayo[id_mayo] == id_lst[ids]):
count = count + 1
reporte_mayo.append(count)
#print(reporte_mayo)
#Multiplico cada uno de los ids con ventas por su precio correspondiente
ingresos_mayo = list()
for price_mayo in range(len(prices)):#ciclo para todos los precios de la lista
ingresos_mayo.append(prices[price_mayo] * reporte_mayo[price_mayo])#precio del producto por el numero de ventas x producto
#print(ingresos_mayo)
#Calculo de las ventas totales de mayo de los ids
total_mayo = 0
for ingreso_mayo in ingresos_mayo:
total_mayo= total_mayo + ingreso_mayo
#JUNIO--->
#Tomo los ids de las ventas de junio para compararlos con los ids y su precio
ids_junio = list()
for veces_junio in junio:
ids_junio.append(veces_junio[1])
#print(ids_junio)
# Creo un reporte que me dice que ids si tuvieron ventas en junio
reporte_junio = list()
for ids in range(len(id_lst)):
count = 0
for id_junio in range(len(ids_junio)):
if (ids_junio[id_junio] == id_lst[ids]):
count = count + 1
reporte_junio.append(count)
#print(reporte_junio)
#Multiplico cada uno de los ids con ventas por su precio correspondiente
ingresos_junio = list()
for price_junio in range(len(prices)):#ciclo para todos los precios de la lista
ingresos_junio.append(prices[price_junio] * reporte_junio[price_junio])#precio del producto por el numero de ventas x producto
#print(ingresos_junio)
#Calculo de las ventas totales de junio de los ids
total_junio = 0
for ingreso_junio in ingresos_junio:
total_junio = total_junio + ingreso_junio
#JULIO--->
#Tomo los ids de las ventas de julio para compararlos con los ids y su precio
ids_julio = list()
for veces_julio in julio:
ids_julio.append(veces_julio[1])
#print(ids_julio)
# Creo un reporte que me dice que ids si tuvieron ventas en julio
reporte_julio = list()
for ids in range(len(id_lst)):
count = 0
for id_julio in range(len(ids_julio)):
if (ids_julio[id_julio] == id_lst[ids]):
count = count + 1
reporte_julio.append(count)
#print(reporte_julio)
#Multiplico cada uno de los ids con ventas por su precio correspondiente
ingresos_julio = list()
for price_julio in range(len(prices)):#ciclo para todos los precios de la lista
ingresos_julio.append(prices[price_julio] * reporte_julio[price_julio])#precio del producto por el numero de ventas x producto
#print(ingresos_julio)
#Calculo de las ventas totales del mes de los ids
total_julio = 0
for ingreso_julio in ingresos_julio:
total_julio = total_julio + ingreso_julio
#AGOSTO--->
#Tomo los ids de las ventas de agosto para compararlos con los ids y su precio
ids_agosto = list()
for veces_agosto in agosto:
ids_agosto.append(veces_agosto[1])
#print(ids_agosto)
# Creo un reporte que me dice que ids si tuvieron ventas en agosto
reporte_agosto = list()
for ids in range(len(id_lst)):
count = 0
for id_agosto in range(len(ids_agosto)):
if (ids_agosto[id_agosto] == id_lst[ids]):
count = count + 1
reporte_agosto.append(count)
#print(reporte_agosto)
#Multiplico cada uno de los ids con ventas por su precio correspondiente
ingresos_agosto = list()
for price_agosto in range(len(prices)):#ciclo para todos los precios de la lista
ingresos_agosto.append(prices[price_agosto] * reporte_agosto[price_agosto])#precio del producto por el numero de ventas x producto
#print(ingresos_agosto)
#Calculo de las ventas totales de agosto de los ids
total_agosto = 0
for ingreso_agosto in ingresos_agosto:
total_agosto= total_agosto + ingreso_agosto
#SEPTIEMBRE--->
#Tomo los ids de las ventas de septiembre para compararlos con los ids y su precio
ids_septiembre = list()
for veces_septiembre in septiembre:
ids_septiembre.append(veces_septiembre[1])
#print(ids_septiembre)
# Creo un reporte que me dice que ids si tuvieron ventas en septiembre
reporte_septiembre= list()
for ids in range(len(id_lst)):
count = 0
for id_septiembre in range(len(ids_septiembre)):
if (ids_septiembre[id_septiembre] == id_lst[ids]):
count = count + 1
reporte_septiembre.append(count)
#print(reporte_septiembre)
#Multiplico cada uno de los ids con ventas por su precio correspondiente
ingresos_septiembre = list()
for price_septiembre in range(len(prices)):#ciclo para todos los precios de la lista
ingresos_septiembre.append(prices[price_septiembre] * reporte_septiembre[price_septiembre])#precio del producto por el numero de ventas x producto
#print(ingresos_septiembre)
#Calculo de las ventas totales de septiembre de los ids
total_septiembre = 0
for ingreso_septiembre in ingresos_septiembre:
total_septiembre = total_septiembre + ingreso_septiembre
#OCTUBRE--->
#Tomo los ids de las ventas de octubre para compararlos con los ids y su precio
ids_octubre = list()
for veces_octubre in octubre:
ids_octubre.append(veces_octubre[1])
#print(ids_octubre)
# Creo un reporte que me dice que ids si tuvieron ventas en octubre
reporte_octubre= list()
for ids in range(len(id_lst)):
count = 0
for id_octubre in range(len(ids_octubre)):
if (ids_octubre[id_octubre] == id_lst[ids]):
count = count + 1
reporte_octubre.append(count)
#print(reporte_octubre)
#Multiplico cada uno de los ids con ventas por su precio correspondiente
ingresos_octubre = list()
for price_octubre in range(len(prices)):#ciclo para todos los precios de la lista
ingresos_octubre.append(prices[price_octubre] * reporte_octubre[price_octubre])#precio del producto por el numero de ventas x producto
#print(ingresos_octubre)
#Calculo de las ventas totales de octubre de los ids
total_octubre = 0
for ingreso_octubre in ingresos_octubre:
total_octubre = total_octubre + ingreso_octubre
#NOVIEMBRE--->
#Tomo los ids de las ventas por mes para compararlos con los ids y su precio
ids_noviembre = list()
for veces_noviembre in noviembre:
ids_noviembre.append(veces_noviembre[1])
#print(ids_noviembre)
# Creo un reporte que me dice que ids si tuvieron ventas en el mes
reporte_noviembre= list()
for ids in range(len(id_lst)):
count = 0
for id_noviembre in range(len(ids_noviembre)):
if (ids_noviembre[id_noviembre] == id_lst[ids]):
count = count + 1
reporte_noviembre.append(count)
#print(reporte_noviembre)
#Multiplico cada uno de los ids con ventas por su precio correspondiente
ingresos_noviembre = list()
for price_noviembre in range(len(prices)):#ciclo para todos los precios de la lista
ingresos_noviembre.append(prices[price_noviembre] * reporte_noviembre[price_noviembre])#precio del producto por el numero de ventas x producto
#print(ingresos_noviembre)
#Calculo de las ventas totales del mes de los ids
total_noviembre = 0
for ingreso_noviembre in ingresos_noviembre:
total_noviembre = total_noviembre + ingreso_noviembre
#DICIEMBRE--->
#Tomo los ids de las ventas por mes para compararlos con los ids y su precio
ids_diciembre = list()
for veces_diciembre in diciembre:
ids_diciembre.append(veces_diciembre[1])
#print(ids_diciembre)
# Creo un reporte que me dice que ids si tuvieron ventas en el mes
reporte_diciembre= list()
for ids in range(len(id_lst)):
count = 0
for id_diciembre in range(len(ids_diciembre)):
if (ids_diciembre[id_diciembre] == id_lst[ids]):
count = count + 1
reporte_diciembre.append(count)
#print(reporte_diciembre)
#Multiplico cada uno de los ids con ventas por su precio correspondiente
ingresos_diciembre = list()
for price_diciembre in range(len(prices)):#ciclo para todos los precios de la lista
ingresos_diciembre.append(prices[price_diciembre] * reporte_diciembre[price_diciembre])#precio del producto por el numero de ventas x producto
#print(ingresos_diciembre)
#Calculo de las ventas totales del mes de los ids
total_diciembre = 0
for ingreso_diciembre in ingresos_diciembre:
total_diciembre = total_diciembre + ingreso_diciembre
if option_8 == 1:
print("\n""REVENUE by MONTH")
print("Loading...")
print("Results:")
print("|--------------------------------------|")
print("| TOTAL REVENUE JANUARY: $",total_enero,)
print("|--------------------------------------|")
print("| TOTAL REVENUE FEBRUARY: $",total_febrero,)
print("|--------------------------------------|")
print("| TOTAL REVENUE MARCH: $",total_marzo,)
print("|--------------------------------------|")
print("| TOTAL REVENUE ARIL: $",total_abril,)
print("|--------------------------------------|")
print("| TOTAL REVENUE MAY: $",total_mayo,)
print("|--------------------------------------|")
print("| TOTAL REVENUE JUNE: $",total_junio,)
print("|--------------------------------------|")
print("| TOTAL REVENUE JULY: $",total_julio,)
print("|--------------------------------------|")
print("| TOTAL REVENUE AUGUST: $",total_agosto,)
print("|--------------------------------------|")
print("| TOTAL REVENUE SEPTEMBER: $",total_septiembre,)
print("|--------------------------------------|")
print("| TOTAL REVENUE OCTOBER: $",total_octubre,)
print("|--------------------------------------|")
print("| TOTAL REVENUE NOVEMBER: $",total_noviembre,)
print("|--------------------------------------|")
print("| TOTAL REVENUE DECEMBER: $",total_diciembre,)
print("|--------------------------------------|")
continuar = input("new selection (yes/no): ")
if continuar == "yes":
print("loading...")
elif continuar == "no":
print("logggin out..")
else: print("type: yes or no")
continue
else:
print("Disconecting... you typed:[no] or typed an error\n"
"See you or try to log in again (TIP: yes/no)")
#----------------------------FINAL DE FINALES--------------------------------> |
15,801 | 943a694b2ee715df0041340ca5e2ab5b34af94c2 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-19 18:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: set ON DELETE CASCADE and related_names on
    both foreign keys of AsignacionVoluntario (mesa -> 'voluntarios',
    voluntario -> 'asignaciones')."""

    dependencies = [
        ('fiscales', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='asignacionvoluntario',
            name='mesa',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='voluntarios', to='elecciones.Mesa'),
        ),
        migrations.AlterField(
            model_name='asignacionvoluntario',
            name='voluntario',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='asignaciones', to='fiscales.Voluntario'),
        ),
    ]
|
15,802 | 7ac28c5bf8734e18693792b92563e26b81428cb2 | import requests
import string
def get_keywords(keywords, depth):
    """
    Expand a seed keyword list with Amazon search-suggestion keywords.

    For every seed keyword, queries the Amazon completion API for the
    keyword itself and for the keyword prefixed/suffixed with a-z and
    aa-zz, collecting every suggested phrase.  When ``depth`` > 1 the
    enlarged list is expanded again recursively.

    NOTE: one HTTP request per probe -- a single seed keyword already
    issues 105 requests (1 + 2 * 52 variants); mind rate limits.

    :param keywords: seed keyword list; it is extended in place as well as
        returned (pre-existing caller-visible behaviour, kept as-is).
    :param depth: number of expansion rounds (>= 1).
    :return: de-duplicated keyword list, first-seen order preserved.
    """
    market_id = 'ATVPDKIKX0DER'  # Amazon US marketplace
    # Get extra keywords by attaching a-z and aa-zz to front and back of keyword
    alphabet = list(string.ascii_lowercase)
    alphabet += [letter*2 for letter in alphabet]
    for keyword in list(dict.fromkeys(keywords[:])):
        keyword_base = keyword.replace(' ', '%20')
        for letter in alphabet:
            keyword_front = (letter + ' ' + keyword_base).replace(' ', '%20')
            keyword_back = (keyword_base + ' ' + letter).replace(' ', '%20')
            for keyword_new in [keyword_base, keyword_front, keyword_back]:
                url = f'https://completion.amazon.com/api/2017/suggestions?mid={market_id}&alias=aps&prefix={keyword_new}&suggestion-type=KEYWORD'
                response = requests.get(url)
                data = response.json()
                suggestions = data['suggestions']
                keywords += [suggestion['value'] for suggestion in suggestions]
    # FIX: the original used ``while depth > 1: return ...`` -- a loop whose
    # body always returns is just an ``if``; make the recursion explicit.
    if depth > 1:
        return get_keywords(keywords, depth - 1)
    return list(dict.fromkeys(keywords))
# NOTE(review): runs at import time and issues ~105 HTTP requests for this
# single seed keyword -- consider guarding with ``if __name__ == '__main__':``.
keywords = get_keywords(['head torch'], depth=1)
|
15,803 | 03f78ca833b14deef11320997a6e6a6581d8a9bb | # 如果有两个字符串"hello"和"world",生成一个列表,列表中元素["hw","eo","lr","ll","od"]
mystr = "hello"
mystr1 = "world"
# Pair up the characters at matching indices and join each pair into a
# two-character string, e.g. 'h'+'w' -> 'hw'.
l = [a + b for a, b in zip(mystr, mystr1)]
print(l)
|
15,804 | 1eee2c36d1c705be9e046e37c387e9eef08897ec | #! /usr/bin/python
import rospy
import json
from threading import Lock
from std_msgs.msg import String
from hbba_msgs.msg import Desire
from hbba_msgs.srv import AddDesires
from hbba_msgs.srv import RemoveDesires
class TeleopGenerator:
    """ROS node that keeps a 'Teleop' desire registered with HBBA while
    teleoperation is active.

    Listens on 'fromElectron' for JSON messages carrying an 'enabled' flag
    and adds/removes the desire on state changes; a watchdog timer also
    removes the desire when no message has arrived for more than 10 s.
    """

    def __init__(self):
        rospy.init_node('teleop_generator')

        # Whether the teleop desire is currently registered with HBBA.
        self.desire_active = False

        # The single, reusable desire this node manages.
        self.teleop_desire = Desire()
        self.teleop_desire.id = rospy.get_param('~des_id', 'teleop_desire')
        self.teleop_desire.type = 'Teleop'
        self.teleop_desire.utility = 1
        self.teleop_desire.security = False
        self.teleop_desire.intensity = rospy.get_param('~des_intensity', 2)

        # HBBA services used to (un)register the desire.
        self.add_desires = rospy.ServiceProxy(
            'add_desires',
            AddDesires
        )
        self.remove_desires = rospy.ServiceProxy(
            'remove_desires',
            RemoveDesires
        )

        # Timestamp of the last teleop message; shared between the
        # subscriber and timer callbacks, hence guarded by last_mutex.
        self.last = rospy.Time.now()
        self.last_mutex = Lock()

        rospy.Subscriber('fromElectron', String, self.electron_callback)
        # Watchdog fires every 2 s to expire stale teleop sessions.
        rospy.Timer(rospy.Duration(2), self.timer_callback)

        rospy.spin()  # blocks: the constructor runs for the node's lifetime

    def timer_callback(self, event):
        # Snapshot the shared state under the lock, then work on copies.
        self.last_mutex.acquire()
        last = self.last
        active = self.desire_active
        self.last_mutex.release()

        delay = rospy.Time.now() - last
        # No teleop message for more than 10 s: drop the desire.
        if delay.to_sec() > 10 and active:
            self.remove_desires([self.teleop_desire.id])
            active = False

        self.last_mutex.acquire()
        self.desire_active = active
        self.last_mutex.release()

    def electron_callback(self, msg):
        # Record activity time and snapshot the active flag under the lock.
        self.last_mutex.acquire()
        self.last = rospy.Time.now()
        active = self.desire_active
        self.last_mutex.release()

        data = json.loads(msg.data)
        # Edge-triggered: only call the HBBA services on a state change.
        if data['enabled'] and not active:
            self.add_desires([self.teleop_desire])
            active = True
        elif not data['enabled'] and active:
            self.remove_desires([self.teleop_desire.id])
            active = False

        self.last_mutex.acquire()
        self.desire_active = active
        self.last_mutex.release()
if __name__ == '__main__':
    # The constructor spins; this returns only on node shutdown.
    TeleopGenerator()
|
15,805 | c19129997981d3ef420c2637d5ec049d72f54260 | """
@Time : 2021/2/19 4:33 PM
@Author : Xiaoming
将多个召回合并
1、提升评估指标mrr
2、itemcf没有召回数据的新用户,可以用hot去补全
3、可以加入多种召回方法,进行召回(这里没有做)
"""
import os
import warnings
from collections import defaultdict
from itertools import permutations
import pandas as pd
from tqdm import tqdm
from utils_log import Logger
from utils_evaluate import evaluate
warnings.filterwarnings('ignore')
# 初始化日志
log_file = 'my_log.log'
os.makedirs('log', exist_ok=True)
log = Logger(f'log/{log_file}').logger
log.info('多路召回合并: ')
def mms(df):
    """
    Min-max normalise each user's ``sim_score`` so scores coming from
    different recall strategies share the same range.

    Assumes rows are already sorted by score descending within each user
    (the first score is the max, the last the min), which is how the
    recall files are produced.

    :param df: DataFrame with at least ``user_id`` and ``sim_score`` columns.
    :return: list of normalised scores, aligned with the rows of ``df``.
    """
    user_score_max = {}
    user_score_min = {}
    # Record each user's extreme similarity scores (rows sorted desc).
    for user_id, g in df[['user_id', 'sim_score']].groupby('user_id'):
        scores = g['sim_score'].values.tolist()
        user_score_max[user_id] = scores[0]
        user_score_min[user_id] = scores[-1]

    # The tqdm progress wrapper was dropped: this is a library helper and
    # the bar was its only use of that dependency.
    ans = []
    for user_id, sim_score in df[['user_id', 'sim_score']].values:
        score_range = user_score_max[user_id] - user_score_min[user_id]
        if score_range == 0:
            # FIX: a user whose scores are all identical (e.g. a single
            # recalled item) made the original divide by zero; map such
            # scores to the top of the range instead.
            ans.append(1 + 10**-3)
        else:
            ans.append((sim_score - user_score_min[user_id]) / score_range
                       + 10**-3)
    return ans
def recall_result_sim(df1_, df2_):
    """Fraction of df1_'s (user, article) pairs that also appear in df2_.

    :param df1_: recall DataFrame with ``user_id`` and ``article_id`` columns.
    :param df2_: second recall DataFrame, same columns.
    :return: overlap ratio, counted over df1_'s pairs.
    """
    grouped1 = df1_.groupby('user_id')['article_id'].agg(set).reset_index()
    items_by_user1 = dict(zip(grouped1['user_id'], grouped1['article_id']))

    grouped2 = df2_.groupby('user_id')['article_id'].agg(set).reset_index()
    items_by_user2 = dict(zip(grouped2['user_id'], grouped2['article_id']))

    total_pairs = 0
    shared_pairs = 0
    for user, items1 in items_by_user1.items():
        total_pairs += len(items1)
        # Users absent from df2_ simply contribute no hits.
        shared_pairs += len(items1 & items_by_user2.get(user, set()))
    return shared_pairs / total_pairs
if __name__ == '__main__':
df_train = pd.read_csv('data/my_train_set.csv')
df_test = pd.read_csv('data/my_test_set.csv')
recall_path = 'result'
# 召回方式
recall_methods = ['itemcf', 'hot']
# 每种召回得分权重,这里hot很低,为了补全没有召回数据的用户,较小影响itemcf召回
weights = {'itemcf': 1, 'hot': 0.1}
recall_list = []
# recall_dict = {}
for recall_method in recall_methods:
recall_result = pd.read_csv(f'{recall_path}/recall_{recall_method}.csv')
weight = weights[recall_method]
recall_result['sim_score'] = mms(recall_result)
recall_result['sim_score'] = recall_result['sim_score'] * weight
recall_list.append(recall_result)
# recall_dict[recall_method] = recall_result
# 求相似度
# for recall_method1, recall_method2 in permutations(recall_methods, 2):
# score = recall_result_sim(recall_dict[recall_method1],
# recall_dict[recall_method2])
# log.debug(f'召回相似度 {recall_method1}-{recall_method2}: {score}')
# 合并召回结果
recall_final = pd.concat(recall_list, sort=False)
# 一个user多个召回有同一个item,将分数相加
recall_score = recall_final[['user_id', 'article_id', 'sim_score']].groupby(['user_id', 'article_id'
])['sim_score'].sum().reset_index()
# 清理冗余数据
recall_final = recall_final[['user_id', 'article_id', 'label'
]].drop_duplicates(['user_id', 'article_id'])
# 将label拼接回来
recall_final = recall_final.merge(recall_score, how='left')
# 将用户的得分按照从大到小排序
recall_final.sort_values(['user_id', 'sim_score'], inplace=True, ascending=[True, False])
# 只取每个用户前50个(对结果影响不大)
recall_final = recall_final.groupby('user_id').head(50)
log.debug(f'recall_final.shape: {recall_final.shape}')
# 计算相关指标
total = df_test.user_id.nunique()
hitrate_5, mrr_5, hitrate_10, mrr_10, hitrate_20, mrr_20, hitrate_40, mrr_40, hitrate_50, mrr_50 = evaluate(
recall_final[recall_final['label'].notnull()], total)
log.debug(
f'召回合并后指标: {hitrate_5}, {mrr_5}, {hitrate_10}, {mrr_10}, {hitrate_20}, {mrr_20}, {hitrate_40}, {mrr_40}, {hitrate_50}, {mrr_50}'
)
df = recall_final['user_id'].value_counts().reset_index()
df.columns = ['user_id', 'cnt']
log.debug(f"平均每个用户召回数量:{df['cnt'].mean()}")
log.debug(
f"标签分布: {recall_final[recall_final['label'].notnull()]['label'].value_counts()}"
)
recall_final.to_csv('result/recall.csv', index=False)
|
15,806 | 782a6526cbea81c1322a492b790d64df0ffbcd3d | from node import Node
from zip import Zip
class Commodity:
    """A shipment demand from an origin node to a destination zip.

    The commodity's name is the concatenation of its endpoints' names;
    quantity and scenario number start at placeholder values until a
    scenario assigns real ones.
    """

    def __init__(self, origin: Node, dest: Zip):
        """
        :param origin: origin node (its ``name`` attribute is read).
        :param dest: destination zip (its ``name`` attribute is read).
        """
        self.name = f"{origin.name}{dest.name}"
        self.origin_node = origin
        self.dest = dest
        # Placeholder values until a scenario assigns real ones.
        self.quantity = 0       # number of packages
        self.scenario_num = -1  # -1 means "no scenario assigned yet"

    def set_quantity(self, quantity):
        self.quantity = quantity

    def get_quantity(self):
        return self.quantity

    def set_scenario(self, scen_num):
        self.scenario_num = scen_num

    def get_scenario_num(self):
        return self.scenario_num
|
15,807 | 4bda88897e6d779c44441a693b39323902159d32 | """
Home Climate Monitoring
author: GiantMolecularCloud
This script is part of a collection of scripts to log climate information in python and send them
to influxdb and graphana for plotting.
Manually back up the homeclimate directory to someothermachine. This simply copies over the current
version including all scripts, services and the influxdb that are all placed in the homeclimate
directory.
Development script! Database backups should be done through the appropriate influxdb functions to
e.g. export a JSON record of the database, instead of copying the hundreds of small files that make
up the database.
"""
####################################################################################################
# Import modules
####################################################################################################
import sys
import subprocess
import time
import datetime
from influxdb import InfluxDBClient
import influxdb.exceptions as inexc
####################################################################################################
# Initialize connection to influxdb
####################################################################################################
# if influxdb server is up and accessible
# TDB: test connection
# NOTE(review): hard-coded root credentials and no connectivity test before
# first use (the "TDB: test connection" above is still outstanding).
client = InfluxDBClient(host='localhost', port=8086, username='root', password='root', database='homeclimate')
####################################################################################################
# Read data
####################################################################################################
def copy_data():
    """
    Rsync the homeclimate directory to the backup host.

    :return: ``(success, data)`` where ``success`` is a short status string
        ("successful" / "rsync failed") and ``data`` is a one-element list
        holding an influxdb-style point that records the attempt.
    """
    # The command string is static (no user input), so shell=True is safe.
    result = subprocess.call('sudo rsync -rvzhc /home/pi/homeclimate/ -e "ssh -i /home/pi/.ssh/id_rsa" homeclimate@someothermachine:/volume1/data/homeclimate/ --progress', shell=True)
    # FIX: renamed from ``time`` -- the original shadowed the imported
    # ``time`` module inside this function.
    timestamp = datetime.datetime.utcnow().isoformat()
    if result == 0:
        success = "successful"
    else:
        print(datetime.datetime.now(), " rsync to someothermachine failed.")
        success = "rsync failed"
    data = [{'measurement': 'backup',
             'tags': {'target': 'someothermachine'},
             'time': timestamp,
             'fields': {'success': success}
             }]
    return success, data
####################################################################################################
# Send data to influxdb
####################################################################################################
def write_database(client, data):
    """
    Write one data record to influxdb, logging (but swallowing) failures.

    Failures are deliberately non-fatal: the caller retries the whole
    backup, so this function never raises.

    :param client: InfluxDBClient (anything exposing ``write_points``).
    :param data: list of influxdb point dicts.
    """
    try:
        # Keep the try body minimal: only the call that can fail.
        iresponse = client.write_points(data)
    except inexc.InfluxDBServerError:
        print(datetime.datetime.now(), " Sending data to database failed due to timeout.")
    except Exception as err:
        # Best-effort logging must never crash the backup loop, but record
        # what actually went wrong instead of a generic message.
        print(datetime.datetime.now(), " Encountered unknown error:", err)
    else:
        if not iresponse:
            print("Sending data to database failed. Response: ", iresponse)
        else:
            print("Backup successful.")
####################################################################################################
# Try backup up to five times
####################################################################################################
# Attempt the backup up to five times, recording every attempt in influxdb
# and sleeping 60 s between failed tries.  Exit code 0 on success, 1 when
# all five attempts failed.
for i in range(5):
    success, data = copy_data()
    write_database(client = client,
                   data = data
                   )
    if success=='successful':
        sys.exit(0)
    else:
        print('try '+str(i)+': sleeping for 60, then try again')
        time.sleep(60)
sys.exit(1)
####################################################################################################
|
15,808 | a20a28d8d5156e1830185af9bb9f112834cc95a4 | from .text import *
|
15,809 | a35bf29f700ea2d1aa0a6b7b2283c9928dce33aa | # function createPythonScriptProcess(targetFile, options) {
# options = _.pick(options || {}, ['shell', 'cmd']);
#
# const processOptions = getPythonCommandOptions(options),
# cmd = options.cmd || 'python';
#
# return processes.create(cmd, [targetFile], processOptions);
# } |
15,810 | 31fb9e229dd8869e2fd8076b074733f1ef692a28 | #1.5 String Array Practice
#----------------------------------------
#Question 1
#Declare day, month, and year in integer
#Create a string format day## month## year####
#Print today's date using format
#Check the length of the string
#Turn the string to upper case
#Split the string into 3 parts day, month, and year
#Question 2
#Introducing youself using special characters in your introduction
#----------------------------------------
#Solution 1
day = 10
month = "June"
year = 2020
today = "day{0} month{1} year{2}"

# Format once, then reuse the result for the length/upper/split demos.
formatted = today.format(day, month, year)
print(formatted)
print(len(formatted))
print(formatted.upper())
print(formatted.split(" "))
#Solution 2 |
15,811 | b6501b5d9b2eb50cd55121b2e87926761cb2a887 | import numpy as np
from .core import Monitor
__all__ = ['Print', 'PlotStat', 'Cache']
class Print(Monitor):
    '''Monitor that prints the data flowing through it without changing anything.'''

    def __init__(self, input_shape, itype, **kwargs):
        super(Print, self).__init__(input_shape, input_shape, itype)

    # CONSISTENCY FIX: the original decorated these with @classmethod while
    # still naming the first argument ``self``; plain instance methods match
    # the other Monitor subclasses in this module (PlotStat, Cache) and
    # behave identically for the usual ``instance.monitor_*`` calls.
    def monitor_forward(self, x):
        print('Forward\n - x = %s' % x)

    def monitor_backward(self, xy, dy, **kwargs):
        x, y = xy
        print('Backward\n - x = %s\n - y = %s\n - dy = %s' % (x, y, dy))
class PlotStat(Monitor):
    '''
    Plot a sorted bar chart of the data flowing through, without changing it.

    Args:
        ax (<matplotlib.axes>): axes to draw on.
        mask (list<bool>, len=2, default=[True,False]): masks\
            for forward check and backward check.

    Attributes:
        ax (<matplotlib.axes>):
        mask (list<bool>, len=2): masks for forward check and backward check.
    '''

    # NOTE: the list default for ``mask`` is shared between calls but never
    # mutated here; kept for interface compatibility.
    def __init__(self, input_shape, itype, ax, mask=[True, False], **kwargs):
        super(PlotStat, self).__init__(input_shape, input_shape, itype)
        self.ax = ax
        self.mask = mask

    def monitor_forward(self, x):
        if not self.mask[0]:
            return
        xx = np.sort(x.ravel())[::-1]
        self.ax.clear()
        self.ax.bar(np.arange(len(xx)), xx)
        # BUG FIX: Axes.title is a Text attribute, not callable -- use
        # set_title; and ``x.mean`` is a bound method, so call it.
        self.ax.set_title(' X: Mean = %.4f, Std. = %.4f' % (x.mean(), np.std(x)))

    def monitor_backward(self, xy, dy, **kwargs):
        if not self.mask[1]:
            return
        xx = np.sort(dy.ravel())[::-1]
        self.ax.clear()
        self.ax.bar(np.arange(len(xx)), xx)
        # BUG FIX: the original referenced an undefined ``x`` here; the
        # statistics displayed are those of the gradient ``dy`` being plotted.
        self.ax.set_title('DY: Mean = %.4f, Std. = %.4f' % (dy.mean(), np.std(dy)))
class Cache(Monitor):
    '''
    Monitor that records every array passing through, without changing it.

    Attributes:
        forward_list (list): inputs seen during forward passes.
        backward_list (list): gradients seen during backward passes.
    '''

    def __init__(self, input_shape, itype, **kwargs):
        super(Cache, self).__init__(input_shape, input_shape, itype)
        self.forward_list, self.backward_list = [], []

    def monitor_forward(self, x, **kwargs):
        self.forward_list.append(x)

    def monitor_backward(self, xy, dy, **kwargs):
        self.backward_list.append(dy)

    def clear(self):
        '''Forget all recorded history.'''
        self.forward_list, self.backward_list = [], []
|
15,812 | 19e1d5d5f36046f9885ec3d01d449d7a06d7f594 | '''
"why does numpy not work in venv???????? When I install it not in the venv, then it runs...????
=> NEED TO ADJUST THE PYTHON VENV PATH!!!, THEN IT RUNS!"
'''
import numpy as np
import turtle_start as ts
# Interactive demo: move the turtle by a user-supplied distance, then
# exercise the vector helpers from turtle_start.
forw = int(input("Pls enter distance in pixels: "))
print(ts.move_turtle(forw))
#---------------------------------------------------
x = int(input("Enter x value: "))
y = int(input("Enter y value: "))

# v and -v, fed to the dot-product helper below.
vec1 = np.array([x,y])
vec2 = np.array([-x,-y])

answer1 = ts.add(x,y)
answer2 = ts.mult(x,y)
answer3 = ts.dot(vec1,vec2)

print(answer1)
print(answer2)
print(answer3)
|
15,813 | 4c6763eede53f0ccaf0fdeb8677fe6020976f5ea | # Generated by Django 3.0.8 on 2020-08-31 04:47
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make User.follower optional (blank=True)
    with related_name 'following'."""

    dependencies = [
        ('network', '0003_auto_20200831_0432'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='follower',
            field=models.ManyToManyField(blank=True, related_name='following', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
15,814 | d21a8fd9b9ce542b0153b151072301f4331ee6c3 | #selection sort algorithm using python
"""
selection sort steps:
- find the smallest element in the array
- exchange it with the intial position
- find second smallest and exchange it with second
"""
import random
#function to find teh smallest element of the array
def element_smallest(value, length, array, key):
    """Move the minimum of array[value-1:length] into index value-1.

    ``key`` is the caller's copy of ``array[value - 1]``; the function swaps
    smaller elements into that slot as it scans, mutating ``array`` in
    place, and returns the minimum it found.
    """
    anchor = value - 1
    for probe in range(value, length):
        if array[anchor] > array[probe]:
            # Swap the smaller element into the anchor slot.
            array[anchor], array[probe] = array[probe], key
            key = array[anchor]
    return key


def selection_sort(array):
    """Selection-sort ``array`` in place (ascending) and return it."""
    for pos in range(len(array)):
        # Find the minimum of the unsorted tail and settle it at ``pos``.
        array[pos] = element_smallest(pos + 1, len(array), array, array[pos])
    return array
# intializatoin and fucntion call
# Initialization and function call: sort 20 distinct random values from [0, 50).
unsorted_array_list = random.sample(range(50), 20)
sorted_array_list = selection_sort(unsorted_array_list)
print("Sorted array is :", sorted_array_list)
|
15,815 | 8832d6bcc87bb40b2bf7afc79d39366107a89038 | from flask import Flask, render_template, request, redirect, url_for, flash
from flask_mysqldb import MySQL
app = Flask(__name__)

# MySQL connection settings (flask_mysqldb).
# NOTE(review): credentials are hard-coded (root with empty password) and the
# secret key is a literal -- move these to environment/config for anything
# beyond local development.
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = ''
app.config['MYSQL_DB'] = 'casodb'
mysql = MySQL(app)

app.secret_key='mysecrectkey'
@app.route('/')
def index():
    """Render the home page listing every book and every author."""
    cur = mysql.connection.cursor()
    cur.execute('select * from libro')
    data = cur.fetchall()
    cur.execute('select * from autor')
    data2 = cur.fetchall()
    return render_template('index.html',libro=data,autor=data2)
    # return 'Index - Diseño Software-UTEC'
    # return 'Index - Diseño Software-UTEC'
@app.route('/add_libro',methods=['POST'])
def add_libro():
    """Insert a new book from the submitted form and redirect home."""
    if request.method == 'POST':
        nom = request.form['nombre']
        disp = request.form['disponible']
        isbn = request.form['isbn']
        # BUG FIX: the original printed the *builtin* ``id`` function here
        # (a new row has no id yet); log only the real form fields.
        print('INSERT', nom, disp, isbn)
        cur = mysql.connection.cursor()
        cur.execute('insert into libro(nombre,disponible,isbn) values(%s,%s,%s)', (nom, disp, isbn))
        mysql.connection.commit()
        flash('libro Insertado correctamente')
        return redirect(url_for('index'))
@app.route('/add_autor',methods=['POST'])
def add_autor():
    """Insert a new author record from the submitted form and redirect home."""
    if request.method == 'POST':
        nom = request.form['nombre']
        edic = request.form['edicion']
        fecha = request.form['fecha']
        print('INSERT', nom, edic, fecha)
        cur = mysql.connection.cursor()
        cur.execute('insert into autor(nombre,edicion,fecha_publicacion) values(%s,%s,%s)', (nom, edic, fecha))
        mysql.connection.commit()
        flash('Autor Insertado correctamente')
        return redirect(url_for('index'))
@app.route('/edit/<id>')
def edit_libro(id):
    """Show the edit form pre-filled with one book's current data."""
    cur = mysql.connection.cursor()
    # FIX: DB-API parameters must be a sequence; the original passed a *set*
    # literal ({id}), which only works by accident for a single parameter.
    cur.execute('select * from libro where idLibro = %s', (id,))
    data = cur.fetchall()
    print(data[0])
    return render_template('edit.html', libro=data[0])
@app.route('/update/<id>',methods=['POST'])
def update_libro(id):
    """Update one book row from the submitted edit form.

    :param id: primary key (idLibro) of the row to update.
    """
    if request.method == 'POST':
        nom = request.form['nombre']
        dips = request.form['disponible']
        isbn = request.form['isbn']
        print('UPDATE', id, nom, dips, isbn)
        cur = mysql.connection.cursor()
        cur.execute("""
            update libro
            set nombre = %s,
                disponible = %s,
                isbn = %s
            where idLibro = %s
        """,(nom, dips, isbn, id) )
        mysql.connection.commit()
        flash('libro actualizado correctamente')
        return redirect(url_for('index'))
# ---------------- EDITAR Y ACTUALIZAR AUTOR
@app.route('/edit2/<id>')
def edit_autor(id):
    """Show the edit form pre-filled with one author's current data."""
    cur = mysql.connection.cursor()
    # FIX: DB-API parameters must be a sequence; the original passed a *set*
    # literal ({id}), which only works by accident for a single parameter.
    cur.execute('select * from autor where idAutor = %s', (id,))
    data = cur.fetchall()
    print(data[0])
    return render_template('edit2.html', autor=data[0])
@app.route('/update2/<id>',methods=['POST'])
def update_autor(id):
    """Update one author row from the submitted edit form.

    :param id: primary key (idAutor) of the row to update.
    """
    if request.method == 'POST':
        nom = request.form['nombre']
        edic = request.form['edicion']
        fecha = request.form['fecha']
        print('UPDATE', id, nom, edic, fecha)
        cur = mysql.connection.cursor()
        cur.execute("""
            update autor
            set nombre = %s,
                edicion = %s,
                fecha_publicacion = %s
            where idAutor = %s
        """,(nom, edic, fecha, id) )
        mysql.connection.commit()
        flash('libro actualizado correctamente')
        return redirect(url_for('index'))
@app.route('/delete/<string:id>')
def delete_libro(id):
    """Delete one book by primary key and redirect home."""
    cur = mysql.connection.cursor()
    # SECURITY FIX: the original interpolated ``id`` straight into the SQL
    # string via .format(), allowing SQL injection; bind it as a parameter.
    cur.execute('delete from libro where idLibro = %s', (id,))
    mysql.connection.commit()
    flash('Libro Eliminado correctamente')
    return redirect(url_for('index'))
@app.route('/delete2/<string:id>')
def delete_autor(id):
    """Delete one author by primary key and redirect home."""
    cur = mysql.connection.cursor()
    # SECURITY FIX: the original interpolated ``id`` straight into the SQL
    # string via .format(), allowing SQL injection; bind it as a parameter.
    cur.execute('delete from autor where idAutor = %s', (id,))
    mysql.connection.commit()
    flash('Autor Eliminado correctamente')
    return redirect(url_for('index'))
if __name__ == '__main__':
    # Dev server on 3010 (3000 was taken by mariadb during setup).
    app.run(port=3010, debug=True)
15,816 | 42eb0359fa448b062bcca6d9714dda38076b69e9 | from django import template
from ..constants import ReviewResponse
from ..forms import FileSubmitForm
from ..models import Review, Submit
register = template.Library()
@register.inclusion_tag('submit/parts/submit_form.html')
def submit_form(receiver, redirect, user, caption=None):
"""
Renders submit form (or link) for specified SubmitReceiver.
If the receiver doesn't have form (or link), nothing will be rendered.
"""
data = {
'user_can_post_submit': receiver.can_post_submit(user),
'receiver': receiver,
'redirect_to': redirect,
}
conf = receiver.configuration
if 'form' in conf:
data['submit_form'] = FileSubmitForm(configuration=conf['form'])
data['caption'] = conf['form'].get('caption', None) or caption
if 'link' in conf:
data['submit_link'] = conf['link']
return data
@register.inclusion_tag('submit/parts/submit_list.html')
def submit_list(receiver, user):
"""
List of all submits for specified user and receiver.
"""
last_review_for_each_submit = Review.objects.filter(
submit__receiver=receiver, submit__user=user
).order_by(
'-submit__pk', '-time', '-pk'
).distinct(
'submit__pk'
).select_related(
'submit'
)
data = {
'user_has_admin_privileges': receiver.has_admin_privileges(user),
'receiver': receiver,
'submits': [(review.submit, review) for review in last_review_for_each_submit],
'response': ReviewResponse,
'Submit': Submit,
}
return data
@register.filter
def verbose(obj, msg):
    """Template filter that delegates to ``obj.verbose(msg)``.

    Use to print verbose versions of constants.JudgeTestResult.
    """
    verbose_method = getattr(obj, 'verbose')
    return verbose_method(msg)
|
15,817 | 9a1993b35787e2d869bc8c3ab0b66586981e1911 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-07-06 13:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional free-text ``que`` field to the ``question`` model."""
    dependencies = [
        # Must run after the migration that introduced date_que.
        ('blogposts', '0002_question_date_que'),
    ]
    operations = [
        migrations.AddField(
            model_name='question',
            name='que',
            # blank=True -> optional in forms; null=True -> nullable column in the DB.
            field=models.TextField(blank=True, null=True),
        ),
    ]
|
15,818 | f279a0a40e10c73e9ee3d072de689c5d84294d44 | import scipy.io
import math
import numpy
from sklearn.metrics import confusion_matrix
# Initial dataset
Numpyfile = scipy.io.loadmat('mnist_data.mat')
train_x = Numpyfile['trX'] # 784x12116
train_y = Numpyfile['trY'] # 12116x1
test_x = Numpyfile['tsX'] # 784x2000
test_y = Numpyfile['tsY'] # 2002x1
"""
2-D feature vector N(mu, sigma^2)
"""
mean_7 = []
std_7 = []
mean_8 = []
std_8 = []
train_x7 = []
train_x8 = []
train_y7 = []
train_y8 = []
test_x7 = []
test_x8 = []
test_y7 = []
test_y8 = []
# Dividing training set into '7' and '8'
for i in range(len(train_y[0])):
if train_y[0][i] == 0.0:
train_x7.append(train_x[i])
train_y7.append(train_y[0][i])
else:
train_x8.append(train_x[i])
train_y8.append(train_y[0][i])
# Dividing test set into '7' and '8'
for i in range(len(test_y[0])):
if test_y[0][i] == 0.0:
test_x7.append(test_x[i])
test_y7.append(test_y[0][i])
else:
test_x8.append(test_x[i])
test_y8.append(test_y[0][i])
# mean and std for train data
for i in train_x7:
mean_7.append(numpy.mean(i))
std_7.append(numpy.std(i))
for i in train_x8:
mean_8.append(numpy.mean(i))
std_8.append(numpy.std(i))
tr_x7 = [list(a) for a in zip(mean_7, std_7)]
tr_x8 = [list(a) for a in zip(mean_8, std_8)]
tr_x7 = numpy.array(tr_x7).transpose().tolist()
tr_x8 = numpy.array(tr_x8).transpose().tolist()
mean_7 = []
std_7 = []
mean_8 = []
std_8 = []
# mean and std for test data
for i in test_x7:
mean_7.append(numpy.mean(i))
std_7.append(numpy.std(i))
for i in test_x8:
mean_8.append(numpy.mean(i))
std_8.append(numpy.std(i))
tst_x7 = [list(a) for a in zip(mean_7, std_7)]
tst_x8 = [list(a) for a in zip(mean_8, std_8)]
# finding mean and std for the new features obtained for the data
M_7 = []
STD_7 = []
M_8 = []
STD_8 = []
for i in tr_x7:
M_7.append(numpy.mean(i))
STD_7.append(numpy.std(i))
for i in tr_x8:
M_8.append(numpy.mean(i))
STD_8.append(numpy.std(i))
"""
Naive Bayes algorithm
"""
# Likelihood probability of digit 7 and digit 8
# p(x | Ci) = (1 / sqrt(2pi)*sigma) * (exp(-(x - mu)^2 / 2sigma^2))
prior_7 = len(train_x7) / len(train_x)
prior_8 = len(train_x8) / len(train_x)
pred_Y_7 = []
for i in range(len(tst_x7)):
prob_class_given_C_0 = []
log_likelihood = 0
for j in range(len(tst_x7[0])):
try:
r = - math.pow((tst_x7[i][j] - M_7[j]), 2) / (2 * math.pow(STD_7[j], 2))
u = 1 / (math.sqrt(2 * math.pi) * STD_7[j])
E = math.exp(r)
v = math.log(u * E)
log_likelihood = log_likelihood + v
except(ZeroDivisionError):
r = 0
u = 0
except(ValueError):
v = 0
prob_class_given_C_0.append(math.log(prior_7) + log_likelihood)
prob_class_given_C_1 = []
# Loop through every image
log_likelihood = 0
for j in range(len(tst_x7[0])):
try:
r = - math.pow((tst_x7[i][j] - M_8[j]), 2) / (2 * math.pow(STD_8[j], 2))
u = 1 / (math.sqrt(2 * math.pi) * STD_8[j])
E = math.exp(r)
v = math.log(u * E)
log_likelihood = log_likelihood + v
except(ZeroDivisionError):
r = 0
u = 0
except(ValueError):
v = 0
prob_class_given_C_1.append(math.log(prior_8) + log_likelihood)
# comparing the predicted result of the test data with the target values of test data to get the accuracy
if prob_class_given_C_0 > prob_class_given_C_1:
pred_Y_7.append(0.0)
else:
pred_Y_7.append(1.0)
# calculating accuracy
acc = 0
for i in range(len(pred_Y_7)):
if pred_Y_7[i] == test_y7[i]:
acc += 1
acc = acc / len(pred_Y_7)
print("NAIVE BAYES CLASSIFIER:")
print("Accuracy for Naive Bayes for digit '7':", acc * 100, "%")
pred_Y_8 = []
for i in range(len(tst_x8)):
prob_class_given_C_0 = []
log_likelihood = 0
for j in range(len(tst_x8[0])):
try:
r = - math.pow((tst_x8[i][j] - M_7[j]), 2) / (2 * math.pow(STD_7[j], 2))
u = 1 / (math.sqrt(2 * math.pi) * STD_7[j])
E = math.exp(r)
v = math.log(u * E)
log_likelihood = log_likelihood + v
except(ZeroDivisionError):
r = 0
u = 0
except(ValueError):
v = 0
prob_class_given_C_0.append(math.log(prior_7) + log_likelihood)
prob_class_given_C_1 = []
# Loop through every image
log_likelihood = 0
for j in range(len(tst_x8[0])):
try:
r = - math.pow((tst_x8[i][j] - M_8[j]), 2) / (2 * math.pow(STD_8[j], 2))
u = 1 / (math.sqrt(2 * math.pi) * STD_8[j])
E = math.exp(r)
v = math.log(u * E)
log_likelihood = log_likelihood + v
except(ZeroDivisionError):
r = 0
u = 0
except(ValueError):
v = 0
prob_class_given_C_1.append(math.log(prior_8) + log_likelihood)
# comparing the predicted result of the test data with the target values of test data to get the accuracy
if prob_class_given_C_0 > prob_class_given_C_1:
pred_Y_8.append(0.0)
else:
pred_Y_8.append(1.0)
acc = 0
for i in range(len(pred_Y_8)):
if pred_Y_8[i] == test_y8[i]:
acc += 1
# calculating accuracy
acc = acc / len(pred_Y_8)
print("Accuracy for Naive Bayes for digit '8':", acc * 100, "%")
print("=========================================================")
print("LOGISTIC REGRESSION:")
"""
Logistic Regression
"""
# converting train and test data into array
train_x7 = numpy.array(train_x7)
train_x8 = numpy.array(train_x8)
train_y7 = numpy.array(train_y7)
train_y8 = numpy.array(train_y8)
test_x7 = numpy.array(test_x7)
test_x8 = numpy.array(test_x8)
# sigmoid function
def sigmoid(scores):
    """Logistic function: elementwise 1 / (1 + e^(-scores))."""
    denominator = 1 + numpy.exp(-scores)
    return 1 / denominator
# initialize the parameter: weights
weights = numpy.zeros(train_x7.shape[1])
# defining the log- likelihood function
def log_likelihood(train_x7, train_y7, weights):
    """Bernoulli log-likelihood of labels under a logistic model.

    Computes sum(y * s - log(1 + e^s)) where s = X @ w.
    """
    linear_scores = numpy.dot(train_x7, weights)
    per_sample = train_y7 * linear_scores - numpy.log(1 + numpy.exp(linear_scores))
    return numpy.sum(per_sample)
# defining the main logistic regression function
def logistic_regression(train_x7, train_y7, weights, num_steps, learning_rate):
init = log_likelihood(train_x7, train_y7, weights)
print("initial ", init)
for step in range(num_steps):
scores = numpy.dot(train_x7, weights) # calculate the score to be sent to sigmoid function
predictions = sigmoid(scores)
error = train_y7 - predictions
gradient = numpy.dot(train_x7.transpose(), error)
# updating the weights using gradient ascent
weights += (learning_rate * gradient)
if step % 10000 == 0:
print(log_likelihood(train_x7, train_y7, weights))
return weights
weights = logistic_regression(train_x7, train_y7, weights, 1000, 0.001)
final_scores_C1 = numpy.round(numpy.dot(weights.transpose(), test_x7.transpose()))
final_scores_C0 = numpy.round(-numpy.dot(weights.transpose(), test_x7.transpose()))
preds_7 = []
for i in range(len(final_scores_C0)):
if final_scores_C0[i] > final_scores_C1[i]:
preds_7.append(0.0)
else:
preds_7.append(1.0)
# calculating accuracy
acc = 0
for i in range(len(preds_7)):
if preds_7[i] == test_y7[i]:
acc += 1
acc = acc / len(preds_7)
print("Accuracy for Logistic Regression for digit '7':", acc * 100, "%")
# Calculating LR for digit 8
weights = numpy.zeros(train_x8.shape[1])
def log_likelihood(train_x8, train_y8, weights):
    """Return sum(y * s - log(1 + e^s)) for s = X @ w (Bernoulli log-likelihood)."""
    s = train_x8 @ weights
    return (train_y8 * s - numpy.log(1 + numpy.exp(s))).sum()
def logistic_regression(train_x8, train_y8, weights, num_steps, learning_rate):
    """Gradient-ascent training loop for the digit-8 logistic model.

    Prints the initial log-likelihood and a progress value every 10000
    steps; returns the (in-place updated) weight vector.
    """
    initial_ll = log_likelihood(train_x8, train_y8, weights)
    print("initial ", initial_ll)
    step = 0
    while step < num_steps:
        probs = sigmoid(numpy.dot(train_x8, weights))
        residual = train_y8 - probs
        # updating weights using gradient ascent
        weights += learning_rate * numpy.dot(train_x8.transpose(), residual)
        if step % 10000 == 0:
            print(log_likelihood(train_x8, train_y8, weights))
        step += 1
    return weights
weights = logistic_regression(train_x8, train_y8, weights, 1000, 0.001)
final_scores_C1 = numpy.round(numpy.dot(weights.transpose(), test_x8.transpose()))
final_scores_C0 = numpy.round(-numpy.dot(weights.transpose(), test_x8.transpose()))
preds_8 = []
for i in range(len(final_scores_C0)):
if final_scores_C0[i] > final_scores_C1[i]:
preds_8.append(0.0)
else:
preds_8.append(1.0)
# calculating accuracy
acc = 0
for i in range(len(preds_8)):
if preds_8[i] == test_y8[i]:
acc += 1
acc = acc / len(preds_8)
print("Accuracy for Logistic Regression for digit '8':", acc * 100, "%")
|
15,819 | 6dd0f426daab4a2896f4f4ff8d463f575ada7262 | print([x for x in [1,3,6,78,35,55] for y in [12,24,35,24,88,120,155] if x==y])
# Same intersection computed via sets; note set order is not guaranteed,
# so the printed ordering may differ from the comprehension above.
s1 = set([1,3,6,78,35,55])
s2 = set([12,24,35,24,88,120,155])
s1 &= s2  # in-place intersection: s1 keeps only elements also in s2
print(list(s1))
15,820 | 149f2f9b6139cf398bb5bbc0a6058a76d84a93a8 | from __future__ import unicode_literals
from faker.providers import BaseProvider
class Provider(BaseProvider):
    """Minimal faker provider exposing a single canned value.

    NOTE(review): the method name ``ffo`` looks like a typo of ``foo`` —
    confirm against callers before renaming.
    """
    def ffo(self):
        # Always returns the fixed string 'bar'.
        return 'bar'
15,821 | 09408ecbde8ee4c402482d1804ab93a97a299afb | from setuptools import setup, find_packages
with open('README.md', 'r') as fh:
long_description = fh.read()
setup(
name='cloudflare_dynamic_ip',
version='0.0.3',
author='Cristian Steib',
author_email='cristiansteib@gmail.com',
description='Service to auto update ip in cloudflare',
long_description=long_description,
long_description_content_type='text/markdown',
url='',
package_data={'resources': ['*', '**/*', '**/**/*']},
install_requires=open('requirements.txt').read().splitlines(),
entry_points={
'console_scripts': [
'cloudflare-dynamic-ip = cloudflare_dynamic_ip.service:main']
},
packages=find_packages(),
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'License :: Other/Proprietary License',
],
)
|
15,822 | db239c37f7ee3d1c9e282b5580341962e16ad678 | time = 0
def on_button_pressed_ab():
    """MakeCode (micro:bit) handler: pressing A+B resets the counter display."""
    global time
    # Reset the module-level counter and show it on the LED matrix.
    time = 0
    basic.show_number(time)
input.on_button_pressed(Button.AB, on_button_pressed_ab)
|
15,823 | e4ae56caf4f44ea0322e1a5dd920043ec30693eb | # Asks the user for a distance in feet and converts it to inches.
# 7/14/2020
# CTI-110 P5T2_FeetToInches
# Ian Roberson
#
# Read a distance in feet and report the equivalent number of inches.
feet = float(input('Enter the distance in feet: '))
inches_per_foot = 12
inches = feet * inches_per_foot
# Singular wording for exactly one foot, plural otherwise.
unit_phrase = 'foot is' if feet == 1 else 'feet is'
print(f"{feet} {unit_phrase} {inches} inches.")
|
15,824 | bcff2081d55d80c898e46acd4d3da8aae7c5b114 | # Write a function that takes an ordered list of numbers (a list where the elements are in order
# from smallest to largest) and another number. The function decides whether or not the given number
# is inside the list and returns (then prints) an appropriate boolean.
#
# Extras:
#
# Use binary search.
import random
# Give an information if a number is in a number_list. Print out information. Uses standard search
#
# number - number to check if is in a list
# number_list - list of numbers ordered from smallest to largest
def standard_search(number, number_list):
    """Linear membership check.

    Prints whether ``number`` occurs in ``number_list`` and returns the
    corresponding boolean (the module header promises a returned boolean,
    which the original implementation omitted).

    number -- number to check for
    number_list -- list of numbers ordered from smallest to largest
    """
    found = number in number_list
    if found:
        print("Number: ", number, " is in a list")
    else:
        print("Number: ", number, " is not in a list")
    return found
# Give an information if a number is in a number_list. Print out an information. Uses binary search
#
# number - number to check if is in a list
# number_list - list of numbers ordered from smallest to largest
def binary_search(number, number_list):
    """Binary membership check on a sorted list.

    Prints whether ``number`` occurs in ``number_list`` and returns the
    corresponding boolean.

    Fixes in this revision:
    - the original compared against index ``round(len/2) - 1`` but branched
      and sliced on index ``round(len/2)``, so matches could be cut away;
    - a single-element list was never inspected (``while len > 1``), so
      e.g. searching 12 in [12] wrongly reported "not in a list";
    - the boolean promised by the module header is now returned.

    number -- number to check for
    number_list -- list ordered from smallest to largest (not modified)
    """
    low, high = 0, len(number_list) - 1
    is_in_a_list = False
    while low <= high:
        mid = (low + high) // 2
        if number_list[mid] == number:
            is_in_a_list = True
            break
        if number_list[mid] < number:
            low = mid + 1   # target can only be in the upper half
        else:
            high = mid - 1  # target can only be in the lower half
    if is_in_a_list:
        print("Number: ", number, "is in a list")
    else:
        print("Number: ", number, "is not in a list")
    return is_in_a_list
if __name__ == "__main__":
# Generating ordered list
number_list = random.sample(range(100), 20)
number_list.sort()
print("\n Standard Search ")
print(number_list)
standard_search(12, number_list)
binary_search(12, number_list)
|
15,825 | 60dd0c17b8ffb690f803f91054cdacddf7b904d1 | from flask import Flask, Blueprint, render_template
import os
import adal
import requests
import json
from azure.common.credentials import ServicePrincipalCredentials
from . import config
idam = Blueprint('idam', __name__)
@idam.route('/idam_services')
def idam_services():
    """Render the IDAM services landing page."""
    return render_template('idam_services/index.html')
|
15,826 | 3107fdda6de8cca77045337f5168f2f95b677fe2 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 23 16:35:21 2017
@author: eti
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
import time
import os
import cPickle
import matplotlib.pyplot as plt
from Load_attrnet_inputs import load_batch
from attnet import *
from params import *
def plotGraph( x1 , training_loss , valid_loss , title ) :
    """Plot training and validation curves against epoch numbers.

    x1            -- x-axis values (epoch indices); must match both series' lengths
    training_loss -- per-epoch training values
    valid_loss    -- per-epoch validation values
    title         -- title shown on the figure

    NOTE(review): plt.figure() is called after plotting but before
    plt.show(), which opens an extra empty figure window — confirm intended.
    """
    plt.plot(x1, training_loss, label = "train")
    plt.plot(x1, valid_loss, label = "valid")
    plt.xlabel('number of epochs')
    plt.ylabel('Value' )
    plt.title(title)
    plt.legend()
    plt.figure()
    #plt.savefig(tit + '.png') # save the figure to file
    plt.show()
    #plt.close()
def train(opt):
    """Train the attribute network and checkpoint the best model.

    Fixes over the original revision:
    - ``for np.array(t) in tmp`` was a syntax error; the comprehensions now
      iterate ``for t in tmp`` and convert via ``np.array``/``torch.from_numpy``.
    - ``opt.epochs`` / ``num_batch`` are counts, so they are wrapped in
      ``range()`` (the original iterated the ints directly — a TypeError).
    - ``best_val_score`` is initialised before first use.
    - the plot x-axis is built from ``opt.epochs`` (the original used an
      undefined name ``epoch``), and the training curve is averaged per
      epoch so both plotted series have one point per epoch.
    Assumes opt.epochs is an int epoch count — TODO confirm against params.py.
    """
    # get num batches from loader
    num_batch = 2494
    model = Attnet(256, 128, [100, 100])
    model.cuda()
    infos = {}

    # Load valid data (first 5000 samples only)
    val_data = np.load('val_data_att.npy').item()
    tmp = [val_data['features'][0:5000], val_data['object'][0:5000], val_data['atts'][0:5000]]
    tmp = [Variable(torch.from_numpy(np.array(t)), requires_grad=False).cuda() for t in tmp]
    vfc_feats, obj, atts = tmp
    vlabels = [obj, atts]

    optimizer = optim.Adam(model.parameters(), lr=opt.learning_rate, weight_decay=opt.weight_decay)
    train_loss = list()
    val_loss = list()
    best_val_score = None

    for e in range(opt.epochs):
        epoch_losses = []
        for b in range(num_batch):
            start = time.time()
            # Load data from train split (0)
            data = np.load('train_batch' + str(b) + '.npy').item()
            print('Read data:', time.time() - start)

            tmp = [data['features'], data['object'], data['atts']]
            tmp = [Variable(torch.from_numpy(np.array(t)), requires_grad=False).cuda() for t in tmp]
            fc_feats, obj, atts = tmp
            labels = [obj, atts]

            optimizer.zero_grad()
            loss = loss_func(model(fc_feats), labels)
            loss.backward()
            torch.nn.utils.clip_grad_norm(model.parameters(), opt.grad_clip)
            optimizer.step()
            epoch_losses.append(loss.data[0])

            torch.cuda.synchronize()
            end = time.time()
            print("iter {} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}"
                  .format(b, e, epoch_losses[-1], end - start))
        # One point per epoch so the curve lines up with val_loss in the plot.
        train_loss.append(sum(epoch_losses) / len(epoch_losses))

        # validation
        loss = loss_func(model(vfc_feats), vlabels)
        val_loss.append(loss.data[0])
        torch.cuda.synchronize()
        print("validation loss" + str(loss.data[0]))
        current_score = - val_loss[-1]

        # checkpoints: always save the latest model, separately track the best
        best_flag = False
        if best_val_score is None or current_score > best_val_score:
            best_val_score = current_score
            best_flag = True
        checkpoint_path = os.path.join(opt.checkpoint_path, 'model.pth')
        torch.save(model.state_dict(), checkpoint_path)
        print("model saved to {}".format(checkpoint_path))
        optimizer_path = os.path.join(opt.checkpoint_path, 'optimizer.pth')
        torch.save(optimizer.state_dict(), optimizer_path)

        # Dump miscellaneous informations
        infos['epoch'] = e
        infos['best_val_score'] = best_val_score
        infos['opt'] = opt
        with open(os.path.join(opt.checkpoint_path, 'infos_' + opt.id + '.pkl'), 'wb') as f:
            cPickle.dump(infos, f)

        if best_flag:
            checkpoint_path = os.path.join(opt.checkpoint_path, 'model-best.pth')
            torch.save(model.state_dict(), checkpoint_path)
            print("model saved to {}".format(checkpoint_path))
            with open(os.path.join(opt.checkpoint_path, 'infos_' + opt.id + '-best.pkl'), 'wb') as f:
                cPickle.dump(infos, f)

    # plot the graphs
    x1 = list(range(1, opt.epochs + 1))
    title = 'Loss'
    plotGraph(x1, train_loss, val_loss, title)
if __name__ == "__main__":
opt = parse_opt()
train(opt) |
15,827 | 1c0d2841436f1fc1453a2c4c5e8bd9d0afb1b2dd | import numpy as np
a = np.zeros((3,1)) |
15,828 | a96dd7f72007c2df318b7cc5c2187e443c141e6d | import json
import os
from dataclasses import dataclass
from functools import lru_cache
from typing import Any, Callable, Dict, List, Optional, Type
from pythonbible.bible.bible_parser import BibleParser
from pythonbible.bible.osis.parser import OSISParser
from pythonbible.books import Book
from pythonbible.converter import (
convert_references_to_verse_ids,
convert_verse_ids_to_references,
)
from pythonbible.errors import (
InvalidVerseError,
MissingBookFileError,
MissingVerseFileError,
)
from pythonbible.normalized_reference import NormalizedReference
from pythonbible.verses import (
VERSE_IDS,
get_book_chapter_verse,
get_max_number_of_verses,
get_number_of_chapters,
is_single_chapter_book,
)
from pythonbible.versions import DEFAULT_VERSION, Version
@dataclass
class BookTitles:
long_title: str
short_title: str
VERSION_MAP: Dict[Version, Type[BibleParser]] = {
Version.AMERICAN_STANDARD: OSISParser,
Version.KING_JAMES: OSISParser,
}
def get_parser(**kwargs) -> BibleParser:
version: Version = kwargs.get("version", DEFAULT_VERSION)
version_map: Dict[Version, Type[BibleParser]] = kwargs.get(
"version_map", VERSION_MAP
)
return version_map.get(version, OSISParser)(version)
DEFAULT_PARSER: BibleParser = get_parser()
CURRENT_FOLDER: str = os.path.dirname(os.path.realpath(__file__))
DATA_FOLDER: str = os.path.join(os.path.join(CURRENT_FOLDER, "bible"), "data")
VERSE_TEXTS: Dict[Version, Dict[int, str]] = {}
BOOK_TITLES: Dict[Version, Dict[Book, BookTitles]] = {}
# TODO - handle Psalms vs Psalm appropriately
# TODO - handle single chapter books appropriately (e.g. Obadiah 1-4 rather than Obadiah 1:1-4)
def format_scripture_references(
    references: Optional[List[NormalizedReference]], **kwargs
) -> str:
    """
    :param references: a list of normalized scripture references
    :return: a string version of the references formatted to be human-readable

    Bug fix: ``previous_reference`` is now updated on every loop path.  The
    original skipped the update on the "same book, new chapter" branch, so
    the following iteration compared against a stale reference.
    """
    if references is None:
        return ""

    sorted_references: List[NormalizedReference] = references

    # Only sort if there is more than one reference as it can take a long time if there
    # are a lot of verses covered by the references.
    if len(references) > 1:
        verse_ids: List[int] = convert_references_to_verse_ids(references)
        verse_ids.sort()
        sorted_references = convert_verse_ids_to_references(verse_ids)

    formatted_reference: str = ""

    previous_reference: Optional[NormalizedReference] = None

    for reference in sorted_references:
        previous_book: Optional[Book] = _get_previous_book(previous_reference)

        if previous_book != reference.book:
            # New book: ";" separator and include the book name.
            if previous_reference:
                formatted_reference += ";"
            formatted_reference += format_single_reference(reference, **kwargs)
        elif _is_reference_with_a_new_chapter(previous_reference, reference):
            # Same book, new chapter: "," separator, omit the book name.
            formatted_reference += ","
            formatted_reference += format_single_reference(
                reference, include_books=False, **kwargs
            )
        else:
            # Reference with same book and chapter as previous reference
            formatted_reference += ","
            formatted_reference += format_single_reference(
                reference, include_books=False, include_chapters=False, **kwargs
            )

        previous_reference = reference

    return formatted_reference
def _get_previous_book(reference: Optional[NormalizedReference]) -> Optional[Book]:
if reference is None:
return None
return reference.book if reference.end_book is None else reference.end_book
def _is_reference_with_a_new_chapter(
previous_reference: Optional[NormalizedReference],
current_reference: NormalizedReference,
) -> bool:
if (
previous_reference
and previous_reference.end_chapter != current_reference.start_chapter
):
return True
return current_reference.end_chapter > current_reference.start_chapter
def format_single_reference(
reference: NormalizedReference,
include_books: bool = True,
include_chapters: bool = True,
**kwargs,
) -> str:
start_book: str = _get_start_book(reference, include_books, **kwargs)
start_chapter: str = _get_start_chapter(reference, include_chapters, **kwargs)
start_verse: str = _get_start_verse(reference, **kwargs)
end_book: str = _get_end_book(reference, include_books, **kwargs)
end_chapter: str = _get_end_chapter(reference, include_chapters, **kwargs)
end_verse: str = _get_end_verse(reference, **kwargs)
start_separator: str = " " if start_book and (start_chapter or start_verse) else ""
end_separator: str = " " if end_book and (end_chapter or end_verse) else ""
range_separator: str = (
" - " if end_book else "-" if end_chapter or end_verse else ""
)
return "".join(
[
start_book,
start_separator,
start_chapter,
start_verse,
range_separator,
end_book,
end_separator,
end_chapter,
end_verse,
]
)
def _get_start_book(
reference: NormalizedReference, include_books: bool = True, **kwargs
) -> str:
return _get_book_title(reference.book, include_books, **kwargs)
def _get_end_book(
reference: NormalizedReference, include_books: bool = True, **kwargs
) -> str:
if reference.end_book and reference.end_book != reference.book:
return _get_book_title(reference.end_book, include_books, **kwargs)
return ""
def _get_book_title(book: Book, include_books: bool = True, **kwargs) -> str:
if not include_books:
return ""
version: Version = kwargs.get("version", DEFAULT_VERSION)
full_title: bool = kwargs.get("full_title", False)
book_titles: Optional[BookTitles] = get_book_titles(book, version)
return (
(book_titles.long_title if full_title else book_titles.short_title)
if book_titles
else ""
)
def _get_start_chapter(
reference: NormalizedReference, include_chapters: bool = True, **kwargs
) -> str:
if not include_chapters:
return ""
force_include_chapters: bool = kwargs.get("always_include_chapter_numbers", False)
if (
_does_reference_include_all_verses_in_start_book(reference)
and not force_include_chapters
):
return ""
if is_single_chapter_book(reference.book) and not force_include_chapters:
return ""
return f"{reference.start_chapter}:"
def _get_start_verse(reference: NormalizedReference, **kwargs) -> str:
force_include_chapters: bool = kwargs.get("always_include_chapter_numbers", False)
if (
_does_reference_include_all_verses_in_start_book(reference)
and not force_include_chapters
):
return ""
return f"{reference.start_verse}"
def _get_end_chapter(
reference: NormalizedReference, include_chapters: bool = True, **kwargs
) -> str:
if not include_chapters:
return ""
force_include_chapters: bool = kwargs.get("always_include_chapter_numbers", False)
if reference.end_book and reference.book != reference.end_book:
if (
_does_reference_include_all_verses_in_end_book(reference)
and not force_include_chapters
):
return ""
if is_single_chapter_book(reference.end_book) and not force_include_chapters:
return ""
return f"{reference.end_chapter}:"
if (
_does_reference_include_all_verses_in_start_book(reference)
and not force_include_chapters
):
return ""
if is_single_chapter_book(reference.book) and not force_include_chapters:
return ""
if reference.start_chapter == reference.end_chapter:
return ""
return f"{reference.end_chapter}:"
def _get_end_verse(reference: NormalizedReference, **kwargs) -> str:
force_include_chapters: bool = kwargs.get("always_include_chapter_numbers", False)
if reference.end_book and reference.book != reference.end_book:
if (
_does_reference_include_all_verses_in_end_book(reference)
and not force_include_chapters
):
return ""
return f"{reference.end_verse}"
if (
_does_reference_include_all_verses_in_start_book(reference)
and not force_include_chapters
):
return ""
return (
f"{reference.end_verse}"
if reference.start_verse != reference.end_verse
or reference.start_chapter != reference.end_chapter
else ""
)
def _does_reference_include_all_verses_in_start_book(reference: NormalizedReference):
if reference.start_chapter != 1:
return False
if reference.start_verse != 1:
return False
if reference.end_book and reference.end_book != reference.book:
return True
max_chapters = get_number_of_chapters(reference.book)
if reference.end_chapter != max_chapters:
return False
return reference.end_verse == get_max_number_of_verses(reference.book, max_chapters)
def _does_reference_include_all_verses_in_end_book(reference: NormalizedReference):
max_chapters = get_number_of_chapters(reference.end_book)
if reference.end_chapter != max_chapters:
return False
return reference.end_verse == get_max_number_of_verses(
reference.end_book, max_chapters
)
def format_scripture_text(verse_ids: List[int], **kwargs) -> str:
one_verse_per_paragraph: bool = kwargs.get("one_verse_per_paragraph", False)
full_title: bool = kwargs.get("full_title", False)
format_type: str = kwargs.get("format_type", "html")
include_verse_numbers: bool = kwargs.get("include_verse_numbers", True)
parser: BibleParser = kwargs.get("parser", DEFAULT_PARSER)
if one_verse_per_paragraph or len(verse_ids) == 1:
return format_scripture_text_verse_by_verse(
verse_ids, parser.version, full_title, format_type, include_verse_numbers
)
return format_scripture_text_with_parser(
verse_ids, parser, full_title, format_type, include_verse_numbers
)
def format_scripture_text_verse_by_verse(
verse_ids: List[int],
version: Version,
full_title: bool,
format_type: str,
include_verse_numbers: bool,
) -> str:
verse_ids.sort()
text: str = ""
current_book: Optional[Book] = None
current_chapter: Optional[int] = None
for verse_id in verse_ids:
book, chapter_number, verse_number = get_book_chapter_verse(verse_id)
if book != current_book:
current_book = book
current_chapter = chapter_number
book_titles: Optional[BookTitles] = get_book_titles(book, version)
if book_titles:
title: str = (
book_titles.long_title if full_title else book_titles.short_title
)
text += _format_title(title, format_type, len(text) == 0)
text += _format_chapter(chapter_number, format_type)
elif chapter_number != current_chapter:
current_chapter = chapter_number
text += _format_chapter(chapter_number, format_type)
verse_text: Optional[str] = get_verse_text(verse_id, version)
if include_verse_numbers:
verse_text = f"{verse_number}. {verse_text}"
text += _format_paragraph(verse_text, format_type)
return text
def format_scripture_text_with_parser(
verse_ids: List[int],
parser: BibleParser,
full_title: bool,
format_type: str,
include_verse_numbers: bool,
) -> str:
title_function: Callable[[Any], Any] = (
parser.get_book_title if full_title else parser.get_short_book_title
)
text: str = ""
paragraphs: Any = parser.get_scripture_passage_text(
verse_ids, include_verse_number=include_verse_numbers
)
for book, chapters in paragraphs.items():
title: str = title_function(book)
text += _format_title(title, format_type, len(text) == 0)
for chapter, paragraphs in chapters.items():
text += _format_chapter(chapter, format_type)
for paragraph in paragraphs:
text += _format_paragraph(paragraph, format_type)
return text
def _format_title(title: str, format_type: str, is_first_book: bool) -> str:
if format_type == "html":
return f"<h1>{title}</h1>\n"
if not is_first_book:
return f"\n\n{title}\n\n"
return f"{title}\n\n"
def _format_chapter(chapter: int, format_type: str) -> str:
if format_type == "html":
return f"<h2>Chapter {chapter}</h2>\n"
return f"Chapter {chapter}\n\n"
def _format_paragraph(paragraph: Optional[str], format_type: str) -> str:
if format_type == "html":
return f"<p>{paragraph}</p>\n"
return f" {paragraph}\n"
@lru_cache()
def get_verse_text(verse_id: int, version: Version = DEFAULT_VERSION) -> Optional[str]:
"""
Given a verse id and, optionally, a Bible version, return the text for that verse.
:param verse_id:
:param version:
:return: the verse text
"""
if verse_id not in VERSE_IDS:
raise InvalidVerseError(verse_id=verse_id)
try:
version_verse_texts: Dict[int, str] = _get_version_verse_texts(version)
except FileNotFoundError as e:
raise MissingVerseFileError(e)
return version_verse_texts.get(verse_id)
@lru_cache()
def _get_version_verse_texts(version: Version) -> Dict[int, str]:
verse_texts: Optional[Dict[int, str]] = VERSE_TEXTS.get(version)
if verse_texts is None:
json_filename: str = os.path.join(
os.path.join(DATA_FOLDER, version.value.lower()), "verses.json"
)
verse_texts = {}
with open(json_filename, "r") as json_file:
for verse_id, verse_text in json.load(json_file).items():
verse_texts[int(verse_id)] = verse_text
VERSE_TEXTS[version] = verse_texts
return verse_texts
@lru_cache()
def get_book_titles(
book: Book, version: Version = DEFAULT_VERSION
) -> Optional[BookTitles]:
"""
Given a book of the Bible and optionally a version return the book title.
:param book:
:param version:
:return: the book title
"""
try:
version_book_tiles: Dict[Book, BookTitles] = _get_version_book_titles(version)
except FileNotFoundError as e:
raise MissingBookFileError(e)
return version_book_tiles.get(book)
@lru_cache()
def _get_version_book_titles(version: Version) -> Dict[Book, BookTitles]:
book_titles: Optional[Dict[Book, BookTitles]] = BOOK_TITLES.get(version)
if book_titles is None:
json_filename: str = os.path.join(
os.path.join(DATA_FOLDER, version.value.lower()), "books.json"
)
book_titles = {}
with open(json_filename, "r") as json_file:
for book_id, titles in json.load(json_file).items():
book: Book = Book(int(book_id))
book_titles[book] = BookTitles(titles[0], titles[1])
BOOK_TITLES[version] = book_titles
return book_titles
|
15,829 | 9deaf2e368fc30cbc69ed6f706d728451030bc68 | from classes.piece import Piece
from functions.pieces import Pieces
class King(Piece):
    """Chess king piece; move generation is delegated to ``Pieces.moves``."""
    def __init__(self):
        super().__init__('KK')
        # Filled in externally once the piece is placed on a board.
        self.player = ''
        self.id = ''
        self.position = ''
    def move(self, King):
        # NOTE(review): the parameter shadows the class name and is passed
        # straight to Pieces.moves — confirm it is a piece instance.
        self.moves = Pieces.moves(King)
        return self.moves
|
15,830 | 44cc9117668a9a69598f1ec1a874733948b2f522 | N = int(input())
rl = [0, N-1]
rl_condition = ["",""]
flag = True
for i, RL in enumerate(rl):
print(RL)
print("\n")
s = input()
rl_condition[i] = s
if s == "Vacant":
flag = False
break
while flag:
center = (rl[0] + rl[1])//2
print(center)
print("\n")
s = input()
if s == "Vacant":
break
elif ((center - rl[0])%2 == 1) and (s == rl_condition[0]):
rl_condition[1] = s
rl[1] = center
continue
elif ((rl[1] - center)%2 == 1) and (s == rl_condition[1]):
rl_condition[0] = s
rl[0] = center
continue
elif ((center - rl[0])%2 == 0) and (s != rl_condition[0]):
rl_condition[1] = s
rl[1] = center
continue
elif ((rl[1] - center)%2 == 0) and (s != rl_condition[1]):
rl_condition[0] = s
rl[0] = center
continue
|
15,831 | 2ea14536ff2b3f24323d0abb0e9a62f12dc6923a | import pandas as pd
from sklearn import preprocessing
def get_data():
    """Load 300k.csv, keep selected features, and return (X, y).

    X is a standardized float feature matrix; y is a one-hot float matrix
    of the 'class' column.  Rows containing '?' in any column are dropped.
    """
    df = pd.read_csv('300k.csv')
    df = df[(df != '?').all(axis=1)]
    target = df['class']
    target = pd.get_dummies(target,columns=['class'])
    # preprocess: keep only the whitelisted columns below
    used_col = ['latitude', 'longitude','appearedTimeOfDay','appearedDayOfWeek','terrainType','closeToWater','continent','temperature','urban','rural','weatherIcon','population density','gymDistanceKm','pokestopDistanceKm' ]
    for col in df.columns:
        if col not in used_col:
            del df[col]
    # Encode categorical string values to integers; unknown values pass through.
    mapping = {'afternoon':0,'evening':1,'morning':2,'night':3,'Sunday':0,'Monday':1,'Tuesday':2,'Wednesday':3,'Thursday':4,'Friday':5,'Saturday':6,'dummy_day':7,'False':0,'True':1,'Africa':0,'America':1,'America/Argentina':2,'America/Indiana':3,'America/Kentucky':4,'Asia':5,'Atlantic':6,'Australia':7,'Europe':8,'Indian':9,'Pacific':10,'clear-day':0,'clear-night':1,'cloudy':2,'fog':3,'partly-cloudy-day':4,'partly-cloudy-night':5,'rain':6,'wind':7}
    df = df.applymap(lambda s: mapping.get(s) if s in mapping else s)
    X,y = df.values.astype(float), target.values.astype(float)
    # Standardize features to zero mean / unit variance.
    scaler = preprocessing.StandardScaler().fit(X)
    X = scaler.transform(X)
    return X,y
def get_data_sparse():
    """Same pipeline as get_data(), but y is the integer class labels
    (not one-hot encoded)."""
    df = pd.read_csv('300k.csv')
    df = df[(df != '?').all(axis=1)]
    target = df['class']
    # preprocess: keep only the whitelisted columns below
    used_col = ['latitude', 'longitude','appearedTimeOfDay','appearedDayOfWeek','terrainType','closeToWater','continent','temperature','urban','rural','weatherIcon','population density','gymDistanceKm','pokestopDistanceKm' ]
    for col in df.columns:
        if col not in used_col:
            del df[col]
    # Encode categorical string values to integers; unknown values pass through.
    mapping = {'afternoon':0,'evening':1,'morning':2,'night':3,'Sunday':0,'Monday':1,'Tuesday':2,'Wednesday':3,'Thursday':4,'Friday':5,'Saturday':6,'dummy_day':7,'False':0,'True':1,'Africa':0,'America':1,'America/Argentina':2,'America/Indiana':3,'America/Kentucky':4,'Asia':5,'Atlantic':6,'Australia':7,'Europe':8,'Indian':9,'Pacific':10,'clear-day':0,'clear-night':1,'cloudy':2,'fog':3,'partly-cloudy-day':4,'partly-cloudy-night':5,'rain':6,'wind':7}
    df = df.applymap(lambda s: mapping.get(s) if s in mapping else s)
    X,y = df.values.astype(float), target.values.astype(int)
    # Standardize features to zero mean / unit variance.
    scaler = preprocessing.StandardScaler().fit(X)
    X = scaler.transform(X)
    return X,y
|
15,832 | bfb5b3557c0af5fe4c7c41a1ec31d533ed11faac | #!/usr/bin/env python3
"""calculates the specificity each class in a confusion matrix"""
import numpy as np
def specificity(confusion):
    """Compute the per-class specificity (true-negative rate) of a confusion matrix.

    confusion is a numpy.ndarray of shape (classes, classes) where
    classes is the number of classes.

    Returns a numpy.ndarray of shape (classes,) holding TN / (TN + FP)
    for each class.
    """
    total = np.sum(confusion)
    true_pos = np.diagonal(confusion)
    # Column sums are the predicted counts; everything predicted as a class
    # that was not actually that class is a false positive.
    false_pos = np.sum(confusion, axis=0) - true_pos
    # Everything that is not actually a given class is a negative for it.
    actual_neg = total - np.sum(confusion, axis=1)
    true_neg = actual_neg - false_pos
    return true_neg / actual_neg
|
15,833 | 4be5055a1b36fc3eefa65cd0b9a6ae1f06709d83 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 21 00:15:37 2020
@author: Keith Monreal
"""
# Give the user some context and get his or her name
print("\nAre you ready to take a quiz about everything and anything under the sun today?")
choice = input("Yes or No\n")
# NOTE(review): the comparison is case-sensitive and only the "Yes" branch
# changes the printed message — the quiz proceeds either way. Confirm that
# answering "No" is meant to continue into the name prompt.
if 'Yes' in str(choice):
    print("\nOkay, let's do this!. But first, what is your name?")
else:
    print("\nOkay, whenever you're ready.")
name = input("Please input your beautiful name here:\n")
print("\nHi" + " " + str(name) + " " + "!" + " " + "Here is your first question.")
# set questions (True/False trivia statements, asked in order)
questions = ["A sneeze is faster than an eye blink",
             "Olympic gold medal is made of silver",
             "Chocolate is lethal to dogs.",
             "Shrimp's heart is in its head.",
             "Pigs cannot look up into the sky.",
             "Fingernails grow faster than hair",
             "A shark can blink its eyes",
             "Snoopy is yellow in Brazil.",
             "Gorilla's blood type is all B",
             "Sandwich is named after a person."]
# set the right answers ('T'/'F'), index-aligned with `questions`
right_answers = ["T","T","T","T","T","F","T","F","T","T"]
# initialize the variables that will be used
answers = []          # player's answers, appended one per question
question_number = 0   # index of the question currently being asked
score = 0             # count of correct answers
# last_question: summarise the quiz once every question has been answered
def last_question(answers, score, total=10):
    """Print the player's answers, a pass/fail verdict and the percentage score.

    Args:
        answers: list of the player's 'T'/'F' answers, in question order.
        score: number of correct answers.
        total: number of questions asked. Defaults to 10 (this quiz's length)
            so existing callers keep their behavior; the parameter generalizes
            the previously hard-coded divisor.
    """
    print("Your answers are" + " " + str(answers))
    ratio = score / total
    # Passing grade is strictly more than 60 %.
    if ratio > 0.60:
        print("Congratulations! You passed!")
    else:
        print("You failed. It's okay, they are useless facts anyway.")
    print("\nYour score is" + " " + "{:.2%}".format(ratio))
# while loop for the question proper
while question_number < 10:
    print("\n" + str(questions[question_number]))
    answer_n = input("\nT or F \n")  # player's answer for this question
    answers.append(answer_n)
    # Case-sensitive comparison: only an exact 'T'/'F' can score a point.
    if answers[question_number] == right_answers[question_number]:
        score = score + 1
    question_number = question_number + 1
    # After the tenth answer, print the summary and the final score.
    if question_number == 10:
        last_question(answers,score)
15,834 | 37cd07d7974974a0e7977a3fdcb48619d0e8f9c7 | import os
import pandas as pd
import subprocess
from sentiment import *
from csv import writer
def append_list_as_row(file_name, list_of_elem):
    """Append `list_of_elem` to `file_name` as one CSV row."""
    # newline='' lets the csv module control line endings itself.
    with open(file_name, 'a+', newline='') as out_file:
        writer(out_file).writerow(list_of_elem)
# Score Airbnb listings by the average model sentiment of their reviews and
# append each (listing_id, avg_score) pair to sentiments.csv.
df = pd.read_csv("./reviews1.csv")
id_to_sentiment = {}
count = 0
# Process only listings [min_count, max_count) so the job can be resumed in
# batches (earlier runs covered the preceding indices).
min_count = 121
max_count = 200
for listing_id in df['listing_id'].unique():
    if count < min_count:
        count += 1
        continue
    if count == max_count:
        break
    count += 1
    print(count, ":", listing_id)
    relevant_rows = df[df['listing_id'] == listing_id]
    reviews = relevant_rows['comments'].to_list()
    # Cap the per-listing work at the first ten reviews.
    if len(reviews) > 10:
        reviews = reviews[:10]
    # Quote each review so it is passed to the model as a single string.
    for i in range(len(reviews)):
        reviews[i] = "\"" + str(reviews[i]) + "\""
    choices = ["Positive", "Neutral", "Negative"]
    sentiments = []
    for rev in reviews:
        sent = predict_single_sentiment(choices, rev)
        if sent is not None:
            sentiments.append(float(sent))
    # BUG FIX: previously the average was computed unconditionally, raising
    # ZeroDivisionError when no review produced a sentiment (the "div by
    # zero errors: 46 (18764)" case noted by the original author). Skip
    # listings with no scored reviews instead.
    if not sentiments:
        continue
    avg_score = sum(sentiments) / len(sentiments)
    append_list_as_row("./sentiments.csv", [listing_id, avg_score])
    print(avg_score)
15,835 | a2dddc34bd293794209a26c77086d8dbd5137244 | #!/usr/bin/env python3
from sys import argv
from os import path
from subprocess import call
# Tally how many instances were solved within each time limit and how the
# 5 s and 900 s outcomes differ, reading ../results/tw_time.csv.
s5 = 0       # solved within 5 seconds
s60 = 0      # solved within 60 seconds
s900 = 0     # solved within 900 seconds
s1800 = 0    # solved within 1800 seconds
pos = 0      # solved at 5 s but not at 900 s
neg = 0      # solved at 900 s but not at 5 s
ins = 0      # total instances seen
diff = 0     # instances whose 5 s / 900 s outcomes differ
bad = {"BAD", "ERR", "UR"}    # statuses that must never appear
fail = {"TO", "MO", "RTE"}    # timeout / memory-out / runtime-error statuses
with open("../results/tw_time.csv", 'r') as results:
    for line in results:
        fields = line.strip().split(",")
        if fields[0] == "instance":
            continue  # skip the header row
        outcomes = fields[1:5]
        for outcome in outcomes:
            assert outcome not in bad
        ins += 1
        solved = [outcome not in fail for outcome in outcomes]
        s5 += solved[0]
        s60 += solved[1]
        s900 += solved[2]
        s1800 += solved[3]
        # Count disagreements between the 5 s and 900 s runs.
        if solved[0] != solved[2]:
            diff += 1
            if solved[0]:
                pos += 1
            else:
                neg += 1
print("ins: " + str(ins))
print("solve 5s: " + str(s5))
print("solve 60s: " + str(s60))
print("solve 900s: " + str(s900))
print("solve 1800s: " + str(s1800))
print("diff 5,900: " + str(diff))
print("pos 5,900: " + str(pos))
print("neg 5,900: " + str(neg))
def chunk_iterator(idx, total, chunks, max_length=None):
    """Yield the global indices that belong to chunk ``idx`` when ``total``
    items are split into ``chunks`` near-equal parts.

    Balanced partition per https://stackoverflow.com/a/37414115: the first
    ``total % chunks`` chunks get one extra element. If ``max_length`` is
    given, at most that many indices are yielded.
    """
    n, k = total, chunks
    start = idx * (n // k) + min(idx, n % k)
    end = (idx + 1) * (n // k) + min(idx + 1, n % k)
    if max_length is not None:
        # Cap the chunk at max_length elements (may yield nothing).
        end = min(end, start + max_length)
    for index in range(start, end):
        yield index
15,837 | 846a933b541e20f2c2e720b58a9599e82b13c009 | # coding=gbk
# 1. Example: route 375 serves Xizhimen ... Xueyuan Road, route 387 serves
#    Xueyuan Road ... Beijing West Station; find the transfer stop.
# Read "route:stop1,stop2,..." lines from the busTrans file into
# a {route: [stops]} mapping.
d = {}
with open("busTrans") as f:
    while 1:
        line = f.readline().strip()
        if line:
            # Split on the first ':' only, so stop names may contain ':'.
            route, stops = line.split(":", 1)
            d[route] = stops.split(",")
        else:
            break
print(d)
r1 = input("from:")
r2 = input("to:")
# 2. Find a route serving the origin and a route serving the destination.
for i in d.keys():
    if r1 in d.get(i):
        fro = i
for i in d.keys():
    if r2 in d.get(i):
        des = i
# 3. Every stop shared by both routes is a possible transfer point.
for stop in d.get(fro):
    if stop in d.get(des):
        # BUG FIX: the transfer stop is the current shared stop. The old code
        # did mid = d.get(fro)[flag] with flag counting matches, which printed
        # the route's first stops instead of the shared ones.
        print(fro, stop, des)
# Number of set bits ("population count") of each integer 0..99.
li = [bin(i).count('1') for i in range(100)]
# Read four integers and report how many of them duplicate an earlier value.
a = list(map(int, input().split()))[:4]
seen = []
for value in a:
    if value not in seen:
        seen.append(value)
print(4 - len(seen))
def esconde_senha(n):
    """Print a password mask made of `n` asterisk characters."""
    mascara = "*" * n
    print(mascara)
15,841 | d7e10e3b43941114a0c820d5e6fb6f3121d8bbd6 | from django.contrib.admin import SimpleListFilter
from django.core.cache import cache
from wagtail.contrib.modeladmin.options import (
ModelAdmin,
ModelAdminGroup,
modeladmin_register,
)
from wagtail.core.signals import page_published
from .models import Group, GroupContribution, MeetingType, Region, Location, Meeting
def receiver(sender, **kwargs):
    """
    Clear the API cache whenever a Location or Meeting is published.
    """
    cache.delete("wagtail_meeting_guide_api_cache")


# Register the signal receiver for Location and Meeting publishes.
page_published.connect(receiver, sender=Location)
page_published.connect(receiver, sender=Meeting)
class RegionListFilter(SimpleListFilter):
    """
    This filter will always return a subset of the instances in a Model,
    either filtering by the user choice or by a default value.
    """
    title = "Regions"
    parameter_name = "region"

    def lookups(self, request, model_admin):
        """
        Return the filter choices as (coded-value, human-readable-name)
        tuples; the coded value appears in the URL query string and the
        name in the right sidebar. One choice per top-level region,
        ordered by name.
        """
        roots = Region.objects.filter(parent__isnull=True).order_by("name")
        return [(str(region.id), region.name) for region in roots]

    def queryset(self, request, queryset):
        """
        Return the queryset filtered by the value retrieved via
        `self.value()`, or unfiltered when no choice was made.
        """
        selected = self.value()
        if selected:
            # Keep only rows whose parent is the chosen top-level region.
            return queryset.filter(parent_id=selected)
        return queryset
class MeetingTypeAdmin(ModelAdmin):
    """Wagtail modeladmin for MeetingType, shown under the Settings menu."""
    model = MeetingType
    menu_label = "Meeting Types"
    menu_icon = "fa-folder-open"
    add_to_settings_menu = True
    list_display = (
        "type_name",
        "intergroup_code",
        "spec_code",
        "display_order",
    )
    ordering = ("display_order", "type_name")
class RegionAdmin(ModelAdmin):
    """Wagtail modeladmin for Region, filterable by top-level region."""
    model = Region
    menu_icon = "doc-full-inverse"
    empty_value_display = "-----"
    list_display = ("parent", "name")
    ordering = ("parent", "name")
    list_filter = (RegionListFilter,)

    def get_root_regions(self, obj):
        # Regions without a parent are root (top-level) regions.
        # NOTE(review): not referenced by list_display here — confirm this
        # helper is actually used somewhere.
        return Region.objects.filter(parent=None)
class GroupAdmin(ModelAdmin):
    """Wagtail modeladmin for Group, searchable by name."""
    model = Group
    menu_label = "Groups"
    menu_icon = "fa-folder-open"
    add_to_settings_menu = False
    list_display = ("name", "gso_number")
    search_fields = ("name",)
class GroupContributionAdmin(ModelAdmin):
    """Wagtail modeladmin for GroupContribution, filterable by group."""
    model = GroupContribution
    menu_label = "Contributions"
    menu_icon = "fa-folder-open"
    add_to_settings_menu = False
    list_display = ("group", "date", "amount")
    list_filter = ("group",)
    search_fields = ("group",)
class MeetingGuideAdminGroup(ModelAdminGroup):
    """Groups all Meeting Guide modeladmins under one admin menu entry."""
    menu_label = "Meeting Guide"
    menu_icon = "fa-th"
    menu_order = 1000
    items = (MeetingTypeAdmin, RegionAdmin, GroupAdmin, GroupContributionAdmin)


modeladmin_register(MeetingGuideAdminGroup)
|
15,842 | e7e78b6962805dba0cfd92d71011c4abbd0bcb6f | from server_part.app import app
from server_part.database.tables import User, Post
from flask import jsonify
from server_part.utils.misc import auth
# admin could get information about some users using this function
@app.route('/user/<int:user_id>', methods=['GET'])
@auth.login_required(role=['admin'])
def show_user_profile(user_id):
    """Return the given user's profile details as JSON (admin only)."""
    user_obj = User.query.filter_by(id=user_id).first()
    # NOTE(review): .first() returns None for an unknown id, which would
    # raise AttributeError below — confirm ids are always valid here.
    payload = {
        'username': user_obj.username,
        'is active': user_obj.active,
        'confirmed at': user_obj.confirmed_at,
        'last activity': user_obj.last_activity,
        'user roles': [role.name for role in user_obj.roles],
    }
    return jsonify(payload)
# requests.get('http://localhost:5000/user/9', auth=HTTPBasicAuth('username', 'pass')).json()
@app.route('/post/<int:post_id>', methods=['GET'])
@auth.login_required(role=['admin', 'common'])
def show_post(post_id):
    """Return a post's details, its author and its tags as JSON."""
    post_obj = Post.query.filter_by(id=post_id).first()
    # Look up the author to report a username rather than a raw user id.
    user_obj = User.query.filter_by(id=post_obj.user_id).first()
    payload = {
        'title': post_obj.title,
        'post text': post_obj.post_text,
        'confirmed at': post_obj.confirmed_at,
        'outdated': post_obj.outdated,
        'created by ': user_obj.username,
        'with tags': [tag.name for tag in post_obj.tag],
    }
    return jsonify(payload)
# requests.get('http://localhost:5000/post/55', auth=HTTPBasicAuth('username', 'pass')).json()
15,843 | 2ef0a6d8ab9369e8d127dd607bffa95570c212d4 | """test_serdepa.py: Tests for serdepa packets. """
import unittest
from codecs import decode, encode
from serdepa import (
SerdepaPacket, Length, List, Array, ByteString,
nx_uint8, nx_uint16, nx_uint32, nx_uint64,
nx_int8, nx_int16, nx_int32, nx_int64,
uint8, uint16, uint32, uint64,
int8, int16, int32, int64
)
from serdepa.exceptions import DeserializeError
__author__ = "Raido Pahtma, Kaarel Ratas"
__license__ = "MIT"
class PointStruct(SerdepaPacket):
    """A 2D point: two big-endian signed 32-bit coordinates."""
    _fields_ = [
        ("x", nx_int32),
        ("y", nx_int32)
    ]


class OnePacket(SerdepaPacket):
    """Packet with a length-prefixed data list and an unbounded tail list."""
    _fields_ = [
        ("header", nx_uint8),
        ("timestamp", nx_uint32),
        ("length", Length(nx_uint8, "data")),
        ("data", List(nx_uint8)),
        ("tail", List(nx_uint8))
    ]


class OneTailPacket(SerdepaPacket):
    """Packet whose trailing bytes form an unbounded tail list."""
    _fields_ = [
        ("header", nx_uint8),
        ("timestamp", nx_uint32),
        ("tail", List(nx_uint8))
    ]


class DefaultValuePacket(SerdepaPacket):
    """Same layout as OnePacket, but every field has a default value."""
    _fields_ = [
        ("header", nx_uint8, 1),
        ("timestamp", nx_uint32, 12345),
        ("length", Length(nx_uint8, "data")),
        ("data", List(nx_uint8), [1, 2, 3, 4]),
        ("tail", List(nx_uint8), [5, 6])
    ]


class AnotherPacket(SerdepaPacket):
    """Packet with a nested PointStruct and a counted list of PointStructs."""
    _fields_ = [
        ("header", nx_uint8),
        ("timestamp", nx_uint32),
        ("origin", PointStruct),
        ("points", Length(nx_uint8, "data")),
        ("data", List(PointStruct))
    ]


class ArrayPacket(SerdepaPacket):
    """Packet with a fixed-size array of four PointStructs."""
    _fields_ = [
        ("header", nx_uint8),
        ("data", Array(PointStruct, 4))
    ]


class SimpleArray(SerdepaPacket):
    """A bare fixed-size array of ten bytes."""
    _fields_ = [
        ("data", Array(nx_uint8, 10))
    ]


class MyNodes(SerdepaPacket):
    """Per-node record embedded in a BeatRecord."""
    _fields_ = [
        ("nodeId", nx_uint16),
        ("attr", nx_int16),
        ("inQlty", nx_uint8),
        ("outQlty", nx_uint8),
        ("qlty", nx_uint8),
        ("lifetime", nx_uint8)
    ]


class MyRouters(SerdepaPacket):
    """Per-router record embedded in a BeatRecord."""
    _fields_ = [
        ("beatId", nx_uint32),
        ("routerId", nx_uint16),
        ("partnerId", nx_uint16),
        ("attr", nx_uint16),
        ("qlty", nx_uint8),
        ("lifetime", nx_uint8),
        ("flags", nx_uint8),
    ]


class BeatRecord(SerdepaPacket):
    """Record with two counted lists: node entries and router entries."""
    _fields_ = [
        ("clockstamp", nx_uint32),
        ("nodes_in_beat", Length(nx_uint8, "nodes")),
        ("beats_in_cycle", Length(nx_uint8, "routers")),
        ("my_beat_id", nx_uint32),
        ("nodes", List(MyNodes)),
        ("routers", List(MyRouters))
    ]
class TransformTester(unittest.TestCase):
    """Round-trip serialize/deserialize tests for OnePacket."""

    # header=01, timestamp=00003039 (12345), length=04, data=01..04, tail=05 06
    p1 = "010000303904010203040506"

    def test_one(self):
        """Serializing a populated OnePacket yields the expected bytes."""
        p = OnePacket()
        p.header = 1
        p.timestamp = 12345
        p.data.append(1)
        p.data.append(2)
        p.data.append(3)
        p.data.append(4)
        p.tail.append(5)
        p.tail.append(6)
        self.assertEqual(p.serialize(), decode(self.p1, "hex"))

    def test_two(self):
        """Deserializing p1 restores every field, including the tail."""
        p = OnePacket()
        p.deserialize(decode(self.p1, "hex"))
        self.assertEqual(p.header, 1)
        self.assertEqual(p.timestamp, 12345)
        self.assertEqual(p.length, 4)
        self.assertEqual(len(p.data), 4)
        self.assertEqual(len(p.tail), 2)
        self.assertEqual(list(p.data), [1, 2, 3, 4])
        self.assertEqual(list(p.tail), [5, 6])
class EmptyTailTester(unittest.TestCase):
    """OnePacket round-trips when the trailing tail list is empty."""

    # OnePacket bytes with data=[1,2,3,4] and no tail bytes at all.
    p1 = "01000030390401020304"

    def test_empty_tail_deserialize(self):
        p = OnePacket()
        p.deserialize(decode(self.p1, "hex"))
        self.assertEqual(p.header, 1)
        self.assertEqual(p.timestamp, 12345)
        self.assertEqual(p.length, 4)
        self.assertEqual(list(p.data), [1, 2, 3, 4])
        self.assertEqual(list(p.tail), [])

    def test_empty_tail_serialize(self):
        p = OnePacket()
        p.header = 1
        p.timestamp = 12345
        p.data.append(1)
        p.data.append(2)
        p.data.append(3)
        p.data.append(4)
        self.assertEqual(p.serialize(), decode(self.p1, "hex"))


class SomeTailTester(unittest.TestCase):
    """OneTailPacket round-trips with and without tail bytes."""

    p1 = "0100003039"          # header + timestamp only, empty tail
    p2 = "010000303901020304"  # same, followed by tail bytes 01..04

    def test_empty_tail_deserialize(self):
        p = OneTailPacket()
        p.deserialize(decode(self.p1, "hex"))
        self.assertEqual(p.header, 1)
        self.assertEqual(p.timestamp, 12345)
        self.assertEqual(list(p.tail), [])

    def test_tail_deserialize(self):
        p = OneTailPacket()
        p.deserialize(decode(self.p2, "hex"))
        self.assertEqual(p.header, 1)
        self.assertEqual(p.timestamp, 12345)
        self.assertEqual(list(p.tail), [1, 2, 3, 4])

    def test_empty_tail_serialize(self):
        p = OneTailPacket()
        p.header = 1
        p.timestamp = 12345
        self.assertEqual(p.serialize(), decode(self.p1, "hex"))

    def test_tail_serialize(self):
        p = OneTailPacket()
        p.header = 1
        p.timestamp = 12345
        p.tail.append(1)
        p.tail.append(2)
        p.tail.append(3)
        p.tail.append(4)
        self.assertEqual(p.serialize(), decode(self.p2, "hex"))
class DefaultValueTester(unittest.TestCase):
    """Field defaults apply on construction and can be overridden by kwargs."""

    p1 = "010000303904010203040506"  # all defaults
    p2 = "020000303904010203040506"  # header overridden to 2

    def test_default_value_serialize(self):
        p = DefaultValuePacket()
        self.assertEqual(p.serialize(), decode(self.p1, "hex"))

    def test_default_keyword(self):
        p = DefaultValuePacket(header=2)
        self.assertEqual(p.serialize(), decode(self.p2, "hex"))


class ArrayTester(unittest.TestCase):
    """Fixed-size Array fields serialize fully, zero-padding unset slots."""

    a1 = "00010203040506070809"
    # header byte, then 4 PointStructs; the unset 4th slot is all zeroes.
    a2 = "000000000100000002000000030000000400000005000000060000000000000000"

    def test_simple_array(self):
        p = SimpleArray()
        for i in range(10):
            p.data.append(i)
        self.assertEqual(p.serialize(), decode(self.a1, "hex"))

    def test_single_array(self):
        p = ArrayPacket()
        p.header = 0
        p.data.append(PointStruct(x=1, y=2))
        p.data.append(PointStruct(x=3, y=4))
        p.data.append(PointStruct(x=5, y=6))
        self.assertEqual(p.serialize(), decode(self.a2, "hex"))

    def test_single_array_deserialize(self):
        p = ArrayPacket()
        p.deserialize(decode(self.a2, "hex"))
        self.assertEqual(p.header, 0)
        self.assertEqual(len(p.data), 4)
        self.assertEqual(p.data[1].x, 3)
        self.assertEqual(p.data[3].y, 0)
class TestHourlyReport(unittest.TestCase):
    """Deserializes a realistic BeatRecord capture (7 nodes, 13 routers)."""

    report = (
        "1DD26640"
        "07"
        "0D"
        "0005029E"
        "022B0139FFFF0003"
        "029E010EFFFF3D03"
        "00000000FFFF0000"
        "00000000FFFF0000"
        "00000000FFFF0000"
        "00000000FFFF0000"
        "00000000FFFF0000"
        "00000000000000000000000000"
        "000201AB029E01AB0000001500"
        "00030296029E02350000001500"
        "00000000000000000000000000"
        "00000000000000000000000000"
        "00000000000000000000000000"
        "000701FA022B022D00FF381300"
        "00000000000000000000000000"
        "00000000000000000000000000"
        "00000000000000000000000000"
        "00000000000000000000000000"
        "00000000000000000000000000"
        "00000000000000000000000000"
    )

    def test_hourly_deserialize(self):
        r = BeatRecord()
        r.deserialize(decode(self.report, "hex"))
        self.assertEqual(r.nodes_in_beat, 7)
        self.assertEqual(r.beats_in_cycle, 13)


class StringTester(unittest.TestCase):
    """str() of a deserialized packet reproduces the original hex string."""

    # Same capture as TestHourlyReport.report.
    report = (
        "1DD26640"
        "07"
        "0D"
        "0005029E"
        "022B0139FFFF0003"
        "029E010EFFFF3D03"
        "00000000FFFF0000"
        "00000000FFFF0000"
        "00000000FFFF0000"
        "00000000FFFF0000"
        "00000000FFFF0000"
        "00000000000000000000000000"
        "000201AB029E01AB0000001500"
        "00030296029E02350000001500"
        "00000000000000000000000000"
        "00000000000000000000000000"
        "00000000000000000000000000"
        "000701FA022B022D00FF381300"
        "00000000000000000000000000"
        "00000000000000000000000000"
        "00000000000000000000000000"
        "00000000000000000000000000"
        "00000000000000000000000000"
        "00000000000000000000000000"
    )

    def test_str(self):
        r = BeatRecord()
        r.deserialize(decode(self.report, "hex"))
        self.assertEqual(self.report, str(r))
class SerializedSizeTester(unittest.TestCase):
    """minimal_size and serialized_size report the expected byte counts."""

    def test_minimal_serialized_size(self):
        self.assertEqual(OnePacket.minimal_size(), 6)
        self.assertEqual(BeatRecord.minimal_size(), 10)

    def test_serialized_size(self):
        p = OnePacket()
        p.header = 1
        p.timestamp = 12345
        p.data.append(1)
        p.data.append(2)
        p.data.append(3)
        p.data.append(4)
        p.tail.append(5)
        p.tail.append(6)
        self.assertEqual(p.serialized_size(), 12)


class NestedPacketTester(unittest.TestCase):
    """Packets nested inside other packets round-trip and can be assigned."""

    # ArrayPacket: header F1 + four PointStructs (x ascending, y descending).
    p0 = (
        "F1"
        "0000000000000003"
        "0000000100000002"
        "0000000200000001"
        "0000000300000000"
    )
    # AnotherPacket: header D0, timestamp, origin (1,1), count 01, one point (2,2).
    p1 = (
        "D0"
        "12345678"
        "0000000100000001"
        "01"
        "0000000200000002"
    )

    def test_nested_packet_serialize(self):
        packet = ArrayPacket()
        packet.header = 0xF1
        for i, j in zip(range(4), reversed(range(4))):
            packet.data.append(PointStruct(x=i, y=j))
        self.assertEqual(packet.serialize(), decode(self.p0, "hex"))
        # Test regular nested packet
        packet = AnotherPacket()
        packet.header = 0xD0
        packet.timestamp = 0x12345678
        packet.origin.x = 1
        packet.origin.y = 1
        packet.data.append(PointStruct(x=2, y=2))
        self.assertEqual(packet.serialize(), decode(self.p1, "hex"))

    def test_nested_packet_deserialize(self):
        packet = ArrayPacket()
        packet.deserialize(decode(self.p0, "hex"))
        self.assertEqual(packet.header, 0xF1)
        self.assertEqual(
            list(packet.data),
            [
                PointStruct(x=0, y=3),
                PointStruct(x=1, y=2),
                PointStruct(x=2, y=1),
                PointStruct(x=3, y=0)
            ]
        )
        # Test regular nested packet
        packet = AnotherPacket()
        packet.deserialize(decode(self.p1, "hex"))
        self.assertEqual(packet.header, 0xD0)
        self.assertEqual(packet.timestamp, 0x12345678)
        self.assertEqual(packet.origin, PointStruct(x=1, y=1))
        self.assertEqual(packet.points, 1)
        self.assertEqual(
            list(packet.data),
            [PointStruct(x=2, y=2)]
        )

    def test_nested_packet_assign(self):
        """Assigning a packet of the right type works; a wrong type raises."""
        packet = AnotherPacket()
        try:
            packet.origin = PointStruct(x=1, y=1)
        except ValueError as err:
            self.fail(
                "Assigning PointStruct to packet.origin failed: {}".format(
                    err.args[0] if err.args else "<NO MESSAGE>"
                )
            )
        with self.assertRaises(ValueError):
            packet.origin = AnotherPacket()

    def test_nested_packet_assign_serialize(self):
        packet = AnotherPacket()
        packet.header = 0xD0
        packet.timestamp = 0x12345678
        packet.origin = PointStruct(x=1, y=1)
        packet.data.append(PointStruct(x=2, y=2))
        self.assertEqual(
            encode(packet.serialize(), "hex").decode().upper(),
            self.p1
        )


class InvalidInputTester(unittest.TestCase):
    """A truncated payload raises DeserializeError rather than mis-parsing."""

    # Same shape as the hourly report but cut short mid-router-record.
    p = (
        "1DD26640"
        "07"
        "0D"
        "0005029E"
        "022B0139FFFF0003"
        "029E010EFFFF3D03"
        "00000000FFFF0000"
        "00000000FFFF0000"
        "00000000FFFF0000"
        "00000000FFFF0000"
        "00000000FFFF0000"
        "00000000000000000000000000"
        "000201AB029E01AB0000001500"
        "00030296029E02350000001500"
        "00000000000000000000000000"
        "00000000000000000000000000"
        "00000000000000000000000000"
        "000701FA0F"
    )

    def test_invalid_length_input(self):
        r = BeatRecord()
        with self.assertRaises(DeserializeError):
            r.deserialize(decode(self.p, "hex"))
class ByteStringTester(unittest.TestCase):
    """ByteString fields work with variable, fixed and Length-driven sizes."""

    p1 = 'F10E323511122213541513A2D21F161C3621B8'
    p2 = '0305E8F02398A9'

    def test_variable_length_bytestring(self):
        # An unsized ByteString consumes everything after the header.
        class VarLenPacket(SerdepaPacket):
            _fields_ = (
                ("hdr", nx_uint16),
                ("tail", ByteString())
            )
        packet = VarLenPacket()
        try:
            packet.deserialize(decode(self.p1, "hex"))
        except DeserializeError as e:
            self.fail("Variable length ByteString deserializing failed with message: {}".format(e))
        self.assertTrue(isinstance(packet.tail, ByteString))
        self.assertEqual(packet.hdr, 0xF10E)
        self.assertEqual(packet.tail, 0x323511122213541513A2D21F161C3621B8)
        self.assertEqual(packet.serialize(), decode(self.p1, "hex"))

    def test_fixed_length_bytestring(self):
        # A sized ByteString consumes exactly that many bytes.
        class FixLenPacket(SerdepaPacket):
            _fields_ = (
                ('hdr', nx_uint8),
                ('tail', ByteString(6))
            )
        packet = FixLenPacket()
        packet.deserialize(decode(self.p2, "hex"))
        self.assertEqual(packet.hdr, 0x03)
        self.assertEqual(packet.tail, 0x05E8F02398A9)
        self.assertEqual(packet.serialize(), decode(self.p2, "hex"))

    def test_length_object_defined_length_bytestring(self):
        # A Length field can size a ByteString at runtime.
        class LenObjLenPacket(SerdepaPacket):
            _fields_ = (
                ('hdr', nx_uint8),
                ('length', Length(nx_uint8, 'tail')),
                ('tail', ByteString())
            )
        packet = LenObjLenPacket()
        packet.deserialize(decode(self.p2, "hex"))
        self.assertEqual(packet.hdr, 0x03)
        self.assertEqual(packet.length, 5)
        self.assertEqual(packet.tail, 0xE8F02398A9)
        self.assertEqual(str(packet.tail), 'E8F02398A9')


class BigTypeTester(unittest.TestCase):
    """64-bit fields decode correctly for all endianness/signedness variants."""

    p1 = '11FF00FF00FF00FF00'

    def test_nx_uint64(self):
        class Packet(SerdepaPacket):
            _fields_ = (
                ('header', nx_uint8),
                ('guid', nx_uint64)
            )
        packet = Packet()
        packet.deserialize(decode(self.p1, "hex"))
        self.assertEqual(packet.header, 0x11)
        self.assertEqual(packet.guid, 0xFF00FF00FF00FF00)

    def test_nx_int64(self):
        class Packet(SerdepaPacket):
            _fields_ = (
                ('header', nx_uint8),
                ('guid', nx_int64)
            )
        packet = Packet()
        packet.deserialize(decode(self.p1, "hex"))
        self.assertEqual(packet.header, 0x11)
        # Two's-complement: 0xFF00...00 interpreted as a negative value.
        self.assertEqual(packet.guid, 0-0x00FF00FF00FF00FF-1)

    def test_uint64(self):
        class Packet(SerdepaPacket):
            _fields_ = (
                ('header', nx_uint8),
                ('guid', uint64)
            )
        packet = Packet()
        packet.deserialize(decode(self.p1, "hex"))
        self.assertEqual(packet.header, 0x11)
        self.assertEqual(packet.guid, 0x00FF00FF00FF00FF)

    def test_int64(self):
        class Packet(SerdepaPacket):
            _fields_ = (
                ('header', nx_uint8),
                ('guid', int64)
            )
        packet = Packet()
        packet.deserialize(decode(self.p1, "hex"))
        self.assertEqual(packet.header, 0x11)
        self.assertEqual(packet.guid, 0x00FF00FF00FF00FF)
class SubstructTester(unittest.TestCase):
    """A nested sub-struct round-trips and can be passed to the constructor."""

    input = decode(
        "010203",
        "hex"
    )

    def setUp(self):
        # Inner (two bytes) nested inside Outer (inner + one tail byte).
        class Inner(SerdepaPacket):
            _fields_ = (
                ('first', nx_uint8, 0),
                ('second', nx_uint8),
            )
        class Outer(SerdepaPacket):
            _fields_ = (
                ('inner', Inner),
                ('tail', nx_uint8),
            )
        self.Inner = Inner
        self.Outer = Outer
        self.packet = Outer()
        self.inner = Inner()

    def test_substruct_deserialize(self):
        self.packet.deserialize(self.input)
        self.assertEqual(self.packet.inner.first, 1)
        self.assertEqual(self.packet.inner.second, 2)
        self.assertEqual(self.packet.tail, 3)

    def test_substruct_serialize(self):
        self.packet.inner.first = 1
        self.packet.inner.second = 2
        self.packet.tail = 3
        self.assertEqual(self.packet.serialize(), self.input)

    def test_default_inner_initialization(self):
        self.packet = self.Outer(inner=self.Inner(first=1, second=2), tail=3)
        self.assertEqual(self.packet.serialize(), self.input)


class InvalidLengthTester(unittest.TestCase):
    """Both too-short and too-long inputs raise DeserializeError."""

    short_input = decode(
        "0001020304050607",
        "hex"
    )
    long_input = decode(
        "000102030405060708090A0B0C0D0E0F",
        "hex"
    )

    class TestPacket(SerdepaPacket):
        _fields_ = (
            ('header', nx_uint32),
            ('length', nx_uint8),
            ('values', nx_uint8)
        )

    def test_deserialize(self):
        packet = self.TestPacket()
        with self.assertRaises(DeserializeError):
            packet.deserialize(self.short_input)
        with self.assertRaises(DeserializeError):
            packet.deserialize(self.long_input)


if __name__ == '__main__':
    unittest.main()
|
15,844 | f6008fa23129af3de30dc19c36fa93025789f085 | # -*- coding: utf-8 -*-
"""Log schema."""
from datetime import datetime
from typing import List, Optional
from pydantic import BaseModel
from projects.utils import to_camel_case
class LogBase(BaseModel):
    """Base schema: camelCase aliases, population by field name, ORM mode."""
    class Config:
        # Expose camelCase aliases while still accepting snake_case names.
        alias_generator = to_camel_case
        allow_population_by_field_name = True
        orm_mode = True


class Log(LogBase):
    """A single log entry."""
    level: str
    title: str
    message: str
    created_at: Optional[datetime]


class LogList(BaseModel):
    """A page of log entries plus the total count."""
    logs: List[Log]
    total: int
|
15,845 | bbe96af1c091b5bd9a30ce7b7fd8bbfd709fa6b4 | n, m = map(int, input().split())
arr = list(map(int, input().split()))
d = {}
for a in arr:
if a in d:
d[a] += 1
else:
d[a] = 1
dd = [[k, v] for k, v in d.items()]
dd = sorted(dd, key=lambda x: x[1])
if dd[-1][1] > n // 2:
print(dd[-1][0])
else:
print("?")
|
15,846 | a7827063f3de253130fa14634b6d2bfe543b8e4e | # Lesson 2.4: While Loops
# Loops are an important concept in computer programming.
# Loops let us run blocks of code many times which can be
# really useful when we have to repeat tasks.
# https://classroom.udacity.com/nanodegrees/nd000/parts/0001345403/modules/356813882475460/lessons/4196788670/concepts/50222508420923
def count():
i = 0
while i < 10:
print i
i = i + 1
count()
################################################################################
# Add your own code and notes here
################################################################################
################################################################################
# First code lesson in 2.4
################################################################################
# This code demonstrates a while loop with a "counting variable"
i = 0
while i < 10:
print i
i = i+1
# This uses a while loop to remove all the spaces from a string of
# text. Can you figure out how it works?
def remove_spaces(text):
    """Return `text` with every space character removed."""
    result = ''  # accumulates the non-space characters
    for character in text:
        if character != ' ':  # that's a single space
            result = result + character
    return result
print remove_spaces("hello my name is andy how are you?")

################################################################################
# Second code lesson in 2.4
################################################################################

# Define a procedure, print_numbers, that takes
# as input a positive whole number, and prints
# out all the whole numbers from 1 to the input
# number.
# Make sure your procedure prints "upwards", so
# from 1 up to the input number.

def print_numbers(n):
    # Count from 1 up to and including n, printing each value.
    count = 1
    while count != n+1:
        print count
        count = count +1

print_numbers(3)
#>>> 1
#>>> 2
#>>> 3
15,847 | df79a8584bb50528479149140be296b481e223eb | import WavePy.wavelet as wv
import WavePy.spiht as spiht
from WavePy.lwt import *
import pickle
from pylab import *
# Standard test images to compress; all but "peppers_gray" are disabled here.
lim = [# "cameraman",
       #"house",
       #"jetplane",
       #"lake",
       #"lena_gray_512",
       #"livingroom",
       #"mandril_gray"#,
       "peppers_gray"#,
       #"pirate",
       #"walkbridge",
       #"woman_blonde",
       #"woman_darkhair"
       ]
# Input TIFF location and output location for pickled SPIHT test data.
path = "/home/zenathar/Pictures/test/standard_test_images/{0}.tif"
output = "/home/zenathar/Pictures/test/spiht/{0}.spti"
#fil = open("/home/zenathar/Documents/src/VideoBWT/python/512x512_5.dol","r+")
#dct = pickle.load(fil)
first = False
# Sweep target bitrates from 0 to 8 bpp in 100 steps.
# NOTE(review): `first` is never set True (the updating code is commented
# out), so the `if not first` guard below always passes.
for bpp in linspace(0,8,100):
#    if bpp >= 1.85858585859:
#        first = False
    if not first:
        for i in lim:
            # Compress the red channel with a 5-level CDF 9/7 SPIHT pack
            # and pickle the resulting test data, one file per bitrate.
            r,g,b = wv.LoadImageRGB(path.format(i))
            k = spiht.spiht_image_pack([r],"cdf97",5,[bpp],"bi.orth",0.00001, True,i,{},True)
            outputfile = open(output.format(i) + str(bpp),"w+")
            pickle.dump(k["test_data"],outputfile)
            outputfile.close()
            print path.format(i) + str(bpp)
15,848 | 295ab96565099f894ef7de68f93d88a62895eec5 | import tkinter as tk
import ctypes # An included library with Python install.
import packetCapture
import featureExtract
import reporting
import classifier
import numpy as np
from tkinter import messagebox
# Shared font for headings and a pre-allocated feature buffer (76 features).
LARGE_FONT = ('Comic Sans MS', 30)
f = np.empty([1,76])

class gui(tk.Frame):
    """Main window of the intrusion detection system.

    Builds the Tk root window with three actions: intercept traffic,
    analyse the captured flow, and show a report.
    NOTE(review): inherits tk.Frame but never calls tk.Frame.__init__;
    it only configures the root window directly — confirm intentional.
    """
    def __init__(self):
        self.root = tk.Tk()
        self.root.geometry("620x480")
        self.root.wm_title("Intrusion Detection System")
        self.root.iconbitmap('Hopstarter-Soft-Scraps-Document-Preview.ico')
        tk.Frame.config(self.root, background="orange")
        self.v = tk.IntVar()
        # Title banner and file-path entry row.
        tk.Label(self.root, text="Network Intrusion Detection", background="black", font=LARGE_FONT,
                 anchor="w", fg="white").grid(row=1, column=3, columnspan=50, pady=15, padx=35)
        tk.Label(self.root, height=2, font=('Comic Sans MS', 15), text="file path", bg="orange",
                 anchor="w", fg="purple").grid(row=3, column=15)
        self.file_path = tk.Entry(self.root, width=30)
        self.file_path.grid(row=3, column=35)
        # Action buttons: capture, report and analyse.
        tk.Button(self.root, text='Intercept', height=1,width=10, font=('Comic Sans MS', 15), command=self.PacketCaptureCall).grid(row=20,
                 column=15)
        tk.Button(self.root, text="Reporting", height=1,width=10, font=('Comic Sans MS', 15), command=self.reportCall).grid(row=20,
                 column=35)
        tk.Label(self.root, background="orange", font=LARGE_FONT, height=1,
                 anchor="w", fg="white").grid(row=22, column=3, columnspan=50)
        tk.Button(self.root, text="analyse", height=1,width=10, font=('Comic Sans MS', 15), command=self.featureExtractionCall).grid(row=21,
                 column=25)
        self.root.mainloop()

    def PacketCaptureCall(self):
        # Capture packets and show the capture summary in the window.
        var = packetCapture.cap()
        tk.Label(self.root, background="white", text=var, font=('Comic Sans MS', 15), bg="orange",
                 anchor="w", fg="black").grid(row=9, column=3, columnspan=50)
        messagebox.showinfo("Intercept","Done!!")

    def featureExtractionCall(self):
        # Extract flow features from the capture; classification is disabled.
        f=featureExtract.FeatureExtraction('TCP.pcap','features.csv')
        # result = classifier.classify(f)
        # reporting.report(result)
        messagebox.showinfo("Analysis", "Done!!")
        #print(f.shape)

    def reportCall(self):
        # NOTE(review): the "malicious" branch is commented out, so the
        # report is currently hard-coded to "clean traffic".
        #messagebox.showinfo("FLOW result", "Flow contains malicious traffic ")
        messagebox.showinfo("FLOW result", "Flow is clean traffic ")

Gui = gui()
|
15,849 | cd1065b22a580bf9ae72cd158d6ddb275891a4a2 | from Tree import BinaryNode
from Tree import minTree
def checkBalanced(rootNode):
    # Collects the depth of every leaf and prints "Unbalanced" when two
    # leaf depths differ by more than one.
    # NOTE(review): `depths` is assigned before its `global` declaration;
    # Python 3 rejects this outright and Python 2 only warns — this module
    # is Python 2 code (see the `print` statements below).
    depths = []
    global depths
    findImbalances(rootNode, 0)

def findImbalances(rootNode, depth):
    # Depth-first traversal; leaves record/compare their depth.
    if rootNode.getLeft():
        findImbalances(rootNode.getLeft(), depth + 1)
    if rootNode.getRight():
        findImbalances(rootNode.getRight(), depth + 1)
    if not (rootNode.getLeft() or rootNode.getRight()): #indicates end of branch
        if len(depths) == 0:
            depths.append(depth)
        elif len(depths) == 1 and depths[0] != depth:
            if abs(depths[0] - depth) > 1:
                print "Unbalanced"
            depths.append(depth)
        elif not (depth in depths):
            print "Unbalanced"

#should adjust such that function returns true or false rather than simply
#printing "Unbalanced" when an imbalance is found.

# Manual smoke tests: a balanced min-tree and a deliberately skewed tree.
testList1 = range(22)
testTree1 = minTree(testList1)
checkBalanced(testTree1)
print "test1 complete"
testTree2 = BinaryNode(1,BinaryNode(2,BinaryNode(3,BinaryNode(4))),BinaryNode(5))
checkBalanced(testTree2)
print "test2 complete"
15,850 | 784f51c2c416e9eda2cdd89bc42946de90da11ed | import json
import pickle
import argparse
# Manually check to see how well the translation_probabilities_table was generated with this command line tool.
# Pass a Dutch word (not a phrase) and see the 15 most likely translations to English.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("dutch_word", help="The dutch word to translate.")
    parser.add_argument("-t", "--translation_probabilities_table", help="The translations probabilities table pkl file.", required=True)
    args = parser.parse_args()
    dutch_word = args.dutch_word
    filename = args.translation_probabilities_table
    # The with-block closes the file; the redundant explicit f.close() that
    # used to follow has been removed.
    with open(filename, "rb") as f:
        data = pickle.load(f)["data"]
    # Sort this word's candidate translations, most probable first.
    sorted_data = sorted(data[dutch_word].items(), key=lambda entry: entry[1])[::-1]
    for e in sorted_data[0:15]:
        print(e)
|
15,851 | b21c2f716a7f23f43086222cf6fb71e10e6aea8d | # Generated by Django 3.1.6 on 2021-06-14 14:37
from django.db import migrations
# Auto-generated Django schema migration (Django 3.1.6).
class Migration(migrations.Migration):
    # Must run after 0002, the migration that added the field removed here.
    dependencies = [
        ('employeeapi', '0002_employee_email'),
    ]
    # Drop the Employee.email column from the database schema.
    operations = [
        migrations.RemoveField(
            model_name='employee',
            name='email',
        ),
    ]
|
15,852 | 456b6ec4516dd47576f3aaef7f592f9b208c6fe8 | import multiprocessing as mp
def cube(x):
    """Return the cube (third power) of *x*."""
    result = x ** 3
    return result
if __name__ == "__main__":
    # The __main__ guard is required under the 'spawn' start method
    # (Windows/macOS): without it each worker re-imports this module and
    # tries to create a pool of its own.  The with-block also closes and
    # joins the pool, which the original never did.
    with mp.Pool(processes=4) as pool:
        # Submit the six jobs asynchronously, then collect the results.
        results = [pool.apply_async(cube, args=(x,)) for x in range(1, 7)]
        output = [p.get() for p in results]
    print(results)
    print(output)
|
15,853 | d73e3142421e788f86356e6dbeffd7825c085272 | import boto.s3
import cStringIO
import os
import os.path
import time
import tarfile
import zipfile
from worker.config import config
def fetch(key, logger=None):
    """Download submission archive *key* from S3 and extract it locally.

    The archive is unpacked under the local submissions directory; when
    the target directory already exists the download is skipped.  Raises
    KeyError when the S3 object does not exist and ValueError when the
    payload is neither a zip nor a tar archive.  (Python 2 module.)
    """
    template = "$AICHALLENGE_PREFIX/var/lib/aichallenge/submissions/%s"
    path = os.path.expandvars(template % key)
    if os.path.isdir(path):
        # already decompressed -- move on
        return
    if logger is not None:
        logger.info('downloading ' + key)
    # S3 credentials and bucket layout come from the worker config file.
    access_key = config.get('worker', 'aws_access_key')
    secret_key = config.get('worker', 'aws_secret_key')
    bucket = config.get('worker', 'submission_bucket')
    prefix = config.get('worker', 'submission_prefix')
    s3 = boto.s3.Connection(access_key, secret_key)
    bucket = s3.get_bucket(bucket)
    s3_key = bucket.get_key(prefix + key)
    if s3_key is None:
        raise KeyError
    # Buffer the whole archive in memory, then sniff the format by trying
    # zip first and tar second (seek(0) rewinds between attempts).
    io = cStringIO.StringIO()
    s3_key.get_contents_to_file(io)
    try:
        io.seek(0)
        zip = zipfile.ZipFile(io)
        decompress_zip(zip, path)
        return
    except zipfile.BadZipfile:
        pass
    try:
        io.seek(0)
        tar = tarfile.open(fileobj=io)
        decompress_tar(tar, path)
        return
    except tarfile.TarError:
        pass
    raise ValueError, "invalid archive"
def decompress_zip(zip, path):
    """Extract *zip* into *path* via a temp dir that is renamed into place.

    Members that would escape the target directory (absolute names or any
    name that normalizes to a ".." prefix) are skipped.  The original
    check only looked at the raw prefix, so names like "a/../../etc/x"
    slipped through.
    """
    path_tmp = path + "." + str(time.time())
    os.mkdir(path_tmp, 0o750)
    for member in zip.namelist():
        # normpath collapses interior ".." so traversal cannot hide
        # behind a harmless-looking leading component.
        norm = os.path.normpath(member)
        if not os.path.isabs(norm) and not norm.startswith(".."):
            zip.extract(member, path_tmp)
    try:
        os.rename(path_tmp, path)
    except OSError:
        pass # someone else probably did it at the same time
def decompress_tar(tar, path):
    """Extract *tar* into *path* via a temp dir that is renamed into place.

    Members that would escape the target directory (absolute names or any
    name that normalizes to a ".." prefix) are skipped.  The original
    check only looked at the raw prefix, so names like "a/../../etc/x"
    slipped through.
    """
    path_tmp = path + "." + str(time.time())
    os.mkdir(path_tmp, 0o750)
    for member in tar.getnames():
        # normpath collapses interior ".." so traversal cannot hide
        # behind a harmless-looking leading component.
        norm = os.path.normpath(member)
        if not os.path.isabs(norm) and not norm.startswith(".."):
            tar.extract(member, path_tmp)
    try:
        os.rename(path_tmp, path)
    except OSError:
        pass # someone else probably did it at the same time
15,854 | eabf53ccfe5a58e2bb2da6e32138140d754dc3b6 | from modules.attack_methods.base_neo4j import BaseAttackMethod
class EC2RoleCompromise(BaseAttackMethod):
    """Attack-graph definition: grabbing an API token via an EC2
    instance's public IP address.  Each getter below supplies one static
    piece of the attack description consumed by the Neo4j graph builder
    (presumably via BaseAttackMethod -- confirm against that base class)."""
    def get_edge_types(self):
        # Graph edge type(s) this attack can traverse.
        return ['UserPublicIPAddress']
    def get_attack_difficult(self):
        # Relative difficulty score; semantics of the scale defined elsewhere.
        return 30
    def get_attack_name(self):
        return "GrabAPIToken"
    def get_target_node(self):
        # Source and target are both EC2 instances: lateral movement.
        return 'EC2Instance'
    def get_source_node(self):
        return 'EC2Instance'
15,855 | 793f88934785e82f6694ceb738f8816a34c9326d | HOST_LDAP = 'http://127.0.0.1:8010'
TOKEN_LDAP = None
USER_MAP_LDAP = {
'mail': 'email',
'sAMAccountName': 'username',
'givenName': 'first_name'
}
|
15,856 | 2576aca732547aeee81a450ef56a8ec5708faa6e | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 13 16:34:32 2017
Script that creates the PSF of all files.
@author: jmilli
"""
import os
import numpy as np
import irdisDataHandler as i
import pdb
import vip
#ds9=vip.fits.vipDS9()
from astropy.io import ascii,fits
import matplotlib.pyplot as plt
from astropy import units as u
import pandas as pd
import sphere_utilities as sph
from contrast_utilities import contrast_curve_from_throughput
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from image_tools import distance_array
# Toggle between the local disk and the external-drive data trees.
#local = True
local = False
if local:
    pathRoot = '/diskb/Data/survey_disk'
    pathSparta = '/diskb/Data/sparta'
else:
    pathRoot = '/Volumes/SHARDDS_data/survey_disk'
    pathSparta = '/Volumes/SHARDDS_data/sparta'
#target_names = []
#psf_profile = {}
#contrast = {}
#date = {}
#atm_median_param = {}
#atm_dispersion_param = {}
#parang_var = {}
#paths_target = {}
# Per-target results accumulated by the main loop below.
delta_parang = {}
asymptote = {}
r0 = {}
# Every sub-directory of pathRoot is a target, excluding hidden entries
# and the DISK*/bad* folders.
targets_tmp = os.listdir(pathRoot)
targets = []
for target in targets_tmp:
    if not target.startswith('.f') and os.path.isdir(os.path.join(pathRoot,target)) \
            and not target.startswith('DISK') and not target.startswith('bad'):
        targets.append(target)
#targets=['HD172555']
target_with_error = []
exception = []
channels=['left','right']
# Frame sizes in pixels for the PSF and science images.
size_psf = 200
size_science_fullframe = 799
size_science = 199
## to search by index
#id_target = 8
#target_name = targets[id_target]
#print(target_name)
# to search by name
#name = 'HD181296'
#for idx,ta in enumerate(targets):
#    if ta == name:
#        print('The index of {0:s} is {1:d}'.format(name,idx))
#        id_target = idx
#        target_name = targets[id_target]
#print(target_name)
### in case you do a single channel
#ichannel=0
#channel=channels[ichannel]
def exponential_function(distance, amplitude, r0):
    """Saturating exponential: amplitude * (1 - exp(-distance / r0))."""
    decay = np.exp(-distance / r0)
    return amplitude * (1 - decay)
# Zero point of the 2MASS H-band filter in Jy.
zp = 1024
def convert_mag_to_Jy(mag):
    """Convert a 2MASS H-band magnitude into a flux density in Jansky."""
    attenuation = np.power(10, -mag / 2.5)
    return zp * attenuation
# Main reduction loop: for every target, combine the left/right IRDIS
# channels, fit an ADI-throughput model, and convert contrast curves to
# physical surface-brightness sensitivities.  Failures are collected
# rather than aborting the survey-wide run.
for id_target,target_name in enumerate(targets):
#for id_target,target_name in enumerate(targets[1:]):
    print('Target: {0:s}'.format(target_name))
    try:
        pathTarget = os.path.join(pathRoot,target_name)
        pathRaw = os.path.join(pathTarget,'raw')
        pathOut = os.path.join(pathTarget,'pipeline')
        fileNames = 'SPHER.*.fits'
        irdis_data = i.IrdisDataHandler(pathRaw,pathOut,fileNames,name=target_name,coordOrigin='derot')
        px = irdis_data.pixel_scale
        # HD82943 has a hard-coded flux; otherwise convert the Simbad H mag.
        if 'HD82943' in target_name:
            star_flux_Jy = 8.39
        else:
            mag_H = irdis_data.simbad_dico['simbad_FLUX_H']
            star_flux_Jy = convert_mag_to_Jy(mag_H)
        scaling_factor = irdis_data.get_psf_scaling_factor()
        parang,_,_ = irdis_data.get_parang(frameType='O',save=False)
        delta_parang[target_name] = np.abs(parang[-1]-parang[0])
#%%
        # Aperture photometry on the summed PSF: sky level from a 70-80 px
        # annulus, then a curve of growth out to the sky annulus.
        psf_data = pd.read_csv(os.path.join(pathOut,'psf_data.csv'))
        psf_sum = fits.getdata(os.path.join(pathOut,'{0:s}_psf_sum.fits'.format(target_name)))
        distarr_psf = distance_array((psf_sum.shape[0],psf_sum.shape[1]),verbose=True)
        sky_inner = 70
        sky_outer = 80
        sky_value=np.median(psf_sum[np.logical_and(distarr_psf<sky_outer,distarr_psf>sky_inner)])
        radius_array = np.arange(3,sky_inner)
        psf_flux_array = [np.nansum(psf_sum[distarr_psf<r]-sky_value) for r in radius_array]
        plt.close()
        plt.plot(radius_array,psf_flux_array)
        plt.xlabel('Radius in px')
        plt.ylabel('Integrated flux in ADU')
        plt.savefig(os.path.join(pathOut,'PSF_integrated_flux.pdf'))
        plt.close()
        ref_flux = np.max(psf_flux_array)*scaling_factor[0]
        red_types = ['pca_klip_010','cadi']
        for ired,red_type in enumerate(red_types):
            # Sum the two channels of this reduction and save the result.
            cadi_sum = fits.getdata(os.path.join(pathOut,'{0:s}_{1:d}x{1:d}_{2:s}_{3:s}.fits'.format(target_name,size_science,\
                channels[0],red_type))) + fits.getdata(os.path.join(pathOut,\
                '{0:s}_{1:d}x{1:d}_{2:s}_{3:s}.fits'.format(target_name,size_science,channels[1],red_type)))
            fits.writeto(os.path.join(pathOut,'{0:s}_{1:d}x{1:d}_{2:s}_{3:s}.fits'.format(target_name,size_science,'sum',red_type)),\
                cadi_sum,header=irdis_data.firstHeader,clobber=True,output_verify='ignore')
            # Fit the saturating-exponential throughput model per channel.
            panda_contrast_curve_left = pd.read_csv(os.path.join(pathOut,\
                '{0:s}_contrast_curve_{1:d}x{1:d}_{2:s}_sorted_{3:s}.csv'.format(target_name,size_science,channels[0],red_type)))
            popt_left, pcov_left = curve_fit(exponential_function, panda_contrast_curve_left['distance'], \
                panda_contrast_curve_left['throughput'],bounds=([0.,10.],[1.,2000.]))
            panda_contrast_curve_right = pd.read_csv(os.path.join(pathOut,\
                '{0:s}_contrast_curve_{1:d}x{1:d}_{2:s}_sorted_{3:s}.csv'.format(target_name,size_science,channels[1],red_type)))
            popt_right, pcov_right = curve_fit(exponential_function, panda_contrast_curve_right['distance'], \
                panda_contrast_curve_right['throughput'],bounds=([0.,10.],[1.,2000.]))
            # Interpolate both channels onto a common separation grid and
            # average their throughputs.
            start_distance = np.fix(np.min(np.append(panda_contrast_curve_left['distance'],panda_contrast_curve_right['distance'])))+1
            end_distance = np.fix(np.max(np.append(panda_contrast_curve_left['distance'],panda_contrast_curve_right['distance'])))+1
            mean_distance = np.arange(start_distance,end_distance)
            interp_function_left = interp1d(panda_contrast_curve_left['distance'],panda_contrast_curve_left['throughput'],\
                kind='linear',bounds_error=False,fill_value='extrapolate')
            throughput_interp_left = interp_function_left(mean_distance)
            interp_function_right = interp1d(panda_contrast_curve_right['distance'],panda_contrast_curve_right['throughput'],\
                kind='linear',bounds_error=False,fill_value='extrapolate')
            throughput_interp_right = interp_function_right(mean_distance)
            mean_throughput = (throughput_interp_left+throughput_interp_right)/2.
            panda_cadi_sum_contrast = contrast_curve_from_throughput(cadi_sum,\
                np.mean(psf_data['fwhm']), px,\
                np.sum(psf_data['flux'])*psf_data['scaling'][0],\
                throughput=(mean_distance,mean_throughput),sigma=5)
            panda_cadi_sum_contrast['sensitivity (Student) [mJy/arcsec^2]'] = panda_cadi_sum_contrast['sensitivity (Student)']*\
                (np.sum(psf_data['flux'])*psf_data['scaling'][0])/ref_flux*star_flux_Jy*1000/(px**2)
            panda_cadi_sum_contrast.to_csv(os.path.join(pathOut,'{0:s}_contrast_curve_{1:d}x{1:d}_{2:s}_sorted_{3:s}.csv'.format(target_name,size_science,'sum',red_type)))
#            plt.close()
#            plt.figure(ired)
#            plt.semilogy(panda_contrast_curve_left['distance'],panda_contrast_curve_left['sensitivity (Student)'], 'r-', label="Left")
#            plt.semilogy(panda_contrast_curve_right['distance'],panda_contrast_curve_right['sensitivity (Student)'], 'b-', label="Right")
#            plt.semilogy(panda_cadi_sum_contrast['distance'],panda_cadi_sum_contrast['sensitivity (Student)'], 'g-', label="Sum")
#            plt.legend(frameon=False)
#            plt.xlabel('Separation in px')
#            plt.ylabel('Contrast')
#            plt.savefig(os.path.join(pathOut,'{0:s}_contrast_curve_{1:d}x{1:d}_{2:s}_sorted_{3:s}.pdf'.format(target_name,size_science,'sum',red_type)))
#            plt.close()
#            plt.figure(0)
#            plt.plot(panda_contrast_curve_left['distance'],panda_contrast_curve_left['throughput'], 'ro', label="Measured throughput left")
#            plt.plot(panda_contrast_curve_left['distance'], exponential_function(panda_contrast_curve_left['distance'], *popt_left), 'r-', label="Fitted throughput left")
#            plt.plot(panda_contrast_curve_right['distance'],panda_contrast_curve_right['throughput'], 'bo', label="Measured throughput right")
#            plt.plot(panda_contrast_curve_right['distance'], exponential_function(panda_contrast_curve_right['distance'], *popt_right), 'b-', label="Fitted throughput right")
#            plt.legend(frameon=False)
            # NOTE: only the fit of the LAST red_type survives the loop.
            asymptote[target_name] = (popt_left[0]+popt_right[0])/2.
            r0[target_name] = (popt_left[1]+popt_right[1])/2.
        cadi_fullframe_name_left = os.path.join(pathOut,'{0:s}_{1:d}x{1:d}_{2:s}_cadi.fits'.format(target_name,size_science_fullframe,channels[0]))
        cadi_fullframe_name_right = os.path.join(pathOut,'{0:s}_{1:d}x{1:d}_{2:s}_cadi.fits'.format(target_name,size_science_fullframe,channels[1]))
#        if (os.path.isfile(cadi_fullframe_name_left) and os.path.isfile(cadi_fullframe_name_right)) == False:
#            continue
#        else:
        # Repeat for the full-frame cADI images, extrapolating the
        # throughput beyond the measured range with the fitted model.
        cadi_sum_fullframe = fits.getdata(os.path.join(pathOut,'{0:s}_{1:d}x{1:d}_{2:s}_cadi.fits'.format(target_name,size_science_fullframe,\
            channels[0]))) + fits.getdata(os.path.join(pathOut,\
            '{0:s}_{1:d}x{1:d}_{2:s}_cadi.fits'.format(target_name,size_science_fullframe,channels[1])))
        fits.writeto(os.path.join(pathOut,'{0:s}_{1:d}x{1:d}_{2:s}_cadi.fits'.format(target_name,size_science_fullframe,'sum')),cadi_sum_fullframe,header=irdis_data.firstHeader,clobber=True,output_verify='ignore')
        distance_extrapolated_fullframe = np.arange(np.max(mean_distance)+1,size_science_fullframe/2.)
        distance_fullframe = np.append(mean_distance,distance_extrapolated_fullframe)
        throughput_fullframe = np.append(mean_throughput,exponential_function(distance_extrapolated_fullframe, *(popt_right+popt_left)/2.))
#        plt.close()
#        plt.semilogx(panda_contrast_curve_left['distance'],panda_contrast_curve_left['throughput'], 'ro', label="Measured throughput left")
##        plt.semilogx(panda_contrast_curve_left['distance'], exponential_function(panda_contrast_curve_left['distance'], *popt_left), 'r-', label="Fitted throughput left")
#        plt.semilogx(panda_contrast_curve_right['distance'],panda_contrast_curve_right['throughput'], 'bo', label="Measured throughput right")
##        plt.semilogx(panda_contrast_curve_right['distance'], exponential_function(panda_contrast_curve_right['distance'], *popt_right), 'b-', label="Fitted throughput right")
#        plt.semilogx(distance_fullframe,throughput_fullframe,label="Extrapolated throughput sum")
#        plt.legend(frameon=False)
#        plt.xlabel('Separation in px')
#        plt.ylabel('Throughput')
#        plt.savefig(os.path.join(pathOut,'{0:s}_extrapolated_throughput_{1:s}.pdf'.format(target_name,red_type)))
        panda_cadi_sum_fullframe_contrast = contrast_curve_from_throughput(cadi_sum_fullframe,\
            np.mean(psf_data['fwhm']), px,\
            np.sum(psf_data['flux'])*psf_data['scaling'][0],\
            throughput=(distance_fullframe,throughput_fullframe),sigma=5)
        panda_cadi_sum_fullframe_contrast['sensitivity (Student) [mJy/arcsec^2]'] = panda_cadi_sum_fullframe_contrast['sensitivity (Student)']*\
            (np.sum(psf_data['flux'])*psf_data['scaling'][0])/ref_flux*star_flux_Jy*1000/(px**2)
        panda_cadi_sum_fullframe_contrast.to_csv(os.path.join(pathOut,'{0:s}_contrast_curve_{1:d}x{1:d}_sum_sorted_cadi.csv'.format(target_name,size_science_fullframe)))
#%% rebin by a facto 4 of the images
    except Exception as e:
        # Record the failure and carry on with the remaining targets.
        print('A problem occured in {0:s}'.format(target_name))
        print(e)
        target_with_error.append(target_name)
        exception.append(e)
# Persist the names of the targets that raised, one per line, for follow-up.
if target_with_error:
    report_path = os.path.join(pathRoot, 'failedtargets.txt')
    with open(report_path, 'w') as report:
        report.write(' \n'.join(target_with_error))
#plt.scatter(np.asarray(delta_parang.values()),np.asarray(asymptote.values()))
#plt.ylabel('Highest throughput')
#plt.xlabel('Amplitude of parallactic angle variation')
#plt.grid()
#plt.savefig('/Users/jmilli/Desktop/ADI_max_throughput.pdf')
#
#r0_resel = np.asarray(r0.values())/4.
#plt.semilogx(np.asarray(delta_parang.values()),r0_resel,'bo')
#plt.ylabel('Separation in resel to achieve 1/e throughput')
#plt.xlabel('Amplitude of parallactic angle variation in $\circ$')
#plt.grid()
#plt.savefig('/Users/jmilli/Desktop/ADI_throughput_separation.pdf')
#
#nb_resel_scanned_at_r0 = r0_resel*np.deg2rad(np.asarray(delta_parang.values()))
#plt.scatter(np.asarray(delta_parang.values()),nb_resel_scanned_at_r0)
#plt.ylabel('Rotation in resels to achieve 1/e throughput')
#plt.xlabel('Amplitude of parallactic angle variation')
#plt.grid()
#plt.savefig('/Users/jmilli/Desktop/ADI_throughput_rotation.pdf')
|
15,857 | 718da997426e6dcc1db8f94d2fcfe03ea19bddb1 | # https://www.acmicpc.net/problem/2110
import sys
def max_min_distance(c, homes):
    """Largest minimum pairwise distance achievable when installing *c*
    routers on the sorted house coordinates *homes* (BOJ 2110).

    Binary search on the answer.  The lower bound is 1: the original code
    started at ``homes[1] - homes[0]`` -- the FIRST gap, which can exceed
    the true answer (homes [1, 10, 11] with c=3 has answer 1) and made
    the script print 0.
    """
    start, end = 1, homes[-1] - homes[0]
    best = 0
    while start <= end:
        mid = (start + end) // 2
        # Greedy feasibility check: how many routers fit with spacing >= mid?
        count = 1
        last = homes[0]
        for pos in homes[1:]:
            if pos - last >= mid:
                last = pos
                count += 1
        if count >= c:
            best = mid  # feasible -> try a larger spacing
            start = mid + 1
        else:
            end = mid - 1
    return best
if __name__ == "__main__":
    # Guard keeps stdin reads from running (and hanging) on import.
    n, c = map(int, sys.stdin.readline().split())
    homes = sorted(int(line) for line in sys.stdin.readlines())
    print(max_min_distance(c, homes))
|
15,858 | 84af944c2bb57facc1d60cc7b64bb244eaf39a7d | import sae
from honey import wsgi
# WSGI entry point for Sina App Engine: wrap the project's WSGI app.
application = sae.create_wsgi_app(wsgi.application)
15,859 | 51085f185c1b0a8ef333ad4b55f847e3edf6b4a7 | import numpy as np
from mpmath import mp
mp.dps = 500
def construct_s(bh):
    """Column vector holding the signs of the nonzero entries of *bh*."""
    signs = [np.sign(coef) for coef in bh if coef != 0]
    column = np.array(signs)
    return column.reshape((len(signs), 1))
def construct_A_XA_Ac_XAc_bhA(X, bh, n, p):
    """Split the *p* design columns into the active set A (nonzero
    coefficients of *bh*) and its complement Ac; return the index lists,
    the corresponding sub-matrices of *X*, and the active coefficients as
    a column vector.  (*n* is kept for signature compatibility.)"""
    A = [j for j in range(p) if bh[j] != 0]
    Ac = [j for j in range(p) if bh[j] == 0]
    XA = X[:, A]
    XAc = X[:, Ac]
    bhA = np.array([bh[j] for j in A]).reshape((len(A), 1))
    return A, XA, Ac, XAc, bhA
def check_KKT(XA, XAc, y, bhA, lamda, n):
    """Print the (scaled) KKT stationarity terms of the lasso solution:
    entries for the active set should be +/-1, inactive ones in (-1, 1)."""
    print("\nCheck Active")
    residual = y - np.dot(XA, bhA)
    print(np.dot(XA.T, residual) / (lamda * n))
    if XAc is not None:
        print("\nCheck In Active")
        residual = y - np.dot(XA, bhA)
        print(np.dot(XAc.T, residual) / (lamda * n))
def construct_test_statistic(j, XA, y, A):
    """Direction eta_j = XA (XA^T XA)^+ e_j for feature *j* of the active
    set *A*, together with the observed statistic eta_j^T y."""
    indicator = [1 if member == j else 0 for member in A]
    ej = np.array(indicator).reshape((len(A), 1))
    gram_pinv = np.linalg.pinv(np.dot(XA.T, XA))
    etaj = np.dot(np.dot(XA, gram_pinv), ej)
    etajTy = np.dot(etaj.T, y)[0][0]
    return etaj, etajTy
def compute_yz(y, etaj, zk, n):
    """Decompose *y* along *etaj*: yz = (I - etaj etaj^T/||etaj||^2) y + b*zk
    with b = etaj/||etaj||^2, so that etaj^T yz equals *zk*."""
    sq_norm = np.linalg.norm(etaj) ** 2
    projector = np.identity(n) - np.dot(etaj, etaj.T) / sq_norm
    b = etaj / sq_norm
    yz = np.dot(projector, y) + b * zk
    return yz, b
def pivot(A, bh, list_active_set, list_zk, list_bhz, etaj, etajTy, cov, tn_mu, type):
    # Truncated-normal pivot of etajTy conditioned on the selection event:
    # type 'A'  -> solutions whose active set equals A;
    # type 'As' -> solutions whose signed coefficient pattern matches bh.
    # Returns None when the truncation region has zero probability mass.
    tn_sigma = np.sqrt(np.dot(np.dot(etaj.T, cov), etaj))[0][0]
    # Collect the z-segments [zk_i, zk_{i+1}) on which the selection event
    # holds (1e-10 keeps segments half-open / non-overlapping).
    z_interval = []
    for i in range(len(list_active_set)):
        if type == 'As':
            if np.array_equal(np.sign(bh), np.sign(list_bhz[i])):
                z_interval.append([list_zk[i], list_zk[i + 1] - 1e-10])
        if type == 'A':
            if np.array_equal(A, list_active_set[i]):
                z_interval.append([list_zk[i], list_zk[i + 1] - 1e-10])
    # Merge adjacent segments whose endpoints are within 0.01.
    new_z_interval = []
    for each_interval in z_interval:
        if len(new_z_interval) == 0:
            new_z_interval.append(each_interval)
        else:
            sub = each_interval[0] - new_z_interval[-1][1]
            if abs(sub) < 0.01:
                new_z_interval[-1][1] = each_interval[1]
            else:
                new_z_interval.append(each_interval)
    z_interval = new_z_interval
    # CDF mass of the full truncation region (denominator) and of the part
    # below the observed statistic (numerator), using mpmath's ncdf at the
    # module-level precision for numerical stability in the tails.
    numerator = 0
    denominator = 0
    for each_interval in z_interval:
        al = each_interval[0]
        ar = each_interval[1]
        denominator = denominator + mp.ncdf((ar - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
        if etajTy >= ar:
            numerator = numerator + mp.ncdf((ar - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
        elif (etajTy >= al) and (etajTy < ar):
            numerator = numerator + mp.ncdf((etajTy - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
    if denominator != 0:
        return float(numerator/denominator)
    else:
        return None
def pivot_with_specified_interval(z_interval, etaj, etajTy, cov, tn_mu):
    """Truncated-normal pivot of etajTy over the union of intervals
    *z_interval*, with mean *tn_mu* and variance etaj^T cov etaj.
    Returns None when the truncation region carries zero mass."""
    tn_sigma = np.sqrt(np.dot(np.dot(etaj.T, cov), etaj))[0][0]
    def cdf(v):
        return mp.ncdf((v - tn_mu) / tn_sigma)
    numerator = 0
    denominator = 0
    for each_interval in z_interval:
        al = each_interval[0]
        ar = each_interval[1]
        denominator = denominator + cdf(ar) - cdf(al)
        if etajTy >= ar:
            numerator = numerator + cdf(ar) - cdf(al)
        elif al <= etajTy < ar:
            numerator = numerator + cdf(etajTy) - cdf(al)
    if denominator != 0:
        return float(numerator / denominator)
    return None
def p_value(A, bh, list_active_set, list_zk, list_bhz, etaj, etajTy, cov):
    """Two-sided selective p-value from the active-set-conditioned pivot."""
    piv = pivot(A, bh, list_active_set, list_zk, list_bhz, etaj, etajTy, cov, 0, 'A')
    return 2 * min(piv, 1 - piv)
|
15,860 | 1a01df5f519b0f4d5f5b4581be6ffc6c3f149d63 | from RunLenCode import *
from SEA import *
from twice_encode import *
# Benchmark run-length vs two-pass encoding on random Sea grids of growing
# size: record compressed length as a fraction of the raw size (2 bits per
# cell -> 2*scale*scale).  The original rebuilt the size list inside the
# loop and indexed it by a counter, and carried a dead `scale=1` initializer.
stastic = []
stastic_2 = []
for scale in [10, 33, 100, 319, 1000, 3190]:
    # Random sea of the given square size with 3 states.
    s = Sea(scale, scale, 3)
    s.init_random()
    code = RLC().Sea2Code(s)
    stastic.append(len(code) / (2 * scale * scale))
    code_2 = Twice_encode().Sea2Tcode(s)
    stastic_2.append(len(code_2) / (2 * scale * scale))
print(stastic)
print(stastic_2)
15,861 | 3322d62bc64fa0057263fd19af3c208d1b27c379 | # from celery import task
# from django.core.mail import send_mail
# from .models import ShopOrder
#
#
# @task
# def order_created(order_id):
# order = ShopOrder.objects.get(id=order_id)
# subject = f"Order nr. {order_id}"
# message = f"Dear {order.client.first_name},\n\nYou have successfully placed an order. Your order id is {order.id}."
# mail_sent = send_mail(subject,
# message,
# 'admin@myshop.com',
# [order.client.email])
# return mail_sent
|
15,862 | 4469d5757f502fe810c83f08a8cbec651b2a4ccf | import rootpy.plotting.views as views
import math
def quad(*xs):
    """Add the arguments in quadrature: sqrt(x1^2 + x2^2 + ...)."""
    total = sum(value * value for value in xs)
    return math.sqrt(total)
class MedianView(object):
    ''' Takes high and low, returns median assigning half the diff as error. '''
    def __init__(self, highv=None, lowv=None, centv=None):
        """Store the high/low systematic views; the central view defaults
        to the bin-wise average (sum scaled by 0.5) of low and high."""
        self.highv = highv
        self.lowv = lowv
        # `is None` rather than truthiness, so an explicitly supplied view
        # object that happens to be falsy is still honoured; also use the
        # parameters consistently (the old code mixed `lowv` with
        # `self.highv` in the same expression).
        if centv is None:
            centv = views.ScaleView(views.SumView(lowv, highv), 0.5)
        self.centv = centv
    @staticmethod
    def apply_view(central_hist, high_hist, low_hist=None):
        """Return a copy of *central_hist* whose bin errors combine the
        statistical error with the high (preferred) or low systematic
        shift, added in quadrature.  At least one of *high_hist* /
        *low_hist* must be provided."""
        ret_hist = central_hist.Clone()
        for bin in range(1, ret_hist.GetNbinsX() + 1):
            if high_hist:
                shift = abs(high_hist.GetBinContent(bin) - central_hist.GetBinContent(bin))
            else:
                shift = abs(central_hist.GetBinContent(bin) - low_hist.GetBinContent(bin))
            ret_hist.SetBinError(bin, quad(central_hist.GetBinError(bin), shift))
        return ret_hist
    def Get(self, path):
        """Fetch *path* from the central view and decorate its bin errors
        with the high/low systematic spread."""
        central_hist = self.centv.Get(path)
        high_hist = self.highv.Get(path) if self.highv else None
        low_hist = self.lowv.Get(path) if self.lowv else None
        return self.apply_view(central_hist, high_hist, low_hist)
|
15,863 | 8f115212810b39e02330dae358cba9f212abf937 | from django.urls import path
from .views import SignUpView, register, user_cad_sucesso
from . import views
# URL routes for the accounts app.
urlpatterns = [
    # Class-based signup form.
    path('signup/', SignUpView.as_view(), name='signup'),
    # Function-based registration view.
    path("register/", views.register, name="register"),
    # Post-registration success page.
    # NOTE(review): route spelling 'user_cad_sucess' kept as-is; changing
    # it would break existing links even though the view name differs.
    path("user_cad_sucess/", views.user_cad_sucesso, name="user_cad_sucesso"),
]
15,864 | ad1dfaa588d6366e3dc517e8ad703749168c1bf2 | import scipy as _sp
import time as _time
import scipy.sparse as _sprs
import OpenPNM as _op
from scipy.spatial.distance import cdist as dist
def find_path(network, pore_pairs, weights=None):
    r"""
    Find the shortest path between pairs of pores.
    Parameters
    ----------
    network : OpenPNM Network Object
        The Network object on which the search should be performed
    pore_pairs : array_like
        An N x 2 array containing N pairs of pores for which the shortest
        path is sought.
    weights : array_like, optional
        An Nt-long list of throat weights for the search.  Typically this
        would be the throat lengths, but could also be used to represent
        the phase configuration.  If no weights are given then the
        standard topological connections of the Network are used.
    Returns
    -------
    A dictionary containing both the pores and throats that define the
    shortest path connecting each pair of input pores.
    Notes
    -----
    The shortest path is found using Dijkstra's algorithm included in the
    scipy.sparse.csgraph module
    TODO: The returned throat path contains the correct values, but not
    necessarily in the true order
    Examples
    --------
    >>> import OpenPNM
    >>> import OpenPNM.Utilities.misc as misc
    >>> pn = OpenPNM.Network.Cubic(shape=[3, 3, 3])
    >>> a = misc.find_path(network=pn, pore_pairs=[[0, 4], [0, 10]])
    >>> a['pores']
    [array([0, 1, 4]), array([ 0, 1, 10])]
    >>> a['throats']
    [array([ 0, 19]), array([ 0, 37])]
    """
    Ps = _sp.array(pore_pairs, ndmin=2)
    # Unit weights reproduce plain topological (hop-count) distance.
    if weights is None:
        weights = _sp.ones_like(network.Ts)
    graph = network.create_adjacency_matrix(data=weights,
                                            sprsfmt='csr',
                                            dropzeros=False)
    # Dijkstra from every source pore at once; keep only the
    # predecessor matrix ([1] of the returned tuple).
    paths = _sprs.csgraph.dijkstra(csgraph=graph,
                                   indices=Ps[:, 0],
                                   return_predecessors=True)[1]
    pores = []
    throats = []
    for row in range(0, _sp.shape(Ps)[0]):
        j = Ps[row][1]
        ans = []
        # Walk predecessors back from the destination; scipy marks
        # "no predecessor" (i.e. the source) with -9999.
        while paths[row][j] > -9999:
            ans.append(j)
            j = paths[row][j]
        ans.append(Ps[row][0])
        ans.reverse()
        pores.append(_sp.array(ans))
        throats.append(network.find_neighbor_throats(pores=ans,
                                                     mode='intersection'))
    pdict = _op.Base.Tools.PrintableDict
    dict_ = pdict({'pores': pores, 'throats': throats})
    return dict_
def iscoplanar(coords):
    r'''
    Determines if given pores are coplanar with each other

    Parameters
    ----------
    coords : array_like
        List of pore coords to check for coplanarity.  At least 3 pores
        are required.

    Returns
    -------
    A boolean value of whether given points are coplanar (True) or not
    (False)
    '''
    coords = _sp.array(coords, ndmin=1)
    if _sp.shape(coords)[0] < 3:
        raise Exception('At least 3 input pores are required')
    Px = coords[:, 0]
    Py = coords[:, 1]
    Pz = coords[:, 2]
    # Do easy check first, for common coordinate
    if _sp.shape(_sp.unique(Px))[0] == 1:
        return True
    if _sp.shape(_sp.unique(Py))[0] == 1:
        return True
    if _sp.shape(_sp.unique(Pz))[0] == 1:
        return True
    # Perform rigorous check using vector algebra: normal of the plane
    # spanned by the first three points.
    n1 = _sp.array((Px[1] - Px[0], Py[1] - Py[0], Pz[1] - Pz[0])).T
    n2 = _sp.array((Px[2] - Px[1], Py[2] - Py[1], Pz[2] - Pz[1])).T
    n = _sp.cross(n1, n2)
    # Check EVERY remaining point against that plane.  The old code
    # (a) sliced Px[1:-1], silently skipping the last point, and
    # (b) summed the SIGNED dot products, so offsets of opposite sign
    # could cancel and report a non-planar set as coplanar.
    r = _sp.array((Px[1:] - Px[0], Py[1:] - Py[0], Pz[1:] - Pz[0]))
    n_dot = _sp.dot(n, r)
    return bool(_sp.sum(_sp.absolute(n_dot)) == 0)
def tic():
    r'''
    Homemade version of matlab tic and toc function, tic starts or resets
    the clock, toc reports the time since the last call of tic.
    '''
    # Stored at module level so the matching toc() call can read it.
    global _startTime_for_tictoc
    _startTime_for_tictoc = _time.time()
def toc(quiet=False):
    r'''
    Report the seconds elapsed since the last call of ``tic``.

    Parameters
    ----------
    quiet : Boolean
        If False (default) the elapsed time is printed to the console and
        nothing is returned.  If True the message is suppressed and the
        elapsed time is returned instead.
    '''
    if '_startTime_for_tictoc' not in globals():
        print("Toc: start time not set")
        return
    elapsed = _time.time() - _startTime_for_tictoc
    if quiet is False:
        print('Elapsed time in seconds: ', elapsed)
    else:
        return elapsed
def unique_list(input_list):
    r"""
    For a given list (of points) remove any duplicates, preserving the
    order of first appearance.

    Replaces the original O(n^2) pairwise scan, which also compared only
    the first three components, so points with more than 3 dimensions
    were never considered equal (duplicates were kept).  Comparison is by
    full component tuple, for any dimensionality.
    """
    output_list = []
    seen = set()
    for item in input_list:
        fingerprint = tuple(item)
        if fingerprint not in seen:
            seen.add(fingerprint)
            output_list.append(item)
    return output_list
def amalgamate_data(objs=[], delimiter='_'):
    r"""
    Returns a dictionary containing ALL pore data from all netowrk and/or
    phase objects received as arguments
    Parameters
    ----------
    obj : list of OpenPNM objects
        The network and Phase objects whose data should be amalgamated
        into a single dict
    delimiter : string
        The delimiter to place between the prop name and the object name.
        For instance 'pore.air_molar_density' or 'pore.air|molar_density'.
        The use of underscores can be problematic for reloading the data
        since they are also used in multiple word properties.  The default
        is '_' for backwards compatibility, but the '|' option is
        preferred.
    Returns
    -------
    A standard Python dict containing all the data from the supplied
    OpenPNM objects
    """
    if type(objs) is not list:
        objs = list(objs)
    data_amalgamated = {}
    dlim = delimiter
    # Geometry-specific vertex/centroid props are skipped: they are not
    # plain numeric arrays suitable for a flat export.
    exclusion_list = ['pore.centroid', 'pore.vertices', 'throat.centroid',
                      'throat.offset_vertices', 'throat.vertices', 'throat.normal',
                      'throat.perimeter', 'pore.vert_index', 'throat.vert_index']
    for item in objs:
        # Classify the object by the names in its method-resolution order.
        mro = [module.__name__ for module in item.__class__.__mro__]
        # If Network object, combine Geometry and Network keys
        if 'GenericNetwork' in mro:
            keys = []
            for key in list(item.keys()):
                keys.append(key)
            for geom in item._geometries:
                for key in list(geom.keys()):
                    if key not in keys:
                        keys.append(key)
        else:
            if 'GenericPhase' in mro:
                keys = []
                for key in list(item.keys()):
                    keys.append(key)
                for physics in item._physics:
                    for key in list(physics.keys()):
                        if key not in keys:
                            keys.append(key)
        # NOTE(review): if an object is neither a GenericNetwork nor a
        # GenericPhase, `keys` is left over from the previous iteration
        # (or raises UnboundLocalError on the first) -- confirm intended.
        keys.sort()
        for key in keys:
            if key not in exclusion_list:
                try:
                    # Skip arrays containing inf; TypeError (e.g. object
                    # arrays that amax cannot reduce) is silently ignored.
                    if _sp.amax(item[key]) < _sp.inf:
                        element = key.split('.')[0]
                        propname = key.split('.')[1]
                        dict_name = element + '.' + item.name + dlim + propname
                        if key in ['pore.coords', 'throat.conns',
                                   'pore.all', 'throat.all']:
                            dict_name = key
                        data_amalgamated.update({dict_name: item[key]})
                except TypeError:
                    pass
    return data_amalgamated
def conduit_lengths(network, throats=None, mode='pore'):
    r"""
    Return the respective lengths of the conduit components defined by the throat
    conns P1 T P2
    mode = 'pore' - uses pore coordinates
    mode = 'centroid' uses pore and throat centroids

    Returns an Nt x 3 array of (pore1 half-length, throat length,
    pore2 half-length), restricted to *throats* when given.
    """
    if throats is None:
        throats = network.throats()
    Ps = network['throat.conns']
    pdia = network['pore.diameter']
    if mode == 'centroid':
        try:
            pcentroids = network['pore.centroid']
            tcentroids = network['throat.centroid']
            # Fall back to coordinate mode when any centroid is undefined.
            if _sp.sum(_sp.isnan(pcentroids)) + _sp.sum(_sp.isnan(tcentroids)) > 0:
                mode = 'pore'
            else:
                # Pore-to-throat centroid distance minus half the throat.
                plen1 = _sp.sqrt(_sp.sum(_sp.square(pcentroids[Ps[:, 0]] -
                                 tcentroids), 1))-network['throat.length']/2
                plen2 = _sp.sqrt(_sp.sum(_sp.square(pcentroids[Ps[:, 1]] -
                                 tcentroids), 1))-network['throat.length']/2
        except KeyError:
            mode = 'pore'
    if mode == 'pore':
        # Find half-lengths of each pore
        pcoords = network['pore.coords']
        # Find the pore-to-pore distance, minus the throat length
        lengths = _sp.sqrt(_sp.sum(_sp.square(pcoords[Ps[:, 0]] -
                           pcoords[Ps[:, 1]]), 1)) - network['throat.length']
        # Clamp to a tiny positive value rather than allowing negatives.
        lengths[lengths < 0.0] = 2e-9
        # Calculate the fraction of that distance from the first pore
        try:
            fractions = pdia[Ps[:, 0]]/(pdia[Ps[:, 0]] + pdia[Ps[:, 1]])
            # Don't allow zero lengths
#            fractions[fractions == 0.0] = 0.5
#            fractions[fractions == 1.0] = 0.5
        # NOTE(review): bare except -- presumably guards missing/odd
        # diameter data, but it also hides genuine errors; confirm.
        except:
            fractions = 0.5
        plen1 = lengths*fractions
        plen2 = lengths*(1-fractions)
    return _sp.vstack((plen1, network['throat.length'], plen2)).T[throats]
|
15,865 | fe39683628096a28a29237364fe6407771285c41 | from corehq.apps.reports.datatables import DataTablesColumn
from corehq.apps.reports.datatables import DataTablesHeader
from custom.icds_reports.utils import ICDSMixin
class BaseIdentification(ICDSMixin):
    """Report section 1.a: identification / basic-information table with
    a blank row-label column plus Name and Code columns."""
    title = '1.a Identification and Basic Information'
    slug = 'identification'
    has_sections = False
    # NOTE(review): mutable class-level default; shared across instances
    # if anything ever mutates it in place.
    subtitle = []
    posttitle = None
    @property
    def headers(self):
        # Sorting is disabled on every column: the table is informational.
        return DataTablesHeader(
            DataTablesColumn('', sortable=False),
            DataTablesColumn('Name', sortable=False),
            DataTablesColumn('Code', sortable=False)
        )
15,866 | f25876b0be627d2b73aff2be0bb919c2e027c8b2 |
'''
>>> Rutgers SALT Supernova Spectral Reduction Pipeline <<<
This module prints the history of pipeline processes run on
each data file in the working directory.
It also contains functions for modifying specific keywords
in a FITS header. These keywords, notably 'RUPIPE' and
'RUIMGTYP', let the pipeline sort files into global dictionaries
automatically if the pipeline is run multiple times in a working
directory.
Please refer to the documentation for more information
about the purpose of maintaining a pipeline history.
*** Modifications ***
Sept. 30, 2013: Created module. -Viraj Pandya
'''
import sys # Standard python module used mainly to exit the pipeline.
import os # Standard os module used mainly for changing directories.
import shutil # Standard shutil module used mainly for copying files.
from glob import glob # For grabbing a list of filenames in working directory.
import numpy as np # NumPy is fundamental to many computations the pipeline does.
from scipy.interpolate import interp1d # For interpolation in 1-D spectra.
from scipy.optimize import leastsq # For fitting functions to 1-D spectra.
import lacosmicx # For removal of cosmic rays from 2-D science images.
import ds9 # To open 2-D images for the user's convenience.
import pyfits # To access and modify FITS data files.
# These are the pipeline modules which may be called by this driver or each other.
import dicts # Initializes the pipeline's global dictionaries which will be used across modules.
import params # Customizable parameters for the pipeline.
import pyrafCalls # Contains the functions which set the parameters for and call single PyRAF tasks.
def history():
    """Interactively report the pipeline state of the working directory.

    Prompts for a choice: '0' dumps every global file dictionary from the
    dicts module; '1' prints, for each *.fits file, its 'RUIMGTYP'
    classification and the list of 'RUPIPE' process-flag characters read
    from the 0-header (falling back to 'OBJECT' / an empty list when the
    keywords are absent).
    """
    # inform user about what 'RUIMGTYP' classes exist.
    print "This task, history(), will print the 'RUIMGTYP' and 'RUPIPE' keywords present in each file's 0-header."
    print "RUIMGTYP can be: "
    print "science, arc, standard, flat, arc_sci, arc_std, bpm_sci, bpm_arc, bpm_std, bwm_sci, bwm_std."
    print "If RUIMGTYP is not found in the header, OBJECT will be printed in its place."
    print "RUPIPE will be shown as characters in a list each of which correspond to some particular pipeline process as defined below."
    print "The combination of RUIMGTYP and RUPIPE will tell you what processes have been run on that image."
    print "Here is what the characters in RUPIPE stand for; the order roughly corresponds to the full data reduction process."
    print "c: combined or count-scaled"
    print "n: normalized or flux-scaled"
    print "f: flat-fielded"
    print "o: original (bad pixel mask)"
    print "l: LAcosmicx-affiliated"
    print "a: specidentified arc (using PySALT)"
    print "r: specrectified (wavelength-calibrated using PySALT)"
    print "b: background-subtracted"
    print "e: extracted"
    print "j: apsum-extracted science for sky and sigma spectra"
    print "i: identified arc spectrum (PyRAF, not PySALT)"
    print "d: dispersion-corrected (using PyRAF's dispcor)"
    print "g: flux-calibrated"
    print "s: flux-normalized (using sarith)"
    print "y: sky spectrum"
    print "z: sigma spectrum"
    print "u: combined dispersion-corrected spectrum"
    print "v: combined flux-calibrated spectrum"
    print "w: combined sky spectrum"
    print "x: combined sigma spectrum"
    print "t: telluric-corrected spectrum"
    print "If RUPIPE is not found in the header, an empty list will be printed in its place."
    # Loop until the user supplies one of the two valid choices.
    while True:
        choice = raw_input("Please enter 0 to see the dictionaries, or 1 to see the pipeline history: ")
        if choice == '0' or choice == '1':
            break
        else:
            print "Invalid input: you must enter 1 to proceed."
    if choice == '0':
        # Dump every global dictionary maintained by the dicts module, in
        # roughly the order the reduction steps produce them.
        print "original flats:"
        print dicts.flats
        print "original arcs:"
        print dicts.arcs
        print "original standards:"
        print dicts.standards
        print "original sciences:"
        print dicts.sciences
        print "combined flats:"
        print dicts.combflats
        print "normalized flats:"
        print dicts.normflats
        print "flat-fielded sciences:"
        print dicts.flatsciences
        print "flat-fielded arcs:"
        print dicts.flatarcs
        print "flat-fielded standards:"
        print dicts.flatstandards
        print "bad pixel masks for sciences:"
        print dicts.bpmsciences
        print "bad pixel masks for arcs:"
        print dicts.bpmarcs
        print "bad pixel masks for standards:"
        print dicts.bpmstandards
        print "lacosmicx-corrected sciences:"
        print dicts.laxsciences
        print "cosmic ray pixel masks for sciences:"
        print dicts.bpmlaxsciences
        print "wavelength solutions from PySALT:"
        print dicts.wavesols
        print "wavelength-calibrated arcs:"
        print dicts.wavearcs
        print "wavelength-corrected sciences:"
        print dicts.wavesciences
        print "wavelength-corrected standards:"
        print dicts.wavestandards
        print "background-subtracted sciences:"
        print dicts.backgroundsciences
        print "extracted sciences:"
        print dicts.extractedsciences
        print "extracted standards:"
        print dicts.extractedstandards
        print "extracted arcs for standards:"
        print dicts.extractedarcs_std
        print "extracted arcs for sciences:"
        print dicts.extractedarcs_sci
        print "apsum-extracted sciences:"
        print dicts.apsumsciences
        print "dispersion-corrected sciences:"
        print dicts.dispsciences
        print "dispersion-corrected standards:"
        print dicts.dispstandards
        print "bad wavelength masks for sciences:"
        print dicts.bwmsciences
        print "bad wavelength masks for standards:"
        print dicts.bwmstandards
        print "standard flux calibration files:"
        print dicts.stdfiles
        print "sensfunc flux calibration files:"
        print dicts.sensfiles
        print "flux-calibrated sciences:"
        print dicts.fluxsciences
        print "flux-calibrated standards:"
        print dicts.fluxstandards
        print "flux-scaled sciences:"
        print dicts.scaledfluxsciences
        print "flux-scaled standards:"
        print dicts.scaledstandards
        print "telluric-corrected sciences:"
        print dicts.tellsciences
        print "telluric-corrected standards:"
        print dicts.tellstandards
        print "sky (science) spectra:"
        print dicts.skysciences
        print "sigma (science) spectra:"
        print dicts.sigmasciences
        print "count-scaled sciences:"
        print dicts.scaleddispsciences
        print "combined science spectra:"
        print dicts.combinedspectra
    elif choice == '1':
        images = glob('*.fits')
        for img in images: # print img and 'RUPIPE' (in list form) and 'RUIMGTYP'
            # NOTE(review): each HDU list is opened read-only and never
            # explicitly closed — relies on garbage collection.
            hduimg = pyfits.open(img)
            hdrimg = hduimg[0].header.copy()
            imgtype = hdrimg.get('RUIMGTYP','')
            if imgtype == '':
                imgtype = hdrimg['OBJECT']
            pipestring = hdrimg.get('RUPIPE','')
            pipelist = list(pipestring)
            print img,imgtype,pipelist
# This function updates pipeline history keywords in a newly created/modified FITS file.
def updatePipeKeys(inputname, imagetype, procChar):
    """Record a pipeline step in a FITS file's 0-header.

    Sets 'RUIMGTYP' to imagetype and appends procChar to the 'RUPIPE'
    process-flag string (at most once). An empty procChar just
    (re)classifies the image, e.g. tagging raw mbxgpP*.fits frames as
    'science' so later pipeline runs can auto-sort them.
    """
    hdukey = pyfits.open(inputname, mode='update')
    try:
        hdrkey = hdukey[0].header
        hdrkey['RUIMGTYP'] = imagetype
        if procChar != '':  # '' is for adding 'science' to mbxgpP*.fits image for future auto-sorting
            try:
                proc = list(hdrkey['RUPIPE'])  # existing process flags
            except KeyError:
                # image has never been processed, so 'RUPIPE' is absent
                proc = []
            if procChar not in proc:
                proc.append(procChar)
            hdrkey['RUPIPE'] = ''.join(proc)
        hdukey.flush()
    finally:
        # Close even if flush fails so the HDU list is never leaked.
        hdukey.close()
|
15,867 | fc8cfc661a660648f758a30d772e5557be6113fb | import mechanize
url = "http://cmiskp.echr.coe.int/tkp197/search.asp?skin=hudoc-en"
br = mechanize.Browser()
br.set_handle_robots(False)
br.open(url)
allforms = list(br.forms())
print "There are %d forms" % len(allforms)
for i, form in enumerate(allforms):
print i, form.name, form
# br.select_form("aspnetForm")
br.select_form("frmSearch")
# br["ctl00$phMainContent$txtRecipientName"] = "Liverpool"
br["pd_respondent"] = "GERMANY"
response = br.submit()
print response.read()
alllinks = list(br.links())
for link in alllinks[:10]:
print linkimport mechanize
url = "http://cmiskp.echr.coe.int/tkp197/search.asp?skin=hudoc-en"
br = mechanize.Browser()
br.set_handle_robots(False)
br.open(url)
allforms = list(br.forms())
print "There are %d forms" % len(allforms)
for i, form in enumerate(allforms):
print i, form.name, form
# br.select_form("aspnetForm")
br.select_form("frmSearch")
# br["ctl00$phMainContent$txtRecipientName"] = "Liverpool"
br["pd_respondent"] = "GERMANY"
response = br.submit()
print response.read()
alllinks = list(br.links())
for link in alllinks[:10]:
print link |
15,868 | 8569943fbea33e101d13509ce618b996ee0bb228 | """
Unit tests for field.py
"""
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
import unittest
import numpy as np
from lib.field import Field
from lib.tetromino import Tetromino
def generate_valid_state(state):
    """
    Given a partially filled np array (valid column count, but less than 22
    rows), this utility method fills the state the rest of the way.
    """
    rows, cols = state.shape
    assert cols == Field.WIDTH
    # Pad with empty rows on top so the result has exactly Field.HEIGHT rows.
    padding = np.zeros((Field.HEIGHT - rows, cols), dtype=np.uint8)
    return np.vstack((padding, state))
class FieldAssertions: # pylint: disable=too-few-public-methods, no-self-use
    def assertFieldsEqual(self, f1, f2): # pylint: disable=invalid-name
        """
        Helper method for asserting that two Field objects are equal.

        Raises AssertionError if either argument is not a Field or their
        grid states differ.
        """
        # BUG FIX: the error messages said "is not a Tetromino" although the
        # check is for Field instances.
        if not isinstance(f1, Field):
            raise AssertionError(f'{f1} is not a Field')
        if not isinstance(f2, Field):
            raise AssertionError(f'{f2} is not a Field')
        np.testing.assert_array_equal(f1.state, f2.state)
class TestField(unittest.TestCase, FieldAssertions):
    """Unit tests for Field: creation, piece drops / line clears, and the
    gap-count and column-height metrics."""

    def test_init(self):
        """Field.create() with no state, a valid state, and an invalid one."""
        # Test that a newly initialized field object is empty.
        field = Field.create()
        self.assertIsNotNone(field)
        self.assertFalse(field.state.any())
        # Test that a valid state results in a properly initialized Field
        state = generate_valid_state(np.array([
            [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ], dtype=np.uint8))
        field = Field.create(state)
        self.assertIsNotNone(field)
        self.assertTrue((field.state == state).all())
        # Ensure that a copy was made of the input state.
        state[10, 1] = 2
        self.assertFalse((field.state == state).all())
        # Test that a invalid state returns None
        state = np.ones((2, 3))
        self.assertIsNone(Field.create(state))

    def test_drop(self):
        """
        Test various drop sequences and line clears.
        """
        state = generate_valid_state(np.array([
            [1, 1, 0, 1, 1, 0, 1, 1, 0, 0],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
        ], dtype=np.uint8))
        field = Field.create(state)
        self.assertIsNotNone(field)
        lines_cleared = field.drop(Tetromino.JTetromino(), 0)
        self.assertEqual(lines_cleared, 0)
        expected_field = Field.create(generate_valid_state(np.array([
            [6, 6, 6, 0, 0, 0, 0, 0, 0, 0],
            [1, 1, 6, 1, 1, 0, 1, 1, 0, 0],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
        ])))
        self.assertFieldsEqual(field, expected_field)
        lines_cleared = field.drop(
            Tetromino.TTetromino().rotate_right(), 8)
        self.assertEqual(lines_cleared, 1)
        expected_field = Field.create(generate_valid_state(np.array([
            [6, 6, 6, 0, 0, 0, 0, 0, 0, 3],
            [1, 1, 6, 1, 1, 0, 1, 1, 3, 3],
        ])))
        self.assertFieldsEqual(field, expected_field)
        field.drop(Tetromino.OTetromino(), 3)
        field.drop(Tetromino.ZTetromino(), 6)
        field.drop(Tetromino.JTetromino().flip(), 0)
        field.drop(Tetromino.OTetromino(), 8)
        expected_field = Field.create(generate_valid_state(np.array([
            [6, 0, 0, 0, 0, 0, 0, 0, 2, 2],
            [6, 6, 6, 2, 2, 0, 5, 5, 2, 2],
            [6, 6, 6, 2, 2, 0, 0, 5, 5, 3],
            [1, 1, 6, 1, 1, 0, 1, 1, 3, 3],
        ])))
        self.assertFieldsEqual(field, expected_field)
        lines_cleared = field.drop(Tetromino.ITetromino().rotate_right(), 5)
        self.assertEqual(lines_cleared, 2)
        expected_field = Field.create(generate_valid_state(np.array([
            [6, 0, 0, 0, 0, 1, 0, 0, 2, 2],
            [6, 6, 6, 2, 2, 1, 0, 5, 5, 3],
        ])))
        # BUG FIX: this final expected state was computed but never checked,
        # silently skipping the last verification of the drop sequence.
        self.assertFieldsEqual(field, expected_field)

    def test_count_gaps(self):
        """
        Test that gap counting works as expected with filled/non-filled columns.
        """
        field = Field.create()
        self.assertEqual(field.count_gaps(), 0)
        field = Field.create(generate_valid_state(np.array([
            [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [1, 0, 1, 0, 1, 1, 1, 1, 1, 1],
        ])))
        self.assertEqual(field.count_gaps(), 0)
        field = Field.create(generate_valid_state(np.array([
            [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ])))
        self.assertEqual(field.count_gaps(), 1)
        field = Field.create(generate_valid_state(np.array([
            [1, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 1, 0, 0, 0, 0, 0, 1, 0],
            [1, 0, 0, 1, 0, 0, 0, 0, 1, 0],
            [0, 0, 0, 1, 0, 0, 0, 1, 1, 1],
        ])))
        self.assertEqual(field.count_gaps(), 6)

    def test_heights(self): # pylint: disable=no-self-use
        """
        Test that height calculation works as expected with filled/non-filled
        columns.
        """
        field = Field.create()
        expected_heights = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        np.testing.assert_array_equal(field.heights(), expected_heights)
        field = Field.create(generate_valid_state(np.array([
            [1, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 1, 0, 0, 0, 0, 0, 1, 0],
            [1, 0, 0, 1, 0, 0, 0, 0, 1, 0],
            [0, 0, 1, 1, 0, 0, 0, 1, 1, 1],
        ])))
        expected_heights = np.array([4, 3, 3, 2, 0, 0, 0, 1, 4, 1])
        np.testing.assert_array_equal(field.heights(), expected_heights)
|
15,869 | 71f6fce2384d524a3b56bf57e7800e488ad34e33 | import os
# Report this Windows machine's name and physical-memory figures.
# COMPUTERNAME and the `systeminfo` command are Windows-only.
print "PC Name : "+os.environ['COMPUTERNAME']
print os.popen('systeminfo | findstr /c:"Total Physical Memory" /c:"Available Physical Memory"').read()
|
15,870 | 9f448b951016246ecf3d13c67ee7deed4aa42db1 | import sys
from collections import Counter as cc
def input():
    # Deliberately shadows the builtin: faster bulk reads, newline stripped.
    return sys.stdin.readline().rstrip()


def ii():
    """Read a single integer from stdin."""
    return int(input())


def mi():
    """Read one line of whitespace-separated integers (as a map object)."""
    return map(int, input().split())


def li():
    """Read one line of integers into a list."""
    return list(mi())
def main():
    """Print XOR of all integers in [a, b] using prefix XORs."""
    first, last = mi()

    def prefix_xor(x):
        # XOR of 1..x: parity of ceil(x/2) gives the low bit; even x
        # additionally contributes x itself.
        acc = ((x + 1) // 2) % 2
        if x % 2 == 0:
            acc ^= x
        return acc

    # xor(first..last) = xor(1..last) ^ xor(1..first-1)
    print(prefix_xor(last) ^ prefix_xor(first - 1))
# Run the solution only when executed as a script.
if __name__ == '__main__':
    main()
15,871 | db8299cc6ae699b11e4d551d0e3f63c609c43aab | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 22 17:22:43 2018
@author: Zhang Han
do clustering by TF-IDF and Kmean of scikit learn
"""
#-*- coding: utf8 -*-
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.externals import joblib
from sklearn.feature_extraction.text import TfidfVectorizer
import argparse
import re,os
MAX_FEATURES = 500
def _get_args():
    """Build the CLI with four subcommands (cluster / score / predict /
    predictserver) and return the parsed namespace; args.func holds the
    handler selected by the subcommand."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    parser_cluster = subparsers.add_parser('cluster', help='cluster sample')
    parser_cluster.add_argument('-s', '--sample', required=True, help='path to load sample file from')
    parser_cluster.add_argument('-c', '--csv', help='path to csv file as a result')
    parser_cluster.add_argument('-p', '--pkl', help='path to pkl file')
    parser_cluster.add_argument('-l', '--clusters', help='number of clusters',type=int,default=20)
    parser_cluster.add_argument('-H', '--dbhost', help='host ip for database', default='localhost')
    parser_cluster.add_argument('-P', '--dbport', help='port for database', default='8086')
    parser_cluster.add_argument('-u', '--dbuser', help='user name for database', default='root')
    parser_cluster.add_argument('-w', '--dbpassword', help='password for database', default='root')
    # BUG FIX: default was the string 'false' (truthy!) with store_true;
    # a boolean flag must default to False.
    parser_cluster.add_argument('-d', '--draw', help='enable drawing', default=False, action="store_true")
    parser_cluster.set_defaults(func=cluster)
    parser_score = subparsers.add_parser('score', help='get score by diferrent k value')
    parser_score.add_argument('-s', '--sample', required=True, help='path to load sample file from')
    parser_score.add_argument('-i', '--minclusters', help='min clusters you wan to compare, default 2', type=int, default=2)
    parser_score.add_argument('-a', '--maxclusters', help='max clusters you wan to compare, default 10', type=int, default=10)
    # BUG FIX: same string-default problem as above.
    parser_score.add_argument('-d', '--draw', help='enable drawing', default=False, action="store_true")
    parser_score.set_defaults(func=score)
    parser_predict = subparsers.add_parser('predict', help='cluster sample')
    parser_predict.add_argument('-p', '--pkl', required=True, help='path to import pkl file')
    parser_predict.add_argument('-H', '--dbhost', help='host ip for database')
    parser_predict.add_argument('-P', '--dbport', help='port for database', default='8086')
    parser_predict.add_argument('-u', '--dbuser', help='user name for database', default='root')
    parser_predict.add_argument('-w', '--dbpassword', help='password for database', default='root')
    parser_predict.add_argument('-j', '--json', help='json string')
    parser_predict.add_argument('-c', '--csv', help='path to csv file as a result')
    parser_predict.set_defaults(func=predict)
    parser_predict = subparsers.add_parser('predictserver', help='cluster sample')
    parser_predict.add_argument('-p', '--pkl', required=True, help='path to import pkl file')
    parser_predict.add_argument('-H', '--dbhost', help='host ip for database')
    parser_predict.add_argument('-P', '--dbport', help='port for database', default='8086')
    parser_predict.add_argument('-u', '--dbuser', help='user name for database', default='root')
    parser_predict.add_argument('-w', '--dbpassword', help='password for database', default='root')
    parser_predict.add_argument('-s', '--source', help='broker that data is from,like kafka', default='localhost:9092')
    parser_predict.add_argument('-t', '--topic', help='topic from kafka')
    parser_predict.add_argument('-j', '--json', help='json string')
    parser_predict.set_defaults(func=predict_server)
    args = parser.parse_args()
    return args
def cluster(args):
    """Train a TF-IDF + KMeans clustering model on a JSON-lines sample,
    save labels to CSV (and optionally InfluxDB), and persist the fitted
    model (with its vectorizer attached) to a pickle file."""
    # Derive output paths from the sample path when not given explicitly.
    if args.csv == None: args.csv = args.sample + '.csv'
    if args.pkl == None: args.pkl = args.sample + '.pkl'
    print(args)
    #load and preprocess dataset
    print("Loading sample file")
    df = pd.read_json(args.sample, lines=True)
    data = cleaning(df['log'])
    # transform
    X,vectorizer = transform(data,max_features=MAX_FEATURES)
    #train
    print("Training")
    kmeans = train(X, k=args.clusters)
    print("Training finished")
    #save to file
    df = pd.DataFrame(data)
    df['label'] = kmeans.labels_
    try:
        df.to_csv(args.csv, sep=',', header=True, index=False)
        print("Saved cluster result to file {}".format(args.csv))
    except Exception as e:
        print("failed to save csv file! Error:",str(e))
    #save to database
    if args.dbhost!=None:
        try:
            print("Saving result to database")
            # Escape double quotes before writing to InfluxDB line protocol.
            df["log"] = df["log"].str.replace('"', r'\"' )
            save_to_db(df, host=args.dbhost, port=args.dbport, table_name=os.path.basename(args.sample),
                       user=args.dbuser, password=args.dbpassword)
            print("Finished saving to database")
        except Exception as e:
            print("Failed to save database. Error:" + str(e))
    # Attach the fitted vectorizer so predict()/predict_server() can reuse it.
    setattr(kmeans, "vectorizer", vectorizer)
    joblib.dump(kmeans, args.pkl)
    print("kmean存入"+args.pkl)
def score(args):
    '''Sweep k over [minclusters, maxclusters) and print KMeans inertia for
    each, to help choose the best cluster count (elbow method); optionally
    plot the curve.'''
    df = pd.read_json(args.sample, lines=True)
    data = cleaning(df['log'])
    X,_ = transform(data,max_features=500)
    ks = []
    scores = []
    for i in range(args.minclusters, args.maxclusters):
        km= train(X,k=i)
        # inertia_ = within-cluster sum of squared distances (lower is better)
        print(i,km.inertia_)
        ks.append(i)
        scores.append(km.inertia_)
    # NOTE(review): with the parser's default 'false' (a string) this
    # comparison is False, so plotting only happens when -d is passed —
    # it works by accident; the default should be the boolean False.
    if args.draw == True:
        import matplotlib.pyplot as plt
        plt.figure(figsize=(8,4))
        plt.plot(ks,scores,label="inertia",color="red",linewidth=1)
        plt.xlabel("Feature")
        plt.ylabel("Error")
        plt.legend()
        plt.show()
def predict(args):
    """Label log lines from a JSON-lines string with a previously trained
    model (loaded from the pickle produced by cluster()); print each
    label/log pair and optionally save them to CSV."""
    model = joblib.load(args.pkl)
    json = args.json  # local name only; the json module is not used here
    df = pd.read_json(json, lines=True)
    # copy() so cleaning's in-place edits don't touch df['log'] used below
    data = cleaning(df['log'].copy(), drop_duplicates=False)
    # Reuse the vectorizer fitted at training time for a consistent vocabulary.
    X,_ = transform(data, max_features=MAX_FEATURES, vectorizer=model.vectorizer)
    labels = model.predict(X)
    label_log = pd.DataFrame()
    label_log['label'] = labels
    label_log['log'] = df['log']
    for i in range(len(labels)):
        print("{} --- {}".format(labels[i],df['log'][i]))
    if args.csv!=None:
        label_log.to_csv(args.csv, sep=',', header=True, index=False)
def predict_server(args):
    """Consume JSON log messages from a Kafka topic, label each with the
    trained model, and optionally write the results to InfluxDB.

    Runs forever (the consumer loop blocks waiting for messages).
    """
    from kafka import KafkaConsumer
    model = joblib.load(args.pkl)
    print("start")
    consumer = KafkaConsumer(args.topic, bootstrap_servers=[args.source])
    # consumer = [
    #     '{"log":"bird: BGP: Unexpected connect from unknown address 10.252.21.153 (port 27467)\n","stream":"stdout","hostname":"core-cmhadoop5-2","container_log_file":"calico-node-lmcsz_kube-system_calico-node-d3fcbf92d8c09506a8493dfffeedd730543ec50b4e31564921444ef65ebd0a71"}'
    # ]
    print("receiving")
    for msg in consumer:
        json_str = msg.value.decode()
        print (json_str)
        df = pd.read_json(json_str, lines=True)
        # copy() guards df['log'] against cleaning's in-place edits
        data = cleaning(df['log'].copy(), drop_duplicates=False)
        X, _ = transform(data, max_features=MAX_FEATURES, vectorizer=model.vectorizer)
        print("***********")
        labels = model.predict(X)
        df["label"] = labels
        try:
            print(df)
            if args.dbhost != None:
                print("save to database")
                # Escape double quotes for the InfluxDB payload.
                df["log"] = df["log"].str.replace('"', r'\"' )
                save_to_db(df, host=args.dbhost, port=args.dbport, table_name="label",
                           user=args.dbuser, password=args.dbpassword)
        except Exception as e:
            print(str(e))
def cleaning(data, drop_duplicates=True):
    '''Normalize a Series of raw log lines for vectorization.

    Strips leading/trailing whitespace and removes every digit run so that
    messages differing only in numbers (ids, ports, counts) collapse to the
    same template. Unlike the original, this operates on a copy: the
    caller's Series is never mutated (callers that defensively passed
    .copy() still work unchanged).

    Param:
        data - pandas.core.series.Series of log strings
        drop_duplicates - drop identical cleaned lines (default True)
    Return:
        pandas Series (DataFrame-compatible) with a fresh 0..n-1 index
    '''
    # Vectorized string ops return new Series, leaving `data` untouched.
    cleaned = data.str.strip().str.replace(r"\d+", "", regex=True)
    if drop_duplicates:
        cleaned = cleaned.drop_duplicates()
    return cleaned.reset_index(drop=True)
def transform(data, max_features=500, vectorizer=None):
    """Vectorize log lines with TF-IDF.

    When no vectorizer is supplied, fit a new one on `data`; otherwise
    reuse the given (already fitted) vectorizer. Returns the feature
    matrix together with the vectorizer used.
    """
    if vectorizer is None:
        vectorizer = TfidfVectorizer(max_features=max_features, use_idf=True)
        matrix = vectorizer.fit_transform(data)
    else:
        matrix = vectorizer.transform(data)
    return matrix, vectorizer
def train(X, k=10):
    """Fit a k-means++ model with k clusters on feature matrix X and
    return the fitted estimator."""
    # KMeans.fit returns the estimator itself, so fit-and-return in one step.
    return KMeans(
        n_clusters=k, init='k-means++', max_iter=300, n_init=1, verbose=False
    ).fit(X)
def save_to_db(log_df, user='root',password='root', host='localhost', port=8086, table_name="demo",batch_size=1000):
    """Write labeled log rows to the 'log_predict' InfluxDB database in
    batches of batch_size points, tagging each point with its row id and
    cluster label."""
    from influxdb import InfluxDBClient
    client = InfluxDBClient(host, port, user, password, 'log_predict')
    #client.create_database('example')
    # Batch writes to keep each HTTP payload bounded.
    for n in range(len(log_df)//batch_size+1):
        df = log_df[n*batch_size:(n+1)*batch_size]
        json_bodys = []
        for i in range(len(df)):
            # Template point; id/label/log are filled in below.
            json_body = {
                "measurement": table_name,
                "tags": {
                    "id": 0,
                    "label": 0
                },
                #"time": "2009-11-10T23:01:00Z",
                "fields": {
                    "log":""
                }
            }
            pos = n*batch_size+i
            json_body["tags"]["id"] = pos
            json_body["tags"]["label"] = df['label'][pos]
            # NOTE(review): '\"' == '"' in Python, so this replace is a
            # no-op; callers already pre-escape with r'\"' before calling,
            # so "fixing" it here would double-escape — confirm intent.
            json_body["fields"]["log"] = df['log'][pos].replace('"', '\"')
            json_bodys.append(json_body)
        client.write_points(json_bodys)
if __name__ == '__main__':
    # Dispatch to the handler argparse bound to the chosen subcommand
    # (cluster / score / predict / predictserver).
    args = _get_args()
    args.func(args)
|
15,872 | ca564f66fa9c29bf9ea35357ca285ae3650e6967 | # [START imports]
import json
import sys
import os
import argparse
import datetime
import configparser
import yaml
from datetime import datetime
import sys
import argparse
sys.path.insert(0, './python-modules')
# sys.path.insert(0, './resources')
sys.dont_write_bytecode = True
# [END imports]
# [START import modules]
#import modules_config_handling as cf
import modules_bigquery_handling as bq
# [END import modules]
# [START] import google cloud modules
from google.cloud import bigquery
from google.cloud import storage
# [END] import google cloud modules
from resources.classes import ProjectClass
from resources.classes import RestfulClass
def import_config_yaml():
    """Load the project configuration from ./config.yaml into a dict."""
    with open("config.yaml", "r") as f:
        config = yaml.load(f,Loader=yaml.FullLoader)
    return config
def import_restful_yaml():
    """Load the REST endpoint definitions from ./restful.yaml into a dict."""
    with open("restful.yaml", "r") as f:
        restful = yaml.load(f,Loader=yaml.FullLoader)
    return restful
# store data in a storage bucket and then write to BiGQuery --> need to add month partition later
if __name__ == "__main__":
    # Pick up a known alias ('logging' or 'workbench') from the CLI args.
    alias = ''
    for arg in sys.argv:
        print(arg)
        if arg in ['logging', 'workbench']:
            alias = arg
    # BUG FIX: the original tested `alias == True`, which is never true for
    # a string, so the guard could never fire. Exit when no valid alias was
    # supplied on the command line.
    if not alias:
        os._exit(-1)
    config = import_config_yaml()
    project = ProjectClass(config['Project'], alias)
    valid_aliases = project.list_valid_aliases(config['Project'])
    print(valid_aliases)
    print(project.name)
    print(project.env)
    print(project)
    # bq.dataset_metadata_read()
|
15,873 | 6b19ac1cb7e42f9df5efa8a88536d728e6a2bea9 | from rack.models import Arch, Repo, Group, Package, Spin, UserProfile, UserGroup, PackageRating, GroupRating, UserGroupRating
from django.contrib import admin
# Expose every rack model in the Django admin. Registering in a loop keeps
# the model list in one place instead of one boilerplate line per model.
for _model in (
    Arch,
    Repo,
    Group,
    UserGroup,
    Package,
    Spin,
    UserProfile,
    PackageRating,
    GroupRating,
    UserGroupRating,
):
    admin.site.register(_model)
15,874 | 15a468050bb2bf8aff266e0df9bc8679cb7af633 | pytest_plugins = "pytester"
|
15,875 | aff57ebdfaf6485cf9d66472b6584c7408567f52 | #!/usr/bin/env python
# coding: utf-8
# Notebook-converted script: trains a RandomForest regressor on the
# pre-normalized dataset and reports validation MAE.
import numpy as np # linear algebra
import pandas as pd #
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')  # requires IPython
sns.set()
#Importing the auxiliar and preprocessing librarys
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, KFold, cross_validate
from sklearn.metrics import accuracy_score
normalized = pd.read_csv('normalized.csv', delimiter=',')
#This file contains the information of the DB without the comments of the causes
normalized.head()
# Second model: all columns
# Target is 'Amount'; everything else becomes a feature.
X = normalized.copy()
y = X.Amount
X.drop(['Amount'], axis=1, inplace=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2,
                                                      random_state=0)
# Select categorical columns with relatively low cardinality (convenient but arbitrary)
categorical_cols = [cname for cname in X_train.columns if
                    X_train[cname].nunique() < 10 and
                    X_train[cname].dtype == "object"]
# Select numerical columns
numerical_cols = [cname for cname in X_train.columns if
                  X_train[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
X_train = X_train[my_cols].copy()
X_valid = X_valid[my_cols].copy()
X_train.head()
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
# Preprocessing for numerical data
numerical_transformer = SimpleImputer(strategy='constant')
# Preprocessing for categorical data
categorical_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('onehot', OneHotEncoder(handle_unknown='ignore'))
])
# Bundle preprocessing for numerical and categorical data
preprocessor = ColumnTransformer(
    transformers=[
        ('num', numerical_transformer, numerical_cols),
        ('cat', categorical_transformer, categorical_cols)
    ])
# Define model
#model = RandomForestRegressor(n_estimators=100, random_state=0)
model = RandomForestRegressor(n_estimators=500, random_state=0)
# Bundle preprocessing and modeling code in a pipeline
clf = Pipeline(steps=[('preprocessor', preprocessor),
                      ('model', model)
                      ])
# Preprocessing of training data, fit model
clf.fit(X_train, y_train)
# Preprocessing of validation data, get predictions
preds = clf.predict(X_valid)
print('MAE:', mean_absolute_error(y_valid, preds))
print("The MAE improved from 1.410 to 1.103")
|
15,876 | 6cda5cd8b185a42409f3020e25e8e2d4f56f7f4e | """Save object into XML file.
Write content of object _x into file _data._xml.
Source: programming-idioms.org
"""
# Implementation author: Fazel94
# Created on 2019-09-27T14:19:13.327701Z
# Last modified on 2019-09-27T14:19:13.327701Z
# Version 1
import pyxser as pyx
# Python 2.5 to 2.7
# Use pickle or marshall module
class TestClass(object):
    """Plain three-attribute container used to demonstrate pyxser XML
    serialization."""

    # Class-level defaults; overwritten per instance in __init__.
    a = None
    b = None
    c = None

    def __init__(self, a, b, c):
        # Bind each constructor argument to its same-named attribute.
        for name, value in zip(("a", "b", "c"), (a, b, c)):
            setattr(self, name, value)
# Build a sample object and serialize it to UTF-8 XML with pyxser.
tst = TestClass("var_a", "var_b", "var_c")
ser = pyx.serialize(obj=tst, enc="utf-8")
print(ser)
|
15,877 | 98d1762d00c851baab6b55239a209ce70ade30fb | import numpy as np
from numpy.testing import assert_array_equal
# takes a list
def get_3_max_values(arr):
    """Return (indices, values) of the three largest entries of *arr*,
    both ordered by ascending value."""
    data = np.asarray(arr)
    # argsort gives ascending order; the last three positions are the top-3.
    top_idx = np.argsort(data)[-3:]
    return top_idx, data[top_idx]
class TestNumpy:
    """Small demos of numpy argsort-based top-k selection and cumsum."""

    # should return the 3 max values (and their indices) from the array
    def test_get_3_max_values(self):
        values = [-3.89, -59.99, 2.83, 7.53, 13.58, -9.29, 4.6, 10.48, 6.79, -5.8, 9.01]
        largest_indices, largest_values = get_3_max_values(values)
        assert_array_equal(largest_values, [9.01, 10.48, 13.58])
        assert_array_equal(largest_indices, [10, 7, 4])

    # cumsum adds the current element of the array to the running total
    def test_cumsum(self):
        values = [4, 6, 12]
        expected_result = [4, 4+6, 4+6+12] # [4, 10, 22]
        assert_array_equal(np.cumsum(values), expected_result)
15,878 | 1e8270c8cf22426f24ec52fd72a4f935a3a3d611 | from tkinter import *
from tkmacosx import Button
# Build the main application window.
root = Tk()
root.title("goyn GUI")
# Allow horizontal resizing only; height stays fixed.
root.resizable(True, False)
# Enter the Tk event loop (blocks until the window is closed).
root.mainloop()
15,879 | 94312e2e5b189afcf3715b0732aca1ec16b72859 | # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"say_hello": "01_greeter.ipynb",
"HelloSayer": "01_greeter.ipynb"}
modules = ["greeter.py"]
doc_url = "https://ruivalmeida.github.io/nbdev_presentation/"
git_url = "https://github.com/ruivalmeida/nbdev_presentation/tree/master/"
def custom_doc_links(name): return None
|
15,880 | bbe610556e9051b3a68e50d0cfbab982967802f6 | """
You have been given 3 integers - l, r and k. Find how many numbers between l and r (both inclusive) are divisible by k. You do not need to print these numbers, you just have to find their count.
Input Format
The first and only line of input contains 3 space separated integers l, r and k.
Output Format
Print the required answer on a single line.
SAMPLE INPUT
1 10 1
SAMPLE OUTPUT
10
"""
# Read l, r, k from one space-separated line.
l, r, k = input().split()
l = int(l)
r = int(r)
k = int(k)
# BUG FIX: the original tested `i % k is 0`, relying on CPython's small-int
# interning for correctness; equality (==) is the right operator. The loop
# is also unnecessary: the count of multiples of k in [l, r] is
# floor(r/k) - floor((l-1)/k) (valid for the problem's positive inputs).
count = r // k - (l - 1) // k
print(count)
|
15,881 | a16ba91eb2505071c3880aa44fdae90aa292e423 | #!/usr/bin/env python
"""User Related Functionality."""
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib.auth import login, logout, authenticate
from django.http import Http404
from myblog.models import UserMessage
def index(request):
    """
    Index Page

    Renders the blog index with every user message (newest first) and the
    viewer's login state / username.
    """
    try:
        ustatus = {}
        ustatus['loginstatus'] = False
        ustatus['user_comment'] = UserMessage.objects.all().order_by(
            '-time_added')
        if request.user.is_authenticated:
            ustatus['loginstatus'] = True
            ustatus['uname'] = request.user.username
        return render(request, 'myblog/index.html', ustatus)
    except Exception as error_not_found:
        # NOTE(review): catching Exception maps any template/DB failure to
        # a 404, hiding real errors — consider narrowing.
        raise Http404('<h1>Sorry, Page Not Found! </h1>') from error_not_found
def login_user(request):
    """
    User Login

    POST with valid credentials logs in and redirects to /myblog (session
    expires at browser close unless 'remember_me' is checked); bad
    credentials redirect back with an error query parameter.
    """
    try:
        if request.method == "POST":
            username = request.POST['username']
            password = request.POST['password']
            if username not in ['', None] and password not in ['', None]:
                gusername = authenticate(username=username, password=password)
                if gusername:
                    login(request, gusername)
                    if not request.POST.get('remember_me', None):
                        # Expire the session when the browser closes.
                        request.session.set_expiry(0)
                    return redirect("/myblog")
                else:
                    return redirect("/myblog?error=Invalid+Credentials")
        # NOTE(review): GET requests and empty credentials fall through and
        # return None, which Django rejects — a render/redirect is likely
        # missing here; confirm intended behavior.
    except Exception as error_not_found:
        raise Http404('<h1>Sorry, Page Not Found! </h1>') from error_not_found
def logout_user(request):
    """
    User Logout

    Ends the session and redirects to the blog index.
    """
    try:
        logout(request)
    except Exception as error_not_found:
        raise Http404('<h1>Sorry, Page Not Found! </h1>') from error_not_found
    return redirect("/myblog")
def user_signup(request):
    """
    Signup User

    POST with username/password creates the account, logs the new user in,
    and redirects to /myblog.
    """
    try:
        if request.method == "POST":
            username = request.POST['username']
            password = request.POST['password']
            if username not in ['', None] and password not in ['', None]:
                # NOTE(review): a duplicate username makes create_user raise,
                # which surfaces as a 404 here — consider explicit handling.
                user = User.objects.create_user(
                    username=username, password=password)
                user.save()
                login(request, user)
        # NOTE(review): GET requests and empty fields fall through and
        # return None, which Django rejects — confirm a signup-form render
        # is intended here.
        return redirect("/myblog")
    except Exception as error_not_found:
        raise Http404('<h1>Sorry, Page Not Found! </h1>') from error_not_found
|
15,882 | 66a407f3e4f5d5642db1079d2c7f62e24e8e57e4 | """
You have n fair coins and you flip them all at the same time. Any that come up tails you set aside.
The ones that come up heads you flip again. How many rounds do you expect to play before only one coin remains?
Write a function that, given n, returns the number of rounds you'd expect to play until one coin remains.
"""
import numpy as np
def flip_coins(n):
    """One simulated game: flip n fair coins, set tails aside, and repeat
    with the heads until at most one coin remains.

    Returns the number of rounds played (0 when n <= 1).
    """
    rounds = 0
    while n > 1:
        rounds += 1
        # Each coin is a Bernoulli(1/2) draw; the heads (1s) survive.
        n = np.random.randint(2, size=n).sum()
    return rounds
def monte_carlo(n, iters=1000):
    """Estimate the expected round count by averaging `iters` independent
    runs of flip_coins(n)."""
    samples = np.fromiter(
        (flip_coins(n) for _ in range(iters)), dtype=float, count=iters
    )
    return samples.mean()
if __name__ == "__main__":
    # The expected number of rounds grows like log2(n); print both for
    # comparison.
    NUM = 10000
    print(monte_carlo(NUM))
    print(np.log2(NUM))
|
15,883 | d51b5a09a103649744034c6e17b30384b874d46b | import json
import numpy as np
class MyEncoder(json.JSONEncoder):
    """JSON encoder that also understands NumPy scalars/arrays and raw
    bytes (decoded as ASCII)."""

    def default(self, obj):
        # Convert NumPy scalar types to their plain-Python equivalents.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, (bytes, bytearray)):
            return obj.decode("ASCII")  # <- or any other encoding of your choice
        # Let the base class default method raise the TypeError
        return super().default(obj)
15,884 | 66ff8001d7c010f4224060d5b8f9854d1b87b438 | import os
import platform
import shutil
import subprocess
import textwrap
from pathlib import Path
import pytest
import yaml
from . import helpers
class Dumper(yaml.Dumper):
    """A YAML dumper to properly indent lists.

    https://github.com/yaml/pyyaml/issues/234#issuecomment-765894586
    """

    def increase_indent(self, flow=False, *args, **kwargs):
        # Force indentless=False so block-sequence items are indented
        # under their key instead of flush with it.
        return super().increase_indent(flow=flow, indentless=False)
def config(*args):
    """Run `umamba config <args>` and return the parsed structure when
    --json was requested, otherwise the decoded stdout text."""
    umamba = helpers.get_umamba()
    cmd = [umamba, "config"] + [arg for arg in args if arg]  # drop falsy args
    res = helpers.subprocess_run(*cmd)
    if "--json" in args:
        # JSON is a subset of YAML, so the YAML loader parses it fine.
        j = yaml.load(res, yaml.FullLoader)
        return j
    return res.decode()
@pytest.fixture
def rc_file_args(request):
    """Parametrizable fixture to choose content of rc file as a dict."""
    # Indirect parametrization; defaults to an empty config when the test
    # supplies no request.param.
    return getattr(request, "param", {})
@pytest.fixture
def rc_file_text(rc_file_args):
    """The content of the rc_file."""
    # The custom Dumper indents list items the way micromamba expects.
    return yaml.dump(rc_file_args, Dumper=Dumper)
@pytest.fixture
def rc_file(
    request,
    rc_file_text,
    tmp_home,
    tmp_root_prefix,
    tmp_prefix,
    tmp_path,
    user_config_dir,
):
    """Parametrizable fixture to create an rc file at the desired location.

    The file is created in an isolated folder and set as the prefix, root prefix, or
    home folder.

    Parametrize with a (where, rc_filename) pair; unparametrized tests get
    tmp_path/umamba/config.yaml. A filename ending in '.d' is treated as a
    config directory and a test.yaml is created inside it.
    """
    if hasattr(request, "param"):
        where, rc_filename = request.param
        # Map the symbolic location onto the matching tmp fixture.
        if where == "home":
            rc_file = tmp_home / rc_filename
        elif where == "root_prefix":
            rc_file = tmp_root_prefix / rc_filename
        elif where == "prefix":
            rc_file = tmp_prefix / rc_filename
        elif where == "user_config_dir":
            rc_file = user_config_dir / rc_filename
        elif where == "env_set_xdg":
            # Point XDG_CONFIG_HOME at an isolated dir for this test.
            os.environ["XDG_CONFIG_HOME"] = str(tmp_home / "custom_xdg_config_dir")
            rc_file = tmp_home / "custom_xdg_config_dir" / "mamba" / rc_filename
        elif where == "absolute":
            rc_file = Path(rc_filename)
        else:
            raise ValueError("Bad rc file location")
        if rc_file.suffix == ".d":
            # '.d' names denote a config directory; drop a file inside it.
            rc_file = rc_file / "test.yaml"
    else:
        rc_file = tmp_path / "umamba/config.yaml"
    rc_file.parent.mkdir(parents=True, exist_ok=True)
    with open(rc_file, "w+") as f:
        f.write(rc_file_text)
    return rc_file
class TestConfig:
    """Basic `micromamba config` invocations."""

    def test_config_empty(self, tmp_home):
        # With no subcommand, the help/overview text is printed.
        assert "Configuration of micromamba" in config()
    @pytest.mark.parametrize("quiet_flag", ["-q", "--quiet"])
    def test_config_quiet(self, quiet_flag, tmp_home):
        # Quiet mode suppresses all output.
        assert config(quiet_flag) == ""
class TestConfigSources:
    """Tests for `config sources`: listing which configuration files are in effect."""

    @pytest.mark.parametrize(
        "rc_file", (("home", "dummy.yaml"), ("home", ".mambarc")), indirect=True
    )
    @pytest.mark.parametrize(
        "rc_file_args", ({"override_channels_enabled": True},), indirect=True
    )
    @pytest.mark.parametrize("quiet_flag", ["-q", "--quiet"])
    @pytest.mark.parametrize("norc", [False, True])
    def test_config_sources(self, rc_file, quiet_flag, norc):
        # Combining an explicit --rc-file with --no-rc must fail.
        if norc:
            with pytest.raises(subprocess.CalledProcessError):
                config("sources", quiet_flag, "--rc-file", rc_file, "--no-rc")
        else:
            res = config("sources", quiet_flag, "--rc-file", rc_file)
            # Paths under $HOME are printed using "~" shorthand.
            rc_file_short = str(rc_file).replace(os.path.expanduser("~"), "~")
            assert res.strip().splitlines() == (
                f"Configuration files (by precedence order):\n{rc_file_short}".splitlines()
            )
    @pytest.mark.parametrize("quiet_flag", ["-q", "--quiet"])
    @pytest.mark.parametrize("norc", [False, True])
    def test_config_sources_empty(self, tmp_prefix, quiet_flag, norc):
        if norc:
            res = config("sources", quiet_flag, "--no-rc")
            assert res.strip() == "Configuration files disabled by --no-rc flag"
        else:
            res = config("sources", quiet_flag)
            assert res.startswith("Configuration files (by precedence order):")
    # TODO: test system located sources?
    @pytest.mark.parametrize(
        "rc_file",
        (
            # "/etc/conda/.condarc",
            # "/etc/conda/condarc",
            # "/etc/conda/condarc.d/",
            # "/etc/conda/.mambarc",
            # "/var/lib/conda/.condarc",
            # "/var/lib/conda/condarc",
            # "/var/lib/conda/condarc.d/",
            # "/var/lib/conda/.mambarc",
            ("user_config_dir", "mambarc"),
            ("env_set_xdg", "mambarc"),
            ("home", ".conda/.condarc"),
            ("home", ".conda/condarc"),
            ("home", ".conda/condarc.d"),
            ("home", ".condarc"),
            ("home", ".mambarc"),
            ("root_prefix", ".condarc"),
            ("root_prefix", "condarc"),
            ("root_prefix", "condarc.d"),
            ("root_prefix", ".mambarc"),
            ("prefix", ".condarc"),
            ("prefix", "condarc"),
            ("prefix", "condarc.d"),
            ("prefix", ".mambarc"),
        ),
        indirect=True,
    )
    @pytest.mark.parametrize(
        "rc_file_args", ({"override_channels_enabled": True},), indirect=True
    )
    def test_config_rc_file(self, rc_file, tmp_env_name):
        # Each supported rc-file location must be discovered automatically.
        srcs = config("sources", "-n", tmp_env_name).strip().splitlines()
        short_name = str(rc_file).replace(os.path.expanduser("~"), "~")
        expected_srcs = (
            f"Configuration files (by precedence order):\n{short_name}".splitlines()
        )
        assert srcs == expected_srcs
    @pytest.mark.parametrize(
        "rc_file",
        [("home", "somefile.yml")],
        indirect=True,
    )
    @pytest.mark.parametrize(
        "rc_file_args", ({"override_channels_enabled": True},), indirect=True
    )
    def test_config_expand_user(self, rc_file):
        # The printed source path is home-collapsed ("~/...").
        rc_file_short = str(rc_file).replace(os.path.expanduser("~"), "~")
        res = config("sources", "--rc-file", rc_file)
        assert (
            res.strip().splitlines()
            == f"Configuration files (by precedence order):\n{rc_file_short}".splitlines()
        )
class TestConfigList:
    """Tests for `config list`: output formats, source annotations, and precedence."""

    @pytest.mark.parametrize("rc_file_args", ({"channels": ["channel1", "channel2"]},))
    def test_list_with_rc(self, rc_file, rc_file_text):
        # The listing must round-trip the rc file content.
        assert (
            config("list", "--no-env", "--rc-file", rc_file).splitlines()
            == rc_file_text.splitlines()
        )
    def test_list_without_rc(self):
        assert config("list", "--no-env", "--no-rc").splitlines()[:-1] == []
    @pytest.mark.parametrize("source_flag", ["--sources", "-s"])
    @pytest.mark.parametrize("rc_file_args", ({"channels": ["channel1", "channel2"]},))
    def test_list_with_sources(self, rc_file, source_flag):
        # Each value is annotated with the file it came from.
        home_folder = os.path.expanduser("~")
        src = f" # '{str(rc_file).replace(home_folder, '~')}'"
        assert (
            config("list", "--no-env", "--rc-file", rc_file, source_flag).splitlines()
            == f"channels:\n - channel1{src}\n - channel2{src}\n".splitlines()
        )
    @pytest.mark.parametrize("source_flag", ["--sources", "-s"])
    @pytest.mark.parametrize("rc_file_args", ({"custom_channels": {"key1": "value1"}},))
    def test_list_map_with_sources(self, rc_file, source_flag):
        home_folder = os.path.expanduser("~")
        src = f" # '{str(rc_file).replace(home_folder, '~')}'"
        assert (
            config("list", "--no-env", "--rc-file", rc_file, source_flag).splitlines()
            == f"custom_channels:\n key1: value1{src}\n".splitlines()
        )
    @pytest.mark.parametrize("desc_flag", ["--descriptions", "-d"])
    @pytest.mark.parametrize("rc_file_args", ({"channels": ["channel1", "channel2"]},))
    def test_list_with_descriptions(self, rc_file, desc_flag):
        assert (
            config("list", "--no-env", "--rc-file", rc_file, desc_flag).splitlines()
            == "# channels\n# Define the list of channels\nchannels:\n"
            " - channel1\n - channel2\n".splitlines()
        )
    @pytest.mark.parametrize("desc_flag", ["--long-descriptions", "-l"])
    @pytest.mark.parametrize("rc_file_args", ({"channels": ["channel1", "channel2"]},))
    def test_list_with_long_descriptions(self, rc_file, desc_flag):
        assert (
            config("list", "--no-env", "--rc-file", rc_file, desc_flag).splitlines()
            == "# channels\n# The list of channels where the packages will be searched for.\n"
            "# See also 'channel_priority'.\nchannels:\n - channel1\n - channel2\n".splitlines()
        )
    @pytest.mark.parametrize("group_flag", ["--groups", "-g"])
    @pytest.mark.parametrize("rc_file_args", ({"channels": ["channel1", "channel2"]},))
    def test_list_with_groups(self, rc_file, group_flag):
        group = (
            "# ######################################################\n"
            "# # Channels Configuration #\n"
            "# ######################################################\n\n"
        )
        assert (
            config(
                "list", "--no-env", "--rc-file", rc_file, "-d", group_flag
            ).splitlines()
            == f"{group}# channels\n# Define the list of channels\nchannels:\n"
            " - channel1\n - channel2\n".splitlines()
        )
    def test_env_vars(self):
        # MAMBA_* environment variables show up annotated with their origin.
        os.environ["MAMBA_OFFLINE"] = "true"
        assert (
            config("list", "offline", "--no-rc", "-s").splitlines()
            == "offline: true # 'MAMBA_OFFLINE'".splitlines()
        )
        os.environ["MAMBA_OFFLINE"] = "false"
        assert (
            config("list", "offline", "--no-rc", "-s").splitlines()
            == "offline: false # 'MAMBA_OFFLINE'".splitlines()
        )
        os.environ.pop("MAMBA_OFFLINE")
    def test_no_env(self):
        # --no-env makes the CLI flag win over the environment variable.
        os.environ["MAMBA_OFFLINE"] = "false"
        assert (
            config(
                "list", "offline", "--no-rc", "--no-env", "-s", "--offline"
            ).splitlines()
            == "offline: true # 'CLI'".splitlines()
        )
        os.environ.pop("MAMBA_OFFLINE")
    def test_precedence(self):
        # Precedence order under test: CLI > env var > rc file.
        rc_dir = os.path.expanduser(
            os.path.join("~", "test_mamba", helpers.random_string())
        )
        os.makedirs(rc_dir, exist_ok=True)
        rc_file = os.path.join(rc_dir, ".mambarc")
        short_rc_file = rc_file.replace(os.path.expanduser("~"), "~")
        with open(rc_file, "w") as f:
            f.write("offline: true")
        try:
            if "MAMBA_OFFLINE" in os.environ:
                os.environ.pop("MAMBA_OFFLINE")
            assert (
                config("list", "offline", f"--rc-file={rc_file}", "-s").splitlines()
                == f"offline: true # '{short_rc_file}'".splitlines()
            )
            os.environ["MAMBA_OFFLINE"] = "false"
            assert (
                config("list", "offline", "--no-rc", "-s").splitlines()
                == "offline: false # 'MAMBA_OFFLINE'".splitlines()
            )
            assert (
                config("list", "offline", f"--rc-file={rc_file}", "-s").splitlines()
                == f"offline: false # 'MAMBA_OFFLINE' > '{short_rc_file}'".splitlines()
            )
            assert (
                config(
                    "list", "offline", f"--rc-file={rc_file}", "-s", "--offline"
                ).splitlines()
                == f"offline: true # 'CLI' > 'MAMBA_OFFLINE' > '{short_rc_file}'".splitlines()
            )
            assert (
                config(
                    "list",
                    "offline",
                    f"--rc-file={rc_file}",
                    "--no-env",
                    "-s",
                    "--offline",
                ).splitlines()
                == f"offline: true # 'CLI' > '{short_rc_file}'".splitlines()
            )
            assert (
                config(
                    "list",
                    "offline",
                    "--no-rc",
                    "--no-env",
                    "-s",
                    "--offline",
                ).splitlines()
                == "offline: true # 'CLI'".splitlines()
            )
        finally:
            # Always restore the environment and remove the temporary rc dir.
            if "MAMBA_OFFLINE" in os.environ:
                os.environ.pop("MAMBA_OFFLINE")
            shutil.rmtree(os.path.expanduser(os.path.join("~", "test_mamba")))
# TODO: instead of "Key is not present in file" => "Key " + key + "is not present in file"
class TestConfigModifiers:
    """Tests for `config set/get/remove-key/remove/append/prepend`.

    `--file` variants target an explicit rc file; `--env` variants target the
    active environment's rc file.
    """

    def test_file_set_single_input(self, rc_file):
        config("set", "json", "true", "--file", rc_file)
        assert (
            config("get", "json", "--file", rc_file).splitlines()
            == "json: true".splitlines()
        )
    def test_file_set_change_key_value(self, rc_file):
        # A second `set` on the same key overwrites the value.
        config("set", "json", "true", "--file", rc_file)
        config("set", "json", "false", "--file", rc_file)
        assert (
            config("get", "json", "--file", rc_file).splitlines()
            == "json: false".splitlines()
        )
    # NOTE(review): "invalit" is a typo for "invalid" in the test name.
    def test_file_set_invalit_input(self, rc_file):
        assert (
            config("set", "$%#@abc", "--file", rc_file).splitlines()
            == "Key is invalid or more than one key was received".splitlines()
        )
    def test_file_set_multiple_inputs(self, rc_file):
        # `set` accepts exactly one key/value pair.
        assert (
            config(
                "set",
                "json",
                "true",
                "clean_tarballs",
                "true",
                "--file",
                rc_file,
            ).splitlines()
            == "Key is invalid or more than one key was received".splitlines()
        )
    def test_file_remove_single_input(self, rc_file):
        config("set", "json", "true", "--file", rc_file)
        assert config("remove-key", "json", "--file", rc_file).splitlines() == []
    def test_file_remove_non_existent_key(self, rc_file):
        assert (
            config("remove-key", "json", "--file", rc_file).splitlines()
            == "Key is not present in file".splitlines()
        )
    def test_file_remove_invalid_key(self, rc_file):
        assert (
            config("remove-key", "^&*&^def", "--file", rc_file).splitlines()
            == "Key is not present in file".splitlines()
        )
    def test_file_remove_vector(self, rc_file):
        # remove-key deletes the whole list under the key.
        config("append", "channels", "flowers", "--file", rc_file)
        config("remove-key", "channels", "--file", rc_file)
        assert (
            config("get", "channels", "--file", rc_file).splitlines()
            == "Key is not present in file".splitlines()
        )
    def test_file_remove_vector_value(self, rc_file):
        # Backward test compatibility: when an empty file exists, the formatting is different
        rc_file.unlink()
        config("append", "channels", "totoro", "--file", rc_file)
        config("append", "channels", "haku", "--file", rc_file)
        config("remove", "channels", "totoro", "--file", rc_file)
        assert config("get", "channels", "--file", rc_file).splitlines() == [
            "channels:",
            " - haku",
        ]
    # TODO: This behavior should be fixed "channels: []"
    def test_file_remove_vector_all_values(self, rc_file):
        config("append", "channels", "haku", "--file", rc_file)
        config("remove", "channels", "haku", "--file", rc_file)
        assert config("get", "channels", "--file", rc_file).splitlines() == [
            "Key is not present in file"
        ]
    def test_file_remove_vector_nonexistent_value(self, rc_file):
        config("append", "channels", "haku", "--file", rc_file)
        assert (
            config(
                "remove",
                "channels",
                "chihiro",
                "--file",
                rc_file,
            ).splitlines()
            == "Key is not present in file".splitlines()
        )
    def test_file_remove_vector_multiple_values(self, rc_file):
        config("append", "channels", "haku", "--file", rc_file)
        assert (
            config(
                "remove",
                "channels",
                "haku",
                "chihiro",
                "--file",
                rc_file,
            ).splitlines()
            == "Only one value can be removed at a time".splitlines()
        )
    def test_file_append_single_input(self, rc_file):
        # Backward test compatibility: when an empty file exists, the formatting is different
        rc_file.unlink()
        config("append", "channels", "flowers", "--file", rc_file)
        assert config("get", "channels", "--file", rc_file).splitlines() == [
            "channels:",
            " - flowers",
        ]
    def test_file_append_multiple_inputs(self, rc_file):
        # Comma-separated values are appended in order.
        with open(rc_file, "w") as f:
            f.write("channels:\n - foo")
        config(
            "append",
            "channels",
            "condesc,mambesc",
            "--file",
            rc_file,
        )
        assert (
            config("get", "channels", "--file", rc_file).splitlines()
            == "channels:\n - foo\n - condesc\n - mambesc".splitlines()
        )
    def test_file_append_multiple_keys(self, rc_file):
        with open(rc_file, "w") as f:
            f.write("channels:\n - foo\ndefault_channels:\n - bar")
        config(
            "append",
            "channels",
            "condesc,mambesc",
            "default_channels",
            "condescd,mambescd",
            "--file",
            rc_file,
        )
        assert (
            config("get", "channels", "--file", rc_file).splitlines()
            == "channels:\n - foo\n - condesc\n - mambesc".splitlines()
        )
        assert (
            config("get", "default_channels", "--file", rc_file).splitlines()
            == "default_channels:\n - bar\n - condescd\n - mambescd".splitlines()
        )
    def test_file_append_invalid_input(self, rc_file):
        # Missing key, invalid key, or a non-list key all cause a CLI error.
        with pytest.raises(subprocess.CalledProcessError):
            config("append", "--file", rc_file)
        with pytest.raises(subprocess.CalledProcessError):
            config("append", "@#A321", "--file", rc_file)
        with pytest.raises(subprocess.CalledProcessError):
            config("append", "json", "true", "--file", rc_file)
        with pytest.raises(subprocess.CalledProcessError):
            config(
                "append",
                "channels",
                "foo,bar",
                "json",
                "true",
                "--file",
                rc_file,
            )
    def test_file_prepend_single_input(self, rc_file):
        # Backward test compatibility: when an empty file exists, the formatting is different
        rc_file.unlink()
        config("prepend", "channels", "flowers", "--file", rc_file)
        assert config("get", "channels", "--file", rc_file).splitlines() == [
            "channels:",
            " - flowers",
        ]
    def test_file_prepend_multiple_inputs(self, rc_file):
        with open(rc_file, "w") as f:
            f.write("channels:\n - foo")
        config(
            "prepend",
            "channels",
            "condesc,mambesc",
            "--file",
            rc_file,
        )
        assert (
            config("get", "channels", "--file", rc_file).splitlines()
            == "channels:\n - condesc\n - mambesc\n - foo".splitlines()
        )
    def test_file_prepend_multiple_keys(self, rc_file):
        with open(rc_file, "w") as f:
            f.write("channels:\n - foo\ndefault_channels:\n - bar")
        config(
            "prepend",
            "channels",
            "condesc,mambesc",
            "default_channels",
            "condescd,mambescd",
            "--file",
            rc_file,
        )
        assert (
            config("get", "channels", "--file", rc_file).splitlines()
            == "channels:\n - condesc\n - mambesc\n - foo".splitlines()
        )
        assert (
            config("get", "default_channels", "--file", rc_file).splitlines()
            == "default_channels:\n - condescd\n - mambescd\n - bar".splitlines()
        )
    def test_file_prepend_invalid_input(self, rc_file):
        with pytest.raises(subprocess.CalledProcessError):
            config("prepend", "--file", rc_file)
        with pytest.raises(subprocess.CalledProcessError):
            config("prepend", "@#A321", "--file", rc_file)
        with pytest.raises(subprocess.CalledProcessError):
            config("prepend", "json", "true", "--file", rc_file)
        with pytest.raises(subprocess.CalledProcessError):
            config(
                "prepend",
                "channels",
                "foo,bar",
                "json",
                "true",
                "--file",
                rc_file,
            )
    def test_file_append_and_prepend_inputs(self, rc_file):
        # Backward test compatibility: when an empty file exists, the formatting is different
        rc_file.unlink()
        config("append", "channels", "flowers", "--file", rc_file)
        config("prepend", "channels", "powers", "--file", rc_file)
        assert config("get", "channels", "--file", rc_file).splitlines() == [
            "channels:",
            " - powers",
            " - flowers",
        ]
    def test_file_set_and_append_inputs(self, rc_file):
        # Backward test compatibility: when an empty file exists, the formatting is different
        rc_file.unlink()
        config("set", "experimental", "true", "--file", rc_file)
        config("append", "channels", "gandalf", "--file", rc_file)
        config("append", "channels", "legolas", "--file", rc_file)
        assert (
            config("get", "experimental", "--file", rc_file).splitlines()
            == "experimental: true".splitlines()
        )
        assert config("get", "channels", "--file", rc_file).splitlines() == [
            "channels:",
            " - gandalf",
            " - legolas",
        ]
    def test_file_set_and_prepend_inputs(self, rc_file):
        # Backward test compatibility: when an empty file exists, the formatting is different
        rc_file.unlink()
        config("set", "experimental", "false", "--file", rc_file)
        config("prepend", "channels", "zelda", "--file", rc_file)
        config("prepend", "channels", "link", "--file", rc_file)
        assert (
            config("get", "experimental", "--file", rc_file).splitlines()
            == "experimental: false".splitlines()
        )
        assert config("get", "channels", "--file", rc_file).splitlines() == [
            "channels:",
            " - link",
            " - zelda",
        ]
    def test_flag_env_set(self, rc_file):
        config("set", "experimental", "false", "--env")
        assert (
            config("get", "experimental", "--env").splitlines()
            == "experimental: false".splitlines()
        )
    def test_flag_env_file_remove_vector(self, rc_file):
        config("prepend", "channels", "thinga-madjiga", "--env")
        config("remove-key", "channels", "--env")
        assert (
            config("get", "channels", "--env").splitlines()
            == "Key is not present in file".splitlines()
        )
    def test_flag_env_file_set_and_append_inputs(self, rc_file):
        config("set", "local_repodata_ttl", "2", "--env")
        config("append", "channels", "finn", "--env")
        config("append", "channels", "jake", "--env")
        assert (
            config("get", "local_repodata_ttl", "--env").splitlines()
            == "local_repodata_ttl: 2".splitlines()
        )
        assert config("get", "channels", "--env").splitlines() == [
            "channels:",
            " - finn",
            " - jake",
        ]
class TestConfigExpandVars:
    """Environment-variable expansion (`${VAR}`) inside configuration values."""

    @staticmethod
    def _roundtrip(rc_file_path, rc_contents):
        # Write the rc contents, then read the effective config back as JSON.
        rc_file_path.write_text(rc_contents)
        return config("list", "--json", "--no-env", "--rc-file", rc_file_path)
    @classmethod
    def _roundtrip_attr(cls, rc_file_path, attr, config_expr):
        # Round-trip a single "attr: expr" line and return the parsed value.
        return cls._roundtrip(rc_file_path, f"{attr}: {config_expr}")[attr]
    @pytest.mark.parametrize("yaml_quote", ["", '"'])
    def test_expandvars_conda(self, monkeypatch, tmpdir_factory, rc_file, yaml_quote):
        """
        Environment variables should be expanded in settings that have expandvars=True.
        Test copied from Conda.
        """
        def _expandvars(attr, config_expr, env_value):
            config_expr = config_expr.replace("'", yaml_quote)
            monkeypatch.setenv("TEST_VAR", env_value)
            return self._roundtrip_attr(rc_file, attr, config_expr)
        ssl_verify = _expandvars("ssl_verify", "${TEST_VAR}", "yes")
        assert ssl_verify
        for attr, env_value in [
            # Not supported by Micromamba
            # ("client_ssl_cert", "foo"),
            # ("client_ssl_cert_key", "foo"),
            ("channel_alias", "http://foo"),
        ]:
            value = _expandvars(attr, "${TEST_VAR}", env_value)
            assert value == env_value
        for attr in [
            # Not supported by Micromamba
            # "migrated_custom_channels",
            # "proxy_servers",
        ]:
            value = _expandvars(attr, "{'x': '${TEST_VAR}'}", "foo")
            assert value == {"x": "foo"}
        for attr in [
            "channels",
            "default_channels",
        ]:
            value = _expandvars(attr, "['${TEST_VAR}']", "foo")
            assert value == ["foo"]
        custom_channels = _expandvars(
            "custom_channels", "{'x': '${TEST_VAR}'}", "http://foo"
        )
        assert custom_channels["x"] == "http://foo"
        custom_multichannels = _expandvars(
            "custom_multichannels", "{'x': ['${TEST_VAR}']}", "http://foo"
        )
        assert len(custom_multichannels["x"]) == 1
        assert custom_multichannels["x"][0] == "http://foo"
        envs_dirs = _expandvars("envs_dirs", "['${TEST_VAR}']", "/foo")
        assert any("foo" in d for d in envs_dirs)
        pkgs_dirs = _expandvars("pkgs_dirs", "['${TEST_VAR}']", "/foo")
        assert any("foo" in d for d in pkgs_dirs)
    @pytest.mark.parametrize(
        "inp,outp",
        [
            ("$", "$"),
            ("$$", "$"),
            ("foo", "foo"),
            ("${foo}bar1", "barbar1"),
            ("$[foo]bar", "$[foo]bar"),
            ("$bar bar", "$bar bar"),
            ("$?bar", "$?bar"),
            ("${foo", "${foo"),
            ("${{foo}}2", "baz1}2"),
            ("$bar$bar", "$bar$bar"),
            # Not supported by Micromamba
            # ("$foo$foo", "barbar"),
            # ("$foo}bar", "bar}bar"),
            # ("$foo$$foo bar", "bar$foo bar"),
            # ("$foo bar", "bar bar"),
            # *(
            #     [
            #         ("%", "%"),
            #         ("foo", "foo"),
            #         ("$foo bar", "bar bar"),
            #         ("${foo}bar", "barbar"),
            #         ("$[foo]bar", "$[foo]bar"),
            #         ("$bar bar", "$bar bar"),
            #         ("$?bar", "$?bar"),
            #         ("$foo}bar", "bar}bar"),
            #         ("${foo", "${foo"),
            #         ("${{foo}}", "baz1}"),
            #         ("$foo$foo", "barbar"),
            #         ("$bar$bar", "$bar$bar"),
            #         ("%foo% bar", "bar bar"),
            #         ("%foo%bar", "barbar"),
            #         ("%foo%%foo%", "barbar"),
            #         ("%%foo%%foo%foo%", "%foo%foobar"),
            #         ("%?bar%", "%?bar%"),
            #         ("%foo%%bar", "bar%bar"),
            #         ("'%foo%'%bar", "'%foo%'%bar"),
            #         ("bar'%foo%", "bar'%foo%"),
            #         ("'$foo'$foo", "'$foo'bar"),
            #         ("'$foo$foo", "'$foo$foo"),
            #     ]
            #     if platform.system() == "Windows"
            #     else []
            # ),
        ],
    )
    @pytest.mark.parametrize("yaml_quote", ["", '"', "'"])
    def test_expandvars_cpython(self, monkeypatch, rc_file, inp, outp, yaml_quote):
        """Tests copied from CPython."""
        monkeypatch.setenv("foo", "bar", True)
        monkeypatch.setenv("{foo", "baz1", True)
        monkeypatch.setenv("{foo}", "baz2", True)
        assert outp == self._roundtrip_attr(
            rc_file, "channel_alias", yaml_quote + inp + yaml_quote
        )
    @pytest.mark.parametrize(
        "inp,outp",
        [
            (
                'x", "y',
                [
                    "${x",
                    "y}",
                ],
            ),
            ("x\ny", ["${x y}"]),
        ],
    )
    def test_envsubst_yaml_mixup(self, monkeypatch, rc_file, inp, outp):
        # Expansion must not be confused by YAML quoting/newlines inside ${...}.
        assert self._roundtrip_attr(rc_file, "channels", f'["${{{inp}}}"]') == outp
    def test_envsubst_empty_var(self, monkeypatch, rc_file):
        monkeypatch.setenv("foo", "", True)
        # Windows does not support empty environment variables
        expected = "${foo}" if platform.system() == "Windows" else ""
        assert self._roundtrip_attr(rc_file, "channel_alias", "'${foo}'") == expected
    def test_envsubst_windows_problem(self, monkeypatch, rc_file):
        # Real-world problematic .condarc file
        condarc = textwrap.dedent(
            """
            channel_alias: https://xxxxxxxxxxxxxxxxxxxx.com/t/${CONDA_API_KEY}/get
            channels:
            - xxxxxxxxxxx
            - yyyyyyyyyyyy
            - conda-forge
            custom_channels:
            yyyyyyyyyyyy: https://${CONDA_CHANNEL_UPLOAD_USER}:${CONDA_CHANNEL_UPLOAD_PASSWORD}@xxxxxxxxxxxxxxx.com
            custom_multichannels:
            conda-forge:
            - https://conda.anaconda.org/conda-forge
            """
        )
        monkeypatch.setenv("CONDA_API_KEY", "kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk", True)
        monkeypatch.setenv("CONDA_CHANNEL_UPLOAD_USER", "uuuuuuuuu", True)
        monkeypatch.setenv(
            "CONDA_CHANNEL_UPLOAD_PASSWORD", "pppppppppppppppppppp", True
        )
        out = self._roundtrip(rc_file, condarc)
        assert (
            out["channel_alias"]
            == "https://xxxxxxxxxxxxxxxxxxxx.com/t/kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk/get"
        )
        assert out["custom_channels"] == {
            "yyyyyyyyyyyy": "https://uuuuuuuuu:pppppppppppppppppppp@xxxxxxxxxxxxxxx.com"
        }
|
15,885 | 02aba038a5c457346576dfbe4d8b97dbee5cfa36 | import tkinter
# Create the main window
win = tkinter.Tk() # note: capital T, lowercase k in "Tk"
win.title("窗口标题")
win.geometry("400x200")
# The bare string below is a note in Chinese: "Frame widget — displays a
# rectangular area on screen, mostly used as a container for other widgets."
'''
框架控件
在屏幕上显示一个矩形区域,多作为容器控件
'''
# Create the base frame
frm = tkinter.Frame(win)
frm.pack()
# left
# Create the left frame on top of the base frame
frm_l = tkinter.Frame(frm)
# frm_l = tkinter.Frame(win) # create on win directly
tkinter.Label(frm_l, text='左上', bg='red').pack(side=tkinter.TOP)
tkinter.Label(frm_l, text='左下', bg='yellow').pack(side=tkinter.TOP)
frm_l.pack(side=tkinter.LEFT)
# right
# frm_r = tkinter.Frame(win) # create on win directly
frm_r = tkinter.Frame(frm)
tkinter.Label(frm_r, text='右上', bg='pink').pack(side=tkinter.TOP)
tkinter.Label(frm_r, text='右下', bg='blue').pack(side=tkinter.TOP)
frm_r.pack(side=tkinter.RIGHT)
win.mainloop() |
15,886 | 95bf0aeb8785716d30e7ff465eefbb861f334a3e | import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn import svm, neighbors
#https://stackoverflow.com/a/38105540
# save load_iris() sklearn dataset to iris
# if you'd like to check dataset type use: type(load_iris())
# if you'd like to view list of attributes use: dir(load_iris())
iris = load_iris()
# Assemble features + numeric class label into a single DataFrame.
data1 = pd.DataFrame(
    data=np.c_[iris['data'], iris['target']],
    columns=iris['feature_names'] + ['target'],
)
X = data1.drop(columns=['target'])
y = data1[['target']]
# print(data1.isnull().sum())
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
# Flatten the single-column label frame once for every classifier's fit().
train_labels = y_train.values.ravel()
y_pred = {}
gauss = GaussianNB()
gauss.fit(X_train, train_labels)
y_pred['gauss'] = gauss.predict(X_test)
# print(y_pred)
svmModel = svm.SVC(gamma='scale')
svmModel.fit(X_train, train_labels)
y_pred['svmModel'] = svmModel.predict(X_test)
knn = neighbors.KNeighborsClassifier(15, weights='uniform')
knn.fit(X_train, train_labels)
y_pred['knn'] = knn.predict(X_test)
# Report the weighted F1 score of each model on the held-out split.
for name, predicted in y_pred.items():
    print(name + ": " + str(f1_score(y_test, predicted, average='weighted')))
# export_csv = data1.to_csv (r'C:\Users\Leo\Desktop\export_dataframe.csv', index = None, header=True)
|
15,887 | 8b106b455175d325fafa6847fabd1212f5ce3dc5 | # creates a newgameplay.json with all the data you need
from Battlerite.championData.dataParser.inireader import read_and_convert
from Battlerite.championData.dataParser.jsonreader import combine_json_data
from flask import jsonify
import json, os
def update_champion_data():
    """Regenerate the combined champion data: convert the ini source, then
    merge it with the JSON layout (writes newgameplay.json as a side effect
    of the called helpers — confirm in their modules)."""
    read_and_convert()
    combine_json_data()
def get_champion_data():
    """Load newgameplay.json (located next to this script) and return it
    as a Flask JSON response together with HTTP status 200."""
    script_dir = os.path.dirname(__file__)  # <-- absolute dir the script is in
    gameplay_path = os.path.join(script_dir, "newgameplay.json")
    # combines the layout of gameplay.json with the data from english.ini
    with open(gameplay_path, "r") as gameplay:
        payload = json.load(gameplay)
    return jsonify(payload), 200
|
15,888 | 440a79e329a2de4c1933412ae5248149e5db2132 | import sys
sys.path.insert(0, "..")
sys.path.insert(0, "../../common")
import json
import rospy
import random
import time
import serial
import thread
from klampt import *
from klampt.glprogram import *
from klampt import vectorops,so3,se3,gldraw,ik,loader
from OpenGL.GL import *
from planning.MyGLViewer import MyGLViewer
from util.constants import *
from Motion import motion
from perception import perception
from planning import planning
from sensor_msgs.msg import PointCloud2
from sensor_msgs.msg import Image
import sensor_msgs.point_cloud2 as pc2
import scipy.io as sio
import subprocess
import operator
# Rest configuration for the Baxter robot model: 54 joint values, all zero.
baxter_rest_config = [0.0]*54
class FullIntegrationMaster:
# Assorted functions
# ==================
    def __init__(self, world):
        """Set up controller state for the given Klampt world: bin bookkeeping,
        robot model/link indices, camera & vacuum transforms, the vacuum serial
        link (if REAL_VACUUM), and the pick work order (if DRY_RUN)."""
        self.world = world
        self.current_bin = 'A'
        # Per-bin progress: 'done' flag and known 'contents'; a 'target' item is
        # added later from the work order.
        self.bin_state = {'A': {'done': False, 'contents': []},'B': {'done': False, 'contents': []}, 'C': {'done': False, 'contents': []}, 'D': {'done': False, 'contents': []}, 'E': {'done': False, 'contents': []}, 'F': {'done': False, 'contents': []}, 'G': {'done': False, 'contents': []}, 'H': {'done': False, 'contents': []}, 'I': {'done': False, 'contents': []}, 'J': {'done': False, 'contents': []}, 'K': {'done': False, 'contents': []}, 'L': {'done': False, 'contents': []}}
        self.robotModel = world.robot(0)
        self.state = INITIAL_STATE
        self.config = self.robotModel.getConfig()
        self.left_arm_links = [self.robotModel.link(i) for i in LEFT_ARM_LINK_NAMES]
        self.right_arm_links = [self.robotModel.link(i) for i in RIGHT_ARM_LINK_NAMES]
        # Map link IDs back to link indices so arm joints can be addressed by index.
        id_to_index = dict([(self.robotModel.link(i).getID(),i) for i in range(self.robotModel.numLinks())])
        self.left_arm_indices = [id_to_index[i.getID()] for i in self.left_arm_links]
        self.right_arm_indices = [id_to_index[i.getID()] for i in self.right_arm_links]
        self.Tcamera = se3.identity()
        self.Tvacuum = se3.identity()
        self.calibratedCameraXform = RIGHT_F200_CAMERA_CALIBRATED_XFORM
        self.object_com = [0, 0, 0]
        self.points1 = []
        self.points2 = []
        self.vacuumPc = Geometry3D()
        self.vacuumPc.loadFile(VACUUM_PCD_FILE)
        # Set up serial
        if REAL_VACUUM:
            self.serial = serial.Serial()
            self.serial.port = ARDUINO_SERIAL_PORT
            self.serial.baudrate = 9600
            self.serial.open()
            if self.serial.isOpen():
                # Handshake with the Arduino, drain its reply, start with vacuum off.
                self.serial.write("hello")
                response = self.serial.read(self.serial.inWaiting())
                self.turnOffVacuum()
        # Load JSON
        if DRY_RUN:
            with open(PICK_JSON_PATH) as pick_json_file:
                raw_json_data = json.load(pick_json_file)
            for k in self.bin_state:
                self.bin_state[k]['contents'] = raw_json_data['bin_contents']['bin_'+k]
            for my_dict in raw_json_data['work_order']:
                # Entries look like {'bin': 'bin_X', 'item': ...}; index 4 is the letter.
                bin_letter = my_dict['bin'][4]
                self.bin_state[bin_letter]['target'] = my_dict['item']
            self.current_bin = planning.selectBin(self.bin_state)
    def start(self):
        """Start the physical robot (Motion layer), init the ROS node, and
        enter the main control loop."""
        motion.setup(mode='physical',klampt_model=os.path.join(KLAMPT_MODELS_DIR,"baxter_col.rob"),libpath=LIBPATH)
        motion.robot.startup()
        rospy.init_node("listener", anonymous=True)
        self.loop()
    def drawStuff(self):
        """Render debug overlays with raw OpenGL: the vacuum and camera frames,
        two downsampled point clouds, the object center of mass, and the vacuum
        point cloud. Returns the world for the caller to draw."""
        glDisable(GL_LIGHTING)
        gldraw.xform_widget(self.Tvacuum,0.1,0.01)
        gldraw.xform_widget(self.Tcamera,0.1,0.01)
        # Point cloud 1 in blue, every 25th point.
        glPointSize(5.0)
        glColor3f(0.0,0.0,1.0)
        glBegin(GL_POINTS)
        for point in self.points1[::25]:
            glVertex3f(point[0], point[1], point[2])
        glEnd()
        # Point cloud 2 in red, every 25th point.
        glColor3f(1.0,0.0,0.0)
        glBegin(GL_POINTS)
        for point in self.points2[::25]:
            glVertex3f(point[0], point[1], point[2])
        glEnd()
        # Object center of mass as one large green point.
        glPointSize(20.0)
        glColor3f(0.0,0.8,0.0)
        glBegin(GL_POINTS)
        glVertex3f(self.object_com[0], self.object_com[1], self.object_com[2])
        glEnd()
        # Vacuum point cloud in red.
        glPointSize(5.0)
        glColor3f(1.0,0.0,0.0)
        glBegin(GL_POINTS)
        for i in range(self.vacuumPc.getPointCloud().numPoints()):
            point = self.vacuumPc.getPointCloud().getPoint(i)
            glVertex3f(point[0], point[1], point[2])
        glEnd()
        return self.world
    def load_real_robot_state(self):
        """Makes the robot model match the real robot"""
        self.robotModel.setConfig(motion.robot.getKlamptSensedPosition())
    def set_model_right_arm(self, q):
        """Set the model's right-arm joints to q (one value per right-arm link),
        keeping all other joints at the real robot's sensed position."""
        destination = motion.robot.getKlamptSensedPosition()
        for index,v in enumerate(self.right_arm_indices):
            destination[v] = q[index]
        self.robotModel.setConfig(destination)
    def turnOnVacuum(self):
        """Switch the vacuum on: send 'H' over serial, or just log when faked."""
        if REAL_VACUUM:
            self.serial.write('H')
        else:
            print OKBLUE + "Fake vacuum is on" + END_COLOR
    def turnOffVacuum(self):
        """Switch the vacuum off: send 'L' over serial, or just log when faked."""
        if REAL_VACUUM:
            self.serial.write('L')
        else:
            print OKBLUE + "Fake vacuum is off" + END_COLOR
    def calibrateCamera(self):
        """Interactively adjust the camera calibration transform.

        Prompts for "<axis>,<delta>"; x/y/z translate, xr/yr/zr rotate about
        that axis, then stores the updated (R, t) in calibratedCameraXform.
        """
        print self.calibratedCameraXform
        calibrateR = self.calibratedCameraXform[0];
        calibrateT = self.calibratedCameraXform[1];
        try:
            input_var = raw_input("Enter joint and angle to change to separated by a comma: ").split(',');
            #translational transformation
            if(input_var[0] == "x" ):
                calibrateT[0] = calibrateT[0] + float(input_var[1])
            elif(input_var[0] == "y" ):
                calibrateT[1] = calibrateT[1] + float(input_var[1])
            elif(input_var[0] == "z" ):
                calibrateT[2] = calibrateT[2] + float(input_var[1])
            #rotational transformations
            elif(input_var[0] == "xr" ):
                calibrateR = so3.mul(calibrateR, so3.rotation([1, 0, 0], float(input_var[1])))
            elif(input_var[0] == "yr" ):
                calibrateR = so3.mul(calibrateR, so3.rotation([0, 1, 0], float(input_var[1])))
            elif(input_var[0] == "zr" ):
                calibrateR = so3.mul(calibrateR, so3.rotation([0, 0, 1], float(input_var[1])))
            time.sleep(0.1);
            self.calibratedCameraXform = (calibrateR, calibrateT)
        # NOTE(review): bare except swallows all errors (including typos in input).
        except:
            print "input error\n"
        print "printing camera calibration"
        print self.calibratedCameraXform
    def calibrateShelf(self):
        """Interactively adjust the shelf rigid object's transform.

        Same input scheme as calibrateCamera: x/y/z translate, xr/yr/zr rotate;
        applies the result to world.rigidObject(0).
        """
        calibratedShelfXform = self.world.rigidObject(0).getTransform()
        print calibratedShelfXform
        calibrateR = calibratedShelfXform[0];
        calibrateT = calibratedShelfXform[1];
        try:
            input_var = raw_input("Enter joint and angle to change to separated by a comma: ").split(',');
            #translational transformation
            if(input_var[0] == "x" ):
                calibrateT[0] = calibrateT[0] + float(input_var[1])
            elif(input_var[0] == "y" ):
                calibrateT[1] = calibrateT[1] + float(input_var[1])
            elif(input_var[0] == "z" ):
                calibrateT[2] = calibrateT[2] + float(input_var[1])
            #rotational transformations
            elif(input_var[0] == "xr" ):
                calibrateR = so3.mul(calibrateR, so3.rotation([1, 0, 0], float(input_var[1])))
            elif(input_var[0] == "yr" ):
                calibrateR = so3.mul(calibrateR, so3.rotation([0, 1, 0], float(input_var[1])))
            elif(input_var[0] == "zr" ):
                calibrateR = so3.mul(calibrateR, so3.rotation([0, 0, 1], float(input_var[1])))
            time.sleep(0.1);
            self.world.rigidObject(0).setTransform(calibrateR, calibrateT)
            print self.world.rigidObject(0).getTransform()
        # NOTE(review): bare except swallows all errors (including typos in input).
        except:
            print "input error\n"
        print "printing shelf calibration"
        print self.world.rigidObject(0).getTransform()
# IK and motion planning
# ======================
    def elbow_up(self):
        """Return True iff every right-arm joint of the current model config lies
        strictly inside its ELBOW_UP_BOUNDS interval."""
        destination = self.robotModel.getConfig()
        for index,v in enumerate(self.right_arm_indices):
            if not (ELBOW_UP_BOUNDS[index][0] < destination[v] and destination[v] < ELBOW_UP_BOUNDS[index][1]):
                return False
        return True
def right_arm_ik(self, right_target, ignore_elbow_up_constraint=True):
    """Solves IK to move the right arm to the specified
    right_target ([x, y, z] in world space)

    Seeds the model from Q_IK_SEED_<bin> and restarts the solver up to
    1000 times.  Returns True on success (model config left at the
    solution), False otherwise.
    """
    self.load_real_robot_state()
    # NOTE(review): eval() of the bin-specific constant name works but a
    # dict/getattr lookup would be safer; kept as-is.
    self.set_model_right_arm(eval('Q_IK_SEED_' + self.current_bin))
    qmin,qmax = self.robotModel.getJointLimits()
    for i in range(1000):
        # Second objective: a point 0.5 m along the tool x-axis must land
        # 0.5 m below the target, pinning the wrist orientation.
        point2_local = vectorops.add(VACUUM_POINT_XFORM[1], [.5, 0, 0])
        point2_world = vectorops.add(right_target, [0, 0, -.5])
        goal1 = ik.objective(self.robotModel.link('right_wrist'),local=VACUUM_POINT_XFORM[1],world=right_target)
        goal2 = ik.objective(self.robotModel.link('right_wrist'),local=point2_local,world=point2_world)
        if ik.solve([goal1, goal2],tol=0.0001) and (self.elbow_up() or ignore_elbow_up_constraint):
            return True
    print FAIL_COLOR + "right_arm_ik failed for " + str(right_target) + END_COLOR
    if not (self.elbow_up() or ignore_elbow_up_constraint):
        print FAIL_COLOR + str([self.robotModel.getConfig()[v] for v in self.right_arm_indices]) + END_COLOR
        print FAIL_COLOR + "IK found but elbow wasn't up" + END_COLOR
    return False
def right_arm_ik_near_seed(self, right_target, ignore_elbow_up_constraint=True):
    """Solves IK to move the right arm to the specified
    right_target ([x, y, z] in world space)

    Unlike right_arm_ik, over 1000 solver restarts this keeps the solution
    whose joint vector is closest (Euclidean norm) to the Q_IK_SEED_<bin>
    seed, and leaves the model arm set to that best solution.
    """
    self.load_real_robot_state()
    self.set_model_right_arm(eval('Q_IK_SEED_' + self.current_bin))
    # Distance of the best solution found so far; -1 means none yet.
    oldRSquared = -1
    q_ik = None
    qmin,qmax = self.robotModel.getJointLimits()
    for i in range(1000):
        point2_local = vectorops.add(VACUUM_POINT_XFORM[1], [.5, 0, 0])
        point2_world = vectorops.add(right_target, [0, 0, -.5])
        goal1 = ik.objective(self.robotModel.link('right_wrist'),local=VACUUM_POINT_XFORM[1],world=right_target)
        goal2 = ik.objective(self.robotModel.link('right_wrist'),local=point2_local,world=point2_world)
        if ik.solve([goal1, goal2],tol=0.0001) and (self.elbow_up() or ignore_elbow_up_constraint):
            q_vals = [self.robotModel.getConfig()[v] for v in self.right_arm_indices];
            rSquared = vectorops.norm(vectorops.sub(q_vals, eval('Q_IK_SEED_' + self.current_bin)))
            if( oldRSquared <0 or oldRSquared > rSquared):
                oldRSquared = rSquared
                q_ik = q_vals
    # NOTE(review): this failure banner prints even when a solution WAS
    # found (see the oldRSquared check below) -- looks unintentional.
    print FAIL_COLOR + "right_arm_ik failed for " + str(right_target) + END_COLOR
    if not (self.elbow_up() or ignore_elbow_up_constraint):
        print FAIL_COLOR + str([self.robotModel.getConfig()[v] for v in self.right_arm_indices]) + END_COLOR
        print FAIL_COLOR + "IK found but elbow wasn't up" + END_COLOR
    if(oldRSquared >= 0):
        self.set_model_right_arm(q_ik)
        return True
    return False
# Main control loop
# =================
def loop(self):
    """Main control loop: a finite-state machine that scans a bin, locates
    the target object (real or fake perception), grasps it with the vacuum,
    and stows it.  Runs until the DONE state or a KeyboardInterrupt.

    NOTE(review): nesting below was reconstructed from a whitespace-mangled
    source; verify block boundaries against the original file.
    """
    try:
        while True:
            print OKBLUE + "Bin " + str(self.current_bin) + ": " + self.state + END_COLOR
            if self.state == 'VISUAL_DEBUG':
                # Hard-coded COM used only to exercise the visualizer.
                self.object_com = [1.0839953170961105, -0.25145094946424207, 1.1241831909823194]
                #self.set_model_right_arm( [0.1868786927065213, -1.567604604679142, 0.6922768776941961, 1.5862815343628953, -0.005567750307534711, -0.017979599494945674, 0.0035268645585939083])
                self.load_real_robot_state()
            else:
                self.load_real_robot_state()
            # Refresh the camera/vacuum transforms and vacuum point cloud
            # from the current robot pose on every iteration.
            self.Tcamera = se3.mul(self.robotModel.link('right_lower_forearm').getTransform(), self.calibratedCameraXform)
            self.Tvacuum = se3.mul(self.robotModel.link('right_wrist').getTransform(), VACUUM_POINT_XFORM)
            self.vacuumPc = Geometry3D()
            self.vacuumPc.loadFile(VACUUM_PCD_FILE)
            temp_xform = self.robotModel.link('right_wrist').getTransform()
            self.vacuumPc.transform(self.Tvacuum[0], self.Tvacuum[1])
            if self.state == 'DEBUG_COLLISION_CHECKER':
                temp_planner = planning.LimbPlanner(self.world, self.vacuumPc)
                print 'Is this configuration collision free?'
                print temp_planner.check_collision_free('right')
                sys.stdout.flush()
            elif self.state == 'FAKE_PATH_PLANNING':
                self.object_com = [1.114, -.077, 1.412]
                self.right_arm_ik(self.object_com)
                self.Tcamera = se3.mul(self.robotModel.link('right_lower_forearm').getTransform(), self.calibratedCameraXform)
                self.Tvacuum = se3.mul(self.robotModel.link('right_wrist').getTransform(), VACUUM_POINT_XFORM)
                # Effectively park here forever for inspection.
                time.sleep(2342340)
            elif self.state == 'START':
                self.state = "MOVE_TO_SCAN_BIN"
            elif self.state == 'MOVE_TO_SCAN_BIN':
                # Walk through the pre-scan waypoints, then queue the scan pose.
                for milestone in eval('Q_BEFORE_SCAN_' + self.current_bin):
                    print "Moving to " + str(milestone)
                    motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(milestone))
                    while motion.robot.right_mq.moving():
                        time.sleep(1)
                    time.sleep(1)
                motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(eval('Q_SCAN_BIN_' + self.current_bin)))
                self.state = 'MOVING_TO_SCAN_BIN'
            elif self.state == 'MOVING_TO_SCAN_BIN':
                if not motion.robot.right_mq.moving():
                    self.wait_start_time = time.time()
                    self.state = 'WAITING_TO_SCAN_BIN'
            elif self.state == 'WAITING_TO_SCAN_BIN':
                # Let the arm settle before trusting the camera.
                if time.time() - self.wait_start_time > SCAN_WAIT_TIME:
                    if REAL_PERCEPTION:
                        self.state = 'SCANNING_BIN'
                    else:
                        self.state = 'FAKE_SCANNING_BIN'
            elif self.state == 'SCANNING_BIN':
                print "Waiting for message from camera"
                cloud = rospy.wait_for_message(ROS_DEPTH_TOPIC, PointCloud2)
                if perception.isCloudValid(cloud):
                    cloud = perception.convertPc2ToNp(cloud)
                    # Downsample, keep a world-space copy for display.
                    np_cloud = cloud[::STEP]
                    self.points1 = []
                    for point in np_cloud:
                        transformed = se3.apply(self.Tcamera, point)
                        self.points1.append(transformed)
                    np_cloud = perception.subtractShelf(np_cloud, self.current_bin)
                    self.points2 = []
                    for point in np_cloud:
                        transformed = se3.apply(self.Tcamera, point)
                        self.points2.append(transformed)
                    self.object_com = se3.apply(self.Tcamera, perception.com(np_cloud))
                    if PRINT_BLOBS:
                        print np_cloud
                    # Hand the cloud to the external segmenter via a .mat
                    # file plus a "go" flag file.
                    sio.savemat(CLOUD_MAT_PATH, {'cloud':np_cloud})
                    fo = open(CHENYU_GO_PATH, "w")
                    fo.write("chenyugo")
                    fo.close()
                    self.object_com = se3.apply(self.Tcamera, perception.com(np_cloud))
                    if CALIBRATE_CAMERA:
                        self.state = "CALIBRATE_CAMERA"
                    elif SEGMENT:
                        self.state = "WAITING_FOR_SEGMENTATION"
                    else:
                        self.state = "MOVE_TO_GRASP_OBJECT"
                else:
                    print FAIL_COLOR + "Got an invalid cloud, trying again" + END_COLOR
            elif self.state == 'FAKE_SCANNING_BIN':
                # Canned COM used when REAL_PERCEPTION is off.
                self.object_com = [1.1114839836097854, -0.6087936130127559, 0.9899267043340634]
                if DRY_RUN:
                    raw_input("Hit enter to continue: ")
                self.state = 'MOVE_TO_GRASP_OBJECT'
            elif self.state == 'CALIBRATE_CAMERA':
                self.calibrateCamera()
                self.state = 'SCANNING_BIN'
            elif self.state == 'CALIBRATE_SHELF':
                self.calibrateShelf()
                #make blocking
            elif self.state == 'WAITING_FOR_SEGMENTATION':
                # The external segmenter signals completion by creating
                # the "done" flag file.
                if os.path.isfile(CHENYU_DONE_PATH):
                    os.remove(CHENYU_DONE_PATH)
                    self.state = 'POSTPROCESS_SEGMENTATION'
            elif self.state == 'POSTPROCESS_SEGMENTATION':
                object_blobs = []
                time.sleep(5)
                # Collect all segment files seg1.mat .. seg19.mat.
                for i in range(1,20):
                    seg_file_path = MAT_PATH + "seg" + str(i) + ".mat"
                    print seg_file_path
                    if os.path.isfile(seg_file_path):
                        print seg_file_path + " exists"
                        mat_contents = sio.loadmat(seg_file_path)
                        r = mat_contents['r']
                        r = r[r[:,1]!=0, :]
                        object_blobs.append(r)
                        os.remove(seg_file_path)
                if PRINT_BLOBS:
                    print "============="
                    print "object blobs"
                    print object_blobs
                    print "============="
                if len(object_blobs) == 0:
                    self.state = 'BIN_DONE'
                else:
                    # Match each blob against the bin's known contents and
                    # keep the best-scoring cloud per label.
                    object_list = [ITEM_NUMBERS[item_str] for item_str in self.bin_state[self.current_bin]['contents']]
                    target = ITEM_NUMBERS[self.bin_state[self.current_bin]['target']]
                    histogram_dict = perception.loadHistogram(object_list)
                    cloud_label = {} # key is the label of object, value is cloud points
                    label_score = {} # key is the label, value is the current score for the object
                    for object_cloud in object_blobs:
                        if PRINT_BLOBS:
                            print "====================="
                            print 'object_cloud'
                            print object_cloud
                            print '====================='
                        object_cloud = perception.resample(cloud,object_cloud,3)
                        label,score = perception.objectMatch(object_cloud,histogram_dict)
                        if label in cloud_label:
                            if label_score[label] < score:
                                label_score[label] = score
                                cloud_label[label] = object_cloud
                        else:
                            cloud_label[label] = object_cloud
                            label_score[label] = score
                    if PRINT_LABELS_AND_SCORES:
                        print cloud_label
                        print "============="
                        print label_score
                    if target in cloud_label:
                        # Target identified directly; use its blob's COM.
                        self.object_com = se3.apply(self.Tcamera, perception.com(cloud_label[target]))
                        self.points1 = []
                        for point in cloud_label[target]:
                            transformed = se3.apply(self.Tcamera, point)
                            self.points1.append(transformed)
                        self.points2 = []
                    else:
                        # Fall back to the blob that best matches the target
                        # histogram alone.
                        cloud_score = {}
                        histogram_dict = perception.loadHistogram([target])
                        for object_cloud in object_blobs:
                            object_cloud = perception.resample(cloud,object_cloud,3)
                            label,score = perception.objectMatch(object_cloud,histogram_dict)
                            cloud_score[score] = object_cloud
                        sorted_cloud = sorted(cloud_score.items(), key=operator.itemgetter(0),reverse = True)
                        score = sorted_cloud[0][0]
                        com = perception.com(sorted_cloud[0][1])
                        self.points1 = []
                        self.points2 = []
                        for point in sorted_cloud[0][1]:
                            transformed = se3.apply(self.Tcamera, point)
                            self.points1.append(transformed)
                        self.object_com = se3.apply(self.Tcamera, com)
                    self.state = 'MOVE_TO_GRASP_OBJECT'
            elif self.state == 'MOVE_TO_GRASP_OBJECT':
                # Retrace the post-scan waypoints, then approach via a
                # midpoint between the vacuum tip and the object COM.
                for milestone in eval('Q_AFTER_SCAN_' + self.current_bin):
                    print "Moving to " + str(milestone)
                    motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(milestone))
                    while motion.robot.right_mq.moving():
                        time.sleep(1)
                    time.sleep(1)
                motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(eval('Q_IK_SEED_' + self.current_bin)))
                while motion.robot.right_mq.moving():
                    time.sleep(1)
                time.sleep(1)
                self.load_real_robot_state()
                self.Tvacuum = se3.mul(self.robotModel.link('right_wrist').getTransform(), VACUUM_POINT_XFORM)
                print WARNING_COLOR + str(self.object_com) + END_COLOR
                self.object_com = vectorops.add(self.object_com, COM_ADJUSTMENT)
                current_vacuum_point = se3.apply(self.Tvacuum, [0, 0, 0])
                milestone = vectorops.add(current_vacuum_point, self.object_com)
                milestone = vectorops.div(milestone, 2)
                if DRY_RUN:
                    self.state = 'MOVING_TO_GRASP_OBJECT'
                else:
                    # First IK: midpoint approach pose.
                    if self.right_arm_ik(milestone):
                        destination = self.robotModel.getConfig()
                        self.q_milestone = [destination[v] for v in self.right_arm_indices]
                        print WARNING_COLOR + "IK config for " + str(milestone) + ": " + str(self.q_milestone) + END_COLOR
                        motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(self.q_milestone))
                    else:
                        print FAIL_COLOR + "Error: IK failed" + END_COLOR
                        sys.stdout.flush()
                        motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(eval('Q_IK_SEED_' + self.current_bin)))
                        while motion.robot.right_mq.moving():
                            time.sleep(1)
                        time.sleep(1)
                    while motion.robot.right_mq.moving():
                        time.sleep(1)
                    time.sleep(1)
                    # Second IK: the object COM itself.
                    if self.right_arm_ik(self.object_com):
                        destination = self.robotModel.getConfig()
                        print WARNING_COLOR + "IK config for " + str(self.object_com) + ": " + str([destination[v] for v in self.right_arm_indices]) + END_COLOR
                        motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig([destination[v] for v in self.right_arm_indices]))
                        self.state = 'MOVING_TO_GRASP_OBJECT'
                    else:
                        print FAIL_COLOR + "Error: IK failed" + END_COLOR
                        sys.stdout.flush()
                        motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(eval('Q_IK_SEED_' + self.current_bin)))
                        while motion.robot.right_mq.moving():
                            time.sleep(1)
                        time.sleep(1)
            elif self.state == 'MOVING_TO_GRASP_OBJECT':
                if not motion.robot.right_mq.moving():
                    time.sleep(1)
                    self.state = 'GRASP_OBJECT'
            elif self.state == 'GRASP_OBJECT':
                # Descend onto the object and switch the vacuum on.
                move_target = se3.apply(self.Tvacuum, [0, 0, 0])
                move_target[2] = move_target[2] - GRASP_MOVE_DISTANCE - (MEAN_OBJ_HEIGHT / 2)
                if self.right_arm_ik(move_target):
                    self.turnOnVacuum()
                    destination = self.robotModel.getConfig()
                    motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig([destination[v] for v in self.right_arm_indices]))
                else:
                    print FAIL_COLOR + "Error: IK failed" + END_COLOR
                    sys.stdout.flush()
                    motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(eval('Q_IK_SEED_' + self.current_bin)))
                    while motion.robot.right_mq.moving():
                        time.sleep(1)
                    time.sleep(1)
                self.wait_start_time = time.time()
                self.state = 'WAITING_TO_GRASP_OBJECT'
            elif self.state == 'WAITING_TO_GRASP_OBJECT':
                if time.time() - self.wait_start_time > GRASP_WAIT_TIME:
                    self.state = 'MOVE_UP_BEFORE_RETRACT'
            elif self.state == 'MOVE_UP_BEFORE_RETRACT':
                # Lift straight up before retracting out of the bin.
                move_target = se3.apply(self.Tvacuum, [0, 0, 0])
                move_target[2] = move_target[2] + GRASP_MOVE_DISTANCE + (MEAN_OBJ_HEIGHT / 2)
                if self.right_arm_ik(move_target):
                    self.turnOnVacuum()
                    destination = self.robotModel.getConfig()
                    motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig([destination[v] for v in self.right_arm_indices]))
                else:
                    print FAIL_COLOR + "Error: IK failed" + END_COLOR
                    sys.stdout.flush()
                    motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(eval('Q_IK_SEED_' + self.current_bin)))
                    while motion.robot.right_mq.moving():
                        time.sleep(1)
                    time.sleep(1)
                self.state = 'MOVE_TO_STOW_OBJECT'
            elif self.state == 'MOVE_TO_STOW_OBJECT':
                # Retrace approach waypoints in reverse, then go to Q_STOW.
                motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(eval('Q_IK_SEED_' + self.current_bin)))
                while motion.robot.right_mq.moving():
                    time.sleep(1)
                time.sleep(1)
                if not DRY_RUN:
                    motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(self.q_milestone))
                    while motion.robot.right_mq.moving():
                        time.sleep(1)
                    time.sleep(1)
                for milestone in eval('Q_AFTER_GRASP_' + self.current_bin):
                    print "Moving to " + str(milestone)
                    motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(milestone))
                    while motion.robot.right_mq.moving():
                        time.sleep(1)
                    time.sleep(1)
                motion.robot.right_mq.appendLinear(MOVE_TIME, Q_STOW)
                self.state = 'MOVING_TO_STOW_OBJECT'
            elif self.state == 'MOVING_TO_STOW_OBJECT':
                if not motion.robot.right_mq.moving():
                    self.state = 'STOWING_OBJECT'
            elif self.state == 'STOWING_OBJECT':
                self.turnOffVacuum()
                self.wait_start_time = time.time()
                self.state = 'WAITING_FOR_SECURE_STOW'
            elif self.state == 'WAITING_FOR_SECURE_STOW':
                if time.time() - self.wait_start_time > GRASP_WAIT_TIME:
                    self.state = 'BIN_DONE' if SELECT_REAL_BIN else 'DONE'
            elif self.state == 'BIN_DONE':
                # Mark this bin finished and pick the next one (None => done).
                self.bin_state[self.current_bin]['done'] = True
                self.current_bin = planning.selectBin(self.bin_state)
                if self.current_bin is None:
                    self.state = 'DONE'
                else:
                    self.state = 'START'
            elif self.state == 'DONE':
                print "actual vacuum point: ", se3.apply(self.Tvacuum, [0, 0, 0])
            else:
                print FAIL_COLOR + "Unknown state" + END_COLOR
            time.sleep(1)
    except KeyboardInterrupt:
        # Ctrl-C: halt the arm safely before exiting.
        motion.robot.stopMotion()
        sys.exit(0)
def setupWorld():
    """Build and return the Klamp't WorldModel: Baxter robot, Kiva pod,
    and ground plane.  Raises the robot base 0.95 m and applies the
    calibrated shelf transform to the pod."""
    world = WorldModel()
    print "Loading full Baxter model (be patient, this will take a minute)..."
    world.loadElement(os.path.join(KLAMPT_MODELS_DIR,"baxter.rob"))
    #print "Loading simplified Baxter model..."
    #world.loadElement(os.path.join(KLAMPT_MODELS_DIR,"baxter_col.rob"))
    print "Loading Kiva pod model..."
    world.loadElement(os.path.join(KLAMPT_MODELS_DIR,"kiva_pod/model.obj"))
    print "Loading plane model..."
    world.loadElement(os.path.join(KLAMPT_MODELS_DIR,"plane.env"))
    # Mount the robot 0.95 m above the ground plane.
    Rbase,tbase = world.robot(0).link(0).getParentTransform()
    world.robot(0).link(0).setParentTransform(Rbase,(0,0,0.95))
    world.robot(0).setConfig(world.robot(0).getConfig())
    #Trel = (so3.rotation((0,0,1),-math.pi/2), SHELF_MODEL_XFORM)
    # Apply the pre-calibrated shelf transform to rigid object 0 (the pod).
    Trel = SHELF_MODEL_XFORM_CALIBRATED
    T = world.rigidObject(0).getTransform()
    world.rigidObject(0).setTransform(*se3.mul(Trel,T))
    return world
def visualizerThreadFunction():
    """Thread entry point: run the module-global visualizer's GL loop."""
    visualizer.run()
if __name__ == '__main__':
    # Build the world, wire the master FSM and viewer together, then run
    # the visualizer on a background thread while the master runs here.
    world = setupWorld()
    master = FullIntegrationMaster(world)
    visualizer = MyGLViewer(world, master)
    thread.start_new_thread(visualizerThreadFunction, ())
    master.start()
|
15,889 | 81e2f141a3de0a8f2831645e5b8de103f68841ea | from django.apps import AppConfig
class BeentoConfig(AppConfig):
    """Django application configuration for the BeenTo app."""
    # Dotted module path used to register the app in INSTALLED_APPS.
    name = 'BeenTo'
|
15,890 | 810b10c42c6c0edd90c18c880272d95a0866a0bf | num1, num2 = input().split()
def reverse_Num(num):
    """Return *num* with its decimal digits reversed.

    Generalizes the original implementation, which hard-coded exactly three
    digits (and raised IndexError otherwise): any non-negative integer now
    works, e.g. 123 -> 321, 120 -> 21, 7 -> 7.  Results for 3-digit inputs
    are unchanged (leading zeros in the reversal are dropped, as before).
    """
    # Reverse the decimal string; int() drops any leading zeros.
    return int(str(int(num))[::-1])
def num_Comparison(num1, num2):
    """Print the larger of the two values (num2 wins ties)."""
    larger = num1 if num1 > num2 else num2
    print(larger)
# Reverse each input number's digits, then print the larger reversal.
new_num1 = reverse_Num(int(num1))
new_num2 = reverse_Num(int(num2))
num_Comparison(new_num1, new_num2)
|
15,891 | 084333abadb23cc2acb335092f00edaaa260cf1a | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-08-18 10:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ProfileAbout table (personal profile
    plus an 11-line poem stored as individual CharFields).

    NOTE(review): 'emial' looks like a typo for 'email'; fixing the field
    name would require a follow-up RenameField migration, so it is kept.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ProfileAbout',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=10, verbose_name='\u540d\u5b57')),
                ('birthday', models.CharField(max_length=20, verbose_name='\u751f\u65e5')),
                ('emial', models.CharField(max_length=50, verbose_name='\u90ae\u7bb1')),
                ('phone', models.CharField(max_length=11, verbose_name='\u624b\u673a\u53f7\u7801')),
                ('adress', models.CharField(max_length=50, verbose_name='\u5730\u5740')),
                ('good_at1', models.CharField(max_length=50, verbose_name='\u7279\u957f1')),
                ('good_at2', models.CharField(max_length=50, verbose_name='\u7279\u957f2')),
                ('good_at3', models.CharField(max_length=50, verbose_name='\u7279\u957f3')),
                ('poem_title', models.CharField(max_length=20, verbose_name='\u8bd7\u6b4c\u9898\u76ee')),
                ('poem_content1', models.CharField(max_length=50, verbose_name='\u8bd7\u53e51')),
                ('poem_content2', models.CharField(max_length=50, verbose_name='\u8bd7\u53e52')),
                ('poem_content3', models.CharField(max_length=50, verbose_name='\u8bd7\u53e53')),
                ('poem_content4', models.CharField(max_length=50, verbose_name='\u8bd7\u53e54')),
                ('poem_content5', models.CharField(max_length=50, verbose_name='\u8bd7\u53e55')),
                ('poem_content6', models.CharField(max_length=50, verbose_name='\u8bd7\u53e56')),
                ('poem_content7', models.CharField(max_length=50, verbose_name='\u8bd7\u53e57')),
                ('poem_content8', models.CharField(max_length=50, verbose_name='\u8bd7\u53e58')),
                ('poem_content9', models.CharField(max_length=50, verbose_name='\u8bd7\u53e59')),
                ('poem_content10', models.CharField(max_length=50, verbose_name='\u8bd7\u53e510')),
                ('poem_content11', models.CharField(max_length=50, verbose_name='\u8bd7\u53e511')),
            ],
            options={
                'verbose_name': '\u4e2a\u4eba\u7b80\u4ecb',
                'verbose_name_plural': '\u4e2a\u4eba\u7b80\u4ecb',
            },
        ),
    ]
|
15,892 | d9b41178b8747d8f0f023a089377ca07855d64f3 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db.models.functions import datetime
from django.shortcuts import render, HttpResponse, redirect
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth import (
authenticate,
get_user_model,
logout,
login,
)
from django import forms
from forms import *
# from apps.core.models import User
@login_required
def quotes_home(request):
    """Dashboard view: shows the add-quote form plus the user's favorite
    quotes and the remaining (non-favorite) quote bank."""
    print "reached quotes home. If they are logged in, show the dashboard"
    print "logged in as {}".format(request.user.first_name)
    button_text = "Add this quote!"
    title = "Contribute a quote"
    all_quotes = Quote.objects.all()
    # Split quotes into the user's favorites and everything else.
    fave_quotes = Quote.objects.filter(fave_users=request.user)
    bank_quotes = Quote.objects.exclude(fave_users=request.user)
    # print "fave quotes for user: ", fave_quotes
    # print "NOT fave quotes (what's left:", bank_quotes
    # print "all quotes:", all_quotes
    #
    # fave_quotes['fave'] = True
    # bank_quotes['fave'] = False
    # Form submission itself is handled by add_new_quote; this just renders.
    quote_form = QuoteForm(request.POST or None)
    # if quote_form.is_valid():
    #     author = quote_form.cleaned_data.get('author')
    #     quote_text = quote_form.cleaned_data.get('quote_text')
    #     user = request.user
    #     new_quote = Quote.objects.create(author=author, quote_text=quote_text, contributor=user)
    #     new_quote.save()
    #
    #     print "added quote to db: {}".format(Quote.objects.last())
    #     return redirect('/')
    context = {
        'greeting_name': request.user.profile.alias,
        'quote_form': quote_form,
        'button_text': button_text,
        'title': title,
        'all_quotes': bank_quotes,
        'fave_quotes': fave_quotes,
    }
    return render(request, 'dashboard.html', context)
@login_required
def add_new_quote(request):
    """Create a Quote from a submitted QuoteForm (contributor = current
    user) and redirect home; falls through with a placeholder response
    when the form is invalid."""
    quote_form = QuoteForm(request.POST)
    if quote_form.is_valid():
        author = quote_form.cleaned_data.get('author')
        quote_text = quote_form.cleaned_data.get('quote_text')
        user = request.user
        new_quote = Quote.objects.create(author=author, quote_text=quote_text, contributor=user)
        # NOTE(review): objects.create() already saves; this save() is redundant.
        new_quote.save()
        print "added quote to db: {}".format(Quote.objects.last())
        return redirect('/')
    print "trying to create a quote"
    return HttpResponse('placeholder to create quote')
# I need to send the current user info when creating a new quote
@login_required
def add_favorite(request, quote_id):
    """Toggle the current user's favorite flag on the given quote:
    un-favorite (and redirect to /quotes) if already favorited,
    otherwise favorite it and redirect home."""
    print "reached ADD FAVE"
    # this is going to TOGGLE favorite
    current_quote = Quote.objects.get(id=quote_id)
    print "quote: ", current_quote.quote_text
    current_quote_fave_users = Quote.objects.get(id=quote_id).fave_users.all()
    print "fave users:", current_quote_fave_users
    # if the quote is already a favorite, then make it UN-favorite
    for user in current_quote_fave_users:
        print "user info:", user.id
        if user.id == request.user.id:
            current_quote.fave_users.remove(request.user)
            current_quote.save()
            return redirect('/quotes')
    # otherwise make it a favorite
    current_quote.fave_users.add(request.user)
    # Quote.objects.filter(fave_users=request.user).
    # print "fave users", current_quote.fave_users
    # if Quote.objects.get(id=quote_id).fave_users(id=request.user):
    # if Quote.objects.get(id=quote_id):
    # print "YES THIS IS A FAVE"
    # this means the user already favorited, so UN fave
    # get current quote and add it to faves
    # NOTE(review): new_fave is fetched but never used; dead code.
    new_fave = Quote.objects.get(id=quote_id)
    # print "getting new fave:", new_fave
    # print "adding association to user:", request.user
    # new_fave.fave_users.add(request.user)
    # print "trying to save new fave"
    # new_fave.save()
    return redirect('/')
    # return HttpResponse('new fave saved')
# return HttpResponse('new fave saved')
@login_required
def delete_quote(request, quote_id):
    """Delete the quote, but only if the current user contributed it;
    always redirects home."""
    del_quote = Quote.objects.get(id=quote_id)
    if request.user == del_quote.contributor:
        Quote.objects.get(id=quote_id).delete()
        print "quote removed forever"
    return redirect('/')
@login_required
def get_quotes_by_user(request, user_id):
    """Render every quote contributed by the given user, with a count
    and the contributor's alias."""
    quote_set = Quote.objects.filter(contributor_id=user_id)
    quote_count = quote_set.count()
    contributor = User.objects.get(id=user_id)
    print "count: ", quote_count
    print "quote set:", quote_set
    context = {
        'contributor_name': contributor.profile.alias,
        'quotes': quote_set,
        'quote_count': quote_count,
    }
    return render(request, 'all_quotes_by_user.html', context)
|
15,893 | b1542f9157442e8e4e10aa2971652a242d027807 | # 어려움;; 다른사람 코드 참고
class Solution:
    """Greedy solution for "Maximum Number of Events That Can Be Attended"."""

    def maxEvents(self, events: List[List[int]]) -> int:
        """Attend at most one whole event per day; return the maximum count."""
        # Sort descending so the earliest-starting events sit at the tail
        # of the list and can be popped in O(1).
        events.sort(reverse=True)
        ending_days = []  # min-heap of end days for currently-open events
        attended = 0
        day = 0
        while events or ending_days:
            if not ending_days:
                # Nothing is open: jump straight to the next start day.
                day = events[-1][0]
            # Open every event whose start day has arrived.
            while events and events[-1][0] <= day:
                heapq.heappush(ending_days, events.pop()[1])
            # Greedily attend the open event that ends soonest.
            heapq.heappop(ending_days)
            attended += 1
            day += 1
            # Discard events that have already expired.
            while ending_days and ending_days[0] < day:
                heapq.heappop(ending_days)
        return attended
15,894 | b19037452b1bcbab6afb2e80b19e7fe5cafff7e4 | # Copyright 2012 The hookshot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Profile server."""
import json
import logging
from flask import Flask
APP = Flask(__name__)
DATA = None
def formatNode(node):
    """Serialize a profile-tree node into a plain dict for JSON output."""
    fields = ('fullName', 'start', 'finish', 'own', 'tag')
    return {field: getattr(node, field) for field in fields}
@APP.route('/data')
def data():
    """Returns root level data in json format.

    Serializes each thread's top-level profile nodes from the module
    global DATA, plus the recorded checkpoints.
    """
    result = {}
    for thread in DATA.threads:
        result[thread] = [formatNode(node) for node in DATA.threads[thread].tree]
    return json.dumps({
        'checkpoints': DATA.checkpoints,
        'threads': result
    })
@APP.route('/details/<thread>/<path>')
def details(thread, path):
    """Returns a detail drill in in json format.

    *path* is a dot-separated list of child indices from the thread's
    root node list, e.g. "0.2.1"; the addressed node is returned along
    with its immediate children.
    """
    parts = [int(x) for x in path.split('.')]
    node = DATA.threads[thread].tree[parts[0]]
    for part in parts[1:]:
        node = node.children[int(part)]
    result = formatNode(node)
    result['children'] = [formatNode(child) for child in node.children]
    return json.dumps(result)
@APP.route('/')
def home():
    """Home page: static HTML shell; the view is rendered client-side
    by /static/view.js against the /data and /details endpoints."""
    return '''
<div id="toolbar">
<div id="hookshot">hookshot</div>
<div id="zoom">Zoom: <button id="in">+</button> <button id="out">-</button></div>
</div>
<link rel="stylesheet" href="/static/style.css"/>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js"></script>
<script src="/static/view.js"></script>'''
def run(d):
    """Main body of the server.

    Stores the profile data *d* in the module global DATA (read by the
    view functions) and serves on port 8020.
    """
    global DATA # global is ugly but quick. # pylint: disable=W0603
    DATA = d
    logging.info('Listening on http://localhost:8020')
    APP.run(port=8020, debug=True)
|
15,895 | ff58fd82df1da945292d951a2acbe1b5bf2ed673 | def min_second_min(nums:list) -> tuple:
min_el = None
second_min = None
for num in nums:
if min_el is None:
min_el = num
elif second_min is None:
min_el, second_min = min(min_el, num), max(min_el, num)
elif num < second_min:
min_el, second_min = min(min_el, num), max(min_el, num)
return(min_el,
second_min)
def fastest_runners(results: list) -> tuple:
    """Return the two fastest (lowest) results as (best, second_best)."""
    return min_second_min(results)
15,896 | 5382675fa8f63e439efd587a13c22bd5982cb11b | # Generated by Django 2.1.4 on 2019-02-18 21:14
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Adds the Counselling join model (counsellor <-> counsellee with a
    contact date) and wires it in as the through-table of a new
    Counsellor.counsellees many-to-many field."""

    dependencies = [
        ('users', '0006_counsellee_twitter_handle'),
    ]

    operations = [
        migrations.CreateModel(
            name='Counselling',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_contacted', models.DateField(default=django.utils.timezone.now)),
                ('counsellee', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='users.Counsellee')),
                ('counsellor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='users.Counsellor')),
            ],
        ),
        migrations.AddField(
            model_name='counsellor',
            name='counsellees',
            field=models.ManyToManyField(through='users.Counselling', to='users.Counsellee'),
        ),
    ]
|
15,897 | f914360018cd9bbd2684464f83a673f3bed52b4d | import pandas as pd
import argparse
import weather_functions
# CLI: input CSV, output plot path, optional DD/MM/YYYY date window.
parser = argparse.ArgumentParser()
parser.add_argument("input", type=str, help="Input data file")
parser.add_argument("output", type=str, help="Output plot file")
parser.add_argument("-s", "--start", default="01/01/2019", type=str, help="Start date in DD/MM/YYYY format")
parser.add_argument("-e", "--end", default="16/10/2021", type=str, help="End date in DD/MM/YYYY format")
args = parser.parse_args()
# load the data ('#' lines in the CSV are treated as comments)
weather = pd.read_csv(args.input,comment='#')
# define the start and end time for the plot
start_date=pd.to_datetime(args.start,dayfirst=True)
end_date=pd.to_datetime(args.end,dayfirst=True)
# preprocess the data (filter to the requested window)
weather = weather_functions.preprocessing(weather,start_date,end_date)
# plot the data: temperature 'T' against 'Local time'
ax,fig = weather_functions.plot_data(weather['Local time'], weather['T'])
# save the figure
fig.savefig(args.output)
15,898 | 79b65a5b1275a54da9875e511f452d18b3cb00ec | """"
名称:54 童芯派控制Tello无人机(MicroPython usocket标准库)
硬件: 童芯派
功能介绍:简单的利用童芯派实现对Tello无人机的起飞降落控制。
难度:⭐⭐⭐⭐⭐⭐
支持的模式:上传
"""
# ---------程序分割线----------------程序分割线----------------程序分割线----------
import usocket
import cyberpi
import time
import sys
# Block until the CyberPi joins the Tello drone's Wi-Fi access point.
cyberpi.wifi.connect("TELLO-5A186C", "")
while not cyberpi.wifi.is_connect():
    pass
cyberpi.console.println("已连接至网络")
# Bind a local UDP socket; the Tello listens for SDK commands on port 8889.
host = ''
port = 9000
locaddr = usocket.getaddrinfo(host, port)[0][-1]
print(locaddr)
sock = usocket.socket(usocket.AF_INET, usocket.SOCK_DGRAM)
tello_address = ('192.168.10.1', 8889)
sock.bind(locaddr)
msg = None
while True:
    # Up arrow: enter SDK mode ("command") -- required before takeoff/land.
    if cyberpi.controller.is_press("up"):
        cyberpi.console.println("command")
        msg = b"command"
        # msg = msg.encode()
        sock.sendto(msg, tello_address)
        time.sleep(0.5)
    # Button A: send the takeoff command.
    if cyberpi.controller.is_press("a"):
        cyberpi.console.println("off")
        msg = "takeoff"
        msg = msg.encode()
        sent = sock.sendto(msg, tello_address)
        time.sleep(0.5)
    # Button B: send the land command.
    if cyberpi.controller.is_press("b"):
        cyberpi.console.println("off")
        msg = "land"
        msg = msg.encode()
        sock.sendto(msg, tello_address)
        time.sleep(0.5)
15,899 | 1ed3242d817a7633ba209d94f15838cee641256e | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from gevent import monkey; monkey.patch_all() # noqa
import gevent
import sys
from qtpy.QtCore import QTimer
from qtpy.QtWidgets import QApplication
from pyqtconsole.console import PythonConsole
def greet():
    """Print a hello-world message (demo callback pushed into the console)."""
    message = "hello world"
    print(message)
class GEventProcessing:
    """Interoperability class between Qt/gevent that allows processing gevent
    tasks during Qt idle periods.

    Usage: ``with GEventProcessing(): app.exec_()`` -- a zero-interval
    QTimer yields to gevent whenever Qt's event queue is empty.
    """

    def __init__(self, idle_period=0.010):
        # Limit the IDLE handler's frequency while still allow for gevent
        # to trigger a microthread anytime
        self._idle_period = idle_period
        # IDLE timer: on_idle is called whenever no Qt events left for
        # processing
        self._timer = QTimer()
        self._timer.timeout.connect(self.process_events)
        self._timer.start(0)

    def __enter__(self):
        # Nothing to acquire; the timer is already running from __init__.
        pass

    def __exit__(self, *exc_info):
        # Stop yielding to gevent once the context exits.
        self._timer.stop()

    def process_events(self):
        # Cooperative yield, allow gevent to monitor file handles via libevent
        gevent.sleep(self._idle_period)
if __name__ == '__main__':
    app = QApplication([])
    console = PythonConsole()
    # Expose greet() inside the interactive console namespace.
    console.push_local_ns('greet', greet)
    console.show()
    # Evaluate console input on gevent greenlets instead of blocking Qt.
    console.eval_executor(gevent.spawn)
    with GEventProcessing():
        sys.exit(app.exec_())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.