blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e912c7724267d40a41f3d835179cc89dc9232e2b | ff5663993726dc46cd57c5b6b71c613589923f44 | /tsuki_app/views.py | 005aa86fd72d90c5921eeda9b84c0332ba6d12a7 | [] | no_license | AugustoPiva/tsuki_web | a903a9f2b61a5b6c1ad2f439a794b339de86dbcc | 74879787585d001e86d10efce3f441518b8e78b0 | refs/heads/master | 2022-12-08T09:41:07.100965 | 2021-03-03T01:56:38 | 2021-03-03T01:56:38 | 191,825,017 | 0 | 0 | null | 2022-12-08T05:52:58 | 2019-06-13T19:57:26 | JavaScript | UTF-8 | Python | false | false | 26,944 | py | from django.shortcuts import render,get_object_or_404
from django.urls import reverse_lazy,reverse,resolve
from django.http import HttpResponse, HttpRequest, HttpResponseRedirect
from .models import Pedidos,Listaprecios,Productosordenados,Tiposdegastos,Gastos,Clientes
from .forms import FormularioNuevoPedido,Fecha,Filtrargastos,Formulario_del_gasto,Cargagasto,Nuevocliente
from django.views.generic import (View,TemplateView,
ListView,DetailView,
CreateView,DeleteView,
UpdateView)
from django.contrib.auth import authenticate,login,logout
from django.contrib.auth.decorators import login_required
from django.db.models import Sum,F,Max,Avg,StdDev
from datetime import datetime,date
from django.shortcuts import redirect
from django.core.paginator import Paginator
from itertools import chain
import gspread
import time
import locale
# Spanish locale so strftime("%A") yields Spanish weekday names
# (decision_compra's weekday table relies on this).
locale.setlocale(locale.LC_ALL, 'es_ES')
from oauth2client.service_account import ServiceAccountCredentials
import json
import socket
# NOTE(review): `global` at module level is a no-op; these two statements
# have no effect and the names are never defined anywhere. Candidates for
# removal.
global pedido_max
global gasto_max
# Star import provides the `printer` name used for the ESC/POS tickets.
from escpos import *
#si queres usar template views
# class PruebaTemplateView(TemplateView):
# template_name = 'algo.html
#
# def get_context_data(self, **kwargs):
# context= super().get_context_data(**kwargs)
# context['injectme']= 'BASIC INJECTION'
# return context
#
def imprimiendotodo(request):
    """Print a ticket for every order of the current day on the network
    ESC/POS printer assumed to be reachable at the requester's IP.

    For each order it prints: the full ticket, an extra kitchen ticket
    with only the hot items, and a delivery ticket when the order has a
    delivery address.
    """
    def get_client_ip(request):
        # Honour a proxy header first; fall back to the socket address.
        x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
        if x_forwarded_for:
            ip = x_forwarded_for.split(',')[0]
        else:
            ip = request.META.get('REMOTE_ADDR')
        return ip
    ip_address = get_client_ip(request)
    p = printer.Network(str(ip_address))
    hoy = date.today()
    todoslospedidosdeldia = Pedidos.objects.filter(fecha__day=hoy.day,
                                                   fecha__month=hoy.month,
                                                   fecha__year=hoy.year)
    for u in todoslospedidosdeldia:
        # --- full ticket -------------------------------------------------
        p.set(text_type=u'normal', width=3, height=3, smooth=True, flip=False)
        p.text(str(u.client))
        p.set(width=2, height=2)
        p.text("\n------------------------\n")
        produc_ord = Productosordenados.objects.filter(pedido=u)
        # Print every product of the order.
        for i in produc_ord:
            p.text(str(i))
            p.text("\n")
        p.text("------------------------\n")
        p.text("Total: $ ")
        p.text(str(u.get_total()))
        # BUGFIX: the old test `(u.direnvio != "") or (u.direnvio != None)`
        # was always true, so "None" comments were printed too. Mirror the
        # check used by the `pedidos` view instead.
        if u.comentario is not None:
            p.text("\n------------------------\n")
            p.text(str(u.comentario))
        if (u.direnvio != "") and (u.direnvio is not None):
            p.text("\n------------------------\n")
            p.text("CON ENVIO")
        p.cut()
        # --- kitchen ticket: hot items only ------------------------------
        loscalientes = produc_ord.filter(item__categoria_producto="calientes")
        if loscalientes.count() > 0:
            p.set(text_type=u'normal', width=3, height=3, smooth=True, flip=False)
            p.text(str(u.client))
            p.set(width=2, height=2)
            p.text("\n------------------------\n")
            for i in loscalientes:
                p.text(str(i))
                p.text("\n")
            p.cut()
        # --- delivery ticket ---------------------------------------------
        if (u.direnvio != "") and (u.direnvio is not None):
            p.set(text_type=u'normal', width=3, height=3, smooth=True, flip=False)
            # BUGFIX: was `imprimir.client` -- a NameError, `imprimir` is
            # never defined in this function.
            p.text(str(u.client))
            p.set(width=2, height=2)
            p.text("\n------------------------\n")
            p.text(u.direnvio)
            p.text("\n------------------------\n")
            p.text("Total: $ ")
            p.text(str(u.get_total()))
            p.cut()
    time.sleep(0.5)
    # BUGFIX: a Django view must return an HttpResponse; the original
    # returned None, which makes Django raise a ValueError.
    return HttpResponse("OK")
def user_login(request):
    """Authenticate the user from the login form.

    GET renders the login page; POST validates the credentials and, on
    success, logs the user in and redirects to the order list.
    """
    if request.method == 'POST':
        # Field names match the labels in login.html.
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect(reverse('tsuki_app:pedidos'))
            else:
                # BUGFIX: the old code redirected to the literal string
                # "tsuki_app:user_login" (not a valid URL); resolve it.
                return HttpResponseRedirect(reverse('tsuki_app:user_login'))
        else:
            # BUGFIX: invalid credentials used to fall through and return
            # None (HTTP 500); show the login form again instead.
            return render(request, 'tsuki_app/login.html', {})
    else:
        return render(request, 'tsuki_app/login.html', {})
@login_required
def pedidos(request,**kwargs):
    """Today's order list.

    When called with a ``pk`` kwarg the matching order is first printed on
    the network ESC/POS printer (full ticket, a kitchen ticket with only
    the hot items, and a delivery ticket when there is an address), then
    the day's order list is rendered.
    """
    # Locate the printer: it is assumed to live at the client's IP.
    def get_client_ip(request):
        x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
        if x_forwarded_for:
            ip = x_forwarded_for.split(',')[0]
        else:
            ip = request.META.get('REMOTE_ADDR')
        return ip
    current_url = resolve(request.path_info).url_name
    # If the URL carries the client's order pk, print its ticket.
    try:
        # `ip_address` is assigned before any statement that can raise, so
        # it is always available for the final render() below.
        ip_address = get_client_ip(request)
        imprimir = Pedidos.objects.get(id=kwargs['pk'])
        p = printer.Network(str(ip_address))
        p.set(text_type=u'normal', width=3, height=3, smooth=True, flip=False)
        p.text(str(imprimir.client))
        p.set(width=2, height=2)
        p.text("\n------------------------\n")
        produc_ord = Productosordenados.objects.filter(pedido=imprimir)
        # Print every product of the order.
        for i in produc_ord:
            p.text(str(i))
            p.text("\n")
        p.text("------------------------\n")
        p.text("Total: $ ")
        p.text(str(imprimir.get_total()))
        if imprimir.comentario !=None:
            p.text("\n------------------------\n")
            p.text(str(imprimir.comentario))
        if (imprimir.direnvio!="") and (imprimir.direnvio!= None):
            p.text("\n------------------------\n")
            p.text("CON ENVIO")
        p.cut()
        # Kitchen ticket: hot items only.
        loscalientes=produc_ord.filter(item__categoria_producto="calientes")
        if loscalientes.count()>0:
            p.set(text_type=u'normal', width=3, height=3, smooth=True, flip=False)
            p.text(str(imprimir.client))
            p.set(width=2, height=2)
            p.text("\n------------------------\n")
            for i in loscalientes:
                p.text(str(i))
                p.text("\n")
            p.cut()
        # Delivery-address ticket.
        if (imprimir.direnvio!="") and (imprimir.direnvio!= None):
            p.set(text_type=u'normal', width=3, height=3, smooth=True, flip=False)
            p.text(str(imprimir.client))
            p.set(width=2, height=2)
            p.text("\n------------------------\n")
            p.text(imprimir.direnvio)
            p.text("\n------------------------\n")
            p.text("Total: $ ")
            p.text(str(imprimir.get_total()))
            p.cut()
    except:
        # NOTE(review): this bare except is load-bearing -- it covers the
        # normal list view (no 'pk' kwarg -> KeyError) but also silently
        # swallows unknown orders and printer/network failures.
        pass
    productosdelasordenes=Productosordenados.objects.filter(pedido__fecha__day=date.today().day,
                                                            pedido__fecha__month=date.today().month,
                                                            pedido__fecha__year=date.today().year).order_by('pedido__client__nombre_apellido','pedido__id')
    todoslospedidosdeldia=Pedidos.objects.filter(fecha__day=date.today().day,
                                                 fecha__month=date.today().month,
                                                 fecha__year=date.today().year)
    pedidostotales=todoslospedidosdeldia.count()
    # Count orders that carry a delivery address.
    totalenvios=0
    for i in todoslospedidosdeldia:
        if (i.direnvio!="") and (i.direnvio!= None):
            totalenvios+=1
    if request.method == "POST":
        # The date form posts an ISO string (YYYY-MM-DD); slice it apart.
        day = int(request.POST['dia'][8:10])
        month = int(request.POST['dia'][5:7])
        year = int(request.POST['dia'][0:4])
        return HttpResponseRedirect(reverse('tsuki_app:filtrarporfecha',args=(day,month,year)))
    x=date.today()
    fecha=Fecha({'dia':x})
    return render(request,'tsuki_app/pedidos_list.html',{'ip':ip_address,'pedidostotales':pedidostotales,'x':x,'fecha':fecha,'productosdeordenes':productosdelasordenes,'envios':totalenvios})
@login_required
def confirmareliminar(request,pk):
    """Ask for confirmation before deleting order `pk`.

    GET shows the order's details so the user can confirm; POST performs
    the deletion and returns to the order list.
    """
    if request.method == "POST":
        # get_object_or_404 turns a stale/unknown id into a 404 instead of
        # the 500 the old bare .get() produced.
        get_object_or_404(Pedidos, id=pk).delete()
        return HttpResponseRedirect('/tsuki_app/')
    else:
        s = get_object_or_404(Pedidos, id=pk)
        items = Productosordenados.objects.filter(pedido=pk)
        return render(request,'tsuki_app/confirmareliminacion.html',{'pedidoaeliminar':s,'prods':items})
@login_required
def filtrarfecha(request,**kwargs):
    """List the orders of an arbitrary date given by the `day`/`month`/
    `year` kwargs. Posting the date form redirects back to this view for
    the newly chosen date.
    """
    x = datetime(kwargs['year'], kwargs['month'], kwargs['day'])
    fecha = Fecha({'dia': x})
    productosdelasordenes = Productosordenados.objects.filter(
        pedido__fecha__day=kwargs['day'],
        pedido__fecha__month=kwargs['month'],
        pedido__fecha__year=kwargs['year']).order_by('pedido__client__nombre_apellido')
    pedidos = Pedidos.objects.filter(fecha__day=kwargs['day'],
                                     fecha__month=kwargs['month'],
                                     fecha__year=kwargs['year'])
    pedidostotales = pedidos.count()
    # An order counts as a delivery when it carries a non-empty address.
    totalenvios = sum(1 for i in pedidos
                      if i.direnvio != "" and i.direnvio is not None)
    if request.method == "POST":
        # The date form posts an ISO string (YYYY-MM-DD); slice it apart.
        day = int(request.POST['dia'][8:10])
        month = int(request.POST['dia'][5:7])
        year = int(request.POST['dia'][0:4])
        return HttpResponseRedirect(reverse('tsuki_app:filtrarporfecha', args=(day, month, year)))
    return render(request, 'tsuki_app/pedidos_list.html',
                  {'pedidostotales': pedidostotales, 'x': x, 'fecha': fecha,
                   'productosdeordenes': productosdelasordenes, 'envios': totalenvios})
def Index(request,**kwargs):
    """Landing page.

    When called with an `eliminar` kwarg it deletes that (cancelled)
    order and, if the order's client is left with no other orders, the
    client as well.
    """
    if 'eliminar' in kwargs:
        try:
            ultimo_pedido = Pedidos.objects.get(id=kwargs['eliminar'])
        except Pedidos.DoesNotExist:
            # Nothing to clean up (e.g. the cancel link was followed twice).
            # The old bare except also hid real errors; this one doesn't.
            pass
        else:
            cliente = ultimo_pedido.client
            ultimo_pedido.delete()
            # Remove the client too when this was their only order.
            if Pedidos.objects.filter(client=cliente).count() == 0:
                cliente.delete()
    return render(request, 'tsuki_app/base.html', {})
# @login_required
# def gestion_clientes(request,**kwargs):
# form = FormularioGestionClientes(request.POST or None)
#
# return render(request, 'tsuki_app/gestion_clients.html',{'form':form,'form2':form2})
@login_required
def nuevo_pedido(request,**kwargs):
    """Create a new order; when the client does not exist yet, create the
    client first and come back with it preselected.
    """
    if request.method == "POST":
        if 'Form1' in request.POST:
            # The client exists: create the order and go pick products.
            form = FormularioNuevoPedido(request.POST or None)
            form2 = Nuevocliente(None)
            # BUGFIX: the old `if form.is_valid:` never *called* the
            # method, so the bound method was always truthy and invalid
            # data crashed inside save().
            if form.is_valid():
                form.save()
                pk_pedido = Pedidos.objects.latest('id').id
                return HttpResponseRedirect(reverse('tsuki_app:agregarproductos',args=(pk_pedido,)))
        else:
            # Create the client; the redirect preselects it for the order.
            form2 = Nuevocliente(request.POST or None)
            form = FormularioNuevoPedido()
            if form2.is_valid():
                form2.save()
                pk_client = Clientes.objects.latest('id').id
                return HttpResponseRedirect(reverse('tsuki_app:nuevopedido',args=(pk_client,)))
    else:
        x = date.today()
        form = FormularioNuevoPedido()
        form2 = Nuevocliente(None)
        # Arriving right after creating a client: preselect it and today.
        try:
            cliente_reciencreado = Clientes.objects.get(id=kwargs['pk_client'])
            form = FormularioNuevoPedido({'client':cliente_reciencreado,'fecha':x})
        except (KeyError, Clientes.DoesNotExist):
            pass
    # Invalid POSTs fall through here so the form errors are displayed.
    return render(request, 'tsuki_app/nuevo_pedido.html',{'form':form,'form2':form2})
@login_required
def agregarproductos(request,**kwargs):
    """Product picker for the order `pk_pedido`.

    POST receives the cart as a JSON dict {product_id: quantity}, stores
    each item, and -- when the order is for today -- immediately prints
    the ticket on the client's network ESC/POS printer.
    """
    def get_client_ip(request):
        # The printer is assumed to be reachable at the requester's IP.
        x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
        if x_forwarded_for:
            ip = x_forwarded_for.split(',')[0]
        else:
            ip = request.META.get('REMOTE_ADDR')
        return ip
    productos= Listaprecios.objects.all()
    ped=kwargs['pk_pedido']
    pedido=Pedidos.objects.get(id=ped)
    if request.method =="POST":
        # If the cart is not empty...
        if request.POST['Productos'] !='{}':
            productos_ordenados=json.loads(request.POST['Productos'])
            for i in productos_ordenados:
                prod=get_object_or_404(Listaprecios,id=i)
                order_item = Productosordenados.objects.create(item=prod,cantidad=productos_ordenados[i],pedido=pedido)
                order_item.total=order_item.precio_total()
                # If the product ships on returnable serving ware, flag it
                # so `puentesybarcos` can track the loan.
                if order_item.item.categoria_producto == 'barcos' or order_item.item.categoria_producto == 'puentes':
                    order_item.lotienen=True
                else:
                    pass
                order_item.save()
            # Only print immediately when the order is for today.
            if pedido.fecha.day == date.today().day:
                try:
                    ip_address = get_client_ip(request)
                    imprimir=pedido
                    p = printer.Network(str(ip_address))
                    p.set(text_type=u'normal', width=3, height=3, smooth=True, flip=False)
                    p.text(str(imprimir.client))
                    p.set(width=2, height=2)
                    p.text("\n------------------------\n")
                    produc_ord = Productosordenados.objects.filter(pedido=imprimir)
                    # Print every product of the order.
                    for i in produc_ord:
                        p.text(str(i))
                        p.text("\n")
                    p.text("------------------------\n")
                    p.text("Total: $ ")
                    p.text(str(imprimir.get_total()))
                    if imprimir.comentario != None:
                        p.text("\n------------------------\n")
                        p.text(str(imprimir.comentario))
                    if (imprimir.direnvio!="") and (imprimir.direnvio!= None):
                        p.text("\n------------------------\n")
                        p.text("CON ENVIO")
                    p.cut()
                    # Kitchen ticket: hot items only.
                    loscalientes=produc_ord.filter(item__categoria_producto="calientes")
                    if loscalientes.count()>0:
                        p.set(text_type=u'normal', width=3, height=3, smooth=True, flip=False)
                        p.text(str(imprimir.client))
                        p.set(width=2, height=2)
                        p.text("\n------------------------\n")
                        for i in loscalientes:
                            p.text(str(i))
                            p.text("\n")
                        p.cut()
                    # Delivery-address ticket.
                    if (imprimir.direnvio!="") and (imprimir.direnvio!= None):
                        p.set(text_type=u'normal', width=3, height=3, smooth=True, flip=False)
                        p.text(str(imprimir.client))
                        p.set(width=2, height=2)
                        p.text("\n------------------------\n")
                        p.text(imprimir.direnvio)
                        p.text("\n------------------------\n")
                        p.text("Total: $ ")
                        p.text(str(imprimir.get_total()))
                        p.cut()
                except:
                    # NOTE(review): bare except -- a printer/network failure
                    # is silently ignored and the order is saved anyway.
                    pass
            else:
                pass
            return redirect('/tsuki_app/')
        else:
            pass
    return render(request,'tsuki_app/agregar_productos.html',{'lista':productos,'pedido':pedido})
@login_required
def modificarpedido(request,pk):
    """Edit an existing order: its form fields and its product cart."""
    productos = Listaprecios.objects.all()
    orden = Pedidos.objects.get(id=pk)
    carrito = Productosordenados.objects.filter(pedido=pk)
    # (item id, quantity) pairs of the cart as it is stored right now.
    orden_actual = list(carrito.values('item__id','cantidad').values_list('item__id','cantidad'))
    # Form pre-filled with client, comment, date and delivery address.
    form = FormularioNuevoPedido(request.POST or None, instance=orden)
    # BUGFIX: the old `form.is_valid` (missing parentheses) was always
    # truthy, so invalid data crashed inside save().
    if request.method == "POST" and form.is_valid():
        productos_ordenados = json.loads(request.POST['Productos'])
        # An untouched cart arrives as an empty dict: nothing to sync.
        if productos_ordenados == {}:
            pass
        else:
            # Walk the stored cart to detect quantity changes and removals.
            for prod in orden_actual:
                if str(prod[0]) in productos_ordenados:
                    if prod[1] == productos_ordenados[str(prod[0])]:
                        pass  # quantity unchanged
                    else:
                        prod_a_actualizar = Productosordenados.objects.get(pedido__id=pk, item__id=prod[0])
                        prod_a_actualizar.cantidad = productos_ordenados[str(prod[0])]
                        prod_a_actualizar.total = prod_a_actualizar.precio_total()
                        prod_a_actualizar.save()
                    # Handled either way: drop it so that only brand-new
                    # items remain in the dict for the loop below.
                    productos_ordenados.pop(str(prod[0]), None)
                else:
                    # The item was removed from the cart: delete it.
                    instancia = Listaprecios.objects.get(id=prod[0])
                    Productosordenados.objects.get(item=instancia, pedido=orden).delete()
            # Whatever is left in the dict is a brand-new item.
            for nuevo in productos_ordenados:
                nuevo_producto = get_object_or_404(Listaprecios, id=int(nuevo))
                order_item = Productosordenados.objects.create(item=nuevo_producto, cantidad=productos_ordenados[nuevo], pedido=orden)
                order_item.total = order_item.precio_total()
                order_item.save()
        form.save()
        return redirect('/tsuki_app/')
    # GET, or invalid POST (form errors are shown in the template).
    return render(request,'tsuki_app/modificar_pedido.html',{'form':form,'lista':productos,'carro':carrito,'orden':orden})
@login_required
def producciondeldia(request,**kwargs):
    """Summarize today's production needs: pieces per product, rice total,
    roll counts per fish type, hot items and desserts.
    """
    hoy = date.today()
    productosdelasordenes = Productosordenados.objects.filter(pedido__fecha__day=hoy.day,
                                                              pedido__fecha__month=hoy.month,
                                                              pedido__fecha__year=hoy.year)
    # Products that do not consume rice, excluded from the rice total.
    productossinarroz = ["Salsa Tsuki","Salsa Teriyaki","Langostinos Rebozados 6p","Geisha Tsuki 4p","Geisha caviar 4p","Geisha palta 4p","Sashimi 5p","Niguiris de salmon 4p","Niguiris Ahumados 4p","Geisha comun"]
    totalpiezasporprod = productosdelasordenes.exclude(item__nombre_producto__in=productossinarroz).annotate(totall=Sum(F('cantidad') * F('item__cantidad_producto')))
    # BUGFIX: aggregate() yields None on an empty queryset; default every
    # total to 0 so the arithmetic below cannot raise TypeError on a day
    # without matching orders.
    totalparroz = totalpiezasporprod.aggregate(supertotal=Sum('totall'))['supertotal'] or 0
    totalpiezasdeldia = productosdelasordenes.aggregate(total=Sum(F('cantidad') * F('item__cantidad_producto')))['total'] or 0
    psurtidas = productosdelasordenes.filter(item__sub_categoria_producto='surtido').aggregate(tsurtidas=Sum(F('cantidad') * F('item__cantidad_producto')))['tsurtidas'] or 0
    # Half the assorted pieces are rolled as plain rolls of 8.
    rollssurtidos = round((psurtidas/8)/2)
    psalmon = productosdelasordenes.filter(item__sub_categoria_producto='salmon').aggregate(tsalmon=Sum(F('cantidad') * F('item__cantidad_producto')))['tsalmon'] or 0
    # 90% of salmon pieces come from rolls of 8; 10% are geisha/niguiri.
    rollssalmon = round((psalmon*0.9)/8 + (psurtidas/8)/2)
    gyn = round(psalmon*0.1)
    hotsalmon = productosdelasordenes.filter(item__nombre_producto='Hot Salmon').aggregate(tsalm=Sum('cantidad'))['tsalm'] or 0
    hotlangostinos = productosdelasordenes.filter(item__nombre_producto='Hot Langostinos').aggregate(tlang=Sum('cantidad'))['tlang'] or 0
    langostinosrebozados = productosdelasordenes.filter(item__nombre_producto='Langostinos Rebozados 6p').aggregate(tpinc=Sum('cantidad'))['tpinc'] or 0
    rolles = productosdelasordenes.filter(item__categoria_producto='rolls').annotate(Sum('cantidad'))
    tiramisu = productosdelasordenes.filter(item__nombre_producto='Tiramisu').aggregate(tir=Sum('cantidad'))['tir'] or 0
    chocotorta = productosdelasordenes.filter(item__nombre_producto='Chocotorta').aggregate(choc=Sum('cantidad'))['choc'] or 0
    mousse = productosdelasordenes.filter(item__nombre_producto='Mousse de maracuya').aggregate(mousse=Sum('cantidad'))['mousse'] or 0
    # Renamed from `dict`, which shadowed the builtin.
    context = {'totalppp':totalpiezasporprod,
               'totalparroz':totalparroz,
               'productosdelasordenes':productosdelasordenes,
               'hotsalmon':hotsalmon,
               'hotlang':hotlangostinos,
               'lreboz':langostinosrebozados,
               'rolls':rolles,
               'rsalmon':rollssalmon,
               'rsurtidos':rollssurtidos,
               'gyn':gyn,
               'totalpiezasdia':totalpiezasdeldia,
               'tiramisu':tiramisu,
               'chocotorta':chocotorta,
               'mousse':mousse
               }
    return render(request,'tsuki_app/producciondiaria.html',context)
@login_required
def cargar_gastos(request,**kwargs):
    """Expense control page: browse the expense log (paginated), select an
    expense type, record a purchase for it, or delete a log entry.
    """
    gastos_list = Gastos.objects.all().order_by('-fechacarga')
    paginator = Paginator(gastos_list, 8)
    page = request.GET.get('page')
    gastos = paginator.get_page(page)
    form = Filtrargastos()
    formulariocantidad = Cargagasto()
    if request.method == "POST":
        if 'Form1' in request.POST:
            # An expense type was picked: jump to its detail page.
            pk = request.POST['seleccionar_gasto']
            # BUGFIX: reverse() args must be a sequence of arguments;
            # args=(pk) passed the bare string, so a multi-digit pk was
            # split into one argument per digit. It must be a 1-tuple.
            return HttpResponseRedirect(reverse('tsuki_app:presentar_gastos', args=(pk,)))
        else:
            # A purchase (amount/date/quantity) was posted for the type
            # already encoded in the URL; inject it into the form data.
            pk = kwargs['pk']
            z = request.POST.copy()
            z['gasto'] = pk
            formulariocantidad = Cargagasto(z)
            if formulariocantidad.is_valid():
                formulariocantidad.save()
                return HttpResponseRedirect(reverse('tsuki_app:presentar_gastos', args=(pk,)))
            # Invalid form: fall through and re-render with its errors.
    try:
        gasto = Tiposdegastos.objects.get(id=kwargs['pk'])
        return render(request,'tsuki_app/control_gastos.html',{'gastos':gastos,'form':form,'gasto':gasto,'formulariocantidad':formulariocantidad})
    except (KeyError, Tiposdegastos.DoesNotExist):
        # No (or unknown) expense type selected: plain listing below.
        pass
    if 'eliminar' in kwargs:
        Gastos.objects.get(id=kwargs['eliminar']).delete()
    return render(request,'tsuki_app/control_gastos.html',{'form':form,'gastos':gastos})
@login_required
def crear_nuevogasto(request):
    """Create a new expense type, then jump to its detail page."""
    form = Formulario_del_gasto(request.POST or None)
    if request.method == 'POST':
        if form.is_valid():
            # ModelForm.save() already returns the created instance: no
            # need to re-query by description (which also broke whenever
            # two types shared a description).
            objetocreado = form.save()
            pk = objetocreado.id
            return HttpResponseRedirect(reverse('tsuki_app:presentar_gastos', args=(pk,)))
    return render(request,'tsuki_app/crear_gasto.html',{'form':form})
@login_required
def decision_compra(request,**kwargs):
    """Purchase-support view: shows how many salmon pieces were sold per
    day since the start of last month, plus last week's totals, so the
    buyer can size the next salmon order.
    """
    def myFunc(e):
        # Sort key: chronological order of the per-day entries.
        return e['fecha']
    form = Filtrargastos()
    if request.method == 'POST':
        pk = request.POST['seleccionar_gasto']
        # BUGFIX: reverse() args must be a 1-tuple; args=(pk) passed the
        # bare string, splitting multi-digit pks into one arg per digit.
        return HttpResponseRedirect(reverse('tsuki_app:soporte_compras_item', args=(pk,)))
    try:
        insumo = Tiposdegastos.objects.get(id=kwargs['pk']).descripcion
        if insumo == "Salmon":
            # Per-date piece totals from every source that consumes salmon:
            # all-salmon products, half of the assorted ones, and hots.
            piezas_salmon = Productosordenados.objects.filter(item__sub_categoria_producto='salmon',pedido__fecha__month__gte=datetime.now().month-1).values('pedido__fecha').annotate(total=Sum(F('cantidad')*F('item__cantidad_producto')))
            piezas_sin_salmon = Productosordenados.objects.filter(item__sub_categoria_producto='surtido',pedido__fecha__month__gte=datetime.now().month-1).values('pedido__fecha').annotate(total=Sum(F('cantidad')*F('item__cantidad_producto'))/2)
            hots_salmon = Productosordenados.objects.filter(item__nombre_producto='Hot salmon',pedido__fecha__month__gte=datetime.now().month-1).values('pedido__fecha').annotate(total=Sum(F('cantidad')*8))
            piezas_todas_las_fuentes = piezas_salmon.union(piezas_sin_salmon, hots_salmon)
            # Collapse the three sources into a single total per date.
            total_xfecha = {}
            for i in piezas_todas_las_fuentes:
                if i['pedido__fecha'] in total_xfecha:
                    total_xfecha[i['pedido__fecha']] += int(i['total'])
                else:
                    total_xfecha[i['pedido__fecha']] = int(i['total'])
            # Per date: [pieces, weekday name (Spanish locale), ISO week].
            lista_fecha = []
            for x in total_xfecha:
                lista_fecha.append({'fecha':x,'datos':[total_xfecha[x],x.strftime("%A"),x.isocalendar()[1]]})
            lista_fecha.sort(key=myFunc)
            # Last week's figures for the summary table (Tue-Sat only).
            pedidos_semana_pasada = {'martes':0,'miércoles':0,'jueves':0,'viernes':0,'sábado':0}
            for u in lista_fecha:
                if u['fecha'].isocalendar()[1] == datetime.now().isocalendar()[1]-1:
                    pedidos_semana_pasada[u['fecha'].strftime("%A")] = u['datos'][0]
            form = Filtrargastos()
            return render(request,'tsuki_app/decision_compra.html',{'form':form,'lista':lista_fecha,'sempas':pedidos_semana_pasada,'insumo':insumo})
    except Exception:
        # Best-effort by design: no pk selected (KeyError), unknown expense
        # type (DoesNotExist), or a sale on a day outside Tue-Sat (KeyError
        # in the table above). Fall through to the bare page. Narrowed from
        # a bare `except:` so KeyboardInterrupt/SystemExit pass through.
        pass
    insumo = 'sinelegir'
    return render(request,'tsuki_app/decision_compra.html',{'form':form,'insumo':insumo})
@login_required
def puentesybarcos(request, **kwargs):
    """List the customers who still hold returnable serving ware (boats and
    bridges). When called with a `pk`, first mark that order's ware as
    returned.
    """
    # Explicit kwarg check replaces the old bare try/except, which also
    # swallowed genuine database errors.
    if 'pk' in kwargs:
        vajilla_devuelta = Productosordenados.objects.filter(pedido__id=kwargs['pk'])
        for i in vajilla_devuelta:
            i.lotienen = False
            i.save()
    lista_deudores_vajilla = Productosordenados.objects.filter(lotienen=True).order_by('pedido__id', 'pedido__fecha')
    return render(request,'tsuki_app/consultar_deudores.html',{'lista':lista_deudores_vajilla})
| [
"augustopiva4@gmail.com"
] | augustopiva4@gmail.com |
98d04850ce867f41faac0f5dee2adac18e1050a8 | ce6d252997d9c328dc127176d30d4c4afb11ae6b | /solution/CanVisitAllRooms.py | 8f4cd9961a1c53fce116a54179af83c022ceb44c | [] | no_license | Vergilgeek/leetcode-solution-python | 0956500adfcd0f58a34f771d20b1b771ce83507a | 92794a6660506e901dee30f07d16a10fddc864c9 | refs/heads/master | 2022-12-05T18:35:47.154950 | 2020-08-31T07:28:37 | 2020-08-31T07:28:37 | 274,016,162 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,832 | py | from typing import List
# 841. 钥匙和房间
# 有 N 个房间,开始时你位于 0 号房间。每个房间有不同的号码:0,1,2,...,N-1,并且房间里可能有一些钥匙能使你进入下一个房间。
# 在形式上,对于每个房间 i 都有一个钥匙列表 rooms[i],每个钥匙 rooms[i][j] 由 [0,1,...,N-1] 中的一个整数表示,其中 N = rooms.length。 钥匙 rooms[i][j] = v 可以打开编号为 v 的房间。
# 最初,除 0 号房间外的其余所有房间都被锁住。
# 你可以自由地在房间之间来回走动。
# 如果能进入每个房间返回 true,否则返回 false。
class Solution:
    """LeetCode 841 (Keys and Rooms): rooms 1..N-1 are locked; each room
    holds keys to other rooms. Starting in room 0, can every room be
    visited?
    """

    def canVisitAllRooms(self, rooms: List[List[int]]) -> bool:
        """Return True when every room is reachable from room 0.

        Time O(n + m), space O(n) for n rooms and m keys in total.
        """
        if not rooms:
            return True
        # roomStatus[i] == 1 once room i has been opened.
        roomStatus = [0] * len(rooms)
        roomStatus[0] = 1
        # Iterative DFS: the original recursive helper could hit Python's
        # recursion limit on a long chain of rooms.
        stack = [0]
        while stack:
            current = stack.pop()
            for key in rooms[current]:
                if roomStatus[key] == 0:
                    roomStatus[key] = 1
                    stack.append(key)
        return all(roomStatus)

    def openRoom(self, roomStatus: List[int], rooms: List[List[int]], roomNum: int) -> None:
        """Mark every room reachable from `roomNum` in `roomStatus`.

        Kept for backward compatibility with the old recursive helper;
        now iterative for the same robustness reason as above.
        """
        stack = [roomNum]
        while stack:
            current = stack.pop()
            for key in rooms[current]:
                if roomStatus[key] == 0:
                    roomStatus[key] = 1
                    stack.append(key)
# Quick manual check: room 2 is unreachable here, so this prints False.
# NOTE: the methods are invoked unbound with the class itself as `self`,
# matching how they were written.
rooms = [[1, 3], [3, 0, 1], [2], [0]]
solver = Solution
print(solver.canVisitAllRooms(solver, rooms))
"251627752@qq.com"
] | 251627752@qq.com |
030dce7993add6e77c513fc621b5a7f86c0566c5 | f785a7d4a31e5fb63715eec78e421e502a7a65a5 | /lab4/venv/Scripts/pip3.7-script.py | f96e597b063d133bb3fc89132679ae98f33f6c5e | [] | no_license | kuliev001/inf_001 | 2175c76468ce03affbcee542e7412f6c2a3feae7 | 49e06ebfeb18b3da84869f0c385093f1698dd85f | refs/heads/master | 2020-08-06T12:45:09.112197 | 2019-10-05T11:41:22 | 2019-10-05T11:41:22 | 212,979,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | #!C:\Users\Admin\d001\inf_001\lab4\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix that setuptools appends on
    # Windows so pip sees a clean program name in argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Delegate to pip's console-script entry point and exit with its code.
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
    )
| [
"kuliev001@yandex.ru"
] | kuliev001@yandex.ru |
f40f16e083aaff00071e3beb1104d697546debd2 | e10cf552943f8b83c39a5377ee149b0da03c0a58 | /20181223_xmasctf/crypto-328-santas_list_(2.0)/files/list_2.py | 668646e2be2ce34cee946841e58bc906bd215680 | [] | no_license | pberba/ctf-solutions | edfbed3329f22069d90a0f84f8855cbdd8575a09 | 7d8a0aa1205efb16a7df457e5ef58e686e30e34d | refs/heads/master | 2021-11-11T14:04:20.012922 | 2021-11-09T02:36:43 | 2021-11-09T02:36:43 | 145,188,639 | 24 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,464 | py | #!/usr/bin/python3
from Crypto.PublicKey import RSA
from Crypto.Util.number import *
# Read the flag once at startup; the context manager closes the file
# handle deterministically (the original one-liner leaked it).
with open('flag.txt', 'r') as _flag_file:
    FLAG = _flag_file.read().strip()
def menu():
    """Display the action menu and return the raw choice string."""
    for line in ('', '[1] Encrypt', '[2] Decrypt', '[3] Exit'):
        print(line)
    return input()
def encrypt(m):
    """RSA-encrypt integer m under the connection key: c = m^e mod n."""
    e, n = rsa.e, rsa.n
    return pow(m, e, n)
def decrypt(c):
    """RSA-decrypt integer c under the connection key: m = c^d mod n."""
    d, n = rsa.d, rsa.n
    return pow(c, d, n)
# Fresh 1024-bit RSA key per connection; the flag is encrypted under it.
rsa = RSA.generate(1024)
flag_encrypted = pow(bytes_to_long(FLAG.encode()), rsa.e, rsa.n)
# Plaintexts the decryption oracle refuses to reveal as divisors of a
# result; seeded with the flag itself.
used = [bytes_to_long(FLAG.encode())]
print('Ho, ho, ho and welcome back!')
print('Your list for this year:\n')
print('Sarah - Nice')
print('Bob - Nice')
print('Eve - Naughty')
print('Galf - ' + hex(flag_encrypted)[2:])
print('Alice - Nice')
print('Johnny - Naughty')
# At most 5 oracle queries per connection.
for i in range(5):
    choice = menu()
    if choice == '1':
        m = bytes_to_long(input('\nPlaintext > ').strip().encode())
        used.append(m)
        print('\nEncrypted: ' + str(encrypt(m)))
    elif choice == '2':
        c = int(input('\nCiphertext > ').strip())
        if c == flag_encrypted:
            # Never decrypt the flag ciphertext verbatim.
            print('Ho, ho, no...')
        else:
            m = decrypt(c)
            # Refuse when the decryption is a multiple of any plaintext
            # seen so far (including the flag).
            for no in used:
                if m % no == 0:
                    print('Ho, ho, no...')
                    break
            else:
                print('\nDecrypted: ' + str(m))
    elif choice == '3':
        print('Till next time.\nMerry Christmas!')
        break
print('Too many requests made... Disconnecting...')
| [
"jdpberba@gmail.com"
] | jdpberba@gmail.com |
efd7dbc9871ed3bbe471284d5c39efaf8a571c50 | 4a9bb536ea1590abbfd884616be4a7a69b962ce3 | /Assignments/Program_3/get_quake_points.py | 4a5b7f3fd8c55702ea7b5bd7fb91e56a80288001 | [] | no_license | cynorfleet/Spatial-DS-norfleet | b603983170b6f06474279168c14ddc5cea9b4ac1 | 8e83eaac5a4ea4553321ae2c231b79a1bfbf49c3 | refs/heads/master | 2021-01-25T06:17:57.297621 | 2017-06-28T19:22:09 | 2017-06-28T19:22:09 | 93,548,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,332 | py | import requests
import json
import sys
import glob
"""
This class helps read the NYC crime data.
Usage:
fh = FileHelper()
data = fh.get_data([2017]) #pass year in as list, get data for that year
data = fh.get_data([2015,2016,2017]) #pass years in as list, get data for those years
"""
def condense_file(data):
    """Reduce raw USGS GeoJSON batches to the handful of fields we keep.

    `data` is a list of GeoJSON responses (each with a 'features' list);
    the result is a flat list of small per-quake dicts.
    """
    condensed = []
    wanted = ("mag", "magType", "time", "place", "types", "rms", "sig")
    for batch in data:
        for feature in batch["features"]:
            entry = {"geometry": feature["geometry"]}
            props = feature["properties"]
            for field in wanted:
                entry[field] = props[field]
            condensed.append(entry)
    return condensed
##########################################################################################
def get_earth_quake_data(year, month=(1, 12), minmag=None, maxmag=None, query=True):
    """Query the USGS FDSN event API for one year of earthquakes.

    year   -- calendar year to query
    month  -- (start, end) month pair; the window is start-01 .. end-01
              (BUGFIX: was a mutable list default; a tuple is safe and
              backward compatible since it is only indexed)
    minmag -- minimum magnitude; defaults to 1.0 when omitted
    maxmag -- optional maximum magnitude
    query  -- True returns the GeoJSON dict, False returns just the count
    """
    start_month = month[0]
    end_month = month[1]
    # Translate the magnitude bounds into URL query fragments.
    if maxmag is not None:
        maxmag = '&maxmagnitude=' + str(maxmag)
    else:
        maxmag = ''
    if minmag is not None:
        minmag = '&minmagnitude=' + str(minmag)
    else:
        minmag = '&minmagnitude=' + str(1.0)
    # Renamed from `type`, which shadowed the builtin.
    endpoint = 'query' if query else 'count'
    url = 'https://earthquake.usgs.gov/fdsnws/event/1/' + endpoint + '?format=geojson&starttime=' + \
        str(year) + '-' + str(start_month) + '-01&endtime=' + \
        str(year) + '-' + str(end_month) + '-01' + minmag + maxmag
    r = requests.get(url).json()
    if endpoint == 'count':
        return r['count']
    return r
def execute(savePath, startYr, magnitude_min, magnitude_max=None, endYr=2017):
    """Download quake data for the years [startYr, endYr), appending the
    raw GeoJSON to `savePath` and a condensed version to
    `savePath`-condensed.json.

    Note: files are opened in append mode, so re-running adds duplicate
    records rather than overwriting.
    """
    path = savePath
    years = [x for x in range(startYr, endYr)]  # unused `months` removed
    r = []
    for y in years:
        r.append(get_earth_quake_data(
            y, [1, 12], magnitude_min, magnitude_max, True))
        # len(r) is the number of yearly batches fetched so far.
        print("Year:{} Count:{}".format(y, len(r)))
    # `with` guarantees the handles are closed even if json.dumps raises
    # (the originals leaked on error).
    with open(path, 'a') as f:
        f.write(json.dumps(r, sort_keys=True, indent=4, separators=(',', ': ')))
    rc = condense_file(r)
    with open(path + '-condensed.json', 'a') as f:
        f.write(json.dumps(rc, sort_keys=True, indent=4, separators=(',', ': ')))
"""
This class helps read the NYC crime data.
Usage:
fh = FileHelper()
data = fh.get_data([2017]) #pass year in as list, get data for that year
data = fh.get_data([2015,2016,2017]) #pass years in as list, get data for those years
"""
def condense_file(data):
    """Strip raw USGS GeoJSON batches down to the fields we persist.

    NOTE: this redefines the identical function above; being the later
    definition, this is the copy that is actually used at runtime.
    """
    result = []
    for quakes in data:
        for quake in quakes['features']:
            props = quake["properties"]
            result.append({
                'geometry': quake['geometry'],
                'mag': props["mag"],
                'magType': props["magType"],
                'time': props["time"],
                'place': props["place"],
                'types': props["types"],
                'rms': props["rms"],
                'sig': props["sig"],
            })
    return result
##########################################################################################
def get_earth_quake_data(year, month=[1, 12], minmag=None, maxmag=None, query=True):
    """Query the USGS FDSN event API for one year of earthquakes.

    NOTE: duplicate of the definition above; being the later one, this is
    the copy actually used at runtime.

    NOTE(review): `month=[1, 12]` is a mutable default argument (harmless
    here since it is only indexed, but a tuple would be safer), and `type`
    below shadows the builtin.
    """
    start_month = month[0]
    end_month = month[1]
    # Translate the magnitude bounds into URL query fragments.
    if not maxmag is None:
        maxmag = '&maxmagnitude=' + str(maxmag)
    else:
        maxmag = ''
    if not minmag is None:
        minmag = '&minmagnitude=' + str(minmag)
    else:
        # Default floor of magnitude 1.0 when no minimum is given.
        minmag = '&minmagnitude=' + str(1.0)
    if query:
        type = 'query'
    else:
        type = 'count'
    url = 'https://earthquake.usgs.gov/fdsnws/event/1/' + type + '?format=geojson&starttime=' + \
        str(year) + '-' + str(start_month) + '-01&endtime=' + \
        str(year) + '-' + str(end_month) + '-01' + minmag + maxmag
    r = requests.get(url).json()
    # The 'count' endpoint wraps the number in a dict; unwrap it.
    if type == 'count':
        return r['count']
    else:
        return r
def execute(savePath, startYr, magnitude_min, magnitude_max=None, endYr=2017):
    """Download quake data for the years [startYr, endYr), appending raw
    GeoJSON to `savePath` and a condensed copy to `savePath`-condensed.json.

    NOTE: duplicate of the earlier `execute`; being the later definition,
    this is the copy actually used at runtime. Files are opened in append
    mode, so re-running adds duplicate records.
    """
    path = savePath
    years = [x for x in range(startYr, endYr)]
    months = [x for x in range(0, 12)]  # NOTE(review): unused
    r = []
    #years = [2017]
    for y in years:
        print("Year:%s" % (y))
        r.append(get_earth_quake_data(
            y, [1, 12], magnitude_min, magnitude_max, True))
    # f = open('./quake-' + str(y) + '.json', 'a')
    f = open(path, 'a')
    f.write(json.dumps(r, sort_keys=True, indent=4, separators=(',', ': ')))
    f.close()
    rc = condense_file(r)
    f = open(path + '-condensed.json', 'a')
    f.write(json.dumps(rc, sort_keys=True, indent=4, separators=(',', ': ')))
    f.close()
| [
"cynorfleet@gmail.com"
] | cynorfleet@gmail.com |
5e412f372c4edc8db03a7db6615a05a84a40bda2 | cc3222022682bf19b73b16d903aa9be7130042b5 | /python语言学习/ACM/母牛的故事.py | 7c399ec31d21d9d2534fdd7af83ca750b91750ba | [] | no_license | wyl-lib/personalPyhton | e44f7ce7ef2cb417f1db7cfb15e29b04937e45b0 | 0502c6d94c1fbaf83335f7d2582326580243bf19 | refs/heads/master | 2021-01-13T21:58:07.809578 | 2020-02-23T11:46:12 | 2020-02-23T11:46:12 | 242,507,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | #母牛的故事
# "The cow story" (CodeUp classic): the herd size after n years follows
# the recurrence f(n) = f(n-1) + f(n-3), seeded with f(1..4) = 1..4.
import time

# Renamed from ``list``: the original name shadowed the builtin.
cows = [0] * 55
cows[0] = 1
cows[1] = 2
cows[2] = 3
cows[3] = 4
while True:
    # SECURITY: eval() executes arbitrary expressions typed on stdin.
    # int(input()) would be the safe replacement; eval is kept only so
    # that any previously-accepted inputs keep working — review this.
    year = eval(input())
    if year <= 0:
        break
    if year <= 4:
        # First four years are the seed values themselves.
        print(year)
    else:
        for i in range(4, year):
            start = time.perf_counter()
            cows[i] = cows[i - 1] + cows[i - 3]
            end = time.perf_counter()
            print(end - start)  # per-step timing, preserved from original
        print(cows[year - 1])
| [
"1346788525@qq.com"
] | 1346788525@qq.com |
1c7add1cda136c23e652c1fd2a6383a444474ffc | 9634867a6f039f889b4561ff5d43a5ece9899ab1 | /dsuser/admin.py | 6f552c9c6a1e04c09852836ec7fa1d0629006a0f | [] | no_license | YangGangster/djangostagram | ac8054eb5f2384970a1abf123e967de7a5068e31 | 6148fee627723e9c1fa37d2f5b5c3da5c9b6676e | refs/heads/master | 2023-08-18T19:40:30.784409 | 2021-10-17T11:19:36 | 2021-10-17T11:19:36 | 417,370,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | from django.contrib import admin
from dsuser.models import Dsuser
# Register your models here.
@admin.register(Dsuser)
class DsuserAdmin(admin.ModelAdmin):
    """Admin configuration for Dsuser accounts."""
    # Columns shown in the admin change-list view.
    list_display = ('userId', 'password')
"carys3115@gmail.com"
] | carys3115@gmail.com |
2406cc09e09f44acf43936aebae6d7bcccffddd3 | fd2582f33b2be357f17bcdebb208c7756ff9199d | /djangosession/settings.py | f52d6881c1f6b88be76e54ce00bc7f796a0ccaf5 | [] | no_license | gangadharsa/djangosession | 6ab4826d6183bbb3ded78bd88d111902f9c714d7 | c8e4563115e63663c4c618ce25b555d38ad83a9b | refs/heads/master | 2020-05-17T04:17:30.039012 | 2019-04-25T20:19:22 | 2019-04-25T20:19:22 | 183,504,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,189 | py | """
Django settings for djangosession project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = '#5pipma+bv0zo$sr$inc=8aw=k3taes$h(7vwl+h1-$9$+q@va'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'cookieapp.apps.CookieappConfig',  # project app
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'djangosession.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory, searched before per-app dirs.
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'djangosession.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
| [
"gangadharsahoo1991@gmail.com"
] | gangadharsahoo1991@gmail.com |
843350f45169de7a9587ad33185a6e3bb54a0bba | 420269f4bede7be8de96eae076e054c0645c0475 | /settings.py | 3a1ada121c5d26bb7cffd15398862cca71d0340c | [
"Apache-2.0"
] | permissive | killvxk/Scanver | 45f766ea903a3fbc77cf85adfe6f44a032221baa | b23ac4753241f95e28587b82ae66672dd3eb6b7e | refs/heads/master | 2020-04-06T22:16:24.463437 | 2018-11-16T03:06:27 | 2018-11-16T03:06:27 | 157,830,534 | 1 | 0 | Apache-2.0 | 2018-11-16T07:39:12 | 2018-11-16T07:39:12 | null | UTF-8 | Python | false | false | 2,182 | py | #!/usr/bin/env python
# encoding=utf-8
#codeby 道长且阻
#email @ydhcui/QQ664284092
import os
import sys
sys.path.append('./lib')
import configparser
DEBUG = True
CONFNAME = 'conf.ini'  # config file name, resolved relative to the script dir
# Get the directory this script runs from
def cur_file_dir():
    """Return the directory this program runs from.

    ``sys.path[0]`` is the script's directory when Python runs a plain
    script; for some compiled/frozen launches it is the file itself, in
    which case the parent directory is returned.
    """
    path = sys.path[0]
    if os.path.isdir(path):
        return path
    # Robustness fix: the original only handled the isfile() case and
    # implicitly returned None otherwise, which crashed the module-level
    # os.path.join() calls. dirname() covers both remaining cases.
    return os.path.dirname(path)
config = configparser.ConfigParser()
SELFPATH = cur_file_dir()
print(SELFPATH)
config.read(os.path.join(SELFPATH, CONFNAME))
###
# Data / log / upload / report directories, all resolved under SELFPATH.
DATAPATH = os.path.join(SELFPATH, config.get('data', 'datapath'))
LOGSPATH = os.path.join(SELFPATH, config.get('data', 'logspath'))
UPLOADPATH = os.path.join(SELFPATH, config.get('data', 'uploadpath'))
REPORTPATH = os.path.join(SELFPATH, config.get('data', 'reportpath'))
# Database configuration
DATABASE = {
    'datatype': config.get('db', 'datatype'),  # 'sqlite'; mysql or sqlite
    'datahost': config.get('db', 'datahost'),  # '127.0.0.1'
    'dataport': config.getint('db', 'dataport'),  # 3306
    'dataname': config.get('db', 'dataname'),  # 'topsecvm'
    'username': config.get('db', 'username'),  # 'root'
    'password': config.get('db', 'password'),  # 'sa'
    'datapath': config.get('db', 'datapath'),  # './data/userdata.db'
    'charset': 'utf8mb4',
}
# Web site (Tornado-style) configuration
SETTINGS = {
    "debug": DEBUG,
    "gzip": True,
    "autoescape": True,
    "xsrf_cookies": False,
    "login_url": "/#/login",
    "cookie_secret": "e1tuaV1UW3NpXU9bMDFdUFtnZV1TW2RhXUVbc2FdQ1tiaV19",
    "template_path": os.path.join(os.path.dirname(os.path.realpath(__file__)), config.get('web', 'template_path')),
    "static_path": os.path.join(os.path.dirname(os.path.realpath(__file__)), config.get('web', 'static_path')),  # "./dist"
}
# Recognised file extensions for the scanner, e.g. 'a|b|c' -> ('a','b','c').
FILETYPELIST = tuple(config.get('scan', 'filetype').split('|'))
REDIS = {
    'host': config.get('redis', 'rhost'),
    'port': config.get('redis', 'rport'),
    'auth': config.get('redis', 'rauth'),
}
CLIENTID = config.get('node', 'nodeid')
if __name__ == '__main__':
    # Smoke test: print the parsed file-type tuple.
    print(tuple(config.get('scan', 'filetype').split('|')))
"664284092@QQ.COM"
] | 664284092@QQ.COM |
872fea6e32fd13b181d5aee64e9711014a9df0d1 | cbc5e26bb47ae69e80a3649c90275becf25ce404 | /xlsxwriter/test/styles/test_write_num_fmts.py | 1445d94a1fdf72bcc2de972b5c46a5085b48cd0d | [
"BSD-2-Clause-Views",
"BSD-3-Clause",
"MIT"
] | permissive | mst-solar-car/kicad-bom-generator | c3549409c3139f787ad28391372b5cb03791694a | 2aae905056d06f3d25343a8d784049c141d05640 | refs/heads/master | 2021-09-07T14:00:40.759486 | 2018-02-23T23:21:13 | 2018-02-23T23:21:13 | 107,868,801 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...styles import Styles
from ...format import Format
class TestWriteNumFmts(unittest.TestCase):
    """
    Test the Styles _write_num_fmts() method.
    """

    def setUp(self):
        # Write XML into an in-memory buffer so the output can be inspected.
        self.fh = StringIO()
        self.styles = Styles()
        self.styles._set_filehandle(self.fh)

    def test_write_num_fmts(self):
        """Test the _write_num_fmts() method"""
        # 164 is the first id available for user-defined number formats.
        xf_format = Format()
        xf_format.num_format_index = 164
        xf_format.set_num_format('#,##0.0')
        self.styles._set_style_properties([[xf_format], None, 0, 1, 0, 0, [], []])
        self.styles._write_num_fmts()
        exp = """<numFmts count="1"><numFmt numFmtId="164" formatCode="#,##0.0"/></numFmts>"""
        got = self.fh.getvalue()
        self.assertEqual(got, exp)
| [
"mwrb7d@mst.edu"
] | mwrb7d@mst.edu |
507fdaaab6478a504d41ec853c6be3c7460bf0dc | 32fc863692b38b467509b934213ca558d6cc44c7 | /exoduscli/config.py | 4496588ea48db6b585606cd467fe68a5644df80d | [
"MIT"
] | permissive | cthlo/exoduscli | d8efa075ef9b45ba1f0792c349b3ccd6fa392285 | 4e59d4f5207f5777e5e93801fcad7e4a7a81d692 | refs/heads/master | 2020-12-24T07:43:53.348336 | 2016-09-04T08:01:57 | 2016-09-04T08:01:57 | 56,095,078 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | from os import path
from lib import appdirs
_appdir = appdirs.user_data_dir('exoduscli')  # per-user data directory
# Paths emulating Kodi's special:// layout inside the app directory.
specialroot = path.join(_appdir, 'fakexbmc', 'special')
addonsdir = path.join(_appdir, 'addons')
logfile = path.join(_appdir, 'fakexbmc.log')
# Exodus add-on descriptor: ``b64url`` is the base64-encoded download URL,
# ``zipmd5`` the expected MD5 checksum of the downloaded archive.
exodus = dict(
    id = 'plugin.video.exodus',
    b64url = 'aHR0cHM6Ly9vZmZzaG9yZWdpdC5jb20vZXhvZHVzL3BsdWdpbi52aWRlby5leG9kdXMvcGx1Z2luLnZpZGVvLmV4b2R1cy0yLjAuOC56aXA=',
    zipmd5 = 'd1bf483d48be348c916fb95750b30e61',
    entryfile = 'exodus.py',
    version = '2.0.8'
)
# Library add-ons Exodus depends on; same fields plus ``libpath``
# (presumably the importable directory inside the archive — TODO confirm
# against the add-on loader).
libs = [
    dict(
        id = 'script.module.urlresolver',
        b64url = 'aHR0cHM6Ly9vZmZzaG9yZWdpdC5jb20vdHZhcmVzb2x2ZXJzL3R2YS1jb21tb24tcmVwb3NpdG9yeS9yYXcvbWFzdGVyL3ppcHMvc2NyaXB0Lm1vZHVsZS51cmxyZXNvbHZlci9zY3JpcHQubW9kdWxlLnVybHJlc29sdmVyLTMuMC4xOS56aXA=',
        zipmd5 = '4b78acc8d1f61cc7765071bbcbb8e09a',
        libpath = 'lib',
        version = '3.0.19'
    )
]
| [
"cthlo@users.noreply.github.com"
] | cthlo@users.noreply.github.com |
41ce2dca456d38f905d9b1de4cc2d6e24dd2ab6b | 45beb8abb15127630080c92637f70fe67c06dcc0 | /catkin_ws/build/robot/catkin_generated/pkg.develspace.context.pc.py | f81082d918af8d0844977e0eb893a8451cdeb75b | [] | no_license | ABorghini/loop-closure-labiagi | ed511fc1d02b9c0958407b3218428066f03e4f28 | 321eca63e427fe7f1facb0f41190ef7338dddcc7 | refs/heads/main | 2023-08-14T17:40:20.942746 | 2021-10-04T10:26:44 | 2021-10-04T10:26:44 | 413,348,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Values below were substituted by catkin when generating this develspace
# pkg-config context; edit the template, not this file.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "robot"
PROJECT_SPACE_DIR = "/home/alessia/Desktop/Loop Closure/loop-closure-labiagi/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"noreply@github.com"
] | noreply@github.com |
4511567dc74a2cf2f393471285401bfacdebf7fc | 89da172895a4f54b29992e3975af994c1ebc4108 | /common/request.py | fcadae56f38931b593e3e94a34ca9f65897eff8f | [] | no_license | zjunbin/QCDApiTesting | aa5552f3d751f0197acda9d3c0bc0b4e6cd920ce | c9e3738d91c9a25ead882e6021e23ba2e265dc27 | refs/heads/master | 2020-08-31T14:24:35.173617 | 2019-10-31T07:46:49 | 2019-10-31T07:46:49 | 218,710,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,521 | py | # coding utf-8
# @time :2019/3/119:10
# @Author :zjunbin
# @Email :648060307@qq.com
# @File :request.py
import requests
from common.mylog import MyLog
mylog = MyLog()
class Request:
    """Thin wrapper around ``requests`` that fires one HTTP call on
    construction and exposes the response through small accessors.

    Supported methods: 'get', 'post', 'delete'. ``data`` is sent as query
    parameters for GET/DELETE and as the form body for POST; ``cookies``
    is only forwarded for POST. Any other method prints 'None' and leaves
    ``self.resp`` unset (kept for backward compatibility).
    """

    def __init__(self, method, url, data, cookies=None):
        if method == 'get':
            try:
                self.resp = requests.get(url=url, params=data)
            except AttributeError as e:
                mylog.error(e)
                raise e
        elif method == 'post':
            try:
                self.resp = requests.post(url=url, data=data, cookies=cookies)
            except AttributeError as e:
                mylog.error(e)
                raise e
        elif method == 'delete':
            try:
                # BUG fix: the keyword is ``params`` — the original
                # ``param=data`` raised TypeError on every delete call.
                self.resp = requests.delete(url=url, params=data)
            except AttributeError as e:
                mylog.error(e)
                raise e
        else:
            print('None')

    def get_txt(self):
        """Response body as text."""
        return self.resp.text

    def get_json(self):
        """Response body parsed as JSON."""
        return self.resp.json()

    def cookies(self):
        """Cookies the server set on this response."""
        return self.resp.cookies

    def status_code(self):
        """HTTP status code of the response."""
        return self.resp.status_code
if __name__ == '__main__':
    # Ad-hoc smoke test: log in, then recharge the same account.
    login_url = 'http://120.78.128.25:8765/futureloan/mvc/api/member/login'
    recharge_url = 'http://120.78.128.25:8765/futureloan/mvc/api/member/recharge'
    login_params = {"mobilephone": "18684720553", "pwd": "python"}
    recharge_data = {"mobilephone": "18684720553", "amount": "500000"}
    # A Session carries the login cookie into the second request.
    # (Renamed from ``re``, which shadowed the stdlib regex module name.)
    session = requests.session()
    resp = session.request(method='get', url=login_url, params=login_params)
    resp2 = session.request(method='post', url=recharge_url, data=recharge_data)
    print(resp2.text)
"648260307@qq.com"
] | 648260307@qq.com |
42d5a6042e0c6be7f18f9d30be02d922a309e3ca | ab6a1d156b1aafbb1f710ecf206716e807d23a93 | /LoadDataset.py | ef83b32eb5c630fdafc46d0442aaa7ccf98ee5b8 | [] | no_license | shahzainmehboob/deeplearning | f13d5d05fbc0d833f8b63df86550e1859ce92181 | ee8079fe1c532d9cf436a6d4a27878845db49d57 | refs/heads/master | 2020-04-20T00:49:56.816627 | 2019-02-20T23:05:41 | 2019-02-20T23:05:41 | 168,530,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,278 | py | import os
import matplotlib.image as misc
from sklearn.utils import shuffle
import numpy as np
import scipy
from skimage.transform import rescale, resize, downscale_local_mean
class LoadData:
    """Loads an image-classification dataset laid out as one directory per
    class; directory names look like 'c0'..'c9' and the digit is the label.
    """

    # give the relative path of the data set in the constructor.
    def __init__(self, address):
        if os.listdir(address):
            print('Dataset directory added')
            print(os.listdir(address))
            self.address = address
        else:
            # NOTE(review): self.address stays unset on this path,
            # preserved from the original behaviour.
            print('Give proper directory name')

    def generate_data(self):
        """Load every image from every class directory, resized to
        227x227x3 (skimage), and return (images, labels)."""
        dir_list = os.listdir(self.address)
        Xdata = []
        Ydata = []
        for dir_name in dir_list:
            cls_lbl = int(dir_name[1])  # 'c7' -> 7
            current_dir = self.address + '/' + dir_name
            for image_name in os.listdir(current_dir):
                image_add = current_dir + '/' + image_name
                # Drop any alpha channel, keep RGB.
                image = misc.imread(image_add)[:, :, :3]
                image_r = resize(image, (227, 227, 3))
                Xdata.append(image_r)
                Ydata.append(cls_lbl)
            print(str(dir_name), ' directory images are fetched!!! ')
        return Xdata, Ydata

    # Shuffle images and labels in unison (delegates to sklearn).
    def shuffle_data(self, X, y):
        return shuffle(X, y)

    def generate_data_2cls(self, dir_list=('c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9')):
        """Same as generate_data but over an explicit directory list.

        FIX: the default is now an immutable tuple — the original mutable
        list default is the classic shared-default pitfall. List arguments
        are still accepted.
        """
        Xdata = []
        Ydata = []
        for dir_name in dir_list:
            cls_lbl = int(dir_name[1])
            current_dir = self.address + '/' + dir_name
            for image_name in os.listdir(current_dir):
                image_add = current_dir + '/' + image_name
                image = misc.imread(image_add)[:, :, :3]
                # NOTE(review): scipy.misc.imresize is deprecated/removed in
                # modern SciPy; kept for behavioural parity with the original.
                image_r = scipy.misc.imresize(image, (227, 227, 3))
                Xdata.append(image_r)
                Ydata.append(cls_lbl)
            print(str(dir_name), ' directory images are fetched!!! ')
        return Xdata, Ydata
| [
"shahzainmehboob@gmail.com"
] | shahzainmehboob@gmail.com |
288d5829e8450be1377316903a21346afe1a1f7d | 01c2cc36a4799f3cf9d0cbbfb1f2b7c934d84e8a | /Algorithms/search-divide-conquer/QuickSort.py | 0ac8603228eb0d4fddc62c2a30f0425f8c168aa7 | [] | no_license | koko1123/basic-algorithms | ca4f4b10ca84b3b9d9c560c5e8d14420633924cc | 5bcc9fd2c01a6b400c109f50697db04a497af44b | refs/heads/master | 2021-06-19T12:27:03.593160 | 2017-07-14T00:17:53 | 2017-07-14T00:17:53 | 79,063,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | import random
"""
Canonical implementation of Quicksort (in place)
random pivot used
"""
def quick_sort(arr):
    """Sort *arr* in place with quicksort (random pivot)."""
    quick_sort_helper(arr, 0, len(arr) - 1)

def quick_sort_helper(arr, start, end):
    """Recursively sort arr[start..end] (inclusive bounds).

    BUG fix: the original base case ``end - start <= 1`` returned with
    two-element subarrays still unsorted; recurse until the slice has at
    most one element instead.
    """
    if start >= end:
        return
    p = partition(arr, start, end)
    quick_sort_helper(arr, start, p - 1)
    quick_sort_helper(arr, p + 1, end)

def partition(arr, start, end):
    """Lomuto partition around a uniformly random pivot.

    Returns the pivot's final index; elements left of it are smaller,
    elements right of it are >= the pivot.
    """
    pivot_index = random.randint(start, end)
    pivot = arr[pivot_index]
    # Park the pivot at the front, then sweep smaller elements left.
    arr[start], arr[pivot_index] = arr[pivot_index], arr[start]
    i = start + 1
    for j in range(start + 1, end + 1):
        if arr[j] < pivot:
            arr[j], arr[i] = arr[i], arr[j]
            i += 1
    arr[i - 1], arr[start] = arr[start], arr[i - 1]
    return i - 1
# quick smoke test: sort a sample list in place and print it
chaos = [5, 6, 3, 7, 2, 4, 1]
quick_sort(chaos)
print(chaos)
| [
"amlandeep1912@gmail.com"
] | amlandeep1912@gmail.com |
0ae2060bf342ed56086de24c03c498abac06721e | aa11a31c94ca91670499c14ff176ed6c67535c9b | /pkg/코드업/기초100제 - 재귀/기초100제 - 정렬/3014-정렬을 빠르게!.py | de40d3539aa5c2f4f76808e28670325a1d0049e1 | [] | no_license | JE-BONG/python__basic | d273fcccee2cdb0d67de7f866f1e0d9ecd9658ce | 958bddf3aec5d920053e3c77a173bac7a2326cae | refs/heads/master | 2023-04-18T10:17:08.921996 | 2021-05-03T11:42:52 | 2021-05-03T11:42:52 | 355,383,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | n = int(input())
data_list = list(map(int, input().split()))
# The original compare-and-swap bubble sort (with a redundant no-op
# self-assignment branch) produced ascending order in O(n^2); the
# built-in Timsort prints the identical ascending result in O(n log n),
# matching the problem title "make sorting fast".
data_list.sort()
print(data_list)
"jab1523@naver.com"
] | jab1523@naver.com |
497efd5264fcd56bbb680a97e50dbec2176cc6b7 | 9ddb13fbd31e194ed9963396d5a3f9b3ebae317b | /venv/bin/django-admin.py | 83d35cf46e6cc60b5f1f9866947b622d11fce5b1 | [] | no_license | SeongYoonHuh/Bookmark | d773ffcbf94d06d059d13cc9ddee44545fd354fd | 245692f7ea6c7e8afd78c7daad782ab1ec2feb4f | refs/heads/master | 2020-05-18T05:05:28.623807 | 2019-04-30T07:58:54 | 2019-04-30T07:58:54 | 184,194,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | #!/home/seongyoonhuh/bookmark_project_20190429/venv/bin/python
from django.core import management
if __name__ == "__main__":
    # Legacy django-admin.py entry point: hand argv straight to Django's
    # command-line utility.
    management.execute_from_command_line()
| [
"hsy2763@naver.com"
] | hsy2763@naver.com |
fed18dc7e5b47ca1829d70af4e388786385eadde | 9b2bd53ad9923985f5d87713937fa1420113da0a | /EX05_trabalhos.py | 96379cdbf4688b643823e047606f97e8021283b1 | [] | no_license | rlsmadsfc/anc6_python | ea9f024d2d662f70c4e2533dd53ce7aef2724d10 | 9f6edfe472d4b71eb8b5c07a436591fedf20f73d | refs/heads/main | 2023-03-04T11:18:02.514775 | 2021-02-14T22:42:28 | 2021-02-14T22:42:28 | 331,103,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,570 | py | #!/usr/bin/python3
#coding: utf-8
# Neste exercício poderia ter sido utilizada apenas a manipulação de listas
# Contudo preferi utilizar listas e strings para exercitar a lógica, manipulação de variáveis
# Importar bibliotecas
import sys, os, time, datetime, string
# Função para procurar a letra na lista
def procura_letra(lista, letra):
    """Return True if *letra* occurs in *lista*, otherwise False.

    Idiomatic replacement for the original index loop: the ``in``
    operator performs the same linear scan, implemented in C.
    """
    return letra in lista
# Programa principal
# Main program: read two distinct letters and print the letter(s) halfway
# between them in the alphabet.
if __name__ == "__main__":
    try:
        # Define variables
        index1 = int()
        index2 = int()
        string1 = ""
        string2 = ""
        loop1 = 1
        loop2 = True
        letras = []
        listaAlfabeto = []
        output = []
        msg = ""
        # Build a list with the lowercase alphabet
        listaAlfabeto = list(string.ascii_lowercase)
        # Clear the screen
        os.system('clear')
        # Print the alphabet (as a list)
        print("\nAs letras digitadas em maiúsculas serão convertidas para minúsculas\n")
        print("\nEste é o alfabeto (em lista):")
        print(listaAlfabeto)
        # Collect the two input letters
        for loop1 in range(1, 3):
            if loop1 == 1:
                msg = "primeira"
            else:
                msg = "segunda"
            loop2 = True
            # Read one letter, repeating until it is valid
            while loop2:
                entrada = input("\nDigite a " + msg + " letra (a-z): ")
                if len(entrada) != 1:
                    print("\nDigite uma única letra (a-z)")
                # Check that the typed letter is in the alphabet
                # (a multi-character entry also fails this check)
                if procura_letra(listaAlfabeto, entrada.lower()) == True:
                    # Check whether the letter was already typed
                    if procura_letra(letras, entrada.lower()) == False:
                        letras.append(entrada.lower())
                        loop2 = False
                    else:
                        print("\nLetra já digitada! Digite uma nova letra!")
                else:
                    loop2 = True
        # Order the two indexes by alphabet position
        # (an alternative would be to sort the list)
        if listaAlfabeto.index(letras[0]) < listaAlfabeto.index(letras[1]):
            index1 = listaAlfabeto.index(letras[0])
            index2 = listaAlfabeto.index(letras[1])
        else:
            index1 = listaAlfabeto.index(letras[1])
            index2 = listaAlfabeto.index(letras[0])
        # Build a string with the alphabet, based on the list
        for i in range(len(listaAlfabeto)):
            string1 = string1 + " " + listaAlfabeto[i]
        # Print the alphabet (as a string)
        print("\nEste é o alfabeto (em string):" + string1)
        # Build a string with the letters between the two given ones
        for i in range(index1, index2 + 1):
            string2 = string2 + listaAlfabeto[i]
        # If the letters are adjacent there is no central letter: print the
        # alphabet with both letters upper-cased to highlight them.
        # Otherwise continue to the central-letter computation below.
        if index2 - index1 == 1:
            print("\nAs letras são próximas e não há uma letra central. Ver abaixo as letras em maiúsculas:\n")
            output = listaAlfabeto
            output[index1] = listaAlfabeto[index1].upper()
            output[index2] = listaAlfabeto[index2].upper()
            print(output)
        else:
            # Even span: print the two central letters;
            # odd span: print the single central letter.
            if len(string2) % 2 == 0:
                meio_esquerda = string2[(len(string2) - 1) // 2]
                meio_direita = string2[len(string2) // 2]
                # Output follows the order the user typed the letters in
                print("\nAs letras " + meio_esquerda + "," + meio_direita + " são centrais as letras " + str(letras[0] + "-" + str(letras[1])))
            else:
                medio = string2[len(string2)//2]
                # Output follows the order the user typed the letters in
                print("\nA letra " + medio + " é central as letras " + str(letras[0] + "-" + str(letras[1])))
        print("\nFim de Programa!!!")
    except KeyboardInterrupt:
        print("Program terminado a pedido! Bye...")
        sys.exit()
    except:
        # NOTE(review): bare except hides real errors; narrowing it would
        # change behaviour, so it is only flagged here.
        print("Ups, ocorreu um erro inesperado!")
        sys.exit()
| [
"noreply@github.com"
] | noreply@github.com |
5a4910de1072d92d94f9dd51734477b5e611a449 | cb5fa586ec7d6593285c7fadb452edfa6cfc43d9 | /setup.py | 48f74c8b94d1bd7f0be181f87f7fbad6c68dfb5c | [] | no_license | McManning/OSUTracTheme | 6b557e1a4bc8f320608bf2bf12801f8298465ddc | c2112aa6d071eac18841014515c24511b341fcaf | refs/heads/master | 2016-09-03T06:46:21.908849 | 2015-05-20T14:42:36 | 2015-05-20T14:42:36 | 35,551,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# Copyright (C) 2015 Chase McManning <cmcmanning@gmail.com>
#
# License TODO
#
from setuptools import setup
# Importable package name, reused for the Trac plugin entry point below.
PACKAGE = 'osutractheme'

setup(
    name = 'OSUTracTheme',
    version = '0.1.0',
    packages = [PACKAGE],
    author = 'Chase McManning',
    author_email = 'cmcmanning@gmail.com',
    description = 'Trac theme for the OSU Office of Research that complies with university style guidelines',
    license = 'TODO',
    zip_safe = True,
    install_requires = [
        'Trac',
        'TracThemeEngine>=2.0'
    ],
    # Register the package with Trac's plugin system.
    entry_points = {
        'trac.plugins': 'OSUTracTheme = %s' % (PACKAGE)
    }
)
"cmcmanning@gmail.com"
] | cmcmanning@gmail.com |
fb0f788170ff880209298d05f7ffe3435792829d | 254af7bbe369418729cad528907e03fc5ec47f54 | /Set4/md4.py | 24730d708ff989dcf0011eed473ffdeeb30fc282 | [] | no_license | pkug/matasano | 506fc54a2c3c527ff00b85418ded01946182ed74 | 6591a8ba94a04a06e23a602fcd46ddcfa6f8bfdb | refs/heads/master | 2020-12-03T23:28:37.553996 | 2016-10-19T12:25:34 | 2016-10-19T12:25:34 | 67,922,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,282 | py | #!/usr/bin/env python3
import binascii
def _pad(msg, bit_len=0):
n = len(msg)
bit_len = bit_len or n * 8
index = n & 0x3f
pad_len = 120 - index
if index < 56:
pad_len = 56 - index
padding = b'\x80' + b'\x00' * 63
suffix = bit_len.to_bytes(8, 'little', signed=False)
padded_msg = msg + padding[:pad_len] + suffix
return padded_msg
def _left_rotate(n, b):
return ((n << b) | ((n & 0xffffffff) >> (32 - b))) & 0xffffffff
def _if(x, y, z):
return x & y | ~x & z
def _maj(x, y, z):
return x & y | x & z | y & z
def _xor3(x, y, z):
return x ^ y ^ z
def _f1(a, b, c, d, k, s, X):
    # Round-1 step: rotate (a + IF(b,c,d) + X[k]) left by s; no constant.
    return _left_rotate(a + _if(b, c, d) + X[k], s)
def _f2(a, b, c, d, k, s, X):
    # Round-2 step: MAJ function plus the additive constant 0x5a827999.
    return _left_rotate(a + _maj(b, c, d) + X[k] + 0x5a827999, s)
def _f3(a, b, c, d, k, s, X):
    # Round-3 step: three-way XOR plus the additive constant 0x6ed9eba1.
    return _left_rotate(a + _xor3(b, c, d) + X[k] + 0x6ed9eba1, s)
class MD4:
    """Pure-Python MD4 message digest.

    Registers A-D hold the running 32-bit state words. Non-default
    constructor arguments and the ``bit_len`` override in :meth:`update`
    allow seeding arbitrary state (length-extension experiments).
    """

    def __init__(self, a=0, b=0, c=0, d=0):
        # Zero arguments fall back to the standard MD4 initial state.
        self.A = a or 0x67452301
        self.B = b or 0xefcdab89
        self.C = c or 0x98badcfe
        self.D = d or 0x10325476

    def update(self, message_string, bit_len=0):
        """Pad *message_string* (bytes) and compress each 64-byte block.

        *bit_len*, when non-zero, overrides the bit length encoded in the
        padding (see :func:`_pad`).
        """
        msg_bytes = _pad(message_string, bit_len)
        for i in range(0, len(msg_bytes), 64):
            self._compress(msg_bytes[i:i+64])

    def _compress(self, block):
        """Apply the MD4 compression function to one 64-byte block."""
        a, b, c, d = self.A, self.B, self.C, self.D
        # Decode the block into sixteen little-endian 32-bit words.
        x = []
        for i in range(0, 64, 4):
            x.append(int.from_bytes(block[i:i+4], 'little', signed=False))
        # Round 1: word order 0..15, shifts 3/7/11/19.
        a = _f1(a, b, c, d, 0, 3, x)
        d = _f1(d, a, b, c, 1, 7, x)
        c = _f1(c, d, a, b, 2, 11, x)
        b = _f1(b, c, d, a, 3, 19, x)
        a = _f1(a, b, c, d, 4, 3, x)
        d = _f1(d, a, b, c, 5, 7, x)
        c = _f1(c, d, a, b, 6, 11, x)
        b = _f1(b, c, d, a, 7, 19, x)
        a = _f1(a, b, c, d, 8, 3, x)
        d = _f1(d, a, b, c, 9, 7, x)
        c = _f1(c, d, a, b, 10, 11, x)
        b = _f1(b, c, d, a, 11, 19, x)
        a = _f1(a, b, c, d, 12, 3, x)
        d = _f1(d, a, b, c, 13, 7, x)
        c = _f1(c, d, a, b, 14, 11, x)
        b = _f1(b, c, d, a, 15, 19, x)
        # Round 2: column-major word order, shifts 3/5/9/13.
        a = _f2(a, b, c, d, 0, 3, x)
        d = _f2(d, a, b, c, 4, 5, x)
        c = _f2(c, d, a, b, 8, 9, x)
        b = _f2(b, c, d, a, 12, 13, x)
        a = _f2(a, b, c, d, 1, 3, x)
        d = _f2(d, a, b, c, 5, 5, x)
        c = _f2(c, d, a, b, 9, 9, x)
        b = _f2(b, c, d, a, 13, 13, x)
        a = _f2(a, b, c, d, 2, 3, x)
        d = _f2(d, a, b, c, 6, 5, x)
        c = _f2(c, d, a, b, 10, 9, x)
        b = _f2(b, c, d, a, 14, 13, x)
        a = _f2(a, b, c, d, 3, 3, x)
        d = _f2(d, a, b, c, 7, 5, x)
        c = _f2(c, d, a, b, 11, 9, x)
        b = _f2(b, c, d, a, 15, 13, x)
        # Round 3: bit-reversed word order, shifts 3/9/11/15.
        a = _f3(a, b, c, d, 0, 3, x)
        d = _f3(d, a, b, c, 8, 9, x)
        c = _f3(c, d, a, b, 4, 11, x)
        b = _f3(b, c, d, a, 12, 15, x)
        a = _f3(a, b, c, d, 2, 3, x)
        d = _f3(d, a, b, c, 10, 9, x)
        c = _f3(c, d, a, b, 6, 11, x)
        b = _f3(b, c, d, a, 14, 15, x)
        a = _f3(a, b, c, d, 1, 3, x)
        d = _f3(d, a, b, c, 9, 9, x)
        c = _f3(c, d, a, b, 5, 11, x)
        b = _f3(b, c, d, a, 13, 15, x)
        a = _f3(a, b, c, d, 3, 3, x)
        d = _f3(d, a, b, c, 11, 9, x)
        c = _f3(c, d, a, b, 7, 11, x)
        b = _f3(b, c, d, a, 15, 15, x)
        # update state (addition modulo 2**32)
        self.A = (self.A + a) & 0xffffffff
        self.B = (self.B + b) & 0xffffffff
        self.C = (self.C + c) & 0xffffffff
        self.D = (self.D + d) & 0xffffffff

    def digest(self):
        """Return the digest as a lowercase hex string.

        NOTE(review): despite the name this matches hashlib's
        ``hexdigest()``; :meth:`dgst` returns the raw bytes.
        """
        return binascii.hexlify(
            self.A.to_bytes(4, 'little', signed=False) + \
            self.B.to_bytes(4, 'little', signed=False) + \
            self.C.to_bytes(4, 'little', signed=False) + \
            self.D.to_bytes(4, 'little', signed=False)
        ).decode('ascii')

    def dgst(self):
        """Return the 16-byte raw digest (little-endian words A..D)."""
        return self.A.to_bytes(4, 'little', signed=False) + \
            self.B.to_bytes(4, 'little', signed=False) + \
            self.C.to_bytes(4, 'little', signed=False) + \
            self.D.to_bytes(4, 'little', signed=False)
if __name__ == '__main__':
    # Known-answer vectors; each line prints True when the digest matches.
    vectors = [
        ("", '31d6cfe0d16ae931b73c59d7e0c089c0'),
        ("a", 'bde52cb31de33e46245e05fbdbd6fb24'),
        ("abc", 'a448017aaf21d8525fc10ae87aa6729d'),
        ("message digest", 'd9130a8164549fe818874806e1c7014b'),
        ("abcdefghijklmnopqrstuvwxyz", 'd79e1c308aa5bbcdeea8ed63df412da9'),
        ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
         '043f8582f241db351ce627e153e7f0e4'),
        ("12345678901234567890123456789012345678901234567890123456789012345678901234567890",
         'e33b4ddc9c38f2199c3e7b164fcc0536'),
    ]
    for message, expected in vectors:
        hasher = MD4()
        hasher.update(message.encode('ascii'))
        print(hasher.digest() == expected)
| [
"pkugrinas@gmail.com"
] | pkugrinas@gmail.com |
e23525d9ba49cd582605b0febe397929da11e805 | cf44f0e38e1e5441166fc4e2729389b3efb1d7b8 | /venv/bin/virtualenv | 94240253b564275f3da734ad0f76e732e05bb66f | [] | no_license | aditisspatil/Flask | d8afba71ac8b1e01c2c1b4712b4df7f93dd45991 | b511eb99d5835e1bf612bb41d3e67c755cb8dd21 | refs/heads/main | 2023-03-08T11:20:51.133512 | 2021-02-20T14:15:08 | 2021-02-20T14:16:30 | 340,587,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | #!/home/aditi/PycharmProjects/flask/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from virtualenv.__main__ import run_with_catch
if __name__ == '__main__':
    # Strip the setuptools console-script suffix from argv[0], then hand
    # control to virtualenv's CLI, exiting with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run_with_catch())
| [
"aditi.ss.patil@gmail.com"
] | aditi.ss.patil@gmail.com | |
ea87350f02a504bbe169ab85c731b8ef3f775d6b | 58f6e58e507db3edb95a08dc58ae291376affa8d | /server/Server_Music/mus/migrations/0002_song_rating.py | 4f472b1b9fac4492870a3d7c757492add3123f0f | [] | no_license | meettaraviya/Musicky | 6cdbe64d08ef893d17ba2edcea6309147d49c996 | 0e6938075eee905d4eefcb350fecad606b95820d | refs/heads/master | 2022-12-03T23:35:59.590363 | 2016-11-22T06:34:57 | 2016-11-22T06:34:57 | 213,819,013 | 0 | 0 | null | 2022-11-22T01:06:59 | 2019-10-09T04:13:44 | Java | UTF-8 | Python | false | false | 434 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2016-11-21 10:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the ``rating`` column
    (small integer, default 0) to the ``Song`` model."""

    dependencies = [
        ('mus', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='song',
            name='rating',
            field=models.SmallIntegerField(default=0),
        ),
    ]
| [
"rishrocks17@gmail.com"
] | rishrocks17@gmail.com |
366fb7de34dd2ebcdb86b39dc81a53ca26966c1a | 47f824a2b71fff8ad983666f7d7db432d2184be6 | /create_cube_csv.py | 264841befd464303804a55b5a9a297322511ad1b | [] | no_license | oelarnes/cube | 2ece33ce23cc965c1a96885756d94ad21f50f82a | 9b62c304d84cd44ce9f2ef72f921ec2271a8c0bc | refs/heads/master | 2021-07-25T21:53:06.395466 | 2020-04-04T03:30:37 | 2020-04-04T03:30:37 | 139,216,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | #!/Users/joel/anaconda3/bin/python
# create_cube_csv.py
# populate card_ref.txt with unique list of cards, then
# usage 'python create_cube_csv.py < card_ref.txt > cache/cube.csv'
import scryfall, sys, logging
logging.basicConfig(filename='cube_csv.log', level=logging.WARNING)

# BUG fix: sys.argv[0] is the script path, not an attribute name — only
# real command-line arguments should override the default attribute list.
attrs = sys.argv[1:] if sys.argv[1:] else scryfall.CUBE_ATTRS

# A set gives O(1) duplicate detection (the original scanned a list).
seen = set()
print(scryfall.join_line([scryfall.get_attr_name(attr) for attr in attrs]))
for line in sys.stdin:
    if line not in seen:
        seen.add(line)
        print(scryfall.card_attr_line(line, attrs))
| [
"oelarnes@gmail.com"
] | oelarnes@gmail.com |
1ee7dc2b9ca208d6002aaa8adfe393e5b25d084f | 88be4d5657d19462eb1d74d2d4d98180b423a889 | /robolearn/torch/policies/weighted_multi_policy_selector.py | e0cb8481457f36ea0e1a6161526cff851f74721d | [
"BSD-3-Clause"
] | permissive | domingoesteban/robolearn | bc58278fe38894f4ca9ec9e657ee13a479a368b7 | 0d20125425c352b80ef2eeed1c0b11ab6497b11a | refs/heads/master | 2020-04-15T22:38:25.343229 | 2019-01-29T17:01:42 | 2019-01-29T17:01:42 | 165,080,647 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | from robolearn.torch.core import PyTorchModule
from robolearn.models.policies import ExplorationPolicy
class WeightedMultiPolicySelector(PyTorchModule, ExplorationPolicy):
    """Exposes one sub-policy of a multi-policy as a standalone policy.

    Every call is forwarded to the wrapped multi-policy with ``pol_idx``
    forced to the selected index (overriding any caller-supplied value).
    """

    def __init__(self, multipolicy, idx):
        # Framework machinery — presumably records constructor arguments
        # for serialization; TODO confirm against PyTorchModule.
        self.save_init_params(locals())
        super(WeightedMultiPolicySelector, self).__init__()
        ExplorationPolicy.__init__(self, multipolicy.action_dim)
        self._multipolicy = multipolicy
        self.idx = idx  # index of the sub-policy this selector exposes

    def get_action(self, *args, **kwargs):
        """Single-step action from the selected sub-policy."""
        kwargs['pol_idx'] = self.idx
        action, policy_info = self._multipolicy.get_action(*args, **kwargs)
        return action, policy_info

    def get_actions(self, *args, **kwargs):
        """Batched actions from the selected sub-policy."""
        kwargs['pol_idx'] = self.idx
        action, policy_info = self._multipolicy.get_actions(*args, **kwargs)
        return action, policy_info

    def forward(self, *nn_input, **kwargs):
        """Forward pass delegated to the multi-policy's __call__."""
        kwargs['pol_idx'] = self.idx
        action, policy_info = self._multipolicy(*nn_input, **kwargs)
        return action, policy_info
| [
"domingo.esteban@iit.it"
] | domingo.esteban@iit.it |
2b7e5d90a8455b81f14433b31ba280dad2324588 | 5c2ac56a9f768948451230d9fe0d394e1a332a2c | /metrician/monitors/__init__.py | ae7f7e29185d40502d25f123345f85c199bfd98f | [
"MIT"
] | permissive | tedtroxell/metrician | 4fef1ff1faa4a404c8ea31d889b02e2be2c27d8e | d4164dbff8db5645ee8beca11dc55ba6c26c4cb6 | refs/heads/main | 2023-03-21T08:43:26.325191 | 2021-03-11T19:23:22 | 2021-03-11T19:23:22 | 343,936,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | from .OOD import * | [
"ted@tedtroxell.com"
] | ted@tedtroxell.com |
7d08491efda08a5d607a59710e749f9ee4adf84e | 425f4829adc34b380ef81553bf094d94a8884135 | /v1beta1/test/test_v1beta1discovery_details.py | fa6b03318e3c5ccf00b5e533e50fe989f8d2b672 | [
"Apache-2.0"
] | permissive | appvia/client-python | 655a77242135d4b8d2742db8a69d569666a6ac41 | 7b7158e0b857197cabaa2ccfa71af529a09fd36d | refs/heads/master | 2020-07-18T19:12:06.489460 | 2019-09-04T11:13:52 | 2019-09-04T11:13:52 | 206,297,740 | 0 | 0 | null | 2019-09-04T10:54:33 | 2019-09-04T10:54:32 | null | UTF-8 | Python | false | false | 1,001 | py | # coding: utf-8
"""
grafeas.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: version not set
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from models.v1beta1discovery_details import V1beta1discoveryDetails # noqa: E501
from swagger_client.rest import ApiException
class TestV1beta1discoveryDetails(unittest.TestCase):
"""V1beta1discoveryDetails unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1discoveryDetails(self):
"""Test V1beta1discoveryDetails"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.v1beta1discovery_details.V1beta1discoveryDetails() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"danielwhatmuff@gmail.com"
] | danielwhatmuff@gmail.com |
19687c11d9274afac679c1d2611cf0b1b8156b13 | 7b10e35229aeaf81553dc8a2ac6fe3e9fbea0c32 | /setup.py | ad6d28be2db5826e4e17c1e135f8a598c9b37184 | [
"MIT"
] | permissive | bahattincinic/django-nodelete-model | 7e9257b0d6a6a826a1aee3ced1c3813893fa0daf | 5e2c83e2700e34cf396e72f9f3f06cfe96e5b821 | refs/heads/master | 2020-12-30T12:34:42.876815 | 2017-05-16T09:27:33 | 2017-05-16T09:27:33 | 91,387,039 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | #!/usr/bin/env python
from setuptools import setup
setup(
name="django-nodelete-model",
version='0.1',
url='http://github.com/django-nodelete-model',
author='Bahattin Cinic',
author_email='bahattincinic@gmail.com',
description='No Delete Model for django',
install_requires=[
'django>=1.4',
],
license='MIT',
include_package_data=True,
classifiers=[
"Programming Language :: Python",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
"Topic :: Software Development :: Libraries :: Python Modules",
"Framework :: Django",
"Environment :: Web Environment",
"Operating System :: OS Independent",
"Natural Language :: English",
]
)
| [
"bahattin@brickly.io"
] | bahattin@brickly.io |
172de3909694b0f4c9f1d6eb7ed7456069d750ae | fd0c5397d3d9b88cb90f69f5b54a8bcc2bce18d4 | /Codeforces_round721/A.py | 28d4e659236962e5bf6596b42a5aa77d3fc45ec4 | [] | no_license | hienpham15/Codeforces_competitions | 43e312468dc1e984be949257ed26f3eea0c6bd2d | fab13d5df1f82a96600abfae9b1e71507e5516ce | refs/heads/main | 2023-07-25T11:02:03.407916 | 2021-09-08T16:05:13 | 2021-09-08T16:05:13 | 376,763,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 20 16:15:00 2021
@author: hienpham
"""
import os
import math
import sys
parse_input = lambda: sys.stdin.readline().rstrip("\r\n")
def func(n):
i = n
while i > 0:
ans = i & (i - 1)
if ans == 0:
return (i - 1)
else:
i -= 1
def main():
n_cases = int(parse_input())
for i in range(n_cases):
n = int(parse_input())
print(func(n))
if __name__ == "__main__":
main() | [
"phamxuanhien15@gmail.com"
] | phamxuanhien15@gmail.com |
025a66fd3ce9811beceb05a3c51c2dccb93e6600 | e12830eb75fa77d5dc5eadaebc790d02e19c0fd7 | /Ch3/3.7.8.py | 0b602884050171e9f6ae175a2fa6befad7347452 | [] | no_license | mochapup/Python-Programming-2nd-edition-John-Zelle | 60f18dfeb687df8231362518ce3b20752850c4a2 | da67a3014066cc1ac242dd6571e1012d5fc58c5b | refs/heads/master | 2021-01-23T05:18:26.408239 | 2017-09-25T16:15:52 | 2017-09-25T16:15:52 | 92,963,347 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # 3.7.8.py
# This program figures out the date of easter
def main():
print("""This program figures out the Gregorianc Epact,
or the number of days since the previous new moon from January 1st for a given year""")
year = int(input("Enter a year: "))
C = year // 100
epact = (8+(C//4)-C+((8*C+13)//25)+11*(year%19))%30
print(f"There are {epact} days between January 1st and the previous new moon for {year}")
main()
| [
"c.a.mcdaniel405@gmail.com"
] | c.a.mcdaniel405@gmail.com |
6349c6c6588a2c17dd0fa1cd8751e74bc763f8aa | 380c3942dec6383d9af89456b055aa9c8db8a3a5 | /Introduction to Python/Exercises/Exercise5.py | 1d00c586abd43a2d78c076636de6c0746a664eab | [] | no_license | mariascervino/basics | 570decaa7a662c1bee09d1640b99d6ccdec377b5 | 4a1c31560a29807e4576538228e528c8ec4e8cde | refs/heads/master | 2022-04-17T07:02:24.792711 | 2020-04-15T22:56:29 | 2020-04-15T22:56:29 | 256,055,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | # Exercise 5: Using if statements, create a variable called day, set it to
# “Tuesday”. Check to see if day is equal to “Monday” or “Tuesday”, and if it
# is, print, “Today is sunny”. If it is not, print “Today it will rain”
day = "Tuesday"
if day == "Tuesday" or day == "Monday":
print("Today is sunny")
else:
print("Today it will rain") | [
"maria.scervino@hotmail.com"
] | maria.scervino@hotmail.com |
3d710bc135abe8b2f05ed2957e0e98fee42cc9fd | 287a10a6f28517003728aebbd7ed097af13a8d18 | /exp_170727_neuroproof/pipeline.py | 569d93db858b5d701c70c85039b00a3960cdd7df | [] | no_license | jhennies/nmmp_experiments | 05c78c6068fa0f6df0002e57529cd7b8d1daa456 | 7c06a5818a5176fa0dc17a42ba22b2262239d91d | refs/heads/master | 2021-04-06T12:01:25.537695 | 2017-09-22T13:42:51 | 2017-09-22T13:42:51 | 83,289,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,023 | py |
import os
import vigra
import cPickle as pickle
import numpy as np
import sys
sys.path.append(
'/export/home/jhennies/src/nature_methods_multicut_pipeline_devel/nature_methods_multicut_pipeline/software/')
from multicut_src import DataSet
from multicut_src import lifted_multicut_workflow
from multicut_src import load_dataset
from multicut_src import compute_false_merges
from multicut_src import resolve_merges_with_lifted_edges_global, resolve_merges_with_lifted_edges
from multicut_src import RandomForest
from multicut_src import ExperimentSettings
from multicut_src import merge_small_segments
import logging
logger = logging.getLogger(__name__)
def init_dataset(
meta_folder, name,
raw_filepath, raw_name,
probs_filepath, probs_name,
seg_filepath, seg_name,
gt_filepath=None, gt_name=None,
make_cutouts=False
):
# Init the dataset
ds = DataSet(meta_folder, name)
# Add data
ds.add_raw(raw_filepath, raw_name)
ds.add_input(probs_filepath, probs_name)
ds.add_seg(seg_filepath, seg_name)
if gt_filepath is not None:
ds.add_gt(gt_filepath, gt_name)
# add cutouts for lifted multicut training
if make_cutouts:
shape = ds.shape
z_offset = 10
ds.make_cutout([0, 0, 0], [shape[0], shape[1], z_offset])
ds.make_cutout([0, 0, z_offset], [shape[0], shape[1], shape[2] - z_offset])
ds.make_cutout([0, 0, shape[2] - z_offset], [shape[0], shape[1], shape[2]])
def init_datasets(
meta_folder, names,
raw_path, raw_files, raw_names,
probs_path, probs_files, probs_names,
seg_path, seg_files, seg_names,
gt_path, gt_files, gt_names,
make_cutouts=None
):
assert len(raw_files) == len(raw_names)
assert len(probs_files) == len(raw_files)
assert len(probs_names) == len(raw_files)
assert len(seg_files) == len(raw_files)
assert len(seg_names) == len(raw_files)
assert len(gt_files) == len(raw_files)
assert len(gt_names) == len(raw_files)
if make_cutouts is None:
make_cutouts = [False] * len(raw_files)
for idx, train_name in enumerate(names):
if not os.path.exists(os.path.join(meta_folder, train_name)):
print 'Dataset {} is being created ...'.format(train_name)
raw_file = raw_files[idx]
probs_file = probs_files[idx]
seg_file = seg_files[idx]
gt_file = gt_files[idx]
raw_name = raw_names[idx]
probs_name = probs_names[idx]
seg_name = seg_names[idx]
gt_name = gt_names[idx]
init_dataset(
meta_folder, train_name,
raw_path + raw_file, raw_name,
probs_path + probs_file, probs_name,
seg_path + seg_file, seg_name,
gt_filepath=gt_path + gt_file, gt_name=gt_name,
make_cutouts=make_cutouts[idx]
)
else:
print 'Training set {} exists, nothing to do.'.format(train_name)
def run_lifted_mc(
meta_folder,
train_folder,
ds_train_names,
ds_test_name,
save_path,
results_name,
pre_save_path=None
):
assert os.path.exists(os.path.split(save_path)[0]), "Please choose an existing folder to save your results"
merge_segments = True
compute_mc = True
if os.path.isfile(save_path): # Nothing to do
compute_mc = False
merge_segments = False
else: # Final result needs to be computed
if pre_save_path is not None:
if os.path.isfile(pre_save_path):
compute_mc = False
if compute_mc:
seg_id = 0
feature_list = ['raw', 'prob', 'reg']
feature_list_lifted = ['cluster', 'reg']
gamma = 2.
ds_train = [load_dataset(train_folder, name) for name in ds_train_names if name != ds_test_name]
ds_test = load_dataset(meta_folder, ds_test_name)
mc_nodes, _, _, _ = lifted_multicut_workflow(
ds_train, ds_test,
seg_id, seg_id,
feature_list, feature_list_lifted,
gamma=gamma
)
segmentation = ds_test.project_mc_result(seg_id, mc_nodes)
# Save in case sth in the following function goes wrong
if pre_save_path is not None:
vigra.writeHDF5(segmentation, pre_save_path, results_name, compression='gzip')
if merge_segments:
if not compute_mc:
assert pre_save_path is not None, 'Investigate code, this must not happen!'
segmentation = vigra.readHDF5(pre_save_path, results_name)
# # Relabel with connected components
# segmentation = vigra.analysis.labelVolume(segmentation.astype('uint32'))
# Merge small segments
segmentation = merge_small_segments(segmentation.astype('uint32'), 100)
# Store the final result
vigra.writeHDF5(segmentation, save_path, results_name, compression = 'gzip')
def find_false_merges(
ds_test_name,
ds_train_names,
meta_folder,
test_seg_path, test_seg_key,
train_segs_paths, train_segs_keys
):
ds_train = [load_dataset(meta_folder, name) for name in ds_train_names if name != ds_test_name]
ds_test = load_dataset(meta_folder, ds_test_name)
# Path folders
test_paths_cache_folder = os.path.join(meta_folder, ds_test_name, 'path_data')
train_paths_cache_folder = os.path.join(meta_folder, 'train_path_data')
# logger.info('Starting compute_false_merges...')
_, false_merge_probs, _ = compute_false_merges(
ds_train, ds_test,
train_segs_paths, train_segs_keys,
test_seg_path, test_seg_key,
test_paths_cache_folder,
train_paths_cache_folder
)
with open(os.path.join(test_paths_cache_folder, 'false_paths_predictions.pkl'), 'w') as f:
pickle.dump(false_merge_probs, f)
def resolve_false_merges(
ds_name, ds_names,
meta_folder, rf_cache_folder,
new_nodes_filepath,
pre_seg_filepath, pre_seg_key,
min_prob_thresh, max_prob_thresh,
exclude_objs_with_larger_thresh,
global_resolve=True
):
# Path folders
paths_cache_folder = os.path.join(meta_folder, ds_name, 'path_data')
# TODO Change here
weight_filepath = os.path.join(meta_folder, ds_name,
'probs_to_energies_0_z_16.0_0.5_rawprobreg.h5')
lifted_filepath = os.path.join(meta_folder, ds_name,
'lifted_probs_to_energies_0_3_0.5_2.0.h5')
ds_train = [load_dataset(meta_folder, name) for name in ds_names if name != ds_name]
rf_cache_name = 'rf_merges_%s' % '_'.join([ds.ds_name for ds in ds_train])
ds = load_dataset(meta_folder, ds_name)
seg_id = 0
path_data_filepath = os.path.join(paths_cache_folder, 'paths_ds_{}.h5'.format(ds_name))
# with open(os.path.join(paths_cache_folder, 'paths_ds_{}.pkl'.format(ds_name))) as f:
# path_data = pickle.load(f)
paths = vigra.readHDF5(path_data_filepath, 'all_paths')
if paths.size:
paths = np.array([path.reshape((len(path) / 3, 3)) for path in paths])
paths_to_objs = vigra.readHDF5(path_data_filepath, 'paths_to_objs')
with open(os.path.join(paths_cache_folder, 'false_paths_predictions.pkl')) as f:
false_merge_probs = pickle.load(f)
# Find objects where probability >= min_prob_thresh and <= max_prob_thresh
objs_with_prob_greater_thresh = np.unique(
np.array(paths_to_objs)[
np.logical_and(
false_merge_probs >= min_prob_thresh,
false_merge_probs <= max_prob_thresh
)
]
)
if exclude_objs_with_larger_thresh:
objs_to_exclude = np.unique(
np.array(paths_to_objs)[
false_merge_probs > max_prob_thresh
]
)
objs_with_prob_greater_thresh = np.setdiff1d(objs_with_prob_greater_thresh, objs_to_exclude)
# Extract all paths for each of the found objects
false_paths = {}
for obj in objs_with_prob_greater_thresh:
# print paths_to_objs == obj
false_paths[obj] = np.array(paths)[paths_to_objs == obj]
rf_filepath = os.path.join(rf_cache_folder, rf_cache_name)
# with open(rf_filepath) as f:
# path_rf = pickle.load(f)
path_rf = RandomForest.load_from_file(rf_filepath, 'rf', ExperimentSettings().n_threads)
mc_segmentation = vigra.readHDF5(pre_seg_filepath, pre_seg_key)
mc_weights_all = vigra.readHDF5(weight_filepath, "data")
lifted_weights_all = vigra.readHDF5(lifted_filepath, "data")
if global_resolve:
new_node_labels = resolve_merges_with_lifted_edges_global(
ds, seg_id,
false_paths,
path_rf,
mc_segmentation,
mc_weights_all,
paths_cache_folder=paths_cache_folder,
lifted_weights_all=lifted_weights_all
)
else:
new_node_labels = resolve_merges_with_lifted_edges(
ds, ds_train,
seg_id,
false_paths,
path_rf,
mc_segmentation,
mc_weights_all,
paths_cache_folder=paths_cache_folder,
lifted_weights_all=lifted_weights_all
)
with open(new_nodes_filepath, 'w') as f:
pickle.dump(new_node_labels, f)
def project_new_result(
ds_name, meta_folder,
new_nodes_filepath,
save_path, results_name
):
ds = load_dataset(meta_folder, ds_name)
seg_id = 0
# Load resolving result
with open(new_nodes_filepath) as f:
new_node_labels = pickle.load(f)
# project the result back to the volume
mc_seg = ds.project_mc_result(seg_id, new_node_labels)
# Write the result
vigra.writeHDF5(mc_seg, save_path, results_name, compression = 'gzip')
import nifty_with_cplex.graph.rag as nrag
def project_resolved_objects_to_segmentation(
meta_folder, ds_name,
mc_seg_filepath, mc_seg_key,
new_nodes_filepath,
save_path, results_name
):
ds = load_dataset(meta_folder, ds_name)
seg_id = 0
mc_segmentation = vigra.readHDF5(mc_seg_filepath, mc_seg_key)
# Load resolving result
with open(new_nodes_filepath) as f:
resolved_objs = pickle.load(f)
rag = ds.rag(seg_id)
mc_labeling = nrag.gridRagAccumulateLabels(rag, mc_segmentation)
new_label_offset = np.max(mc_labeling) + 1
for obj in resolved_objs:
resolved_nodes = resolved_objs[obj]
for node_id in resolved_nodes:
mc_labeling[node_id] = new_label_offset + resolved_nodes[node_id]
new_label_offset += np.max(resolved_nodes.values()) + 1
mc_segmentation = nrag.projectScalarNodeDataToPixels(rag, mc_labeling, ExperimentSettings().n_threads)
# Write the result
vigra.writeHDF5(mc_segmentation, save_path, results_name, compression = 'gzip')
| [
"julianhennies@hotmail.de"
] | julianhennies@hotmail.de |
46d57600aae28aee46b5ee1f2214f2fbdc284adf | 755824757ca24913b1456e0e87209bce79683673 | /inception/slim/inception_model.py | e1c05668abf1f9f1eeb71870529ae0900d8dfd4c | [] | no_license | shubi4/W266Project | 956afec1044df33fe9ec751a8dcd810c5e2db2be | 78d6b96fb1fdb4c9f06c317b214ae7c7dd1c5ab9 | refs/heads/master | 2021-04-22T12:27:09.825724 | 2016-12-17T09:06:10 | 2016-12-17T09:06:10 | 75,509,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,956 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception-v3 expressed in TensorFlow-Slim.
Usage:
# Parameters for BatchNorm.
batch_norm_params = {
# Decay for the batch_norm moving averages.
'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
with slim.arg_scope([slim.ops.conv2d],
stddev=0.1,
activation=tf.nn.relu,
batch_norm_params=batch_norm_params):
# Force all Variables to reside on the CPU.
with slim.arg_scope([slim.variables.variable], device='/cpu:0'):
logits, endpoints = slim.inception.inception_v3(
images,
dropout_keep_prob=0.8,
num_classes=num_classes,
is_training=for_training,
restore_logits=restore_logits,
scope=scope)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception.slim import ops
from inception.slim import scopes
def inception_v3(inputs,
dropout_keep_prob=0.8,
num_classes=1000,
is_training=True,
restore_logits=True,
scope=''):
"""Latest Inception from http://arxiv.org/abs/1512.00567.
"Rethinking the Inception Architecture for Computer Vision"
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
Zbigniew Wojna
Args:
inputs: a tensor of size [batch_size, height, width, channels].
dropout_keep_prob: dropout keep_prob.
num_classes: number of predicted classes.
is_training: whether is training or not.
restore_logits: whether or not the logits layers should be restored.
Useful for fine-tuning a model with different num_classes.
scope: Optional scope for op_scope.
Returns:
a list containing 'logits', 'aux_logits' Tensors.
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
with tf.op_scope([inputs], scope, 'inception_v3'):
with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
is_training=is_training):
with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
stride=1, padding='VALID'):
# 299 x 299 x 3
end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
scope='conv0')
# 149 x 149 x 32
end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
scope='conv1')
# 147 x 147 x 32
end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
padding='SAME', scope='conv2')
# 147 x 147 x 64
end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
stride=2, scope='pool1')
# 73 x 73 x 64
end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
scope='conv3')
# 73 x 73 x 80.
end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
scope='conv4')
# 71 x 71 x 192.
end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
stride=2, scope='pool2')
# 35 x 35 x 192.
net = end_points['pool2']
# Inception blocks
with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
stride=1, padding='SAME'):
# mixed: 35 x 35 x 256.
with tf.variable_scope('mixed_35x35x256a'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 64, [1, 1])
with tf.variable_scope('branch5x5'):
branch5x5 = ops.conv2d(net, 48, [1, 1])
branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
end_points['mixed_35x35x256a'] = net
# mixed_1: 35 x 35 x 288.
with tf.variable_scope('mixed_35x35x288a'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 64, [1, 1])
with tf.variable_scope('branch5x5'):
branch5x5 = ops.conv2d(net, 48, [1, 1])
branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
end_points['mixed_35x35x288a'] = net
# mixed_2: 35 x 35 x 288.
with tf.variable_scope('mixed_35x35x288b'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 64, [1, 1])
with tf.variable_scope('branch5x5'):
branch5x5 = ops.conv2d(net, 48, [1, 1])
branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
end_points['mixed_35x35x288b'] = net
# mixed_3: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768a'):
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
stride=2, padding='VALID')
with tf.variable_scope('branch_pool'):
branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
net = tf.concat(3, [branch3x3, branch3x3dbl, branch_pool])
end_points['mixed_17x17x768a'] = net
# mixed4: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768b'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 128, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 128, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768b'] = net
# mixed_5: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768c'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 160, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 160, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768c'] = net
# mixed_6: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768d'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 160, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 160, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768d'] = net
# mixed_7: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768e'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 192, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 192, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768e'] = net
# Auxiliary Head logits
#Subha: commented out
'''
aux_logits = tf.identity(end_points['mixed_17x17x768e'])
with tf.variable_scope('aux_logits'):
aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
padding='VALID')
aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
# Shape of feature map before the final layer.
shape = aux_logits.get_shape()
aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
padding='VALID')
aux_logits = ops.flatten(aux_logits)
aux_logits = ops.fc(aux_logits, num_classes, activation=None,
stddev=0.001, restore=restore_logits)
end_points['aux_logits'] = aux_logits
'''
# mixed_8: 8 x 8 x 1280.
# Note that the scope below is not changed to not void previous
# checkpoints.
# (TODO) Fix the scope when appropriate.
with tf.variable_scope('mixed_17x17x1280a'):
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 192, [1, 1])
branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
padding='VALID')
with tf.variable_scope('branch7x7x3'):
branch7x7x3 = ops.conv2d(net, 192, [1, 1])
branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
stride=2, padding='VALID')
with tf.variable_scope('branch_pool'):
branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
net = tf.concat(3, [branch3x3, branch7x7x3, branch_pool])
end_points['mixed_17x17x1280a'] = net
# mixed_9: 8 x 8 x 2048.
with tf.variable_scope('mixed_8x8x2048a'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 320, [1, 1])
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 384, [1, 1])
branch3x3 = tf.concat(3, [ops.conv2d(branch3x3, 384, [1, 3]),
ops.conv2d(branch3x3, 384, [3, 1])])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 448, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
branch3x3dbl = tf.concat(3, [ops.conv2d(branch3x3dbl, 384, [1, 3]),
ops.conv2d(branch3x3dbl, 384, [3, 1])])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch3x3, branch3x3dbl, branch_pool])
end_points['mixed_8x8x2048a'] = net
# mixed_10: 8 x 8 x 2048.
with tf.variable_scope('mixed_8x8x2048b'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 320, [1, 1])
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 384, [1, 1])
branch3x3 = tf.concat(3, [ops.conv2d(branch3x3, 384, [1, 3]),
ops.conv2d(branch3x3, 384, [3, 1])])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 448, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
branch3x3dbl = tf.concat(3, [ops.conv2d(branch3x3dbl, 384, [1, 3]),
ops.conv2d(branch3x3dbl, 384, [3, 1])])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch3x3, branch3x3dbl, branch_pool])
end_points['mixed_8x8x2048b'] = net
# Final pooling and prediction
with tf.variable_scope('logits'):
shape = net.get_shape()
net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
# 1 x 1 x 2048
net = ops.dropout(net, dropout_keep_prob, scope='dropout')
net = ops.flatten(net, scope='flatten')
# 2048
#Subha - don't want the final logits layer
'''
logits = ops.fc(net, num_classes, activation=None, scope='logits',
restore=restore_logits)
# 1000
end_points['logits'] = logits
end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
'''
return net, end_points
def inception_v3_parameters(weight_decay=0.00004, stddev=0.1,
batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
"""Yields the scope with the default parameters for inception_v3.
Args:
weight_decay: the weight decay for weights variables.
stddev: standard deviation of the truncated guassian weight distribution.
batch_norm_decay: decay for the moving average of batch_norm momentums.
batch_norm_epsilon: small float added to variance to avoid dividing by zero.
Yields:
a arg_scope with the parameters needed for inception_v3.
"""
# Set weight_decay for weights in Conv and FC layers.
with scopes.arg_scope([ops.conv2d, ops.fc],
weight_decay=weight_decay):
# Set stddev, activation and parameters for batch_norm.
with scopes.arg_scope([ops.conv2d],
stddev=stddev,
activation=tf.nn.relu,
batch_norm_params={
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon}) as arg_scope:
yield arg_scope
| [
"shubi4@hotmail.com"
] | shubi4@hotmail.com |
8ae9fb9ae54b014300cf7675e7bfdbabcd0e5011 | 836d5f7190f6b4503e758c87c71598f18fdfce14 | /12-Veri-Tabanı/sqlite-database-2/database.py | d5adc5a68ad53c2351607d67672c1ff0cbb2b0b7 | [] | no_license | S-Oktay-Bicici/PYTHON-PROGRAMMING | cf452723fd3e7e8ec2aadc7980208d747c502e9a | 22e864f89544249d6309d6f4570a4104bf47346b | refs/heads/main | 2021-11-30T00:19:21.158084 | 2021-11-16T15:44:29 | 2021-11-16T15:44:29 | 316,716,147 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | import sqlite3
veriler = [
("Ahmet Ümit","İstanbul Hatırası"),
("Yaşar Kemal","İnce Memed"),
("Paulo Coelho","Simyacı"),
("Paulo Coelho","Aldatmak")]
db = sqlite3.connect("kitaplar.db")
imlec = db.cursor()
imlec.execute("CREATE TABLE IF NOT EXISTS 'kitaplık tablosu' (yazar,kitap)")
for veri in veriler:
imlec.execute("INSERT INTO 'kitaplık tablosu' VALUES (?,?)",veri)
db.commit()
db.close()
| [
"noreply@github.com"
] | noreply@github.com |
b2d5d706f2f349be03c0c756e348e26475a9300b | ea44a1681e276b3cc85226b53de217f6096a05d4 | /fhir/resources/tests/test_specimen.py | bc833ac238d30077aeb6dc9c5c72f3cf16919802 | [
"BSD-3-Clause"
] | permissive | stephanie-howson/fhir.resources | 69d2a5a6b0fe4387b82e984255b24027b37985c4 | 126e9dc6e14541f74e69ef7c1a0b8a74aa981905 | refs/heads/master | 2020-05-04T22:24:49.826585 | 2019-06-27T15:51:26 | 2019-06-27T15:51:26 | 179,511,579 | 0 | 0 | null | 2019-04-04T14:14:53 | 2019-04-04T14:14:52 | null | UTF-8 | Python | false | false | 13,742 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-01-17.
# 2019, SMART Health IT.
import os
import pytest
import io
import unittest
import json
from .fixtures import force_bytes
from .. import specimen
from ..fhirdate import FHIRDate
@pytest.mark.usefixtures("base_settings")
class SpecimenTests(unittest.TestCase):
    """Round-trip tests for the FHIR R4 Specimen resource.

    Each testSpecimenN loads a JSON fixture, validates the parsed instance
    (implSpecimenN), serialises it back with as_json(), re-parses the result
    and validates again — proving parse -> serialise -> parse is lossless.
    """

    def instantiate_from(self, filename):
        """Load a Specimen fixture from FHIR_UNITTEST_DATADIR (cwd if unset)."""
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("Specimen", js["resourceType"])
        return specimen.Specimen(js)

    def testSpecimen1(self):
        # Serum-separator-tube fixture.
        inst = self.instantiate_from("specimen-example-serum.json")
        self.assertIsNotNone(inst, "Must have instantiated a Specimen instance")
        self.implSpecimen1(inst)
        js = inst.as_json()
        self.assertEqual("Specimen", js["resourceType"])
        inst2 = specimen.Specimen(js)
        self.implSpecimen1(inst2)

    def implSpecimen1(self, inst):
        # Field-by-field checks against specimen-example-serum.json.
        self.assertEqual(force_bytes(inst.accessionIdentifier.system), force_bytes("http://acme.com/labs/accession-ids"))
        self.assertEqual(force_bytes(inst.accessionIdentifier.value), force_bytes("20150816-00124"))
        self.assertEqual(inst.collection.collectedDateTime.date, FHIRDate("2015-08-16T06:40:17Z").date)
        self.assertEqual(inst.collection.collectedDateTime.as_json(), "2015-08-16T06:40:17Z")
        self.assertEqual(force_bytes(inst.container[0].type.coding[0].code), force_bytes("SST"))
        self.assertEqual(force_bytes(inst.container[0].type.coding[0].display), force_bytes("Serum Separator Tube"))
        self.assertEqual(force_bytes(inst.container[0].type.coding[0].system), force_bytes("http://acme.com/labs"))
        self.assertEqual(force_bytes(inst.id), force_bytes("sst"))
        self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
        self.assertEqual(force_bytes(inst.meta.tag[0].display), force_bytes("test health data"))
        self.assertEqual(force_bytes(inst.meta.tag[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"))
        self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
        self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("119364003"))
        self.assertEqual(force_bytes(inst.type.coding[0].display), force_bytes("Serum sample"))
        self.assertEqual(force_bytes(inst.type.coding[0].system), force_bytes("http://snomed.info/sct"))

    def testSpecimen2(self):
        # Pooled veterinary serum fixture.
        inst = self.instantiate_from("specimen-example-pooled-serum.json")
        self.assertIsNotNone(inst, "Must have instantiated a Specimen instance")
        self.implSpecimen2(inst)
        js = inst.as_json()
        self.assertEqual("Specimen", js["resourceType"])
        inst2 = specimen.Specimen(js)
        self.implSpecimen2(inst2)

    def implSpecimen2(self, inst):
        # Field-by-field checks against specimen-example-pooled-serum.json.
        self.assertEqual(force_bytes(inst.accessionIdentifier.system), force_bytes("https://vetmed.iastate.edu/vdl"))
        self.assertEqual(force_bytes(inst.accessionIdentifier.value), force_bytes("20171120-1234"))
        self.assertEqual(inst.collection.collectedDateTime.date, FHIRDate("2017-11-14").date)
        self.assertEqual(inst.collection.collectedDateTime.as_json(), "2017-11-14")
        self.assertEqual(force_bytes(inst.container[0].type.coding[0].code), force_bytes("RTT"))
        self.assertEqual(force_bytes(inst.container[0].type.coding[0].display), force_bytes("Red Top Tube"))
        self.assertEqual(force_bytes(inst.container[0].type.coding[0].system), force_bytes("https://vetmed.iastate.edu/vdl"))
        self.assertEqual(force_bytes(inst.container[0].type.text), force_bytes("Red Top Blood Collection Tube"))
        self.assertEqual(force_bytes(inst.id), force_bytes("pooled-serum"))
        self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
        self.assertEqual(force_bytes(inst.meta.tag[0].display), force_bytes("test health data"))
        self.assertEqual(force_bytes(inst.meta.tag[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"))
        self.assertEqual(force_bytes(inst.note[0].text), force_bytes("Pooled serum sample from 30 individuals"))
        self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
        self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("Serum sample, pooled"))
        self.assertEqual(force_bytes(inst.type.coding[0].display), force_bytes("Serum sample, pooled"))
        self.assertEqual(force_bytes(inst.type.coding[0].system), force_bytes("https://vetmed.iastate.edu/vdl"))
        self.assertEqual(force_bytes(inst.type.text), force_bytes("Pooled serum sample"))

    def testSpecimen3(self):
        # General venous-blood example fixture.
        inst = self.instantiate_from("specimen-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a Specimen instance")
        self.implSpecimen3(inst)
        js = inst.as_json()
        self.assertEqual("Specimen", js["resourceType"])
        inst2 = specimen.Specimen(js)
        self.implSpecimen3(inst2)

    def implSpecimen3(self, inst):
        # Field-by-field checks against specimen-example.json.
        self.assertEqual(force_bytes(inst.accessionIdentifier.system), force_bytes("http://lab.acme.org/specimens/2011"))
        self.assertEqual(force_bytes(inst.accessionIdentifier.value), force_bytes("X352356"))
        self.assertEqual(force_bytes(inst.collection.bodySite.coding[0].code), force_bytes("49852007"))
        self.assertEqual(force_bytes(inst.collection.bodySite.coding[0].display), force_bytes("Structure of median cubital vein (body structure)"))
        self.assertEqual(force_bytes(inst.collection.bodySite.coding[0].system), force_bytes("http://snomed.info/sct"))
        self.assertEqual(force_bytes(inst.collection.bodySite.text), force_bytes("Right median cubital vein"))
        self.assertEqual(inst.collection.collectedDateTime.date, FHIRDate("2011-05-30T06:15:00Z").date)
        self.assertEqual(inst.collection.collectedDateTime.as_json(), "2011-05-30T06:15:00Z")
        self.assertEqual(force_bytes(inst.collection.method.coding[0].code), force_bytes("LNV"))
        self.assertEqual(force_bytes(inst.collection.method.coding[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v2-0488"))
        self.assertEqual(force_bytes(inst.collection.quantity.unit), force_bytes("mL"))
        self.assertEqual(inst.collection.quantity.value, 6)
        self.assertEqual(force_bytes(inst.contained[0].id), force_bytes("hep"))
        self.assertEqual(force_bytes(inst.container[0].capacity.unit), force_bytes("mL"))
        self.assertEqual(inst.container[0].capacity.value, 10)
        self.assertEqual(force_bytes(inst.container[0].description), force_bytes("Green Gel tube"))
        self.assertEqual(force_bytes(inst.container[0].identifier[0].value), force_bytes("48736-15394-75465"))
        self.assertEqual(force_bytes(inst.container[0].specimenQuantity.unit), force_bytes("mL"))
        self.assertEqual(inst.container[0].specimenQuantity.value, 6)
        self.assertEqual(force_bytes(inst.container[0].type.text), force_bytes("Vacutainer"))
        self.assertEqual(force_bytes(inst.id), force_bytes("101"))
        self.assertEqual(force_bytes(inst.identifier[0].system), force_bytes("http://ehr.acme.org/identifiers/collections"))
        self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("23234352356"))
        self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
        self.assertEqual(force_bytes(inst.meta.tag[0].display), force_bytes("test health data"))
        self.assertEqual(force_bytes(inst.meta.tag[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"))
        self.assertEqual(force_bytes(inst.note[0].text), force_bytes("Specimen is grossly lipemic"))
        self.assertEqual(inst.receivedTime.date, FHIRDate("2011-03-04T07:03:00Z").date)
        self.assertEqual(inst.receivedTime.as_json(), "2011-03-04T07:03:00Z")
        self.assertEqual(force_bytes(inst.status), force_bytes("available"))
        self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
        self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("122555007"))
        self.assertEqual(force_bytes(inst.type.coding[0].display), force_bytes("Venous blood specimen"))
        self.assertEqual(force_bytes(inst.type.coding[0].system), force_bytes("http://snomed.info/sct"))

    def testSpecimen4(self):
        # Random urine (VMA) fixture.
        inst = self.instantiate_from("specimen-example-urine.json")
        self.assertIsNotNone(inst, "Must have instantiated a Specimen instance")
        self.implSpecimen4(inst)
        js = inst.as_json()
        self.assertEqual("Specimen", js["resourceType"])
        inst2 = specimen.Specimen(js)
        self.implSpecimen4(inst2)

    def implSpecimen4(self, inst):
        # Field-by-field checks against specimen-example-urine.json.
        self.assertEqual(force_bytes(inst.accessionIdentifier.system), force_bytes("http://lab.acme.org/specimens/2015"))
        self.assertEqual(force_bytes(inst.accessionIdentifier.value), force_bytes("X352356"))
        self.assertEqual(inst.collection.collectedDateTime.date, FHIRDate("2015-08-18T07:03:00Z").date)
        self.assertEqual(inst.collection.collectedDateTime.as_json(), "2015-08-18T07:03:00Z")
        self.assertEqual(force_bytes(inst.container[0].capacity.unit), force_bytes("mls"))
        self.assertEqual(inst.container[0].capacity.value, 50)
        self.assertEqual(force_bytes(inst.container[0].specimenQuantity.unit), force_bytes("mls"))
        self.assertEqual(inst.container[0].specimenQuantity.value, 10)
        self.assertEqual(force_bytes(inst.container[0].type.text), force_bytes("Non-sterile specimen container"))
        self.assertEqual(force_bytes(inst.id), force_bytes("vma-urine"))
        self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
        self.assertEqual(force_bytes(inst.meta.tag[0].display), force_bytes("test health data"))
        self.assertEqual(force_bytes(inst.meta.tag[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"))
        self.assertEqual(force_bytes(inst.processing[0].description), force_bytes("Acidify to pH < 3.0 with 6 N HCl."))
        self.assertEqual(force_bytes(inst.processing[0].procedure.coding[0].code), force_bytes("ACID"))
        self.assertEqual(force_bytes(inst.processing[0].procedure.coding[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v2-0373"))
        self.assertEqual(inst.processing[0].timeDateTime.date, FHIRDate("2015-08-18T08:10:00Z").date)
        self.assertEqual(inst.processing[0].timeDateTime.as_json(), "2015-08-18T08:10:00Z")
        self.assertEqual(inst.receivedTime.date, FHIRDate("2015-08-18T07:03:00Z").date)
        self.assertEqual(inst.receivedTime.as_json(), "2015-08-18T07:03:00Z")
        self.assertEqual(force_bytes(inst.status), force_bytes("available"))
        self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
        self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("RANDU"))
        self.assertEqual(force_bytes(inst.type.coding[0].display), force_bytes("Urine, Random"))
        self.assertEqual(force_bytes(inst.type.coding[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v2-0487"))

    def testSpecimen5(self):
        # Bacterial isolate fixture (contains an inline 'stool' resource).
        inst = self.instantiate_from("specimen-example-isolate.json")
        self.assertIsNotNone(inst, "Must have instantiated a Specimen instance")
        self.implSpecimen5(inst)
        js = inst.as_json()
        self.assertEqual("Specimen", js["resourceType"])
        inst2 = specimen.Specimen(js)
        self.implSpecimen5(inst2)

    def implSpecimen5(self, inst):
        # Field-by-field checks against specimen-example-isolate.json.
        self.assertEqual(force_bytes(inst.accessionIdentifier.system), force_bytes("http://lab.acme.org/specimens/2011"))
        self.assertEqual(force_bytes(inst.accessionIdentifier.value), force_bytes("X352356-ISO1"))
        self.assertEqual(inst.collection.collectedDateTime.date, FHIRDate("2015-08-16T07:03:00Z").date)
        self.assertEqual(inst.collection.collectedDateTime.as_json(), "2015-08-16T07:03:00Z")
        self.assertEqual(force_bytes(inst.collection.method.coding[0].code), force_bytes("BAP"))
        self.assertEqual(force_bytes(inst.collection.method.coding[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v2-0488"))
        self.assertEqual(force_bytes(inst.contained[0].id), force_bytes("stool"))
        self.assertEqual(force_bytes(inst.id), force_bytes("isolate"))
        self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
        self.assertEqual(force_bytes(inst.meta.tag[0].display), force_bytes("test health data"))
        self.assertEqual(force_bytes(inst.meta.tag[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"))
        self.assertEqual(force_bytes(inst.note[0].text), force_bytes("Patient dropped off specimen"))
        self.assertEqual(inst.receivedTime.date, FHIRDate("2015-08-18T07:03:00Z").date)
        self.assertEqual(inst.receivedTime.as_json(), "2015-08-18T07:03:00Z")
        self.assertEqual(force_bytes(inst.status), force_bytes("available"))
        self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
        self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("429951000124103"))
        self.assertEqual(force_bytes(inst.type.coding[0].display), force_bytes("Bacterial isolate specimen"))
        self.assertEqual(force_bytes(inst.type.coding[0].system), force_bytes("http://snomed.info/sct"))
| [
"connect2nazrul@gmail.com"
] | connect2nazrul@gmail.com |
842861fff402dd09ab5a9f2cfa8e490d1b842ff7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03696/s669152949.py | 6ffe317a8dbd760aeaf50530708cdf2ac5bd88ad | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | N=int(input())
S = input()
# Make S a balanced parenthesis string by prepending '(' for every unmatched
# ')' and appending ')' for every unmatched '('.
unmatched_open = 0   # '(' still waiting for a matching ')'
unmatched_close = 0  # ')' with no '(' available before it
for ch in S:
    if ch == '(':
        unmatched_open += 1
    elif ch == ')' and unmatched_open > 0:
        unmatched_open -= 1
    else:
        unmatched_close += 1
print('(' * unmatched_close + S + ')' * unmatched_open)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b163788348d239583607dd070a084cb206e92952 | be81dc6b30ddfcb512a58aae2a592f5707b65479 | /d1/swea_2063.py | 264787ad21465503391e4d2349391775b300b740 | [] | no_license | mingddo/Algo | 799082d2a9d8e2fa43a910ebf5e769e372774a70 | 6dee72aa3c99b59ada714bfe549b323dbdff2f30 | refs/heads/master | 2023-01-06T00:32:29.457676 | 2020-11-07T07:45:07 | 2020-11-07T07:45:07 | 281,081,195 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,057 | py | # 기본 제공코드는 임의 수정해도 관계 없습니다. 단, 입출력 포맷 주의
# 아래 표준 입출력 예제 필요시 참고하세요.
# 표준 입력 예제
'''
a = int(input()) 정수형 변수 1개 입력 받는 예제
b, c = map(int, input().split()) 정수형 변수 2개 입력 받는 예제
d = float(input()) 실수형 변수 1개 입력 받는 예제
e, f, g = map(float, input().split()) 실수형 변수 3개 입력 받는 예제
h = input() 문자열 변수 1개 입력 받는 예제
'''
# 표준 출력 예제
'''
a, b = 6, 3
c, d, e = 1.0, 2.5, 3.4
f = "ABC"
print(a) 정수형 변수 1개 출력하는 예제
print(b, end = " ") 줄바꿈 하지 않고 정수형 변수와 공백을 출력하는 예제
print(c, d, e) 실수형 변수 3개 출력하는 예제
print(f) 문자열 1개 출력하는 예제
'''
import sys
'''
아래의 구문은 input.txt 를 read only 형식으로 연 후,
앞으로 표준 입력(키보드) 대신 input.txt 파일로부터 읽어오겠다는 의미의 코드입니다.
여러분이 작성한 코드를 테스트 할 때, 편의를 위해서 input.txt에 입력을 저장한 후,
아래 구문을 이용하면 이후 입력을 수행할 때 표준 입력 대신 파일로부터 입력을 받아올 수 있습니다.
따라서 테스트를 수행할 때에는 아래 주석을 지우고 이 구문을 사용하셔도 좋습니다.
아래 구문을 사용하기 위해서는 import sys가 필요합니다.
단, 채점을 위해 코드를 제출하실 때에는 반드시 아래 구문을 지우거나 주석 처리 하셔야 합니다.
'''
#sys.stdin = open("input.txt", "r")
count = int(input())  # number of values on the next line (odd, per the problem)
values = sorted(map(int, input().split()))
# After sorting, the middle element is the median.
print(values[count // 2])
| [
"dk.myeong@gmail.com"
] | dk.myeong@gmail.com |
9d5e381b6742e606d841d20ce2e6480a9029a65d | e3af1769d017fa5b20677b1228fd3ab42afc8927 | /projet/IBPackage/operations.py | 481a6a5755012665fb4d06ee741c5fdef5d684ad | [] | no_license | komi24/IB201116 | 08e8692a72badb82eecc79af753e1cf5c4021380 | 924c6540978b0308686eac867c16a3f6d1725f65 | refs/heads/master | 2023-01-19T11:34:32.627342 | 2020-11-20T15:24:31 | 2020-11-20T15:24:31 | 313,242,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | # -*- coding: utf-8 -*-
def ajoute_2(a):
    """Return *a* increased by 2."""
    result = a + 2
    return result
def addition(a, b):
    """Return the sum of *a* and *b*."""
    total = a + b
    return total
def produit(a, b):
    """Return the product of *a* and *b*."""
    result = a * b
    return result
# def moyenne(liste, operation, init):
# somme = init
# for i in liste:
# somme = operation(i, somme)
| [
"mickael.bolnet@gmail.com"
] | mickael.bolnet@gmail.com |
263e5accf9c46da5bf018e6fe716b80de9ee55da | 4e0ff785b993b6bae70745434e61f27ca82e88f0 | /229-Majority-Element-II/solution.py | 1b9494257f4bd6ea4a55db58f2ad57d67a4ef1ec | [] | no_license | NobodyWHU/Leetcode | 2ee557dd77c65c5fa8ca938efb6de3793b4de261 | d284fa3daab02531e5300867463b293d44737e32 | refs/heads/master | 2021-01-23T14:05:28.161062 | 2016-09-23T11:51:51 | 2016-09-23T11:51:51 | 58,898,114 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | class Solution(object):
def majorityElement(self, nums):
    """
    :type nums: List[int]
    :rtype: List[int]

    Boyer-Moore voting for elements occurring more than len(nums)/3 times:
    track two candidates with vote counters, then verify with a real count.
    """
    cand_a = cand_b = None
    votes_a = votes_b = 0
    for value in nums:
        if value == cand_a:
            votes_a += 1
        elif value == cand_b:
            votes_b += 1
        elif votes_a == 0:
            cand_a, votes_a = value, 1
        elif votes_b == 0:
            cand_b, votes_b = value, 1
        else:
            votes_a -= 1
            votes_b -= 1
    total = len(nums)
    winners = []
    for cand in (cand_a, cand_b):
        if cand is not None and nums.count(cand) > total / 3:
            winners.append(cand)
    return winners
| [
"haohaoranran@126.com"
] | haohaoranran@126.com |
2bd14f5debb27b3889f24b4f36d1bcde6d2768cf | eb841ad2854cbcb60aa75b1080573da6ae8e2a1c | /Evolife/Scenarii/S_Cooperation.py | 122d8266f1d1db6bff0260ceae74330ded53d133 | [] | no_license | tomMoral/pjld | a5aef7201a1ed5e666c9b71b9edaa77e00e8ddd2 | 436b027f1ae55a168ec96db98580ebcf3c9bcf34 | refs/heads/master | 2020-06-04T07:53:50.789582 | 2014-03-13T14:32:30 | 2014-03-13T14:32:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,684 | py | ##############################################################################
# EVOLIFE www.dessalles.fr/Evolife Jean-Louis Dessalles #
# Telecom ParisTech 2013 www.dessalles.fr #
##############################################################################
##############################################################################
# S_Cooperation #
##############################################################################
""" EVOLIFE: Cooperation Scenario:
Individual A cooperates with individual B with the hope that
B will reciprocate. A remembers B if it is the case.
"""
#=============================================================#
# HOW TO MODIFY A SCENARIO: read Default_Scenario.py #
#=============================================================#
import sys
if __name__ == '__main__': sys.path.append('../..') # for tests
import random
from Evolife.Tools.Tools import percent, noise_mult, error
from Evolife.Scenarii.Default_Scenario import Default_Scenario
######################################
# specific variables and functions #
######################################
class Scenario(Default_Scenario):
    """Cooperation scenario: individuals offer gifts hoping for reciprocation,
    and remember partners who gave to them (see module docstring)."""

    ######################################
    # Most functions below overload some #
    # functions of Default_Scenario      #
    ######################################

    def genemap(self):
        """ Defines the name of genes and their position on the DNA.
        (see Genetic_map.py)"""
        return [('Cooperativeness',0),('Exploration',0)]  # Size 0 means that actual size is read from configuration (Starter, see Genetics section)

    def prepare(self, indiv):
        """ defines what is to be done at the individual level before interactions
        occur - Used in 'start_game'
        """
        # scores are reset
        indiv.score(200, FlagSet=True)  # resetting scores
        # friendship links (lessening with time) are updated
        indiv.lessening_friendship((100 - self.Parameter('Erosion'))/100.0)

    def partner(self, indiv, others):
        """ Selects the best memorized cooperator, if any.
        But with a probability controlled by the gene 'Exploration'
        another partner is randomly selected
        """
        BF = indiv.best_friend()
        # Sanity check: a remembered best friend must still be in the population.
        if BF and BF not in others:
            error('Cooperation: best friend has vanished',str(BF))
        # Exploit the known cooperator unless the Exploration gene wins the draw.
        if BF and random.randint(0,100) >= indiv.gene_relative_intensity('Exploration'):
            return BF
        # Exploration: a new partner is randomly chosen
        partners = others[:]  # ground copy of the list
        partners.remove(indiv)
        if BF:
            partners.remove(BF)
        if partners != []:
            return random.choice(partners)
        else:
            # indiv (and possibly BF) were the only members available.
            return None

    def interaction(self, indiv, Partner):
        """ Dyadic cooperative interaction: one player (indiv) makes the first step by
        offering a 'gift' to a partner. The latter then returns the favour.
        Both the gift and the returned reward are controlled by genes.
        Both involve costs.
        """
        # First step: initial gift
        gift = percent(self.Parameter('FirstStep') * indiv.gene_relative_intensity('Cooperativeness'))
        Partner.score(noise_mult(gift,self.Parameter('Noise')))  # multiplicative noise
        # First player pays associated cost
        # Cost is a function of investment
        cost = percent(gift * self.Parameter('FirstStepCost'))
        indiv.score(-cost)
        # Receiver remembers who gave the gift
        Partner.new_friend(indiv, gift)

    def update_positions(self, members, start_location):
        """ locates individuals on an 2D space
        """
        # sorting individuals by gene value
        duplicate = members[:]
        duplicate.sort(key=lambda x: x.gene_intensity('Cooperativeness'))
        # x = rank by Cooperativeness, y = relative Exploration gene value
        for m in enumerate(duplicate):
            m[1].location = (start_location + m[0], m[1].gene_relative_intensity('Exploration'))

    # def default_view(self):	return ['Network']

    def display_(self):
        """ Defines what is to be displayed. It offers the possibility
        of plotting the evolution through time of the best score,
        the average score, and the average value of the
        various genes defined on the DNA.
        It should return a list of pairs (C,X)
        where C is the curve colour and X can be
        'best', 'average', any gene name as defined by genemap
        or any phene name as dedined by phenomap
        """
        #return [(2,'Cooperativeness'),(3,'Exploration'),(4,'average'),(5,'best')]
        return [(2,'Cooperativeness'),(3,'Exploration')]
###############################
# Local Test #
###############################
if __name__ == "__main__":
print __doc__ + '\n'
raw_input('[Return]')
| [
"thierry.deo@polytechnique.edu"
] | thierry.deo@polytechnique.edu |
02f4b84ed35d98afddfaf42c1bced67546cb75f7 | 4c8cb78b6f9f2609cf82ff84fa65bfbf7129be0c | /90.py | 2a7be4afe3247e17d400f2bab25000217e4830f6 | [] | no_license | Diggerfdf/workbook_100_ardit | 24025edc81e0283c958da1db1ee9edb11c448d8e | 77309a04559dd68499d30b50a9339e8cf9cc7738 | refs/heads/master | 2020-04-21T02:26:52.118567 | 2019-02-05T15:19:12 | 2019-02-05T15:19:12 | 169,254,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | import sqlite3
import pandas as pd

# Export every country whose area exceeds 2,000,000 from the SQLite
# database to a CSV file with explicit column headers.
connection = sqlite3.connect("./Files/database.db")
cursor = connection.cursor()
cursor.execute("SELECT * FROM countries WHERE area > 2000000")
records = cursor.fetchall()
connection.close()

frame = pd.DataFrame.from_records(records)
frame.columns = ["Rank", "Country", "Area", "Population"]
frame.to_csv("./Files/db_countries_2.csv", index=False)
| [
"diggerfdf@gmail"
] | diggerfdf@gmail |
4490d298cb083a520e91f8cd046242f7439b10be | 60cf5de97160c0c104b447879edd0ea1ca9724e8 | /q29.py | 34fb2528f462f89c7b3226061a2fd7f1d74bc2cd | [] | no_license | VinayHaryan/String | 6f6b7924ab87ac8ea5509edefaa3aeda795b0de0 | 089dcf02a8d26afcae0ac2b23c640be5a6079095 | refs/heads/main | 2023-05-27T22:15:31.792837 | 2021-06-17T08:39:42 | 2021-06-17T08:39:42 | 377,736,749 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | '''
RUN LENGTH ENCODING IN PYTHON
given an input string, write a function that returns
string for the input string
For example, if the input string is ‘wwwwaaadexxxxxx’,
then the function should return ‘w4a3d1e1x6’.
Examples:
Input : str = 'wwwwaaadexxxxxx'
Output : 'w4a3d1e1x6'
'''
# python code for run length encoding
# from collections import OrderedDict
# def runlength(input):
# # generate ordered dictionary of all lower
# # case alphabets, its output will be
# # dict = {'w':0, 'a':0, 'd':0, 'e':0, 'x':0}
# dict = OrderedDict.fromkeys(input,0)
# # now iterate through input string to calculate
# # frquency of each character, its output will be
# # dict = {'w':4,'a':3,'d':1,'e':1,'x':6}
# for ch in input:
# dict[ch] += 1
# # now iterate through dictionary to make
# # output string from (key,value) pairs
# output = ''
# for key,value in dict.items():
# output = output + key + str(value)
# return output
# # Driver function
# if __name__ == '__main__':
# input="wwwwaaadexxxxxx"
# print (runlength(input))
from collections import OrderedDict
def runlengthencoding(input):
    """Return the run-length encoding of *input*.

    Example: 'wwwwaaadexxxxxx' -> 'w4a3d1e1x6'.

    Bug fix: the previous version counted total character frequencies with an
    OrderedDict, so repeated non-adjacent runs were merged ('aabaa' -> 'a4b1').
    True run-length encoding keeps separate runs separate ('aabaa' -> 'a2b1a2'),
    which itertools.groupby yields directly.
    """
    from itertools import groupby
    pieces = []
    for ch, run in groupby(input):
        pieces.append(ch + str(len(list(run))))
    return ''.join(pieces)
# Driver function
# Demo: encode the sample string from the problem statement.
if __name__ == '__main__':
    input = 'wwwwaaadexxxxxx'  # NOTE: shadows the builtin input() in this scope
    print(runlengthencoding(input))
"noreply@github.com"
] | noreply@github.com |
dd661697aebafd16f0b08085f79f72f5e6985143 | 82fb1dd31e047a3fcc8318f3df54a34e20921b14 | /caching/secrets/getSecet.py | 3ef5cf73af14bbf9d396614cd86720d52e5e12ec | [] | no_license | fanaticjo/aws | 3139265590006c0614cf07698da1017a0f0dfb88 | e783c71c448a9e8acf9c4aa52b22434600c08ffa | refs/heads/master | 2023-01-07T11:18:02.287305 | 2020-11-02T07:54:12 | 2020-11-02T07:54:12 | 300,803,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | import boto3
import base64
import os
import json
# Module-level Secrets Manager client shared by both helpers below.
# NOTE(review): region is hard-coded to us-east-1 -- confirm that is intended.
sts=boto3.client('secretsmanager',region_name='us-east-1')
def rdsSecret(rdssec):
    """Fetch the Secrets Manager secret named *rdssec* and build a
    PostgreSQL connection URL from it.

    For string secrets, returns "postgresql://user:password@host:port/db"
    built from the secret's username/password/host/port/db keys.
    For binary secrets, returns the base64-decoded payload.
    """
    get_secret_value_response = sts.get_secret_value(
        SecretId=rdssec
    )
    if 'SecretString' in get_secret_value_response:
        secret = json.loads(get_secret_value_response['SecretString'])
        return f"""postgresql://{secret['username']}:{secret['password']}@{secret['host']}:{secret['port']}/{secret['db']}"""
    else:
        # Bug fix: the decoded value was previously assigned to a local and
        # never returned, so the function silently yielded None here.
        return base64.b64decode(get_secret_value_response['SecretBinary'])
def redisSecret(redissec):
    """Look up the secret named *redissec* and build a redis:// URL
    for its cluster endpoint (default port 6379)."""
    response = sts.get_secret_value(
        SecretId=redissec
    )
    cluster = json.loads(response['SecretString'])
    return f"""redis://{cluster['rediscluster']}:6379"""
if __name__=="__main__":
print(rdsSecret())
print(redisSecret())
| [
"biswajit196@live.com"
] | biswajit196@live.com |
f5e2627ca61f312f90a8bd9fab2920c046f40bb0 | cbe32d97bba706af1cb16fd6b1bc479ff2fb58d0 | /utils.py | 12b357e071168f7229f384418221432912998886 | [] | no_license | skozh/NaturalLanguageProcessing_HSE | bd4e63a9b59841bc80b1526b7a5423c54cfe034d | c6e64d892855e07aad8760528301f5b9842b61a0 | refs/heads/main | 2023-06-22T20:03:36.826158 | 2021-07-19T09:14:30 | 2021-07-19T09:14:30 | 387,404,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,450 | py | import nltk
import pickle
import re
import numpy as np
nltk.download('stopwords')
from nltk.corpus import stopwords
# Paths for all resources for the bot.
# Presumably pickled models / data produced by the training notebooks --
# the .pkl files are loaded with unpickle_file() below; verify against callers.
RESOURCE_PATH = {
    'INTENT_RECOGNIZER': 'intent_recognizer.pkl',
    'TAG_CLASSIFIER': 'tag_classifier.pkl',
    'TFIDF_VECTORIZER': 'tfidf_vectorizer.pkl',
    'THREAD_EMBEDDINGS_FOLDER': 'thread_embeddings_by_tags',
    'WORD_EMBEDDINGS': 'word_embeddings.tsv',
}
# Compiled once at import time: text_prepare is called per query, and
# recompiling the patterns / rebuilding the stopword set on every call
# is wasted work. nltk.download('stopwords') above guarantees the corpus
# is available when this module is imported.
_REPLACE_BY_SPACE_RE = re.compile('[/(){}\[\]\|@,;]')
_BAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]')
_STOPWORDS_SET = set(stopwords.words('english'))


def text_prepare(text):
    """Performs tokenization and simple preprocessing.

    Lower-cases the text, turns punctuation/separators into spaces, drops
    every character outside [0-9a-z #+_], and removes English stopwords.
    """
    text = text.lower()
    text = _REPLACE_BY_SPACE_RE.sub(' ', text)
    text = _BAD_SYMBOLS_RE.sub('', text)
    text = ' '.join([x for x in text.split() if x and x not in _STOPWORDS_SET])
    return text.strip()
def load_embeddings(embeddings_path):
    """Loads pre-trained word embeddings from tsv file.

    Args:
      embeddings_path - path to the embeddings file
                        (one "word\\tvalue\\tvalue..." line per word).

    Returns:
      embeddings - dict mapping words to float32 numpy vectors;
      embeddings_dim - dimension of the vectors (0 for an empty file).
    """
    embeddings = {}
    embeddings_dim = 0  # fallback for an empty file (was a NameError before)
    # 'with' guarantees the file handle is closed (the bare open() leaked it).
    with open(embeddings_path, encoding='utf-8') as tsv_file:
        for line in tsv_file:
            word, *vec = line.strip().split('\t')
            embeddings_dim = len(vec)
            embeddings[word] = np.array(vec, dtype=np.float32)
    return embeddings, embeddings_dim
def question_to_vec(question, embeddings, dim):
    """Transforms a string to an embedding by averaging word embeddings.

    Words missing from *embeddings* are skipped; if no word is known,
    a zero vector of length *dim* is returned.
    """
    vec_sum = np.zeros(dim, dtype=np.float32)
    known = 0
    for token in question.split():
        vector = embeddings.get(token)
        if vector is not None:
            vec_sum += vector
            known += 1
    if known == 0:
        return vec_sum
    return vec_sum / known
def unpickle_file(filename):
    """Returns the result of unpickling the file content."""
    with open(filename, 'rb') as handle:
        payload = pickle.load(handle)
    return payload
| [
"noreply@github.com"
] | noreply@github.com |
3e4ed21539e3f1b888e3e2ff40ce6ac884f17494 | 648d6a02a4c890935234f25161f407b6a80d1e72 | /kkkk/main.py | d568e43f1672abcc5955ec4459a593916aef7008 | [] | no_license | bbchanidapa/read-file-csv-python | c8876cbf469b54770092cfabfd32e79f60020a5a | f837facaa367b29ed6e7fc203c1628fe5da88e6b | refs/heads/master | 2020-03-26T06:00:09.565214 | 2018-08-13T13:42:20 | 2018-08-13T13:42:20 | 144,584,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,789 | py | import os,pprint,re
from datetime import datetime, date, time
import read, zipped, db
import configuration as conf
# Accumulators filled from the parser configuration (one entry per section).
pattern = []
fields = {}
database = {}
for c in conf.config:
    p = conf.config[c].get('pattern')
    pattern.append(p)
    # NOTE(review): these four are plain variables, not per-section containers --
    # every iteration overwrites them, so only the LAST section's
    # header/datatype/data settings reach read.reader() below. Confirm intended.
    lineHeader = conf.config[c].get('header')
    lineDatatype = conf.config[c].get('datatype')
    lineData = conf.config[c].get('data')
    field = conf.config[c].get('field')
    fields[c] = field
# Locate the input files and parse them according to the config above.
files = read.getFile()
fileReads = read.reader(files,pattern,lineHeader,lineData,lineDatatype)
def convertType(fileReads):
    """Cast every column tagged 'INTEGER' in the type row to int, in place,
    for every parsed file; returns the same mapping."""
    for sheet in fileReads:
        type_row = fileReads[sheet]['dataType'][0]
        for row in fileReads[sheet]['data']:
            for col, col_type in enumerate(type_row):
                if col_type == 'INTEGER':
                    row[col] = int(row[col])
    return fileReads
# Normalise cell types, then zip header/data rows into per-table record dicts.
confileReads = convertType(fileReads)
dicts = zipped.zipFile(confileReads)
def setTimestamp(dicts):
    """Stamp every record of every table with a shared UTC load time, in place.

    The timestamp dict is built once, so all records carry the exact same
    load instant.
    """
    timestamp = { 'timestamp' : datetime.utcnow()}
    for d in dicts:
        count = len(dicts[d])
        # range() instead of xrange(): consistent with convertType() above
        # and valid on both Python 2 and Python 3.
        for c in range(0, count):
            dicts[d][c].update(timestamp)
    return dicts
# Mutates 'dicts' in place: adds a shared load timestamp to every record.
setTimestamp(dicts)
def customize(dicts):
    """Strip the configured fields from every record and rename each table
    key from '<name>' to 'report_<name>', in place; returns the mapping.

    Keys whose name matches a configured field pattern (with optional word
    characters around it) are deleted from every record.
    """
    # Bug fix: iterate over a snapshot of the keys. The loop both adds
    # ('report_<name>') and deletes ('<name>') keys of *dicts*, and mutating
    # a dict while iterating it directly corrupts/aborts the iteration.
    for file in list(dicts):
        nameDB = 'report_' + file
        for regs in fields[file]:
            for lists in dicts[file]:
                # Snapshot the record's keys too: matches are deleted below
                # (list() also keeps this valid on Python 3).
                for keys in list(lists.keys()):
                    matchs = re.match('(\w+)?' + regs + '(\w+)?', keys)
                    if matchs is not None:
                        key = matchs.group()
                        del lists[key]
        dicts[nameDB] = dicts[file]
        del dicts[file]
    return dicts
# Drops configured fields and renames each table key to 'report_<name>'.
dataToInserts = customize(dicts)
def inserts(dataToInserts):
for tables in dicts:
for record in dicts[tables]:
records = {}
records[tables] = record
result = db.insert(records)
print result
# Load every customised record into the database.
inserts(dataToInserts)
| [
"ambb_bb@hotmail.com"
] | ambb_bb@hotmail.com |
890864317cbbe612cfb418fd5661d41ab2727579 | e69c5a83cfa64b74f26642d41f296852dbeab8f7 | /chapter1.py | f0908d031a0acd58bc9c33101ebaa142d08a2dd4 | [] | no_license | jithijose/computer-vision | 6d325593189d49a123c1b3a0dc2f67f356b6b350 | 58095845b0d5d9a24fe56b316d7c9968c728637e | refs/heads/master | 2022-11-19T05:54:21.748555 | 2020-07-18T22:13:12 | 2020-07-18T22:13:12 | 280,744,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | #####################################################################
# READ IMAGE VIDEO AND WEBCAM #
#####################################################################
# import cv2 package(cv2 - computer vision)
from cv2 import cv2
# Read an image file and show
img = cv2.imread('Resources/lena.png')
cv2.imshow('Output Image', img)
cv2.waitKey(0)
# Read a video file and play
cap = cv2.VideoCapture('Resources/test_video.mp4')
while True:
success, img = cap.read()
cv2.imshow('Video', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Read Web Camera
cap = cv2.VideoCapture(0)
cap.set(3, 640) # set width of image - property value:3
cap.set(4, 480) # set height of image - property value:4
cap.set(10, 200) # set brightness of image - property value:10
while True:
success, img = cap.read()
cv2.imshow('Video', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break | [
"noreply@github.com"
] | noreply@github.com |
127bded960ca79191ac9a8013349b33e8930d4d3 | e609e79b5eb6e28dbca5553cad4811c1bfa3c007 | /build/dynamixel_motor/dynamixel_tutorials/catkin_generated/pkg.installspace.context.pc.py | fb33073e73645872a9aed2e6947810b00f152f7c | [] | no_license | ORaZn/lab3 | 62cf515a09c2d0fcbe4fd7d6d7b21d8ccd0c71b9 | 6075faef99ce5e21f0b5f27e1492820cd848bb4f | refs/heads/master | 2022-12-26T20:13:45.595041 | 2020-10-06T17:54:17 | 2020-10-06T17:54:17 | 295,806,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "dynamixel_controllers".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "dynamixel_tutorials"
PROJECT_SPACE_DIR = "/home/oraz/catkin_ws/install"
PROJECT_VERSION = "0.4.1"
| [
"oraz.ospanov@nu.edu.kz"
] | oraz.ospanov@nu.edu.kz |
1ecdbd24a54e0f36e82c577573e83e1a23fabb46 | cbed2935158b9d2392fd65a4416f2a8a45d79284 | /venv/Scripts/pip3-script.py | 46d74ef4e18faf41854fb0a03a1d96f9c5f93d27 | [] | no_license | robeekhyra/parking-detector-and-analyzer | 87a93140e30420c4bd069f9e852946e5620dac89 | 43aa5d08693c6f0170b1894ed4e06929e4a8b145 | refs/heads/master | 2020-03-10T00:46:46.348190 | 2018-04-11T15:29:21 | 2018-04-11T15:29:21 | 129,090,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | #!"C:\Users\Robee Khyra Te\PycharmProjects\parkingsite\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
)
| [
"robee.khyra@gmail.com"
] | robee.khyra@gmail.com |
2fd2df39f7938b677a466fa1c2606d2409a2863a | 716c453110217e4b478823940b6ce773fe837b3b | /BlogProject/school_project/blog/urls.py | 3c46c2b23b8e8910fee69d8f1d1868537c880512 | [] | no_license | Mateaus/blog_site | e9a46ac581a65c2b563ff18090480f775a5ad9cf | 305bafbd18c2a260589da77d8e162fb36994357b | refs/heads/master | 2020-05-07T11:59:47.008936 | 2019-04-11T17:42:58 | 2019-04-11T17:42:58 | 180,485,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | from django.urls import path
from .views import (
PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView
)
from . import views #import the view.py file into here
urlpatterns = [
path('', PostListView.as_view(), name='blog-home'),
path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
path('post/new/', PostCreateView.as_view(), name='post-create'),
path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'),
path('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),
path('about/', views.about, name='blog-about'),
]
| [
"noreply@github.com"
] | noreply@github.com |
8ae1215e7351323fa30e296d34e9cf8d769a78c1 | 6c50175e82974fdb0ccabd544a40e013e6672cb0 | /LoginReg_Bootstrap/settings.py | fbcb01abea32951e3eba4b4872bb9626432a3aa4 | [] | no_license | Jallnutt1/LoginReg_Bootstrap | 9515878688ac6a16efaba18345b90b389a6c6213 | 60532872f1e04a5809f65745665e2f16df0a913e | refs/heads/main | 2023-05-26T05:02:59.124607 | 2021-06-04T01:50:46 | 2021-06-04T01:50:46 | 373,688,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,151 | py | """
Django settings for LoginReg_Bootstrap project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!lgvb3n$coe(bg@64j#p)&r^u6+o&y!vjmh=1c&iph=j%%&ylu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'LoginReg',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'LoginReg_Bootstrap.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'LoginReg_Bootstrap.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"{ID}+{username}@users.noreply.github.com"
] | {ID}+{username}@users.noreply.github.com |
6a297e28513d06f4b765fef7de3e980f1fde169e | ef86558df143d137f6fc2e438da1141a965a4662 | /ex14.py | 54f97cd42ed85ca6077556c7ac45f9cc4f0222f5 | [] | no_license | zimrrigudino/learnpythonthehardway | 2b516b41b9ac987a8838ab88e507295747eba435 | 5dcbf049ce2cb54d7fb40d1868888545fdbd5de3 | refs/heads/master | 2021-09-06T13:15:46.418061 | 2018-02-07T00:09:46 | 2018-02-07T00:09:46 | 105,075,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | #Zimrri Gudino
#Prompting and passing
from sys import argv
script, user_name = argv
prompt = "> "
print "Hi %s, I'm the %s script." % (user_name, script)
print "I'd like to ask you a few questions."
print "Do you like me %s?" % user_name
likes = raw_input(prompt)
print "Where do you live %s?" % user_name
lives = raw_input(prompt)
print "What kind of computer do you have?"
computer = raw_input(prompt)
print "How old are you?"
age = raw_input(prompt)
print """
Alright, so you said %r about liking me.
You live in %r. Not sure where that is.
You have a %r computer. Nice.
And you are %r this old. You are old!
""" % (likes, lives, computer, age)
| [
"zgudino@mail.csuchico.edu"
] | zgudino@mail.csuchico.edu |
c9903269bc89b02fe42b2a38a1a00e60006fd0f3 | aa6e1dd07a71a73bc08574b76f9e57a3ce8c8286 | /077.Test_BeeWare_windows/beeware-tutorial/beeware-venv/Lib/site-packages/pip/_internal/wheel_builder.py | fa08016bdfb189f76aa4603f52684ae94fb20a25 | [
"MIT"
] | permissive | IvanaXu/PyTools | 0aff5982f50bb300bfa950405192c78473b69537 | 358ae06eef418fde35f424909d4f13049ca9ec7b | refs/heads/master | 2023-06-07T21:45:44.242363 | 2023-06-06T16:00:25 | 2023-06-06T16:00:25 | 163,940,845 | 60 | 8 | MIT | 2022-12-23T02:49:05 | 2019-01-03T07:54:16 | Python | UTF-8 | Python | false | false | 9,522 | py | """Orchestrator for building wheels from InstallRequirements.
"""
import logging
import os.path
import re
import shutil
from pip._internal.models.link import Link
from pip._internal.operations.build.wheel import build_wheel_pep517
from pip._internal.operations.build.wheel_legacy import build_wheel_legacy
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import ensure_dir, hash_file, is_wheel_installed
from pip._internal.utils.setuptools_build import make_setuptools_clean_args
from pip._internal.utils.subprocess import call_subprocess
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url
from pip._internal.vcs import vcs
if MYPY_CHECK_RUNNING:
from typing import (
Any, Callable, Iterable, List, Optional, Tuple,
)
from pip._internal.cache import WheelCache
from pip._internal.req.req_install import InstallRequirement
BinaryAllowedPredicate = Callable[[InstallRequirement], bool]
BuildResult = Tuple[List[InstallRequirement], List[InstallRequirement]]
logger = logging.getLogger(__name__)
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.IGNORECASE)
def _contains_egg_info(s):
# type: (str) -> bool
"""Determine whether the string looks like an egg_info.
:param s: The string to parse. E.g. foo-2.1
"""
return bool(_egg_info_re.search(s))
def _should_build(
req, # type: InstallRequirement
need_wheel, # type: bool
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> bool
"""Return whether an InstallRequirement should be built into a wheel."""
if req.constraint:
# never build requirements that are merely constraints
return False
if req.is_wheel:
if need_wheel:
logger.info(
'Skipping %s, due to already being wheel.', req.name,
)
return False
if need_wheel:
# i.e. pip wheel, not pip install
return True
# From this point, this concerns the pip install command only
# (need_wheel=False).
if req.editable or not req.source_dir:
return False
if not check_binary_allowed(req):
logger.info(
"Skipping wheel build for %s, due to binaries "
"being disabled for it.", req.name,
)
return False
if not req.use_pep517 and not is_wheel_installed():
# we don't build legacy requirements if wheel is not installed
logger.info(
"Using legacy 'setup.py install' for %s, "
"since package 'wheel' is not installed.", req.name,
)
return False
return True
def should_build_for_wheel_command(
req, # type: InstallRequirement
):
# type: (...) -> bool
return _should_build(
req, need_wheel=True, check_binary_allowed=_always_true
)
def should_build_for_install_command(
req, # type: InstallRequirement
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> bool
return _should_build(
req, need_wheel=False, check_binary_allowed=check_binary_allowed
)
def _should_cache(
req, # type: InstallRequirement
):
# type: (...) -> Optional[bool]
"""
Return whether a built InstallRequirement can be stored in the persistent
wheel cache, assuming the wheel cache is available, and _should_build()
has determined a wheel needs to be built.
"""
if req.editable or not req.source_dir:
# never cache editable requirements
return False
if req.link and req.link.is_vcs:
# VCS checkout. Do not cache
# unless it points to an immutable commit hash.
assert not req.editable
assert req.source_dir
vcs_backend = vcs.get_backend_for_scheme(req.link.scheme)
assert vcs_backend
if vcs_backend.is_immutable_rev_checkout(req.link.url, req.source_dir):
return True
return False
assert req.link
base, ext = req.link.splitext()
if _contains_egg_info(base):
return True
# Otherwise, do not cache.
return False
def _get_cache_dir(
req, # type: InstallRequirement
wheel_cache, # type: WheelCache
):
# type: (...) -> str
"""Return the persistent or temporary cache directory where the built
wheel need to be stored.
"""
cache_available = bool(wheel_cache.cache_dir)
assert req.link
if cache_available and _should_cache(req):
cache_dir = wheel_cache.get_path_for_link(req.link)
else:
cache_dir = wheel_cache.get_ephem_path_for_link(req.link)
return cache_dir
def _always_true(_):
# type: (Any) -> bool
return True
def _build_one(
req, # type: InstallRequirement
output_dir, # type: str
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> Optional[str]
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
try:
ensure_dir(output_dir)
except OSError as e:
logger.warning(
"Building wheel for %s failed: %s",
req.name, e,
)
return None
# Install build deps into temporary directory (PEP 518)
with req.build_env:
return _build_one_inside_env(
req, output_dir, build_options, global_options
)
def _build_one_inside_env(
req, # type: InstallRequirement
output_dir, # type: str
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> Optional[str]
with TempDirectory(kind="wheel") as temp_dir:
assert req.name
if req.use_pep517:
assert req.metadata_directory
wheel_path = build_wheel_pep517(
name=req.name,
backend=req.pep517_backend,
metadata_directory=req.metadata_directory,
build_options=build_options,
tempd=temp_dir.path,
)
else:
wheel_path = build_wheel_legacy(
name=req.name,
setup_py_path=req.setup_py_path,
source_dir=req.unpacked_source_directory,
global_options=global_options,
build_options=build_options,
tempd=temp_dir.path,
)
if wheel_path is not None:
wheel_name = os.path.basename(wheel_path)
dest_path = os.path.join(output_dir, wheel_name)
try:
wheel_hash, length = hash_file(wheel_path)
shutil.move(wheel_path, dest_path)
logger.info('Created wheel for %s: '
'filename=%s size=%d sha256=%s',
req.name, wheel_name, length,
wheel_hash.hexdigest())
logger.info('Stored in directory: %s', output_dir)
return dest_path
except Exception as e:
logger.warning(
"Building wheel for %s failed: %s",
req.name, e,
)
# Ignore return, we can't do anything else useful.
if not req.use_pep517:
_clean_one_legacy(req, global_options)
return None
def _clean_one_legacy(req, global_options):
# type: (InstallRequirement, List[str]) -> bool
clean_args = make_setuptools_clean_args(
req.setup_py_path,
global_options=global_options,
)
logger.info('Running setup.py clean for %s', req.name)
try:
call_subprocess(clean_args, cwd=req.source_dir)
return True
except Exception:
logger.error('Failed cleaning build dir for %s', req.name)
return False
def build(
requirements, # type: Iterable[InstallRequirement]
wheel_cache, # type: WheelCache
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> BuildResult
"""Build wheels.
:return: The list of InstallRequirement that succeeded to build and
the list of InstallRequirement that failed to build.
"""
if not requirements:
return [], []
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join(req.name for req in requirements), # type: ignore
)
with indent_log():
build_successes, build_failures = [], []
for req in requirements:
cache_dir = _get_cache_dir(req, wheel_cache)
wheel_file = _build_one(
req, cache_dir, build_options, global_options
)
if wheel_file:
# Update the link for this.
req.link = Link(path_to_url(wheel_file))
req.local_file_path = req.link.file_path
assert req.link.is_wheel
build_successes.append(req)
else:
build_failures.append(req)
# notify success/failure
if build_successes:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_successes]), # type: ignore
)
if build_failures:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failures]), # type: ignore
)
# Return a list of requirements that failed to build
return build_successes, build_failures
| [
"1440420407@qq.com"
] | 1440420407@qq.com |
12aa3285772618385f60506381286555afdfeb19 | 991d7f5cd86a676d8213011fba92145dff2c4591 | /ClubMahindraDataOlympics_2019/code/ml_modules/custom_estimator.py | 2782a8af84ab5ec93d4da94dc3cb176fd7c84b81 | [] | no_license | Karrol/ml-hackathons | 37e87b100fee0a7636f394610973c9a34256a28f | 34ed93e20fcb2843740a42dbcf4ac02af73233b9 | refs/heads/master | 2022-04-01T06:52:21.262152 | 2020-01-27T08:42:06 | 2020-01-27T08:42:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,607 | py | import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
from sklearn.base import clone
from sklearn.linear_model import LogisticRegression, LinearRegression
from xgboost import XGBClassifier, XGBRegressor
from lightgbm import LGBMClassifier, LGBMRegressor
from catboost import CatBoostClassifier, CatBoostRegressor, Pool, cv
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
from custom_fold_generator import CustomFolds, FoldScheme
'''
modify the scoring_metric to use custom metric, for not using roc_auc_score
scoring metric should accept y_true and y_predicted as parameters
add a functionality to give folds as an iterable
'''
class_instance = lambda a, b: eval("{}(**{})".format(a, b if b is not None else {}))
class Estimator(object):
def __init__(self, model, n_splits=5, random_state=100, shuffle=True, validation_scheme=FoldScheme.StratifiedKFold,
cv_group_col=None, early_stopping_rounds=None, categorical_features_indices=None,
task_type = 'classification', eval_metric='auc', scoring_metric=roc_auc_score, over_sampling=False,
n_jobs=-1,
**kwargs):
try:
# build model instance from tuple/list of ModelName and params
# model should be imported before creating the instance
self.model = class_instance(model[0], model[1])
except Exception as e:
# model instance is already passed
self.model = clone(model)
self.n_splits = n_splits
self.random_state = random_state
self.seed = random_state
self.shuffle = shuffle
self.n_jobs = n_jobs
self.early_stopping_rounds = early_stopping_rounds
if isinstance(validation_scheme, str) or isinstance(validation_scheme, unicode):
self.validation_scheme = FoldScheme(validation_scheme)
else:
self.validation_scheme = validation_scheme
self.cv_group_col = cv_group_col
self.categorical_features_indices=categorical_features_indices
self.task_type = task_type
self.eval_metric = eval_metric
self.scoring_metric = scoring_metric
self.over_sampling = over_sampling
def get_params(self):
return {
'model': (self.model.__class__.__name__, self.model.get_params()),
'n_splits': self.n_splits,
'random_state': self.random_state,
'shuffle': self.shuffle,
'n_jobs': self.n_jobs,
'early_stopping_rounds': self.early_stopping_rounds,
'validation_scheme': self.validation_scheme,
'cv_group_col': self.cv_group_col,
'task_type': self.task_type,
'eval_metric': self.eval_metric,
'scoring_metric': self.scoring_metric,
}
def fit(self, x, y, use_oof=False, n_jobs=-1):
if not hasattr(self.model, 'fit') :
raise Exception ("Model/algorithm needs to implement fit()")
fitted_models = []
if use_oof:
folds = CustomFolds(num_folds=self.n_splits, random_state=self.random_state, shuffle=self.shuffle, validation_scheme=self.validation_scheme)
self.indices = folds.split(x,y,group=self.cv_group_col)
for i, (train_index, test_index) in enumerate(self.indices):
model = clone(self.model)
model.n_jobs = n_jobs
if (isinstance(model, LGBMClassifier) or isinstance(model, LGBMRegressor)) and (self.early_stopping_rounds is not None):
model.fit(X=x[train_index], y=y[train_index],
eval_set=[(x[test_index],y[test_index]),(x[train_index],y[train_index])],
verbose=100, eval_metric=self.eval_metric, early_stopping_rounds=self.early_stopping_rounds)
elif (isinstance(model, XGBClassifier) or isinstance(model, XGBRegressor)) and (self.early_stopping_rounds is not None):
model.fit(X=x[train_index], y=y[train_index],
eval_set=[(x[test_index], y[test_index])],
verbose=100, eval_metric=self.eval_metric, early_stopping_rounds=self.early_stopping_rounds)
elif (isinstance(model, CatBoostClassifier) or isinstance(model, CatBoostRegressor)) and (self.early_stopping_rounds is not None):
model.od_wait=int(self.early_stopping_rounds)
model.fit(x[train_index], y[train_index], cat_features=self.categorical_features_indices,
eval_set=(x[test_index], y[test_index]),
use_best_model=True, verbose=100)
else:
x_train, y_train = x[train_index], y[train_index]
if self.over_sampling:
print "oversampling"
x_train, y_train = SMOTE().fit_resample(x_train, y_train)
model.fit(x_train, y_train)
fitted_models.append(model)
else:
model = clone(self.model)
model.n_jobs = n_jobs
x_train, x_val, y_train, y_val = train_test_split(x, y, test_size =0.2, shuffle=True, random_state=100)
if isinstance(model, LGBMClassifier):
if self.early_stopping_rounds is not None:
model.fit(X=x_train, y=y_train, eval_set=[(x_val, y_val), (x_train, y_train)],
verbose=False, eval_metric='auc', early_stopping_rounds=self.early_stopping_rounds)
elif isinstance(model, XGBClassifier):
if self.early_stopping_rounds is not None:
model.fit(X=x_train, y=y_train, eval_set=[(x_val,y_val)],
verbose=False, eval_metric='auc', early_stopping_rounds=self.early_stopping_rounds)
model.fit(x, y)
fitted_models.append(model)
self.fitted_models = fitted_models
return self
def feature_importances(self, columns=None):
if not hasattr(self, 'fitted_models') :
raise Exception ("Model/algorithm needs to implement fit()")
if isinstance(self.model, LogisticRegression):
feature_importances = np.column_stack(m.coef_[0] for m in self.fitted_models)
elif isinstance(self.model, LinearRegression) :
feature_importances = np.column_stack(m.coef_ for m in self.fitted_models)
else:
feature_importances = np.column_stack(m.feature_importances_ for m in self.fitted_models)
importances = np.mean(1.*feature_importances/feature_importances.sum(axis=0), axis=1)
if columns is not None:
if len(columns) != len(importances):
raise ValueError("Columns length Mismatch")
df = pd.DataFrame(zip(columns, importances), columns=['column', 'feature_importance'])
else:
df = pd.DataFrame(zip(range(len(importances)), importances), columns=['column_index', 'feature_importance'])
df.sort_values(by='feature_importance', ascending=False, inplace=True)
df['rank'] = np.arange(len(importances)) + 1
return df
def transform(self, x):
if not hasattr(self, 'fitted_models') :
raise Exception ("Model/algorithm needs to implement fit()")
if self.task_type == 'classification':
return np.mean(np.column_stack((est.predict_proba(x)[:,1] for est in self.fitted_models)), axis=1)
else:
return np.mean(np.column_stack((est.predict(x) for est in self.fitted_models)), axis=1)
def fit_transform(self, x, y):
self.fit(x, y, use_oof=True)
predictions = np.zeros((x.shape[0],))
for i, (train_index, test_index) in enumerate(self.indices):
if self.task_type == 'classification':
predictions[test_index] = self.fitted_models[i].predict_proba(x[test_index])[:,1]
else:
predictions[test_index] = self.fitted_models[i].predict(x[test_index])
self.cv_scores = [
self.scoring_metric(y[test_index], predictions[test_index])
for i, (train_index, test_index) in enumerate(self.indices)
]
self.avg_cv_score = np.mean(self.cv_scores)
self.overall_cv_score = self.scoring_metric(y, predictions)
return predictions
def save_model(self):
pass
def load_model(self):
pass
def predict(self, x):
return self.transform(x)
def predict_proba(self, x):
return self.transform(x)
def get_repeated_out_of_folds(self, x, y, num_repeats=1):
cv_scores = []
fitted_models = []
for iteration in range(num_repeats):
self.random_state = self.seed*(iteration+1)
predictions = self.fit_transform(x, y)
cv_scores.extend(self.cv_scores)
fitted_models.extend(self.fitted_models)
self.random_state = self.seed
self.fitted_models = fitted_models
return {
'cv_scores': cv_scores,
'avg_cv_score': np.mean(cv_scores),
'var_scores': np.std(cv_scores),
'overall_cv_score': self.overall_cv_score,
}
def get_nested_scores(self, x, y):
pass | [
"pradeeppathak9@gmail.com"
] | pradeeppathak9@gmail.com |
5d2c18d1ea37c56236232061cf2a19e8e6d11fac | ca75f7099b93d8083d5b2e9c6db2e8821e63f83b | /z2/part2/interactive/jm/random_normal_1/561870518.py | e07b9173fe09044ec19d2a4fbff66e5550b7c929 | [
"MIT"
] | permissive | kozakusek/ipp-2020-testy | 210ed201eaea3c86933266bd57ee284c9fbc1b96 | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | refs/heads/master | 2022-10-04T18:55:37.875713 | 2020-06-09T21:15:37 | 2020-06-09T21:15:37 | 262,290,632 | 0 | 0 | MIT | 2020-06-09T21:15:38 | 2020-05-08T10:10:47 | C | UTF-8 | Python | false | false | 7,951 | py | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 561870518
"""
"""
random actions, total chaos
"""
board = gamma_new(6, 8, 4, 15)
assert board is not None
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 1, 4, 2) == 1
assert gamma_move(board, 2, 3, 7) == 1
assert gamma_move(board, 2, 2, 7) == 1
assert gamma_busy_fields(board, 2) == 2
assert gamma_move(board, 3, 3, 4) == 1
assert gamma_free_fields(board, 3) == 43
board700952916 = gamma_board(board)
assert board700952916 is not None
assert board700952916 == ("..22..\n"
"......\n"
"......\n"
"...3..\n"
"......\n"
"1...1.\n"
"......\n"
"......\n")
del board700952916
board700952916 = None
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 2, 2, 4) == 1
assert gamma_free_fields(board, 2) == 41
assert gamma_move(board, 3, 1, 7) == 1
assert gamma_move(board, 3, 0, 5) == 1
assert gamma_move(board, 4, 3, 3) == 1
assert gamma_move(board, 4, 0, 5) == 0
assert gamma_move(board, 1, 5, 5) == 1
assert gamma_move(board, 1, 4, 3) == 1
assert gamma_move(board, 2, 4, 7) == 1
assert gamma_move(board, 3, 3, 5) == 1
assert gamma_move(board, 3, 2, 0) == 1
assert gamma_move(board, 4, 4, 4) == 1
assert gamma_move(board, 4, 1, 1) == 1
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_free_fields(board, 1) == 31
assert gamma_move(board, 2, 5, 1) == 1
assert gamma_busy_fields(board, 3) == 5
assert gamma_move(board, 4, 4, 1) == 1
assert gamma_move(board, 1, 6, 2) == 0
assert gamma_busy_fields(board, 1) == 5
assert gamma_move(board, 2, 1, 3) == 1
assert gamma_move(board, 3, 3, 2) == 1
assert gamma_move(board, 3, 1, 5) == 1
assert gamma_move(board, 4, 3, 0) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 2, 1, 7) == 0
assert gamma_free_fields(board, 2) == 25
assert gamma_move(board, 3, 2, 2) == 1
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_free_fields(board, 3) == 24
assert gamma_move(board, 4, 6, 1) == 0
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 1, 7, 0) == 0
assert gamma_move(board, 2, 6, 3) == 0
assert gamma_move(board, 3, 1, 0) == 1
assert gamma_move(board, 4, 6, 5) == 0
board137299271 = gamma_board(board)
assert board137299271 is not None
assert board137299271 == (".3222.\n"
"......\n"
"33.3.1\n"
"..234.\n"
".2.41.\n"
"1.331.\n"
".4..42\n"
"1334..\n")
del board137299271
board137299271 = None
assert gamma_move(board, 1, 2, 1) == 1
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_move(board, 2, 0, 4) == 1
assert gamma_move(board, 2, 5, 4) == 1
assert gamma_busy_fields(board, 2) == 8
assert gamma_move(board, 3, 2, 5) == 1
assert gamma_move(board, 4, 0, 5) == 0
assert gamma_move(board, 4, 1, 2) == 1
assert gamma_move(board, 1, 5, 4) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 6, 5) == 0
assert gamma_move(board, 2, 5, 6) == 1
assert gamma_move(board, 3, 1, 4) == 1
assert gamma_move(board, 3, 5, 7) == 1
assert gamma_move(board, 4, 4, 6) == 1
assert gamma_move(board, 1, 6, 3) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 2, 2, 5) == 0
assert gamma_move(board, 2, 4, 4) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_golden_move(board, 2, 1, 1) == 1
assert gamma_move(board, 3, 7, 0) == 0
assert gamma_move(board, 3, 0, 7) == 1
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 1, 5, 5) == 0
assert gamma_move(board, 2, 6, 0) == 0
assert gamma_move(board, 4, 1, 3) == 0
assert gamma_move(board, 4, 2, 3) == 1
assert gamma_move(board, 1, 1, 7) == 0
assert gamma_move(board, 1, 1, 5) == 0
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_free_fields(board, 2) == 12
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 4, 1, 3) == 0
assert gamma_move(board, 4, 0, 6) == 1
assert gamma_move(board, 1, 6, 1) == 0
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 2, 6, 1) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 3, 6, 1) == 0
assert gamma_move(board, 3, 2, 4) == 0
assert gamma_move(board, 4, 2, 5) == 0
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_move(board, 1, 4, 5) == 1
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_busy_fields(board, 2) == 10
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_busy_fields(board, 3) == 13
assert gamma_move(board, 4, 0, 4) == 0
assert gamma_busy_fields(board, 1) == 7
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_move(board, 3, 0, 6) == 0
assert gamma_busy_fields(board, 3) == 13
assert gamma_move(board, 4, 1, 3) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 1, 5, 4) == 0
assert gamma_move(board, 2, 2, 7) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 4, 7) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 4, 3, 5) == 0
assert gamma_move(board, 4, 1, 5) == 0
assert gamma_busy_fields(board, 4) == 8
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_move(board, 2, 2, 7) == 0
assert gamma_busy_fields(board, 2) == 10
assert gamma_move(board, 3, 3, 5) == 0
assert gamma_busy_fields(board, 3) == 13
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 4, 0, 3) == 1
assert gamma_golden_move(board, 4, 6, 5) == 0
assert gamma_move(board, 1, 1, 0) == 0
board301797101 = gamma_board(board)
assert board301797101 is not None
assert board301797101 == ("332223\n"
"4...42\n"
"333311\n"
"232342\n"
"42441.\n"
"14331.\n"
".21.42\n"
"1334..\n")
del board301797101
board301797101 = None
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 3, 5) == 0
assert gamma_move(board, 3, 0, 0) == 0
assert gamma_free_fields(board, 3) == 9
assert gamma_move(board, 1, 4, 7) == 0
assert gamma_move(board, 2, 6, 1) == 0
assert gamma_free_fields(board, 2) == 9
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_move(board, 4, 0, 5) == 0
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 3, 2) == 0
assert gamma_move(board, 3, 1, 7) == 0
assert gamma_move(board, 4, 3, 5) == 0
assert gamma_move(board, 4, 2, 7) == 0
assert gamma_move(board, 1, 0, 0) == 0
assert gamma_move(board, 2, 4, 6) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 6, 2) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 1, 3) == 0
assert gamma_move(board, 4, 4, 5) == 0
assert gamma_move(board, 1, 5, 0) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 6, 1) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 3, 6, 1) == 0
assert gamma_busy_fields(board, 3) == 14
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 6, 3) == 0
assert gamma_busy_fields(board, 4) == 9
assert gamma_move(board, 1, 6, 1) == 0
assert gamma_move(board, 1, 1, 4) == 0
board493510436 = gamma_board(board)
assert board493510436 is not None
assert board493510436 == ("332223\n"
"4...42\n"
"333311\n"
"232342\n"
"42441.\n"
"14331.\n"
"321.42\n"
"1334.1\n")
del board493510436
board493510436 = None
assert gamma_move(board, 2, 6, 3) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 4, 2, 5) == 0
gamma_delete(board)
| [
"jakub@molinski.dev"
] | jakub@molinski.dev |
67db78641ba82d306cfac8cb698fca5b8a7fbc9d | 183bffb8eb17f17b99d72386d13959ca31e99728 | /C2.py | f988b884b6f713fef98b68a62162cecc61f6926a | [] | no_license | hightemp/appICMPTunnel | 53c54e998f949d3aa65630d7480fda86e57501d9 | fb6490493f750bd85445a4863bc53c70a858c30d | refs/heads/master | 2020-05-20T13:07:50.919177 | 2019-05-08T13:56:35 | 2019-05-08T13:56:35 | 185,590,268 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | #!/usr/bin/env python3
#encoding: utf8
from scapy.all import *
def main():
while True:
command = input('# Enter command: ')
# создаём ICMP-пакет с командой в качестве полезной нагрузки
pinger = IP(dst="localhost")/ICMP(id=0x0001, seq=0x1)/command
send(pinger)
# ждём ICMP-сообщение с ответом от агента
rx = sniff(count=1, timeout=2)
# если агент не на локальной машине, используйте это: rx = sniff(filter="icmp", count=1)
print(rx[0][Raw].load.decode('utf-8'))
if __name__ == "__main__":
main() | [
"hightemp.unknown@gmail.com"
] | hightemp.unknown@gmail.com |
a055c002c46e0b1d76c423f31670d4f0cf440e70 | 1e93a542be9f74cfde7460dde6b4512a9b037be1 | /ec2.py | 41ab56190fe6d9b099e71643a0ea3863b2cd438b | [
"MIT"
] | permissive | achiku/ansible-playbook-lnd | 84f23946ac34e04c2a877fe9f37045eb44636432 | 0e76111e58eaf8e11bd639719329c45cddfb5579 | refs/heads/master | 2021-08-30T00:37:56.406900 | 2017-12-15T11:41:21 | 2017-12-15T11:41:21 | 105,519,863 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 70,539 | py | #!/usr/bin/env python
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
optional region environment variable if region is 'auto'
This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
the AWS_PROFILE variable:
AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_block_devices
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import elasticache
from boto import route53
from boto import sts
import six
from ansible.module_utils import ec2 as ec2_utils
HAS_BOTO3 = False
try:
import boto3 # noqa
HAS_BOTO3 = True
except ImportError:
pass
from six.moves import configparser
from collections import defaultdict
try:
import json
except ImportError:
import simplejson as json
# Fallback values for every ec2.ini option; this mapping is handed to
# ConfigParser as its defaults, so an option missing from the ini file
# resolves to the value listed here.
DEFAULTS = {
    'all_elasticache_clusters': 'False',
    'all_elasticache_nodes': 'False',
    'all_elasticache_replication_groups': 'False',
    'all_instances': 'False',
    'all_rds_instances': 'False',
    'aws_access_key_id': None,
    'aws_secret_access_key': None,
    'aws_security_token': None,
    'boto_profile': None,
    'cache_max_age': '300',
    'cache_path': '~/.ansible/tmp',
    'destination_variable': 'public_dns_name',
    'elasticache': 'True',
    'eucalyptus': 'False',
    'eucalyptus_host': None,
    'expand_csv_tags': 'False',
    'group_by_ami_id': 'True',
    'group_by_availability_zone': 'True',
    'group_by_aws_account': 'False',
    'group_by_elasticache_cluster': 'True',
    'group_by_elasticache_engine': 'True',
    'group_by_elasticache_parameter_group': 'True',
    'group_by_elasticache_replication_group': 'True',
    'group_by_instance_id': 'True',
    'group_by_instance_state': 'False',
    'group_by_instance_type': 'True',
    'group_by_key_pair': 'True',
    'group_by_platform': 'True',
    'group_by_rds_engine': 'True',
    'group_by_rds_parameter_group': 'True',
    'group_by_region': 'True',
    'group_by_route53_names': 'True',
    'group_by_security_group': 'True',
    'group_by_tag_keys': 'True',
    'group_by_tag_none': 'True',
    'group_by_vpc_id': 'True',
    'hostname_variable': None,
    'iam_role': None,
    'include_rds_clusters': 'False',
    'nested_groups': 'False',
    'pattern_exclude': None,
    'pattern_include': None,
    'rds': 'False',
    'regions': 'all',
    'regions_exclude': 'us-gov-west-1, cn-north-1',
    'replace_dash_in_groups': 'True',
    'route53': 'False',
    'route53_excluded_zones': '',
    'route53_hostnames': None,
    'stack_filters': 'False',
    'vpc_destination_variable': 'ip_address'
}
class Ec2Inventory(object):
def _empty_inventory(self):
return {"_meta": {"hostvars": {}}}
    def __init__(self):
        ''' Main execution path: build the inventory (from cache or live API
        calls) and print the JSON requested by the --list/--host CLI flags. '''
        # Inventory grouped by instance IDs, tags, security groups, regions,
        # and availability zones
        self.inventory = self._empty_inventory()
        self.aws_account_id = None
        # Index of hostname (address) to instance ID
        self.index = {}
        # Boto profile to use (if any)
        self.boto_profile = None
        # AWS credentials.
        self.credentials = {}
        # Read settings and parse CLI arguments
        self.parse_cli_args()
        self.read_settings()
        # Make sure that profile_name is not passed at all if not set
        # as pre 2.24 boto will fall over otherwise
        if self.boto_profile:
            if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
                self.fail_with_error("boto version must be >= 2.24 to use profile")
        # Cache: refresh on demand (--refresh-cache) or when the files are stale.
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()
        # Data to print
        if self.args.host:
            data_to_print = self.get_host_info()
        elif self.args.list:
            # Display list of instances for inventory
            if self.inventory == self._empty_inventory():
                # No API calls were made above, so serve the cached copy.
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)
        print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
scriptbasename = __file__
scriptbasename = os.path.basename(scriptbasename)
scriptbasename = scriptbasename.replace('.py', '')
defaults = {
'ec2': {
'ini_fallback': os.path.join(os.path.dirname(__file__), 'ec2.ini'),
'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename)
}
}
if six.PY3:
config = configparser.ConfigParser(DEFAULTS)
else:
config = configparser.SafeConfigParser(DEFAULTS)
ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path'])
ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path))
if not os.path.isfile(ec2_ini_path):
ec2_ini_path = os.path.expanduser(defaults['ec2']['ini_fallback'])
if os.path.isfile(ec2_ini_path):
config.read(ec2_ini_path)
# Add empty sections if they don't exist
try:
config.add_section('ec2')
except configparser.DuplicateSectionError:
pass
try:
config.add_section('credentials')
except configparser.DuplicateSectionError:
pass
# is eucalyptus?
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
# Regions
self.regions = []
configRegions = config.get('ec2', 'regions')
if (configRegions == 'all'):
if self.eucalyptus_host:
self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name, **self.credentials)
else:
configRegions_exclude = config.get('ec2', 'regions_exclude')
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
self.regions.append(regionInfo.name)
else:
self.regions = configRegions.split(",")
if 'auto' in self.regions:
env_region = os.environ.get('AWS_REGION')
if env_region is None:
env_region = os.environ.get('AWS_DEFAULT_REGION')
self.regions = [env_region]
# Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
self.hostname_variable = config.get('ec2', 'hostname_variable')
if config.has_option('ec2', 'destination_format') and \
config.has_option('ec2', 'destination_format_tags'):
self.destination_format = config.get('ec2', 'destination_format')
self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
else:
self.destination_format = None
self.destination_format_tags = None
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
self.route53_hostnames = config.get('ec2', 'route53_hostnames')
self.route53_excluded_zones = []
self.route53_excluded_zones = [a for a in config.get('ec2', 'route53_excluded_zones').split(',') if a]
# Include RDS instances?
self.rds_enabled = config.get('ec2', 'rds')
# Include RDS cluster instances?
self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
# Include ElastiCache instances?
self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
# Return all EC2 instances?
self.all_instances = config.getboolean('ec2', 'all_instances')
# Instance states to be gathered in inventory. Default is 'running'.
# Setting 'all_instances' to 'yes' overrides this option.
ec2_valid_instance_states = [
'pending',
'running',
'shutting-down',
'terminated',
'stopping',
'stopped'
]
self.ec2_instance_states = []
if self.all_instances:
self.ec2_instance_states = ec2_valid_instance_states
elif config.has_option('ec2', 'instance_states'):
for instance_state in config.get('ec2', 'instance_states').split(','):
instance_state = instance_state.strip()
if instance_state not in ec2_valid_instance_states:
continue
self.ec2_instance_states.append(instance_state)
else:
self.ec2_instance_states = ['running']
# Return all RDS instances? (if RDS is enabled)
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
# Return all ElastiCache replication groups? (if ElastiCache is enabled)
self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
# Return all ElastiCache clusters? (if ElastiCache is enabled)
self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
# Return all ElastiCache nodes? (if ElastiCache is enabled)
self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
# boto configuration profile (prefer CLI argument then environment variables then config file)
self.boto_profile = self.args.boto_profile or \
os.environ.get('AWS_PROFILE') or \
config.get('ec2', 'boto_profile')
# AWS credentials (prefer environment variables)
if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
os.environ.get('AWS_PROFILE')):
aws_access_key_id = config.get('credentials', 'aws_access_key_id')
aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
aws_security_token = config.get('credentials', 'aws_security_token')
if aws_access_key_id:
self.credentials = {
'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key
}
if aws_security_token:
self.credentials['security_token'] = aws_security_token
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if self.boto_profile:
cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_name = 'ansible-ec2'
cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id'))
if cache_id:
cache_name = '%s-%s' % (cache_name, cache_id)
self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name)
self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name)
self.cache_max_age = config.getint('ec2', 'cache_max_age')
self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
# Configure nested groups instead of flat namespace.
self.nested_groups = config.getboolean('ec2', 'nested_groups')
# Replace dash or not in group names
self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
# IAM role to assume for connection
self.iam_role = config.get('ec2', 'iam_role')
# Configure which groups should be created.
group_by_options = [a for a in DEFAULTS if a.startswith('group_by')]
for option in group_by_options:
setattr(self, option, config.getboolean('ec2', option))
# Do we need to just include hosts that match a pattern?
self.pattern_include = config.get('ec2', 'pattern_include')
if self.pattern_include:
self.pattern_include = re.compile(self.pattern_include)
# Do we need to exclude hosts that match a pattern?
self.pattern_exclude = config.get('ec2', 'pattern_exclude')
if self.pattern_exclude:
self.pattern_exclude = re.compile(self.pattern_exclude)
# Do we want to stack multiple filters?
self.stack_filters = config.getboolean('ec2', 'stack_filters')
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
self.ec2_instance_filters = []
if config.has_option('ec2', 'instance_filters'):
filters = config.get('ec2', 'instance_filters')
if self.stack_filters and '&' in filters:
self.fail_with_error("AND filters along with stack_filter enabled is not supported.\n")
filter_sets = [f for f in filters.split(',') if f]
for filter_set in filter_sets:
filters = {}
filter_set = filter_set.strip()
for instance_filter in filter_set.split("&"):
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter:
continue
filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
if not filter_key:
continue
filters[filter_key] = filter_value
self.ec2_instance_filters.append(filters.copy())
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
    def do_api_calls_update_cache(self):
        ''' Do API calls to each region, and save data in cache files '''
        # Route53 records are global (not per-region), so fetch them once
        # before the per-region loop.
        if self.route53_enabled:
            self.get_route53_records()
        for region in self.regions:
            self.get_instances_by_region(region)
            if self.rds_enabled:
                self.get_rds_instances_by_region(region)
            if self.elasticache_enabled:
                self.get_elasticache_clusters_by_region(region)
                self.get_elasticache_replication_groups_by_region(region)
            if self.include_rds_clusters:
                self.include_rds_clusters_by_region(region)
        # Persist both the inventory and the hostname -> (region, id) index.
        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)
def connect(self, region):
''' create connection to api server'''
if self.eucalyptus:
conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials)
conn.APIVersion = '2010-08-31'
else:
conn = self.connect_to_aws(ec2, region)
return conn
def boto_fix_security_token_in_profile(self, connect_args):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + self.boto_profile
if boto.config.has_option(profile, 'aws_security_token'):
connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
return connect_args
    def connect_to_aws(self, module, region):
        ''' Connect the given boto service module (ec2/rds/elasticache/sts)
        to a region, honoring the configured boto profile and IAM role.
        Exits via fail_with_error when the region is unknown. '''
        # NOTE(review): connect_args aliases self.credentials, so any keys
        # added below (profile_name, assumed-role credentials) persist on
        # self.credentials for later calls -- confirm this sharing is intended.
        connect_args = self.credentials
        # only pass the profile name if it's set (as it is not supported by older boto versions)
        if self.boto_profile:
            connect_args['profile_name'] = self.boto_profile
            self.boto_fix_security_token_in_profile(connect_args)
        if self.iam_role:
            # Assume the configured IAM role via STS and connect with the
            # temporary credentials it hands back.
            sts_conn = sts.connect_to_region(region, **connect_args)
            role = sts_conn.assume_role(self.iam_role, 'ansible_dynamic_inventory')
            connect_args['aws_access_key_id'] = role.credentials.access_key
            connect_args['aws_secret_access_key'] = role.credentials.secret_key
            connect_args['security_token'] = role.credentials.session_token
        conn = module.connect_to_region(region, **connect_args)
        # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
        if conn is None:
            self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
        return conn
    def get_instances_by_region(self, region):
        ''' Makes an AWS EC2 API call to the list of instances in a particular
        region, re-fetches their tags, and feeds each instance to
        add_instance(). Exits via fail_with_error on API errors. '''
        try:
            conn = self.connect(region)
            reservations = []
            if self.ec2_instance_filters:
                if self.stack_filters:
                    # AND semantics: merge all filter dicts into one request.
                    filters_dict = {}
                    for filters in self.ec2_instance_filters:
                        filters_dict.update(filters)
                    reservations.extend(conn.get_all_instances(filters=filters_dict))
                else:
                    # OR semantics: one request per filter set.
                    for filters in self.ec2_instance_filters:
                        reservations.extend(conn.get_all_instances(filters=filters))
            else:
                reservations = conn.get_all_instances()
            # Pull the tags back in a second step
            # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not
            # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags`
            instance_ids = []
            for reservation in reservations:
                instance_ids.extend([instance.id for instance in reservation.instances])
            # The filter API caps values per request, so fetch tags in
            # chunks of at most 199 instance IDs.
            max_filter_value = 199
            tags = []
            for i in range(0, len(instance_ids), max_filter_value):
                tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i + max_filter_value]}))
            tags_by_instance_id = defaultdict(dict)
            for tag in tags:
                tags_by_instance_id[tag.res_id][tag.name] = tag.value
            # Remember the owning AWS account ID (used for account grouping);
            # taken from the first reservation seen.
            if (not self.aws_account_id) and reservations:
                self.aws_account_id = reservations[0].owner_id
            for reservation in reservations:
                for instance in reservation.instances:
                    instance.tags = tags_by_instance_id[instance.id]
                    self.add_instance(instance, region)
        except boto.exception.BotoServerError as e:
            if e.error_code == 'AuthFailure':
                error = self.get_auth_error_message()
            else:
                backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
                error = "Error connecting to %s backend.\n%s" % (backend, e.message)
            self.fail_with_error(error, 'getting EC2 instances')
def tags_match_filters(self, tags):
''' return True if given tags match configured filters '''
if not self.ec2_instance_filters:
return True
for filters in self.ec2_instance_filters:
for filter_name, filter_value in filters.items():
if filter_name[:4] != 'tag:':
continue
filter_name = filter_name[4:]
if filter_name not in tags:
if self.stack_filters:
return False
continue
if isinstance(filter_value, list):
if self.stack_filters and tags[filter_name] not in filter_value:
return False
if not self.stack_filters and tags[filter_name] in filter_value:
return True
if isinstance(filter_value, six.string_types):
if self.stack_filters and tags[filter_name] != filter_value:
return False
if not self.stack_filters and tags[filter_name] == filter_value:
return True
return self.stack_filters
    def get_rds_instances_by_region(self, region):
        ''' Makes an AWS API call to the list of RDS instances in a particular
        region, attaches their tags (fetched via boto3), and feeds matching
        instances to add_rds_instance(). '''
        if not HAS_BOTO3:
            self.fail_with_error("Working with RDS instances requires boto3 - please install boto3 and try again",
                                 "getting RDS instances")
        # boto3 client used for DescribeDBInstances (to get ARNs) and tags;
        # the boto2 connection below does the paginated instance listing.
        client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
        db_instances = client.describe_db_instances()
        try:
            conn = self.connect_to_aws(rds, region)
            if conn:
                marker = None
                while True:
                    instances = conn.get_all_dbinstances(marker=marker)
                    marker = instances.marker
                    # NOTE(review): 'index' restarts at 0 for every marker
                    # page while db_instances holds only the first
                    # describe_db_instances page -- confirm the ARN pairing
                    # stays correct past the first page of results.
                    for index, instance in enumerate(instances):
                        # Add tags to instances.
                        instance.arn = db_instances['DBInstances'][index]['DBInstanceArn']
                        tags = client.list_tags_for_resource(ResourceName=instance.arn)['TagList']
                        instance.tags = {}
                        for tag in tags:
                            instance.tags[tag['Key']] = tag['Value']
                        if self.tags_match_filters(instance.tags):
                            self.add_rds_instance(instance, region)
                    if not marker:
                        break
        except boto.exception.BotoServerError as e:
            error = e.reason
            if e.error_code == 'AuthFailure':
                error = self.get_auth_error_message()
            elif e.error_code == "OptInRequired":
                error = "RDS hasn't been enabled for this account yet. " \
                    "You must either log in to the RDS service through the AWS console to enable it, " \
                    "or set 'rds = False' in ec2.ini"
            elif not e.reason == "Forbidden":
                # NOTE(review): BotoServerError.message is a Python-2-era
                # attribute -- verify it still resolves on the Python 3 path.
                error = "Looks like AWS RDS is down:\n%s" % e.message
            self.fail_with_error(error, 'getting RDS instances')
    def include_rds_clusters_by_region(self, region):
        ''' Fetch RDS clusters for a region via boto3, tag-filter them, and
        store the surviving clusters under inventory['db_clusters'] keyed by
        DBClusterIdentifier. '''
        if not HAS_BOTO3:
            self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
                                 "getting RDS clusters")
        client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
        # Walk every DescribeDBClusters page.
        marker, clusters = '', []
        while marker is not None:
            resp = client.describe_db_clusters(Marker=marker)
            clusters.extend(resp["DBClusters"])
            marker = resp.get('Marker', None)
        # Account ID is needed to build each cluster's ARN for tag lookup.
        account_id = boto.connect_iam().get_user().arn.split(':')[4]
        c_dict = {}
        for c in clusters:
            # remove these datetime objects as there is no serialisation to json
            # currently in place and we don't need the data yet
            if 'EarliestRestorableTime' in c:
                del c['EarliestRestorableTime']
            if 'LatestRestorableTime' in c:
                del c['LatestRestorableTime']
            if not self.ec2_instance_filters:
                matches_filter = True
            else:
                matches_filter = False
            try:
                # arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
                tags = client.list_tags_for_resource(
                    ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
                c['Tags'] = tags['TagList']
                if self.ec2_instance_filters:
                    for filters in self.ec2_instance_filters:
                        for filter_key, filter_values in filters.items():
                            # get AWS tag key e.g. tag:env will be 'env'
                            tag_name = filter_key.split(":", 1)[1]
                            # Filter values is a list (if you put multiple values for the same tag name)
                            matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
                            if matches_filter:
                                # it matches a filter, so stop looking for further matches
                                break
                        if matches_filter:
                            break
            except Exception as e:
                # NOTE(review): e.message does not exist on Python 3
                # exceptions -- confirm this handler behaves as intended there.
                if e.message.find('DBInstanceNotFound') >= 0:
                    # AWS RDS bug (2016-01-06) means deletion does not fully complete and leave an 'empty' cluster.
                    # Ignore errors when trying to find tags for these
                    pass
            # ignore empty clusters caused by AWS bug
            if len(c['DBClusterMembers']) == 0:
                continue
            elif matches_filter:
                c_dict[c['DBClusterIdentifier']] = c
        self.inventory['db_clusters'] = c_dict
    def get_elasticache_clusters_by_region(self, region):
        ''' Makes an AWS API call to the list of ElastiCache clusters (with
        nodes' info) in a particular region.'''
        # ElastiCache boto module doesn't provide a get_all_instances method,
        # that's why we need to call describe directly (it would be called by
        # the shorthand method anyway...)
        try:
            conn = self.connect_to_aws(elasticache, region)
            if conn:
                # show_cache_node_info = True
                # because we also want nodes' information
                response = conn.describe_cache_clusters(None, None, None, True)
        except boto.exception.BotoServerError as e:
            error = e.reason
            if e.error_code == 'AuthFailure':
                error = self.get_auth_error_message()
            elif e.error_code == "OptInRequired":
                error = "ElastiCache hasn't been enabled for this account yet. " \
                    "You must either log in to the ElastiCache service through the AWS console to enable it, " \
                    "or set 'elasticache = False' in ec2.ini"
            elif not e.reason == "Forbidden":
                error = "Looks like AWS ElastiCache is down:\n%s" % e.message
            self.fail_with_error(error, 'getting ElastiCache clusters')
        try:
            # Boto also doesn't provide wrapper classes to CacheClusters or
            # CacheNodes. Because of that we can't make use of the get_list
            # method in the AWSQueryConnection. Let's do the work manually
            # NOTE(review): if conn were falsy above, 'response' would be
            # unbound here and this raises NameError instead of the intended
            # error message -- confirm connect_to_aws can never return falsy.
            clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
        except KeyError as e:
            error = "ElastiCache query to AWS failed (unexpected format)."
            self.fail_with_error(error, 'getting ElastiCache clusters')
        for cluster in clusters:
            self.add_elasticache_cluster(cluster, region)
    def get_elasticache_replication_groups_by_region(self, region):
        ''' Makes an AWS API call to the list of ElastiCache replication groups
        in a particular region.'''
        # ElastiCache boto module doesn't provide a get_all_instances method,
        # that's why we need to call describe directly (it would be called by
        # the shorthand method anyway...)
        try:
            conn = self.connect_to_aws(elasticache, region)
            if conn:
                response = conn.describe_replication_groups()
        except boto.exception.BotoServerError as e:
            error = e.reason
            if e.error_code == 'AuthFailure':
                error = self.get_auth_error_message()
            if not e.reason == "Forbidden":
                error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
            self.fail_with_error(error, 'getting ElastiCache clusters')
        try:
            # Boto also doesn't provide wrapper classes to ReplicationGroups
            # Because of that we can't make use of the get_list method in the
            # AWSQueryConnection. Let's do the work manually
            # NOTE(review): as in get_elasticache_clusters_by_region, a falsy
            # conn would leave 'response' unbound here -- confirm.
            replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
        except KeyError as e:
            error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
            self.fail_with_error(error, 'getting ElastiCache clusters')
        for replication_group in replication_groups:
            self.add_elasticache_replication_group(replication_group, region)
def get_auth_error_message(self):
''' create an informative error message if there is an issue authenticating'''
errors = ["Authentication error retrieving ec2 inventory."]
if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
else:
errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
boto_config_found = [p for p in boto_paths if os.path.isfile(os.path.expanduser(p))]
if len(boto_config_found) > 0:
errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
else:
errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
return '\n'.join(errors)
def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def get_instance(self, region, instance_id):
conn = self.connect(region)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
for instance in reservation.instances:
return instance
    def add_instance(self, instance, region):
        ''' Adds an instance to the inventory and index, as long as it is
        addressable: pick a destination address and inventory hostname, then
        register the host in every enabled group (region, zone, AMI, type,
        state, platform, key pair, VPC, security groups, account, tags,
        Route53 names) plus the catch-all 'ec2' group and _meta hostvars. '''
        # Only return instances with desired instance states
        if instance.state not in self.ec2_instance_states:
            return
        # Select the best destination address
        if self.destination_format and self.destination_format_tags:
            # destination_format interpolates the listed tag values.
            dest = self.destination_format.format(*[getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags])
        elif instance.subnet_id:
            # VPC instance: prefer vpc_destination_variable (attribute first,
            # then tag of the same name).
            dest = getattr(instance, self.vpc_destination_variable, None)
            if dest is None:
                dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
        else:
            # EC2-Classic instance: same lookup with destination_variable.
            dest = getattr(instance, self.destination_variable, None)
            if dest is None:
                dest = getattr(instance, 'tags').get(self.destination_variable, None)
        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return
        # Set the inventory name
        hostname = None
        if self.hostname_variable:
            if self.hostname_variable.startswith('tag_'):
                hostname = instance.tags.get(self.hostname_variable[4:], None)
            else:
                hostname = getattr(instance, self.hostname_variable)
        # set the hostname from route53
        if self.route53_enabled and self.route53_hostnames:
            route53_names = self.get_instance_route53_names(instance)
            for name in route53_names:
                if name.endswith(self.route53_hostnames):
                    hostname = name
        # If we can't get a nice hostname, use the destination address
        if not hostname:
            hostname = dest
        # to_safe strips hostname characters like dots, so don't strip route53 hostnames
        elif self.route53_enabled and self.route53_hostnames and hostname.endswith(self.route53_hostnames):
            hostname = hostname.lower()
        else:
            hostname = self.to_safe(hostname).lower()
        # if we only want to include hosts that match a pattern, skip those that don't
        if self.pattern_include and not self.pattern_include.match(hostname):
            return
        # if we need to exclude hosts that match a pattern, skip those
        if self.pattern_exclude and self.pattern_exclude.match(hostname):
            return
        # Add to index
        self.index[hostname] = [region, instance.id]
        # Inventory: Group by instance ID (always a group of 1)
        if self.group_by_instance_id:
            self.inventory[instance.id] = [hostname]
            if self.nested_groups:
                self.push_group(self.inventory, 'instances', instance.id)
        # Inventory: Group by region
        if self.group_by_region:
            self.push(self.inventory, region, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'regions', region)
        # Inventory: Group by availability zone
        if self.group_by_availability_zone:
            self.push(self.inventory, instance.placement, hostname)
            if self.nested_groups:
                if self.group_by_region:
                    self.push_group(self.inventory, region, instance.placement)
                self.push_group(self.inventory, 'zones', instance.placement)
        # Inventory: Group by Amazon Machine Image (AMI) ID
        if self.group_by_ami_id:
            ami_id = self.to_safe(instance.image_id)
            self.push(self.inventory, ami_id, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'images', ami_id)
        # Inventory: Group by instance type
        if self.group_by_instance_type:
            type_name = self.to_safe('type_' + instance.instance_type)
            self.push(self.inventory, type_name, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'types', type_name)
        # Inventory: Group by instance state
        if self.group_by_instance_state:
            state_name = self.to_safe('instance_state_' + instance.state)
            self.push(self.inventory, state_name, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'instance_states', state_name)
        # Inventory: Group by platform
        if self.group_by_platform:
            if instance.platform:
                platform = self.to_safe('platform_' + instance.platform)
            else:
                platform = self.to_safe('platform_undefined')
            self.push(self.inventory, platform, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'platforms', platform)
        # Inventory: Group by key pair
        if self.group_by_key_pair and instance.key_name:
            key_name = self.to_safe('key_' + instance.key_name)
            self.push(self.inventory, key_name, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'keys', key_name)
        # Inventory: Group by VPC
        if self.group_by_vpc_id and instance.vpc_id:
            vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
            self.push(self.inventory, vpc_id_name, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'vpcs', vpc_id_name)
        # Inventory: Group by security group
        if self.group_by_security_group:
            try:
                for group in instance.groups:
                    key = self.to_safe("security_group_" + group.name)
                    self.push(self.inventory, key, hostname)
                    if self.nested_groups:
                        self.push_group(self.inventory, 'security_groups', key)
            except AttributeError:
                self.fail_with_error('\n'.join(['Package boto seems a bit older.',
                                                'Please upgrade boto >= 2.3.0.']))
        # Inventory: Group by AWS account ID
        # NOTE(review): this group receives 'dest' while every other group
        # receives 'hostname' -- confirm that difference is intentional.
        if self.group_by_aws_account:
            self.push(self.inventory, self.aws_account_id, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'accounts', self.aws_account_id)
        # Inventory: Group by tag keys
        if self.group_by_tag_keys:
            for k, v in instance.tags.items():
                # expand_csv_tags turns "a,b" into the two values a and b.
                if self.expand_csv_tags and v and ',' in v:
                    values = map(lambda x: x.strip(), v.split(','))
                else:
                    values = [v]
                for v in values:
                    if v:
                        key = self.to_safe("tag_" + k + "=" + v)
                    else:
                        key = self.to_safe("tag_" + k)
                    self.push(self.inventory, key, hostname)
                    if self.nested_groups:
                        self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
                        if v:
                            self.push_group(self.inventory, self.to_safe("tag_" + k), key)
        # Inventory: Group by Route53 domain names if enabled
        if self.route53_enabled and self.group_by_route53_names:
            route53_names = self.get_instance_route53_names(instance)
            for name in route53_names:
                self.push(self.inventory, name, hostname)
                if self.nested_groups:
                    self.push_group(self.inventory, 'route53', name)
        # Global Tag: instances without tags
        if self.group_by_tag_none and len(instance.tags) == 0:
            self.push(self.inventory, 'tag_none', hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'tags', 'tag_none')
        # Global Tag: tag all EC2 instances
        self.push(self.inventory, 'ec2', hostname)
        # Record per-host variables, plus ansible_host so Ansible connects
        # to the chosen destination address.
        self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
        self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest
    def add_rds_instance(self, instance, region):
        ''' Adds an RDS instance to the inventory and index, as long as it is
        addressable.

        Mirrors the EC2 path: the instance is pushed into one inventory
        group per enabled group_by_* option, recorded in self.index under
        its sanitized hostname, and its flattened attributes are stored
        under _meta/hostvars.
        '''
        # Only want available instances unless all_rds_instances is True
        if not self.all_rds_instances and instance.status != 'available':
            return
        # Select the best destination address
        # NOTE(review): endpoint[0] is used as the address; presumably the
        # endpoint is a (host, port) pair -- confirm against the boto docs.
        dest = instance.endpoint[0]
        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return
        # Set the inventory name
        hostname = None
        if self.hostname_variable:
            if self.hostname_variable.startswith('tag_'):
                # 'tag_<name>' selects the value of that instance tag.
                hostname = instance.tags.get(self.hostname_variable[4:], None)
            else:
                hostname = getattr(instance, self.hostname_variable)
        # If we can't get a nice hostname, use the destination address
        if not hostname:
            hostname = dest
        hostname = self.to_safe(hostname).lower()
        # Add to index
        self.index[hostname] = [region, instance.id]
        # Inventory: Group by instance ID (always a group of 1)
        if self.group_by_instance_id:
            self.inventory[instance.id] = [hostname]
            if self.nested_groups:
                self.push_group(self.inventory, 'instances', instance.id)
        # Inventory: Group by region
        if self.group_by_region:
            self.push(self.inventory, region, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'regions', region)
        # Inventory: Group by availability zone
        if self.group_by_availability_zone:
            self.push(self.inventory, instance.availability_zone, hostname)
            if self.nested_groups:
                # Zones nest under their region when both groupings are on.
                if self.group_by_region:
                    self.push_group(self.inventory, region, instance.availability_zone)
                self.push_group(self.inventory, 'zones', instance.availability_zone)
        # Inventory: Group by instance type
        if self.group_by_instance_type:
            type_name = self.to_safe('type_' + instance.instance_class)
            self.push(self.inventory, type_name, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'types', type_name)
        # Inventory: Group by VPC
        if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
            vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
            self.push(self.inventory, vpc_id_name, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'vpcs', vpc_id_name)
        # Inventory: Group by security group
        if self.group_by_security_group:
            try:
                if instance.security_group:
                    key = self.to_safe("security_group_" + instance.security_group.name)
                    self.push(self.inventory, key, hostname)
                    if self.nested_groups:
                        self.push_group(self.inventory, 'security_groups', key)
            except AttributeError:
                # Older boto releases lack the security_group attribute.
                self.fail_with_error('\n'.join(['Package boto seems a bit older.',
                                                'Please upgrade boto >= 2.3.0.']))
        # Inventory: Group by tag keys
        if self.group_by_tag_keys:
            for k, v in instance.tags.items():
                if self.expand_csv_tags and v and ',' in v:
                    # A comma-separated tag value yields one group per item.
                    values = map(lambda x: x.strip(), v.split(','))
                else:
                    values = [v]
                for v in values:
                    if v:
                        key = self.to_safe("tag_" + k + "=" + v)
                    else:
                        # Tag present but empty: group by the bare key.
                        key = self.to_safe("tag_" + k)
                    self.push(self.inventory, key, hostname)
                    if self.nested_groups:
                        self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
                        if v:
                            self.push_group(self.inventory, self.to_safe("tag_" + k), key)
        # Inventory: Group by engine
        if self.group_by_rds_engine:
            self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
        # Inventory: Group by parameter group
        if self.group_by_rds_parameter_group:
            self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
        # Global Tag: instances without tags
        if self.group_by_tag_none and len(instance.tags) == 0:
            self.push(self.inventory, 'tag_none', hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'tags', 'tag_none')
        # Global Tag: all RDS instances
        self.push(self.inventory, 'rds', hostname)
        self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
        self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest
def add_elasticache_cluster(self, cluster, region):
''' Adds an ElastiCache cluster to the inventory and index, as long as
it's nodes are addressable '''
# Only want available clusters unless all_elasticache_clusters is True
if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
return
# Select the best destination address
if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
# Memcached cluster
dest = cluster['ConfigurationEndpoint']['Address']
is_redis = False
else:
# Redis sigle node cluster
# Because all Redis clusters are single nodes, we'll merge the
# info from the cluster with info about the node
dest = cluster['CacheNodes'][0]['Endpoint']['Address']
is_redis = True
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, cluster['CacheClusterId']]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[cluster['CacheClusterId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
# Inventory: Group by region
if self.group_by_region and not is_redis:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone and not is_redis:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type and not is_redis:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group and not is_redis:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine and not is_redis:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
# Inventory: Group by parameter group
if self.group_by_elasticache_parameter_group:
self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
# Inventory: Group by replication group
if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
host_info = self.get_host_info_dict_from_describe_dict(cluster)
self.inventory["_meta"]["hostvars"][dest] = host_info
# Add the nodes
for node in cluster['CacheNodes']:
self.add_elasticache_node(node, cluster, region)
    def add_elasticache_node(self, node, cluster, region):
        ''' Adds an ElastiCache node to the inventory and index, as long as
        it is addressable.

        The node is identified as "<cluster id>_<node id>"; most grouping
        data (zone, node type, engine, security groups) comes from the
        owning *cluster* dict rather than the node itself.
        '''
        # Only want available nodes unless all_elasticache_nodes is True
        if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
            return
        # Select the best destination address
        dest = node['Endpoint']['Address']
        if not dest:
            # Skip nodes we cannot address (e.g. private VPC subnet)
            return
        node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
        # Add to index
        self.index[dest] = [region, node_id]
        # Inventory: Group by node ID (always a group of 1)
        if self.group_by_instance_id:
            self.inventory[node_id] = [dest]
            if self.nested_groups:
                self.push_group(self.inventory, 'instances', node_id)
        # Inventory: Group by region
        if self.group_by_region:
            self.push(self.inventory, region, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'regions', region)
        # Inventory: Group by availability zone
        if self.group_by_availability_zone:
            self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
            if self.nested_groups:
                # Zones nest under their region when both groupings are on.
                if self.group_by_region:
                    self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
                self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
        # Inventory: Group by node type
        if self.group_by_instance_type:
            type_name = self.to_safe('type_' + cluster['CacheNodeType'])
            self.push(self.inventory, type_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'types', type_name)
        # Inventory: Group by VPC (information not available in the current
        # AWS API version for ElastiCache)
        # Inventory: Group by security group
        if self.group_by_security_group:
            # Check for the existence of the 'SecurityGroups' key and also if
            # this key has some value. When the cluster is not placed in a SG
            # the query can return None here and cause an error.
            if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
                for security_group in cluster['SecurityGroups']:
                    key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
                    self.push(self.inventory, key, dest)
                    if self.nested_groups:
                        self.push_group(self.inventory, 'security_groups', key)
        # Inventory: Group by engine
        if self.group_by_elasticache_engine:
            self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
        # Inventory: Group by parameter group (done at cluster level)
        # Inventory: Group by replication group (done at cluster level)
        # Inventory: Group by ElastiCache Cluster
        if self.group_by_elasticache_cluster:
            self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
        # Global Tag: all ElastiCache nodes
        self.push(self.inventory, 'elasticache_nodes', dest)
        host_info = self.get_host_info_dict_from_describe_dict(node)
        # Merge with any cluster-level hostvars already stored for this address
        # (single-node Redis clusters share the node's address).
        if dest in self.inventory["_meta"]["hostvars"]:
            self.inventory["_meta"]["hostvars"][dest].update(host_info)
        else:
            self.inventory["_meta"]["hostvars"][dest] = host_info
def add_elasticache_replication_group(self, replication_group, region):
''' Adds an ElastiCache replication group to the inventory and index '''
# Only want available clusters unless all_elasticache_replication_groups is True
if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
return
# Skip clusters we cannot address (e.g. private VPC subnet or clustered redis)
if replication_group['NodeGroups'][0]['PrimaryEndpoint'] is None or \
replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] is None:
return
# Select the best destination address (PrimaryEndpoint)
dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
# Add to index
self.index[dest] = [region, replication_group['ReplicationGroupId']]
# Inventory: Group by ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[replication_group['ReplicationGroupId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone (doesn't apply to replication groups)
# Inventory: Group by node type (doesn't apply to replication groups)
# Inventory: Group by VPC (information not available in the current
# AWS API version for replication groups
# Inventory: Group by security group (doesn't apply to replication groups)
# Check this value in cluster level
# Inventory: Group by engine (replication groups are always Redis)
if self.group_by_elasticache_engine:
self.push(self.inventory, 'elasticache_redis', dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', 'redis')
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
host_info = self.get_host_info_dict_from_describe_dict(replication_group)
self.inventory["_meta"]["hostvars"][dest] = host_info
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
point to them. '''
if self.boto_profile:
r53_conn = route53.Route53Connection(profile_name=self.boto_profile)
else:
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones]
self.route53_records = {}
for zone in route53_zones:
rrsets = r53_conn.get_all_rrsets(zone.id)
for record_set in rrsets:
record_name = record_set.name
if record_name.endswith('.'):
record_name = record_name[:-1]
for resource in record_set.resource_records:
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name)
def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. '''
instance_attributes = ['public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address']
name_list = set()
for attrib in instance_attributes:
try:
value = getattr(instance, attrib)
except AttributeError:
continue
if value in self.route53_records:
name_list.update(self.route53_records[value])
return list(name_list)
    def get_host_info_dict_from_instance(self, instance):
        ''' Flatten a boto EC2 instance object into a dict of ec2_-prefixed
        host variables suitable for _meta/hostvars.

        Complex boto attributes (state, placement, tags, security groups,
        block device mapping, region) get special handling; any remaining
        complex types are silently dropped.
        '''
        instance_vars = {}
        for key in vars(instance):
            value = getattr(instance, key)
            # Private boto attributes like "_state" become "ec2__state" here.
            key = self.to_safe('ec2_' + key)
            # Handle complex types
            # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
            if key == 'ec2__state':
                instance_vars['ec2_state'] = instance.state or ''
                instance_vars['ec2_state_code'] = instance.state_code
            elif key == 'ec2__previous_state':
                instance_vars['ec2_previous_state'] = instance.previous_state or ''
                instance_vars['ec2_previous_state_code'] = instance.previous_state_code
            elif isinstance(value, (int, bool)):
                instance_vars[key] = value
            elif isinstance(value, six.string_types):
                instance_vars[key] = value.strip()
            elif value is None:
                instance_vars[key] = ''
            elif key == 'ec2_region':
                # Region objects are reduced to their name.
                instance_vars[key] = value.name
            elif key == 'ec2__placement':
                instance_vars['ec2_placement'] = value.zone
            elif key == 'ec2_tags':
                for k, v in value.items():
                    if self.expand_csv_tags and ',' in v:
                        # Optionally expand comma-separated tag values to lists.
                        v = list(map(lambda x: x.strip(), v.split(',')))
                    key = self.to_safe('ec2_tag_' + k)
                    instance_vars[key] = v
            elif key == 'ec2_groups':
                group_ids = []
                group_names = []
                for group in value:
                    group_ids.append(group.id)
                    group_names.append(group.name)
                instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
                instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
            elif key == 'ec2_block_device_mapping':
                instance_vars["ec2_block_devices"] = {}
                for k, v in value.items():
                    # Keyed by device basename (e.g. "sda1"), valued by volume id.
                    instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id
            else:
                pass
                # TODO Product codes if someone finds them useful
                # print key
                # print type(value)
                # print value
        instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id
        return instance_vars
    def get_host_info_dict_from_describe_dict(self, describe_dict):
        ''' Parses the dictionary returned by the API call into a flat list
        of parameters. This method should be used only when 'describe' is
        used directly because Boto doesn't provide specific classes. '''
        # I really don't agree with prefixing everything with 'ec2'
        # because EC2, RDS and ElastiCache are different services.
        # I'm just following the pattern used until now to not break any
        # compatibility.
        host_info = {}
        for key in describe_dict:
            value = describe_dict[key]
            # CamelCase API keys become ec2_-prefixed snake_case variables.
            key = self.to_safe('ec2_' + self.uncammelize(key))
            # Handle complex types
            # NOTE: the first three checks below are independent `if`s; the
            # `elif` chain further down continues from the third one, which
            # is safe only because the keys are mutually exclusive.
            # Target: Memcached Cache Clusters
            if key == 'ec2_configuration_endpoint' and value:
                host_info['ec2_configuration_endpoint_address'] = value['Address']
                host_info['ec2_configuration_endpoint_port'] = value['Port']
            # Target: Cache Nodes and Redis Cache Clusters (single node)
            if key == 'ec2_endpoint' and value:
                host_info['ec2_endpoint_address'] = value['Address']
                host_info['ec2_endpoint_port'] = value['Port']
            # Target: Redis Replication Groups
            if key == 'ec2_node_groups' and value:
                host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
                host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
                replica_count = 0
                for node in value[0]['NodeGroupMembers']:
                    if node['CurrentRole'] == 'primary':
                        host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
                        host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
                        host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
                    elif node['CurrentRole'] == 'replica':
                        # Replicas are numbered in API order: _0, _1, ...
                        host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address']
                        host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port']
                        host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId']
                        replica_count += 1
            # Target: Redis Replication Groups
            if key == 'ec2_member_clusters' and value:
                host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
            # Target: All Cache Clusters
            elif key == 'ec2_cache_parameter_group':
                host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
                host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
                host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
            # Target: Almost everything
            elif key == 'ec2_security_groups':
                # Skip if SecurityGroups is None
                # (it is possible to have the key defined but no value in it).
                if value is not None:
                    sg_ids = []
                    for sg in value:
                        sg_ids.append(sg['SecurityGroupId'])
                    host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
            # Target: Everything
            # Preserve booleans and integers
            elif isinstance(value, (int, bool)):
                host_info[key] = value
            # Target: Everything
            # Sanitize string values
            elif isinstance(value, six.string_types):
                host_info[key] = value.strip()
            # Target: Everything
            # Replace None by an empty string
            elif value is None:
                host_info[key] = ''
            else:
                # Remove non-processed complex types
                pass
        return host_info
def get_host_info(self):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
(region, instance_id) = self.index[self.args.host]
instance = self.get_instance(region, instance_id)
return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def push_group(self, my_dict, key, element):
''' Push a group as a child of another group. '''
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
parent_group = my_dict[key] = {'hosts': parent_group}
child_groups = parent_group.setdefault('children', [])
if element not in child_groups:
child_groups.append(element)
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
with open(self.cache_path_cache, 'r') as f:
json_inventory = f.read()
return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file sets self.index '''
with open(self.cache_path_index, 'rb') as f:
self.index = json.load(f)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
with open(filename, 'w') as f:
f.write(json_data)
def uncammelize(self, key):
temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += "\-"
return re.sub(regex + "]", "_", word)
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
if __name__ == '__main__':
    # Run the script
    # Instantiating Ec2Inventory drives the whole run: argument parsing,
    # API calls / cache handling, and printing the JSON inventory to stdout.
    Ec2Inventory()
| [
"akira.chiku@gmail.com"
] | akira.chiku@gmail.com |
24707b53251bb31062a5561856da7cc3ab965ea4 | 898828a656914d5abeb5d87e11fdd46866310c53 | /smact/structure_prediction/__init__.py | c48e64a7c3251ff5300bcb3087f043129d65926e | [
"MIT"
] | permissive | mdommett/SMACT | e983552578b1a95e7bf8d114154856a411dc73dd | 0aa82b452856a54bad62032324d593f41ce1e11c | refs/heads/master | 2023-07-02T23:44:04.104316 | 2021-08-12T13:35:36 | 2021-08-12T13:35:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | """Minimalist ionic compound prediction tools for materials design."""
import logging
# Module authorship / status metadata.
__author__ = "Alexander Moriarty"
# NOTE(review): this is a *set* literal, so iteration order is undefined;
# __credits__ is conventionally a list -- confirm before relying on order.
__credits__ = {
    "WMD Group",
    "Imperial College London",
    "Andrew Jackson",
    "Dan Davies",
    "Keith Butler",
    "Aron Walsh",
    "Alexander Moriarty"
}
__status__ = "Development"
# Package-level logger; handler and level configuration is left to the
# consuming application.
logger = logging.getLogger(__name__)
| [
"k.t.butler@bath.ac.uk"
] | k.t.butler@bath.ac.uk |
7dd424133a11a8a56e217055df42adeb2b3d1c46 | 028a2a32511223b5a0645a7370ff2172b237d643 | /rsue_system_analysis/puro/migrations/0001_initial.py | c6ab028433cebe8b0579ceb6bf792e11594793d9 | [] | no_license | igor2104/rsue_system_analysis | 5941627146c648d4ee214040a340a66c9821b125 | 89bd2ee699edda8e254c54fd9020da2c94e5f96a | refs/heads/master | 2023-05-28T00:38:30.555472 | 2019-09-17T07:53:44 | 2019-09-17T07:55:51 | 208,991,381 | 0 | 0 | null | 2021-06-10T21:57:54 | 2019-09-17T07:47:47 | JavaScript | UTF-8 | Python | false | false | 3,614 | py | # Generated by Django 2.1.2 on 2018-12-11 08:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the app: Expert, Indicator, Poll and Tour models.

    NOTE: this migration has presumably been applied; never edit its
    operations -- create a new migration for schema changes instead.
    """
    initial = True
    dependencies = [
        # Polls reference the (possibly swapped) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Expert: a named participant of a poll (FK added further below).
        migrations.CreateModel(
            name='Expert',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, verbose_name='ФИО')),
            ],
            options={
                'verbose_name': 'Эксперт',
                'verbose_name_plural': 'Эксперты',
                'ordering': ('poll', 'id'),
            },
        ),
        # Indicator: a named metric evaluated within a poll.
        migrations.CreateModel(
            name='Indicator',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, verbose_name='Название')),
            ],
            options={
                'verbose_name': 'Показатель',
                'verbose_name_plural': 'Показатели',
                'ordering': ('poll', 'id'),
            },
        ),
        # Poll: owned by a user; deleting the user cascades to their polls.
        migrations.CreateModel(
            name='Poll',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, verbose_name='Название')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='polls', to=settings.AUTH_USER_MODEL, verbose_name='Пользователь')),
            ],
            options={
                'verbose_name': 'Опрос',
                'verbose_name_plural': 'Опросы',
            },
        ),
        # Tour: a numbered round inside a poll.
        migrations.CreateModel(
            name='Tour',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number', models.IntegerField(verbose_name='Номер')),
                ('poll', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tours', to='puro.Poll', verbose_name='Опрос')),
            ],
            options={
                'verbose_name': 'Тур',
                'verbose_name_plural': 'Туры',
                'ordering': ('poll', 'number'),
            },
        ),
        # Late FK additions: Indicator and Expert belong to a Poll.
        migrations.AddField(
            model_name='indicator',
            name='poll',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='indicators', to='puro.Poll', verbose_name='Опрос'),
        ),
        migrations.AddField(
            model_name='expert',
            name='poll',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experts', to='puro.Poll', verbose_name='Опрос'),
        ),
        # Uniqueness constraints: no duplicate tour numbers, poll names per
        # user, or indicator/expert names within one poll.
        migrations.AlterUniqueTogether(
            name='tour',
            unique_together={('poll', 'number')},
        ),
        migrations.AlterUniqueTogether(
            name='poll',
            unique_together={('user', 'name')},
        ),
        migrations.AlterUniqueTogether(
            name='indicator',
            unique_together={('poll', 'name')},
        ),
        migrations.AlterUniqueTogether(
            name='expert',
            unique_together={('poll', 'name')},
        ),
    ]
| [
"ermolov962104@gmail.com"
] | ermolov962104@gmail.com |
71d5280b4a1b28f3116aef25d72dd49e2f9abfc8 | 239d8eb48d82be04957fcdd3b67c967aff0c65e8 | /tests.py | 657c5643c7a7b0c3da5397e359b94154cbf48b38 | [] | no_license | Alejandro131/PythonPang | bd4a46f3c117108b0817617159ff5b042d401926 | fe6f09efdb03d6de46d66c4dff7c972a3e40e6c9 | refs/heads/master | 2020-04-22T17:11:00.566564 | 2013-07-01T07:30:33 | 2013-07-01T07:30:33 | 10,288,313 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,442 | py | import unittest
from pang.ball import Ball
from pang.vec2d import Vec2D
from pang.collision import *
from pang.obstacle import Obstacle
class BallToBoxCollisionTest(unittest.TestCase):
    """Exercise ball_to_box() against a fixed 200x200 obstacle at (200, 200).

    setUp builds parallel lists: self.balls[i] is an input ball and
    self.results[i] is the expected (position, collision normal) pair after
    resolving the collision; a None normal means no collision.  The test
    methods pick fixture indices by collision category (edge, edge limit,
    vertex, vertex limit, none).
    """
    def setUp(self):
        self.balls = []
        self.results = []
        # Axis-aligned box spanning (200, 200) to (600, 600).
        self.box = Obstacle(Vec2D(200, 200), Vec2D(200, 200))
        # Indices 0-31: balls exactly touching an edge or vertex (limit cases).
        self.balls.append(Ball(150, Vec2D(200, 100), Vec2D(50, 50)))
        self.results.append((Vec2D(200, 0), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(200, 100), Vec2D(-50, 50)))
        self.results.append((Vec2D(200, 0), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(400, 100), Vec2D(50, 50)))
        self.results.append((Vec2D(400, 0), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(400, 100), Vec2D(-50, 50)))
        self.results.append((Vec2D(400, 0), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(450, 200 - 50*(3**.5)),
                               Vec2D(50, 50)))
        self.results.append((Vec2D(450, 3.76), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(450, 200 - 50*(3**.5)),
                               Vec2D(-50, 50)))
        self.results.append((Vec2D(450, 3.76), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(400 + 50*(3**.5), 150),
                               Vec2D(-50, 50)))
        self.results.append((Vec2D(596.24, 150), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(400 + 50*(3**.5), 150),
                               Vec2D(-50, -50)))
        self.results.append((Vec2D(596.24, 150), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(500, 200), Vec2D(-50, 50)))
        self.results.append((Vec2D(600, 200), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(500, 200), Vec2D(-50, -50)))
        self.results.append((Vec2D(600, 200), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(500, 400), Vec2D(-50, 50)))
        self.results.append((Vec2D(600, 400), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(500, 400), Vec2D(-50, -50)))
        self.results.append((Vec2D(600, 400), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(400 + 50*(3**.5), 450),
                               Vec2D(-50, 50)))
        self.results.append((Vec2D(596.24, 450), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(400 + 50*(3**.5), 450),
                               Vec2D(-50, -50)))
        self.results.append((Vec2D(596.24, 450), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(450, 400 + 50*(3**.5)),
                               Vec2D(50, -50)))
        self.results.append((Vec2D(450, 596.24), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(450, 400 + 50*(3**.5)),
                               Vec2D(-50, -50)))
        self.results.append((Vec2D(450, 596.24), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(400, 500), Vec2D(50, -50)))
        self.results.append((Vec2D(400, 600), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(400, 500), Vec2D(-50, -50)))
        self.results.append((Vec2D(400, 600), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(200, 500), Vec2D(50, -50)))
        self.results.append((Vec2D(200, 600), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(200, 500), Vec2D(-50, -50)))
        self.results.append((Vec2D(200, 600), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(150, 400 + 50*(3**.5)),
                               Vec2D(50, -50)))
        self.results.append((Vec2D(150, 596.24), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(150, 400 + 50*(3**.5)),
                               Vec2D(-50, -50)))
        self.results.append((Vec2D(150, 596.24), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(200 - 50*(3**.5), 450),
                               Vec2D(50, -50)))
        self.results.append((Vec2D(3.76, 450), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(200 - 50*(3**.5), 450),
                               Vec2D(50, 50)))
        self.results.append((Vec2D(3.76, 450), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(100, 400), Vec2D(50, -50)))
        self.results.append((Vec2D(0, 400), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(100, 400), Vec2D(50, 50)))
        self.results.append((Vec2D(0, 400), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(100, 200), Vec2D(50, -50)))
        self.results.append((Vec2D(0, 200), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(100, 200), Vec2D(50, 50)))
        self.results.append((Vec2D(0, 200), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(200 - 50*(3**.5), 150),
                               Vec2D(50, -50)))
        self.results.append((Vec2D(3.76, 150), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(200 - 50*(3**.5), 150),
                               Vec2D(50, 50)))
        self.results.append((Vec2D(3.76, 150), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(150, 200 - 50*(3**.5)),
                               Vec2D(50, 50)))
        self.results.append((Vec2D(150, 3.76), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(150, 200 - 50*(3**.5)),
                               Vec2D(-50, 50)))
        self.results.append((Vec2D(150, 3.76), Vec2D(0, -1)))
        # Indices 32-67: overlapping balls that must be pushed out.
        self.balls.append(Ball(150, Vec2D(300, 100), Vec2D(50, 50)))
        self.results.append((Vec2D(300, 0), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(300, 100), Vec2D(-50, 50)))
        self.results.append((Vec2D(300, 0), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(450, 100), Vec2D(50, 50)))
        self.results.append((Vec2D(450, 17.16), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(450, 100), Vec2D(-50, 50)))
        self.results.append((Vec2D(450, 17.16), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(450, 150), Vec2D(50, 50)))
        self.results.append((Vec2D(450, -32.84), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(450, 150), Vec2D(-50, 50)))
        self.results.append((Vec2D(337.87, 37.87), Vec2D(-1, -1)))
        self.balls.append(Ball(150, Vec2D(450, 150), Vec2D(-50, -50)))
        self.results.append((Vec2D(632.84, 150), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(500, 150), Vec2D(-50, 50)))
        self.results.append((Vec2D(582.84, 150), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(500, 150), Vec2D(-50, -50)))
        self.results.append((Vec2D(582.84, 150), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(500, 300), Vec2D(-50, 50)))
        self.results.append((Vec2D(600, 300), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(500, 300), Vec2D(-50, -50)))
        self.results.append((Vec2D(600, 300), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(500, 450), Vec2D(-50, 50)))
        self.results.append((Vec2D(582.84, 450), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(500, 450), Vec2D(-50, -50)))
        self.results.append((Vec2D(582.84, 450), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(450, 450), Vec2D(-50, 50)))
        self.results.append((Vec2D(632.84, 450), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(450, 450), Vec2D(-50, -50)))
        self.results.append((Vec2D(562.13, 562.13), Vec2D(-1, -1)))
        self.balls.append(Ball(150, Vec2D(450, 450), Vec2D(50, -50)))
        self.results.append((Vec2D(450, 632.84), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(450, 500), Vec2D(-50, -50)))
        self.results.append((Vec2D(450, 582.84), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(450, 500), Vec2D(50, -50)))
        self.results.append((Vec2D(450, 582.84), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(300, 500), Vec2D(-50, -50)))
        self.results.append((Vec2D(300, 600), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(300, 500), Vec2D(50, -50)))
        self.results.append((Vec2D(300, 600), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(150, 500), Vec2D(-50, -50)))
        self.results.append((Vec2D(150, 582.84), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(150, 500), Vec2D(50, -50)))
        self.results.append((Vec2D(150, 582.84), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(150, 450), Vec2D(-50, -50)))
        self.results.append((Vec2D(150, 632.84), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(150, 450), Vec2D(50, -50)))
        self.results.append((Vec2D(262.13, 562.13), Vec2D(-1, -1)))
        self.balls.append(Ball(150, Vec2D(150, 450), Vec2D(50, 50)))
        self.results.append((Vec2D(-32.84, 450), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(100, 450), Vec2D(50, -50)))
        self.results.append((Vec2D(17.16, 450), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(100, 450), Vec2D(50, 50)))
        self.results.append((Vec2D(17.16, 450), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(100, 300), Vec2D(50, -50)))
        self.results.append((Vec2D(0, 300), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(100, 300), Vec2D(50, 50)))
        self.results.append((Vec2D(0, 300), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(100, 150), Vec2D(50, -50)))
        self.results.append((Vec2D(17.16, 150), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(100, 150), Vec2D(50, 50)))
        self.results.append((Vec2D(17.16, 150), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(150, 150), Vec2D(50, -50)))
        self.results.append((Vec2D(-32.84, 150), Vec2D(-1, 0)))
        self.balls.append(Ball(150, Vec2D(150, 150), Vec2D(50, 50)))
        self.results.append((Vec2D(37.87, 37.87), Vec2D(-1, -1)))
        self.balls.append(Ball(150, Vec2D(150, 150), Vec2D(-50, 50)))
        self.results.append((Vec2D(150, -32.84), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(150, 100), Vec2D(50, 50)))
        self.results.append((Vec2D(150, 17.16), Vec2D(0, -1)))
        self.balls.append(Ball(150, Vec2D(150, 100), Vec2D(-50, 50)))
        self.results.append((Vec2D(150, 17.16), Vec2D(0, -1)))
        # Index 68: far away from the box, no collision expected.
        self.balls.append(Ball(150, Vec2D(0, 0), Vec2D(50, 0)))
        self.results.append((Vec2D(0, 0), None))
    def tearDown(self):
        # Drop fixtures so each test starts from a fresh setUp.
        del self.box
        del self.balls
        del self.results
    def test_edge_collision(self):
        """Balls overlapping a box edge are resolved with the edge normal."""
        for test_index in [32, 33, 41, 42, 50, 51, 59, 60]:
            result = ball_to_box(self.balls[test_index], self.box, True)
            self.assertEqual(self.results[test_index][1], result)
            self.assertEqual(self.results[test_index][0],
                             self.balls[test_index].position)
            # A second call must report no collision (ball was pushed out).
            self.assertFalse(ball_to_box(self.balls[test_index],
                                         self.box, True))
    def test_edge_limit_collision(self):
        """Balls exactly touching an edge still count as a collision."""
        for test_index in [0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25,
                           26, 27]:
            result = ball_to_box(self.balls[test_index], self.box, True)
            self.assertEqual(self.results[test_index][1], result)
            self.assertEqual(self.results[test_index][0],
                             self.balls[test_index].position)
            self.assertFalse(ball_to_box(self.balls[test_index],
                                         self.box, True))
    def test_vertex_collision(self):
        """Balls overlapping a box corner are resolved with a corner normal."""
        for test_index in [34, 35, 36, 37, 38, 39, 40, 43, 44, 45, 46, 47, 48,
                           49, 52, 53, 54, 55, 56, 57, 58, 61, 62, 63, 64, 65,
                           66, 67]:
            result = ball_to_box(self.balls[test_index], self.box, True)
            self.assertEqual(self.results[test_index][1], result)
            self.assertEqual(self.results[test_index][0],
                             self.balls[test_index].position)
            self.assertFalse(ball_to_box(self.balls[test_index],
                                         self.box, True))
    def test_vertex_limit_collision(self):
        """Balls exactly touching a corner still count as a collision."""
        for test_index in [4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29,
                           30, 31]:
            result = ball_to_box(self.balls[test_index], self.box, True)
            self.assertEqual(self.results[test_index][1], result)
            self.assertEqual(self.results[test_index][0],
                             self.balls[test_index].position)
            self.assertFalse(ball_to_box(self.balls[test_index],
                                         self.box, True))
    def test_no_collision(self):
        """A ball far from the box is untouched and reports no collision."""
        result = ball_to_box(self.balls[68], self.box, True)
        self.assertEqual(self.results[68][1], result)
        self.assertEqual(self.results[68][0], self.balls[68].position)
        self.assertFalse(ball_to_box(self.balls[68], self.box, True))
class CalculateAngleTest(unittest.TestCase):
    """calc_angle should return the unsigned angle, in degrees, of a vector
    measured from the positive x axis (mirror-symmetric about that axis)."""

    def test_angle(self):
        half_sqrt3 = (3 ** .5) / 2  # cos(30deg) == sin(60deg)
        cases = [
            (0, (1, 0)),
            (30, (half_sqrt3, .5)),
            (30, (half_sqrt3, -.5)),
            (45, (1, 1)),
            (45, (1, -1)),
            (60, (.5, half_sqrt3)),
            (60, (.5, -half_sqrt3)),
            (90, (0, 1)),
            (90, (0, -1)),
            (120, (-.5, half_sqrt3)),
            (120, (-.5, -half_sqrt3)),
            (135, (-1, 1)),
            (135, (-1, -1)),
            (150, (-half_sqrt3, .5)),
            (150, (-half_sqrt3, -.5)),
            (180, (-1, 0)),
        ]
        for expected_degrees, (x, y) in cases:
            self.assertAlmostEqual(expected_degrees, calc_angle(Vec2D(x, y)))
class BoxToBoxCollisionTest(unittest.TestCase):
    """AABB-vs-AABB overlap tests for box_to_box.

    Obstacle(size, position) fixtures 0-14 are checked against one reference
    box; fixtures 15-17 plus self.player_results appear to be extra
    player-resolution data.
    NOTE(review): player_results (and fixtures 15-17) are built in setUp but
    never asserted by any test below -- confirm whether they are dead fixture
    data or used by tests outside this view.
    """

    def setUp(self):
        # Reference box: 200x200 at position (200, 200).
        self.box = Obstacle(Vec2D(200, 200), Vec2D(200, 200))
        self.test_boxes = []
        # Fixtures 0-1: boxes fully inside / containing the reference box.
        self.test_boxes.append(Obstacle(Vec2D(400, 100), Vec2D(100, 250)))
        self.test_boxes.append(Obstacle(Vec2D(100, 400), Vec2D(250, 100)))
        # Fixtures 2-5: boxes overlapping an edge from outside.
        self.test_boxes.append(Obstacle(Vec2D(100, 200), Vec2D(250, 100)))
        self.test_boxes.append(Obstacle(Vec2D(100, 200), Vec2D(250, 300)))
        self.test_boxes.append(Obstacle(Vec2D(200, 100), Vec2D(100, 250)))
        self.test_boxes.append(Obstacle(Vec2D(200, 100), Vec2D(300, 250)))
        # Fixtures 6-9: boxes overlapping only at a corner.
        self.test_boxes.append(Obstacle(Vec2D(200, 200), Vec2D(300, 100)))
        self.test_boxes.append(Obstacle(Vec2D(200, 200), Vec2D(100, 100)))
        self.test_boxes.append(Obstacle(Vec2D(200, 200), Vec2D(100, 300)))
        self.test_boxes.append(Obstacle(Vec2D(200, 200), Vec2D(300, 300)))
        # Fixtures 10-12: clearly separated boxes (no collision expected).
        self.test_boxes.append(Obstacle(Vec2D(200, 200), Vec2D(300, -50)))
        self.test_boxes.append(Obstacle(Vec2D(200, 200), Vec2D(500, 500)))
        self.test_boxes.append(Obstacle(Vec2D(200, 200), Vec2D(500, 300)))
        # Fixtures 13-14: boxes sharing an edge exactly (touching, not
        # overlapping -- expected to count as no collision).
        self.test_boxes.append(Obstacle(Vec2D(200, 200), Vec2D(400, 200)))
        self.test_boxes.append(Obstacle(Vec2D(200, 200), Vec2D(200, 0)))
        self.player_results = []
        # Fixtures 15-17: player-sized boxes with expected resolved positions.
        self.test_boxes.append(Obstacle(Vec2D(80, 100), Vec2D(200, 105)))
        self.player_results.append(Vec2D(200, 100))
        self.test_boxes.append(Obstacle(Vec2D(80, 100), Vec2D(350, 115)))
        self.player_results.append(Vec2D(400, 115))
        self.test_boxes.append(Obstacle(Vec2D(80, 100), Vec2D(250, 115)))
        self.player_results.append(Vec2D(120, 115))

    def tearDown(self):
        del self.box
        del self.test_boxes
        del self.player_results

    def test_inside(self):
        # Containment in either direction counts as a collision.
        for test_index in range(0, 2):
            self.assertTrue(box_to_box(self.test_boxes[test_index], self.box))

    def test_outside(self):
        # Partial edge overlap counts as a collision.
        for test_index in range(2, 6):
            self.assertTrue(box_to_box(self.test_boxes[test_index], self.box))

    def test_vertex(self):
        # Corner-only overlap counts as a collision.
        for test_index in range(6, 10):
            self.assertTrue(box_to_box(self.test_boxes[test_index], self.box))

    def test_no_collision(self):
        # Separated boxes never collide.
        for test_index in range(10, 13):
            self.assertFalse(box_to_box(self.test_boxes[test_index], self.box))

    def test_adjacent_edge(self):
        # Exactly touching edges do not count as a collision.
        for test_index in range(13, 15):
            self.assertFalse(box_to_box(self.test_boxes[test_index], self.box))
if __name__ == '__main__':
    # Run the collision/angle test suite when this file is executed directly.
    unittest.main()
| [
"alex131_d@yahoo.com"
] | alex131_d@yahoo.com |
77b73a4c62c781666aa6f58703e8ed6973d129db | c61145e8771724575f67ae5738dd6cbb9626a706 | /blog/models.py | b885ebd7fe878c0266a464c1da35eb04d96169e5 | [] | no_license | Seredyak1/test_task | 1399dd082f4281ca6f72d036f4df4c1c6945dafe | a5d433b827df46ffa95dd6dd91245b204884674f | refs/heads/master | 2020-04-16T08:03:04.521740 | 2019-01-16T09:33:47 | 2019-01-16T09:33:47 | 165,409,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | from django.contrib.auth.models import User
from django.db import models
class Post(models.Model):
    """A blog post authored by a user, with like/unlike support."""

    class Meta:
        verbose_name_plural = 'Posts'
        ordering = ('-updated_at',)  # most recently updated first

    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # title/body are nullable, so display helpers must tolerate None.
    title = models.CharField(max_length=128, blank=True, null=True)
    body = models.TextField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        # Fix: title is nullable and __str__ must always return a str --
        # returning None raised TypeError in the admin/shell for untitled posts.
        return self.title or "Post {}".format(self.pk)

    @property
    def like_count(self):
        """Show the number of likes in the Post"""
        return self.like_set.count()

    def add_like(self, user):
        """Add like to this Post (idempotent via get_or_create)."""
        Like.objects.get_or_create(user=user, post=self)

    def unlike(self, user):
        """Delete like to this Post"""
        Like.objects.filter(user=user, post=self).delete()
class Like(models.Model):
    # One row per (user, post) like. Uniqueness is enforced in
    # Post.add_like via get_or_create, not by a database constraint.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
| [
"sanya.seredyak@gmail.com"
] | sanya.seredyak@gmail.com |
14dd22e487e54a040da5d85ad85ac2b813b6311d | c7a305fc7223b2ef71017395016c06be9974d1d6 | /code.py | d2a8694f81ff61ec7864bd547df83fb9c06ee6a8 | [] | no_license | jlcampbell/breakout | 341bf59c59109d3755c1da17ff5571e3a26ddaac | 3310eea466ff8a8da7535c2510e6c9b2f27caeef | refs/heads/master | 2018-11-01T13:57:32.007415 | 2018-08-24T16:00:46 | 2018-08-24T16:00:46 | 113,598,543 | 0 | 1 | null | 2017-12-08T19:03:08 | 2017-12-08T17:09:26 | HTML | UTF-8 | Python | false | false | 446 | py | from adafruit_hid.mouse import Mouse
from adafruit_circuitplayground.express import cpx
import math
# Tilt-to-scroll: poll the Circuit Playground accelerometer and nudge the
# HID mouse pointer horizontally when the board tilts past a threshold.
m = Mouse()

# Seed the "previous" sample so the first comparison in the loop is valid.
x2, y2, z2 = cpx.acceleration

# Minimum change on the x axis (m/s^2, presumably -- confirm against the
# adafruit docs) before we register a movement, and the pointer step size.
TILT_THRESHOLD = 0.5
POINTER_STEP = 20

while True:
    x, y, z = cpx.acceleration
    delta = x - x2
    # Fix: the two branches previously duplicated the sample-update lines and
    # an unused `diff` variable existed; behavior is unchanged.
    if abs(delta) > TILT_THRESHOLD:
        print(delta)
        x2, y2, z2 = x, y, z
        # Positive tilt moves the pointer left, negative moves it right.
        m.move(-POINTER_STEP if delta > 0 else POINTER_STEP, 0, 0)
| [
"jlcampbell@users.noreply.github.com"
] | jlcampbell@users.noreply.github.com |
6232aafcfbc5dc48dba2b167a7a94597f50a2e88 | 5f681c02c8094a7c0c32dee9d8dec9c980a65850 | /config.py | 8c131656ded24690e9131f08c67cceaae0fb273d | [] | no_license | Page-Jiao/my-PANet | f6e376dcf2d57ffca2b90dc7dd6bcb44bc5ab58f | 30ab6f5f0aa40f87c1de47c58000fccb4cb7752d | refs/heads/master | 2021-05-17T12:26:14.864798 | 2020-03-31T01:58:45 | 2020-03-31T01:58:45 | 250,775,623 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,564 | py | """Experiment Configuration"""
import os
import re
import glob
import itertools
import sacred
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
# Writable config is needed so the @ex.config_hook below can return/adjust it.
sacred.SETTINGS['CONFIG']['READ_ONLY_CONFIG'] = False
# 'no' capture mode: presumably disables sacred's stdout/stderr capture --
# confirm against the sacred documentation.
sacred.SETTINGS.CAPTURE_MODE = 'no'

ex = Experiment('PANet')
ex.captured_out_filter = apply_backspaces_and_linefeeds

# Attach every project .py file to the experiment record for reproducibility.
source_folders = ['.', './dataloaders', './models', './util']
sources_to_save = list(itertools.chain.from_iterable(
    [glob.glob(f'{folder}/*.py') for folder in source_folders]))
for source_file in sources_to_save:
    ex.add_source_file(source_file)
@ex.config
def cfg():
    """Default configurations.

    Sacred's @ex.config captures every local variable defined here as a
    config entry, so the variable names below are the experiment's public
    configuration keys -- do not rename them casually.
    """
    input_size = (417, 417)  # network input resolution (H, W)
    seed = 1234
    cuda_visable = '0, 1, 2, 3, 4, 5, 6, 7'  # (sic: "visable") CUDA device list
    gpu_id = 0
    mode = 'test' # 'train' or 'test'
    if mode == 'train':
        dataset = 'VOC' # 'VOC' or 'COCO'
        n_steps = 30000
        label_sets = 0  # which fold of class labels to train on
        batch_size = 1
        lr_milestones = [10000, 20000, 30000]
        align_loss_scaler = 1  # weight of the prototype-alignment loss
        ignore_label = 255  # pixel value excluded from the loss
        print_interval = 100
        save_pred_every = 10000
        model = {
            'align': True,
        }
        # Few-shot episode layout.
        task = {
            'n_ways': 1,
            'n_shots': 1,
            'n_queries': 1,
        }
        optim = {
            'lr': 1e-3,
            'momentum': 0.9,
            'weight_decay': 0.0005,
        }
    elif mode == 'test':
        notrain = False  # True: evaluate an untrained model
        snapshot = './runs/PANet_VOC_sets_0_1way_1shot_[train]/1/snapshots/30000.pth'
        n_runs = 5
        n_steps = 1000
        batch_size = 1
        scribble_dilation = 0
        bbox = False      # use bounding-box supports
        scribble = False  # use scribble supports
        # Set dataset config from the snapshot string
        if 'VOC' in snapshot:
            dataset = 'VOC'
        elif 'COCO' in snapshot:
            dataset = 'COCO'
        else:
            raise ValueError('Wrong snapshot name !')
        # Set model config from the snapshot string
        model = {}
        for key in ['align',]:
            model[key] = key in snapshot
        # Set label_sets from the snapshot string
        # (assumes the path contains '_sets_<digit>' -- single digit only).
        label_sets = int(snapshot.split('_sets_')[1][0])
        # Set task config from the snapshot string
        task = {
            'n_ways': int(re.search("[0-9]+way", snapshot).group(0)[:-3]),
            'n_shots': int(re.search("[0-9]+shot", snapshot).group(0)[:-4]),
            'n_queries': 1,
        }
    else:
        raise ValueError('Wrong configuration for "mode" !')

    # Human-readable experiment tag used for run directories / observers.
    exp_str = '_'.join(
        [dataset,]
        + [key for key, value in model.items() if value]
        + [f'sets_{label_sets}', f'{task["n_ways"]}way_{task["n_shots"]}shot_[{mode}]'])

    # Filesystem layout: logs, pretrained backbone, and per-dataset roots.
    path = {
        'log_dir': './runs',
        'init_path': './pretrained_model/vgg16-397923af.pth',
        'VOC':{'data_dir': '../data/Pascal/VOCdevkit/VOC2012/',
               'data_split': 'trainaug',},
        'COCO':{'data_dir': '../../data/COCO/',
                'data_split': 'train',},
    }
@ex.config_hook
def add_observer(config, command_name, logger):
    """Attach a FileStorageObserver whose directory encodes the run settings."""
    name_suffixes = []
    if config['mode'] == 'test':
        # Test-only flags are appended so distinct settings get distinct dirs.
        if config['notrain']:
            name_suffixes.append('_notrain')
        if config['scribble']:
            name_suffixes.append('_scribble')
        if config['bbox']:
            name_suffixes.append('_bbox')
    exp_name = f'{ex.path}_{config["exp_str"]}' + ''.join(name_suffixes)
    observer = FileStorageObserver.create(os.path.join(config['path']['log_dir'], exp_name))
    ex.observers.append(observer)
    return config
| [
"jpq6699@mail.ustc.edu.cn"
] | jpq6699@mail.ustc.edu.cn |
88fbea7908fceadd92950d0d0eca62bc5c17eef9 | cd1bad03ef80e9112aaa97c950a1fc4d74da7fc7 | /Practice(Beginner)/Python3/Valid_Triangles.py | 8ff72befe620a4e69e94cd93acb9630a1e0a9208 | [] | no_license | UtsabSen/Codechef | 85a875443f0afbbc1b7b6d25e8731968ba385ae5 | 92ff47f6ae58824115bac15190c4c9f1b5a28e67 | refs/heads/master | 2021-04-02T22:47:28.711698 | 2020-05-16T08:56:54 | 2020-05-16T08:56:54 | 248,332,274 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | # Source: https://www.codechef.com/problems/FLOW013
# Codechef FLOW013: three angles form a valid triangle iff they sum to 180
# degrees and each is strictly positive.
# Fix: removed the dead `l = [100, 50, 10, 5, 2, 1]` list, a copy-paste
# leftover that was never read.
case_count = int(input())
for _ in range(case_count):
    a, b, c = map(int, input().split())
    if a + b + c == 180 and min(a, b, c) > 0:
        print("YES")
    else:
        print("NO")
| [
"noreply@github.com"
] | noreply@github.com |
f4018757458a86a63df44d42374c69ea3d612194 | de4d3fed2b538587124ad855c8ba2f30933e7edf | /backend/sparepart_main/sparepart_main/asgi.py | 597f5430d993ea910e06c11dd1b1488e41205dd3 | [] | no_license | zahydakhan/project_spare | aaea130edefa95630f73b3026de6c32800b0bc7f | 850374c270fd5ad2897bf9b6f0afb93b9e171059 | refs/heads/master | 2023-03-11T17:13:13.103574 | 2021-02-23T06:40:52 | 2021-02-23T06:40:52 | 339,530,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
ASGI config for sparepart_main project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Settings module must be set before get_asgi_application() is *called*;
# importing it above is safe because Django loads settings lazily.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sparepart_main.settings')

application = get_asgi_application()
| [
"zahydakhan@gmail.com"
] | zahydakhan@gmail.com |
4660ea0d2890f4a7ae7e8f48cbe1f776c8393822 | de428c011b56db862f05ec0ceab17b85f83f94b1 | /pythongame/scenes_game/player_environment_interactions.py | bfdfdb4eea733e83b0f61229bf7c9e6e1f382640 | [] | no_license | risooonho/python-2d-game | c6d1fceaf09c72a6f7573230a4a899bf79164b7f | 24b02646ed56f9017069b243b774e0ee46951aea | refs/heads/master | 2021-05-17T06:02:13.538699 | 2020-02-15T23:59:54 | 2020-02-15T23:59:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,646 | py | import sys
from typing import Optional, Any, List, Tuple
from pythongame.core.game_data import CONSUMABLES, PORTALS
from pythongame.core.game_state import GameState, NonPlayerCharacter, LootableOnGround, Portal, WarpPoint, \
ConsumableOnGround, ItemOnGround, Chest, Shrine
from pythongame.core.game_state import WorldEntity
from pythongame.core.item_data import build_item_name, create_item_description, get_item_data
from pythongame.core.math import boxes_intersect, is_x_and_y_within_distance, \
get_manhattan_distance_between_rects
from pythongame.core.npc_behaviors import has_npc_dialog
from pythongame.core.view.game_world_view import EntityActionText, EntityActionTextStyle
from pythongame.scenes_game.game_engine import GameEngine
class PlayerInteractionsState:
    """Tracks which nearby world entity (NPC, loot, portal, warp point,
    chest, shrine) the player can currently interact with.

    Non-loot candidates compete on Manhattan distance; loot wins by simple
    rectangle overlap.
    """

    # Pixel radius within which an entity counts as "close" to the player.
    INTERACT_DISTANCE = 75

    def __init__(self):
        self.entity_to_interact_with: Any = None

    def handle_nearby_entities(self, player_entity: WorldEntity, game_state: GameState, game_engine: GameEngine):
        """Recompute the interaction target from the current game state.

        Also notifies the game engine about every portal the player is close
        to (not only the closest one), preserving the original behavior.
        """
        self.entity_to_interact_with = None
        player_position = player_entity.get_position()
        closest_distance = sys.maxsize

        def consider(entity, world_entity):
            # Shared closest-candidate logic that was previously duplicated
            # across five loops. Returns whether the entity is "close" so the
            # portal loop can reuse the check for its side effect.
            nonlocal closest_distance
            close_to_player = is_x_and_y_within_distance(
                player_position, world_entity.get_position(), self.INTERACT_DISTANCE)
            distance = get_manhattan_distance_between_rects(
                player_entity.rect(), world_entity.rect())
            if close_to_player and distance < closest_distance:
                self.entity_to_interact_with = entity
                closest_distance = distance
            return close_to_player

        for npc in game_state.non_player_characters:
            # Only NPCs that actually have dialog are interactable.
            if has_npc_dialog(npc.npc_type):
                consider(npc, npc.world_entity)

        # Loot is selected by overlap, not distance. A later overlapping
        # lootable replaces an earlier one, and setting closest_distance to 0
        # means no distance-based candidate below can outrank loot.
        lootables_on_ground: List[LootableOnGround] = list(game_state.items_on_ground)
        lootables_on_ground += game_state.consumables_on_ground
        for lootable in lootables_on_ground:
            if boxes_intersect(player_entity.rect(), lootable.world_entity.rect()):
                self.entity_to_interact_with = lootable
                closest_distance = 0

        for portal in game_state.portals:
            # The engine must hear about *every* close portal, regardless of
            # whether it becomes the interaction target.
            if consider(portal, portal.world_entity):
                game_engine.handle_being_close_to_portal(portal)

        for warp_point in game_state.warp_points:
            consider(warp_point, warp_point.world_entity)

        for chest in game_state.chests:
            consider(chest, chest.world_entity)

        for shrine in game_state.shrines:
            consider(shrine, shrine.world_entity)

    def get_entity_to_interact_with(self):
        return self.entity_to_interact_with

    def get_entity_action_text(self, is_shift_key_held_down: bool) -> Optional[EntityActionText]:
        """Return the floating action text for the current target, if any."""
        if self.entity_to_interact_with is None:
            return None
        return _get_entity_action_text(self.entity_to_interact_with, is_shift_key_held_down)
def _get_entity_action_text(ready_entity: Any, is_shift_key_held_down: bool) -> Optional[EntityActionText]:
    """Build the floating action text for whatever entity the player can
    interact with; holding shift reveals loot details."""
    if isinstance(ready_entity, NonPlayerCharacter):
        return EntityActionText(ready_entity.world_entity, "...", [])
    if isinstance(ready_entity, LootableOnGround):
        name, style = _get_loot_name(ready_entity)
        loot_details = _get_loot_details(ready_entity) if is_shift_key_held_down else []
        return EntityActionText(ready_entity.world_entity, name, loot_details, style=style)
    if isinstance(ready_entity, Portal):
        if not ready_entity.is_enabled:
            # Destination is hidden until the portal has been activated.
            return EntityActionText(ready_entity.world_entity, "???", [])
        data = PORTALS[ready_entity.portal_id]
        return EntityActionText(ready_entity.world_entity, data.destination_name, [])
    if isinstance(ready_entity, WarpPoint):
        return EntityActionText(ready_entity.world_entity, "Warp", [])
    if isinstance(ready_entity, Chest):
        return EntityActionText(ready_entity.world_entity, "Open", [])
    if isinstance(ready_entity, Shrine):
        # A used shrine offers no further interaction.
        if ready_entity.has_been_used:
            return None
        return EntityActionText(ready_entity.world_entity, "Touch", [])
    raise Exception("Unhandled entity: " + str(ready_entity))
def _get_loot_name(lootable: LootableOnGround) -> Tuple[str, EntityActionTextStyle]:
    """Return the display name and text style for a lootable on the ground."""
    if isinstance(lootable, ConsumableOnGround):
        name = CONSUMABLES[lootable.consumable_type].name
        return name, EntityActionTextStyle.PLAIN
    if isinstance(lootable, ItemOnGround):
        name = build_item_name(lootable.item_id)
        # Suffixed items render as rare, uniques as unique, the rest plain.
        if lootable.item_id.suffix_id is not None:
            style = EntityActionTextStyle.LOOT_RARE
        elif get_item_data(lootable.item_id).is_unique:
            style = EntityActionTextStyle.LOOT_UNIQUE
        else:
            style = EntityActionTextStyle.PLAIN
        return name, style
    # Fix: previously fell through and implicitly returned None, which made
    # callers crash on tuple unpacking. Fail loudly instead, consistent with
    # _get_entity_action_text.
    raise Exception("Unhandled lootable: " + str(lootable))
def _get_loot_details(lootable: LootableOnGround) -> List[str]:
    """Return the detail lines shown for a lootable while shift is held."""
    if isinstance(lootable, ConsumableOnGround):
        description = CONSUMABLES[lootable.consumable_type].description
        return [description]
    if isinstance(lootable, ItemOnGround):
        # TODO Render suffix lines differently?
        description_lines = create_item_description(lootable.item_id)
        return [line.text for line in description_lines]
| [
"jonte.murray@gmail.com"
] | jonte.murray@gmail.com |
4913c2722dadc4eab70e690b9fb6b88e0097a781 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2022_06_15/aio/_application_insights_management_client.py | 8b411d38bef302c4b99c3ad09e97a57e5267f2e1 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 4,161 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import ApplicationInsightsManagementClientConfiguration
from .operations import WebTestsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
# NOTE: AutoRest-generated client -- regenerating from the swagger will
# overwrite manual edits to this class.
class ApplicationInsightsManagementClient:  # pylint: disable=client-accepts-api-version-keyword
    """Composite Swagger for Application Insights Management Client.

    :ivar web_tests: WebTestsOperations operations
    :vartype web_tests:
     azure.mgmt.applicationinsights.v2022_06_15.aio.operations.WebTestsOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The ID of the target subscription. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2022-06-15". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = ApplicationInsightsManagementClientConfiguration(
            credential=credential, subscription_id=subscription_id, **kwargs
        )
        self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Build the (de)serializers over every model class in the package.
        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        # Validation is delegated to the service rather than done client-side.
        self._serialize.client_side_validation = False
        self.web_tests = WebTestsOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        # Copy before mutating so the caller's request object stays untouched.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> "ApplicationInsightsManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details: Any) -> None:
        await self._client.__aexit__(*exc_details)
| [
"noreply@github.com"
] | noreply@github.com |
e03f8fc11f1346b5e6a15da61705ad3130f29de5 | 51ddb0e7ae60b50498842ddcfdeb73a82176e7e0 | /users/views.py | 7c11d8216c041018b572bb19fb33c52f5b12a0e7 | [] | no_license | Snehal-Borkar/Tradexa_Task | 2262ee79a6b76886393c13103f6ffd570a38602e | b20930feffaf8fa04512e0057bfdb74ff10f763c | refs/heads/main | 2023-09-03T18:29:13.005260 | 2021-10-28T10:50:12 | 2021-10-28T10:50:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,151 | py | from django.http import request
from django.http.response import HttpResponse,HttpResponseRedirect
from django.shortcuts import render
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from .models import Post
import datetime
# Create your views here.
def log(request):
    """Display the login form."""
    login_template = "users/login.html"
    return render(request, login_template)
def handlelogin(request):
    """Authenticate the posted credentials and show the user's posts.

    Fixes over the previous version: removed debug print() calls, removed an
    unreachable duplicate `request.user.is_active` branch, and ensured every
    code path returns a response (an inactive user previously fell through
    and returned None, causing a 500).
    """
    if request.method != "POST":
        return render(request, "users/login.html")

    username = request.POST['username']
    password = request.POST['password']
    user = authenticate(request, username=username, password=password)
    if user is None:
        # Note: Django's default backend also returns None for inactive
        # accounts, so this covers both wrong credentials and inactive users.
        return HttpResponse("<h4> invalid credentials !</h4>")

    login(request, user)
    post = Post.objects.filter(user=user)
    return render(request, "users/posts.html", {"user": user, "post": post})
@login_required
def post(request):
    """On POST, create a new post from the submitted text; then list the
    logged-in user's posts either way."""
    user = request.user
    if request.method == "POST":
        body_text = request.POST['post_text']
        now = datetime.datetime.now()
        Post.objects.create(user=user, text=body_text, created_at=now, updated_at=now)
    user_posts = Post.objects.filter(user=user)
    return render(request, "users/posts.html", {"user": user, "post": user_posts})
@login_required
def handleupdatepost(request, id):
    """Edit an existing post; only its owner may update it.

    Security fix: the lookup now filters by the logged-in user as well as the
    post id. Previously any authenticated user could update any post just by
    guessing its id (an insecure-direct-object-reference hole). A lookup for
    someone else's post now raises Post.DoesNotExist instead of succeeding.
    """
    user = request.user
    upt = Post.objects.get(id=id, user=user)
    if request.method == "POST":
        upt.text = request.POST['post_text']
        upt.updated_at = datetime.datetime.now()
        upt.save()
        post = Post.objects.filter(user=user)
        return render(request, "users/posts.html", {"post": post, "user": user})
    return render(request, "users/updatepost.html", {"text": upt, "user": user})
| [
"borkarsnehal60@gmail.com"
] | borkarsnehal60@gmail.com |
c006ca77594cce7b285e3cb5b9081c678b8e1f01 | 668dad44beb30cadb170e32a8a7f0a57c42e653c | /denormalize_to_csv.py | d2910eec545e9f6d62c1a4e254eb4424ae66ed54 | [] | no_license | SEL-Columbia/ss_data_analysis | 22b72540732b03836423e18462495b2252a2cca8 | dfb8c2670cddbddbb693e5a3243bc829bccf5ae0 | refs/heads/master | 2016-09-05T12:31:00.546458 | 2013-10-21T23:01:34 | 2013-10-21T23:01:34 | 13,162,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,354 | py | import os
import datetime
import csv
import sys
"""
denormalize_to_csv.py
usage: python denormalize_to_csv.py logs_dir
description: Script to take a directory of sharedsolar log files
in csv format and denormalizes them such that they
can be concatenated together into "one big table"
of the same structure without losing any information
(while duplicating some...hence "denormalize")
"""
# Maps the raw sharedsolar log header names to snake_case CSV column names.
FIELD_MAP = {
    'Time Stamp': 'time_stamp',
    'Watts': 'watts',
    'Volts': 'volts',
    'Amps': 'amps',
    'Watt Hours SC20': 'watt_hours_sc20',
    'Watt Hours Today': 'watt_hours_today',
    'Max Watts': 'max_watts',
    'Max Volts': 'max_volts',
    'Max Amps': 'max_amps',
    'Min Watts': 'min_watts',
    'Min Volts': 'min_volts',
    'Min Amps': 'min_amps',
    'Power Factor': 'power_factor',
    'Power Cycle': 'power_cycle',
    'Frequency': 'frequency',
    'Volt Amps': 'volt_amps',
    'Relay Not Closed': 'relay_not_closed',
    'Send Rate': 'send_rate',
    'Machine ID': 'machine_id',
    'Type': 'circuit_type',
    'Credit': 'credit'
}


def write_denormalized_csv(logfile, site, ip):
    """Denormalize one sharedsolar .log file into a .csv next to it.

    Prepends line_num/site_id/ip_addr columns to every row, renames header
    fields via FIELD_MAP, and guarantees a trailing credit column (MAINS
    logs have no Credit field; their rows get credit "0").

    Fixes over the previous version: the output file is only created for
    valid input (previously an empty .csv was left behind for corrupted
    logs), the builtin-shadowing `all` variable was renamed, and a dead
    trailing `line_num = 0` statement was removed.
    """
    outfile = logfile.replace(".log", ".csv")
    with open(logfile, 'r') as csvinput:
        first_line = csvinput.readline()
        # Simple check for a properly formatted file (NOTE: MAINS files will
        # not have a Credit field at the end, so startswith is used).
        if not first_line.startswith(
                "Time Stamp,Watts,Volts,Amps,Watt Hours SC20,Watt Hours Today,"
                "Max Watts,Max Volts,Max Amps,Min Watts,Min Volts,Min Amps,"
                "Power Factor,Power Cycle,Frequency,Volt Amps,Relay Not Closed,"
                "Send Rate,Machine ID,Type"):
            sys.stderr.write("Empty or corrupted file: %s\n" % logfile)
            return

        csvinput.seek(0)  # re-read the header through the csv parser
        reader = csv.reader(csvinput)
        existing_header_row = next(reader)
        has_credit = existing_header_row[-1] == 'Credit'
        if not has_credit:
            existing_header_row.append('Credit')

        # New denormalized columns come first, then the mapped originals.
        new_header_row = ['line_num', 'site_id', 'ip_addr']
        for field in existing_header_row:
            if field not in FIELD_MAP:
                sys.stderr.write(
                    "Erroneous field: %s in file: %s skipping..." % (field, logfile))
            else:
                new_header_row.append(FIELD_MAP[field])

        output_rows = [new_header_row]
        for line_num, row in enumerate(reader):
            row.insert(0, line_num)
            row.insert(1, site)
            row.insert(2, ip)
            if not has_credit:
                row.append("0")  # rows without a Credit column get credit=0
            output_rows.append(row)

    with open(outfile, 'w') as csvoutput:
        writer = csv.writer(csvoutput, lineterminator='\n')
        writer.writerows(output_rows)
def denormalize_to_csv(logs_dir):
    """Walk logs_dir and denormalize every .log file found into a .csv."""
    for dirpath, dirnames, filenames in os.walk(logs_dir):
        for filename in filenames:
            if not filename.endswith(".log"):
                continue
            path_parts = dirpath.split("/")
            # Directory layout is .../Site/YYYY/MM/DD/HH, so the site name is
            # always five components up from the leaf directory.
            site = path_parts[-5]
            # NOTE(review): find(".") stops at the first dot, so a dotted IP
            # filename like "10.0.0.1.log" yields ip="10" -- confirm the
            # expected log filename format.
            ip = filename[0:filename.find(".")]
            write_denormalized_csv(os.path.join(dirpath, filename), site, ip)
if __name__=="__main__":
    # CLI entry point: single argument is the root of the log tree.
    import sys
    assert len(sys.argv) == 2, \
        "Usage: python denormalize_to_csv.py logs_dir"
    logs_dir = sys.argv[1]
    denormalize_to_csv(logs_dir)
| [
"chris.natali@gmail.com"
] | chris.natali@gmail.com |
58917f325490ea31f0266642594353b9a3a355ea | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/2992.py | b11dac90d43eb4458792e75c04fcfe2c80575061 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py |
def read_case(file):
    """Read one magician answer: a 1-based row number, then four rows of
    card numbers; return the chosen row as a set of card strings.

    All four rows are always consumed so the file pointer ends up at the
    start of the next case.
    """
    answer = int(file.readline()) - 1  # convert to a 0-based row index
    result = None
    for row_index in range(4):
        line = file.readline()
        # Fix: the original compared `skip is answer` -- identity comparison
        # on ints is an implementation detail of CPython's small-int cache
        # (and a SyntaxWarning on Python >= 3.8); use equality instead.
        if row_index == answer:
            result = set(line.split())
    return result
def read_input(filename):
    """Yield (case_number, first_answer_set, second_answer_set) for every
    test case in the input file (case numbers are 1-based)."""
    with open(filename, "r") as in_file:
        n_cases = int(in_file.readline().split()[0])
        for case_index in range(n_cases):
            yield case_index + 1, read_case(in_file), read_case(in_file)
def solve_case(first, second):
    """Resolve one magician trick: the card must be the unique element common
    to both chosen rows; several candidates mean the magician guessed badly,
    none means the volunteer lied."""
    candidates = first & second
    if len(candidates) == 1:
        return candidates.pop()
    return "Bad magician!" if candidates else "Volunteer cheated!"
# Driver: solve every case from the contest input and write the answers in
# Code Jam's "Case #N: result" format.
cases = read_input("A-small-attempt0.in")
# NOTE(review): outfile is never explicitly closed/flushed; output relies on
# interpreter shutdown. Fine for a one-shot contest script.
outfile = open("output.txt", "w+")
for case, first, second in cases:
    result = solve_case(first, second)
    outfile.write("Case #{}: {}\n".format(case, result))
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
6146c5b1434b4cb8fedce9eaaf77a5bc2d36c4a0 | 26e20b10ca589ce3c5d4275ddb41d8ec0cb07c85 | /BOJ/이진 탐색/BaekJoon1920_수 찾기.py | d31eed6343de575158874e99602cdbca1e15fbbd | [] | no_license | Younggil-kim/CodingTestStudy | 4f53b2489e580567424e1d72f60af33291a51b91 | 83f24e7f4978f23b606a1f5efd38bba0e1408cbe | refs/heads/master | 2023-08-11T02:27:29.391700 | 2021-09-30T13:29:57 | 2021-09-30T13:29:57 | 291,027,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | #문제 해석
# Given two arrays, report for each element of the second array whether it
# occurs in the first (translated from the original Korean comment).
N = int(input())
lstN = list(map(int, input().split()))
M = int(input())
lstM = list(map(int, input().split()))
lstN.sort()  # the binary search below requires sorted input
def binary_search(lst, target, start, end):
    """Print 1 if *target* occurs in the sorted slice lst[start:end+1], else 0.

    Classic iterative binary search; the result is reported on stdout rather
    than returned (the function always returns None).
    """
    lo, hi = start, end
    while lo <= hi:
        middle = (lo + hi) // 2
        value = lst[middle]
        if value == target:
            print(1)
            return
        if value > target:
            hi = middle - 1  # target can only be in the left half
        else:
            lo = middle + 1  # target can only be in the right half
    print(0)
    return
# For each query value, print 1/0 depending on membership in lstN.
for num in lstM:
    binary_search(lstN, num, 0,N-1)
| [
"gom991@ajou.ac.kr"
] | gom991@ajou.ac.kr |
1c1f8b758977f0dd9d74a0b69d24b6199005bb45 | 08e3fce7e9b9b61ba82fbba108c94ddd9188c57d | /anagram.py | 69246bc094aef719ac6338a36e6fe7be58e812ff | [] | no_license | mistry-siddharth/Python_Practice_Problems | 57d558f6e11f4b79f7baeeaac042df7e1f7a5427 | 661637858699fa7eee0738a5be0d3c8cb404971f | refs/heads/master | 2021-01-13T13:47:58.016334 | 2017-01-13T02:04:08 | 2017-01-13T02:04:08 | 76,326,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | def isAnagram(s, t):
'''
if s == '' and t == '':
return True
if len(s) != len(t):
return False
s_dict = dict()
t_dict = dict()
for x in s:
s_dict[x] = s_dict.get(x, 0) + 1
for x in t:
t_dict[x] = t_dict.get(x, 0) + 1
for (ks, vs), (kt, vt) in zip(s_dict.items(), t_dict.items()):
if ks == kt and vs == vt:
return True
else:
return False
'''
s = sorted(s)
print(s)
t = sorted(t)
print(t)
if s == t:
return True
else:
return False
print(isAnagram("anagram", "nagaram"))
| [
"mistry.siddharth@gmail.com"
] | mistry.siddharth@gmail.com |
47ef83fb0fed19641baae48832b281e6d005b517 | cfbcad5f755e49144c02482f703e3f2529509594 | /py37/bin/python-config | a803be5d27e8b9f48f4c38a2216ca5f3ae2a4c9d | [] | no_license | k-petro/imbalanced | 42cdbceb817abfe58f767afafe655eb0e5579467 | 5119be74f0fed974f40dd7f9d2651ca9c6559f5e | refs/heads/master | 2023-07-20T02:26:27.524403 | 2019-10-09T12:08:58 | 2019-10-09T12:08:58 | 213,340,926 | 0 | 0 | null | 2023-07-06T21:47:26 | 2019-10-07T09:16:12 | Python | UTF-8 | Python | false | false | 2,386 | #!/Users/owenxoual/Desktop/DataHEC/quintenProject/unbalanced_data/py37/bin/python
import sys
import getopt
import sysconfig
# Canonical list of command-line options; '--help' is kept last, and flags
# that only exist on newer Pythons are appended below.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
if sys.version_info >= (3, 2):
    valid_opts.insert(-1, 'extension-suffix')  # insert before 'help'
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Write a one-line usage summary of all valid options to stderr and exit
    with *code* (non-zero by default, 0 when invoked for --help)."""
    options = '|'.join('--' + opt for opt in valid_opts)
    sys.stderr.write("Usage: {0} [{1}]\n".format(sys.argv[0], options))
    sys.exit(code)
# Parse argv; an unknown option is a usage error.
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
# Being called with no options at all is also a usage error.
if not opts:
    exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var  # shorthand used throughout the option loop
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)  # --help exits successfully
# Options are processed in the order given; each prints its answer on stdout.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        # Both the generic and the platform-specific include dirs are needed.
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')  # attribute absent before Python 3.2
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')  # legacy config name
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"owen@ltutech.com"
] | owen@ltutech.com | |
3e7bd0e43f9e95ffadeefd0da7f5652ca4909e70 | c96f731c34725392cca689847d8a522f6e1784b4 | /Tasks 01/task_01.py | 7e54c06bdf656c31b34bce53e2647341412acdd4 | [] | no_license | maxn-csi/python-training | f10c0acbcc7ee7d7cd1443ec2558e4164e39d6d7 | deff7b42b1fc57520be05309a410049320eb70f9 | refs/heads/master | 2021-01-05T20:36:30.280774 | 2020-02-19T13:11:28 | 2020-02-19T13:11:28 | 241,130,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | #!/usr/bin/python3.8
#
# task_01
#
# Пользователь вводит строку из букв в нижнем регистре.
# Нужно посчитать, сколько в этой строке английских гласных букв.
# Корректность ввода не проверять
string = input("Please enter a string: ")
print()
count = 0
for char in string:
if char in "aeiouy":
count += 1
print (count, "Vowels are in there") | [
"maksimnaydin@coherentsolutions.com"
] | maksimnaydin@coherentsolutions.com |
cc2ad3116de0306db35049502adbc9d2156c7f84 | a71a463c943da2a072212afa173d320fb4f5b849 | /final/settings.py | 5881f283409cc88d4f2df844efa71cb9c8e7d607 | [] | no_license | StevenDewey/ColorShades | a716760d36bf981f77a9db127e5a8e4afdc9c265 | 06ac1a9f4a3241ea03441ae72db8baf682087e91 | refs/heads/master | 2021-01-01T19:34:39.458805 | 2015-04-30T05:14:41 | 2015-04-30T05:14:41 | 34,835,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,065 | py | """
Django settings for final project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control — rotate it and load
# it from an environment variable before deploying.
SECRET_KEY = '(csx4vsi9g+@h17_@ia5zpnp!mfe2dhm*+$li(iu!0a+-t7dr2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Empty is acceptable while DEBUG is True; production must list served hostnames.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_mako_plus.controller',
'homepage',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django_mako_plus.controller.router.RequestInitMiddleware',
)
ROOT_URLCONF = 'final.urls'
WSGI_APPLICATION = 'final.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
# SECURITY WARNING: this next line must be commented out at deployment
BASE_DIR,
)
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
DEBUG_PROPAGATE_EXCEPTIONS = DEBUG # never set this True on a live site
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django_mako_plus': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
},
}
###############################################################
### Specific settings for the Django-Mako-Plus app
# identifies where the Mako template cache will be stored, relative to each app
DMP_TEMPLATES_CACHE_DIR = 'cached_templates'
# the default app and page to render in Mako when the url is too short
DMP_DEFAULT_PAGE = 'index'
DMP_DEFAULT_APP = 'homepage'
# these are included in every template by default - if you put your most-used libraries here, you won't have to import them exlicitly in templates
DMP_DEFAULT_TEMPLATE_IMPORTS = [
'import os, os.path, re',
]
# whether to send the custom DMP signals -- set to False for a slight speed-up in router processing
# determines whether DMP will send its custom signals during the process
DMP_SIGNALS = True
# whether to minify using rjsmin, rcssmin during 1) collection of static files, and 2) on the fly as .jsm and .cssm files are rendered
# rjsmin and rcssmin are fast enough that doing it on the fly can be done without slowing requests down
DMP_MINIFY_JS_CSS = True
# see the DMP online tutorial for information about this setting
DMP_TEMPLATES_DIRS = [
# os.path.join(BASE_DIR, 'base_app', 'templates'),
]
### End of settings for the base_app Controller
################################################################
| [
"s.dewey1@gmail.com"
] | s.dewey1@gmail.com |
c37513010dfe99c88f543eeb676cd5e01e77b7dc | 0f110f7055bff1d59cdad676d66f7b780eb3339f | /RidgeLassoEx/RidgeLassoEx.py | 96ad37276341e8aa2aa27ca41901e42643322997 | [] | no_license | jayantamajumdar/PredictiveModelingCode | 3ecc9a5fcfc6f6c4c1cc62a4f6edb1ccb47d3aa2 | 34a6c6ceea8b46ed1f3613d25beb7b52c25624e6 | refs/heads/master | 2021-01-10T10:37:19.343446 | 2015-06-04T18:36:34 | 2015-06-04T18:36:34 | 36,837,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,479 | py | # Autos.csv includes variables: MPG, cylinders, displacement, horsepower, weight, acceleration, origin, train
# 'train' variable indicates membership of observation in training set
# Determine best parameter for lasso and ridge models using 5 fold cross-validation
# Dependent Variable is MPG, for which the rest of the variables will be predictors
# Finally compare Least-Squares Regression, Lasso Regression and Ridge Regression on full training data
# calculate prediction error on test data for each model
import pandas as pd
import numpy as np
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
# Load data directly into dataframe df, split into train and test set
df = pd.read_csv("autos.csv")
train = df[(df["train"] == True)]
train = train.reset_index()
del train['index']
# map in folds data to perform cross validation on training set to determine best regularization parameter
train['fold'] = pd.read_csv("auto.folds",header=None)
del train["train"]
test = df[(df["train"] == False)]
del test["train"]
names = train.columns.tolist()
# X is dataframe of all independent variables
# y is single column holding dependent variable
X = train[names[1:len(names)-1]]
y = pd.DataFrame(train['mpg'])
y['fold'] = train['fold']
folds = range(5)
# We will start by finding our best Lasso Model
# Regularization parameters to choose from for our Lasso regression
lambda_ridge = np.array([0.001,0.005,0.01,0.05,0.1,0.5,1,5,10,50,100])
lambda_lasso = np.array([0.0001,0.0005,0.001,0.005,0.01,0.05,0.1])
plt.figure(1)
axis = plt.gca()
axis.set_color_cycle(2* ['b','r','g','c','k','y'])
# Initialize lists to store mean squared errors from lasso models fitted with different regularization
# parameters and each of the mse values from the different folds in our 5 fold cross validation.
# These values will determine the optimally fit lambda value
k = X.shape[1]
lasso_coef = np.zeros((len(lambda_lasso), k))
lasso_mse_list = []
lasso_mse_mean = []
# 5-fold cross-validation of the lasso over each candidate lambda.
for i,a in enumerate(lambda_lasso):
    lasso_model = Lasso(alpha=a, normalize=True)
    lasso_mse_fold = []  # MSE of each validation fold for THIS lambda only
    for j in folds:
        # Hold out fold j for validation; train on the remaining folds.
        X_fold_test = train[(train['fold'] == j)]
        X_fold_train = train[(train['fold'] != j)]
        y_fold_test = y[(y['fold'] == j)]
        y_fold_train= y[(y['fold'] != j)]
        # Drop the target and fold-id columns from the feature frames.
        del X_fold_train['mpg'],X_fold_test['mpg'], X_fold_test['fold'], X_fold_train['fold'], y_fold_train['fold'], y_fold_test['fold']
        lasso_model.fit(X_fold_train.as_matrix(), y_fold_train.as_matrix())
        mse = mean_squared_error(y_fold_test.as_matrix(), lasso_model.predict(X_fold_test.as_matrix()))
        lasso_mse_fold.append(mse)
    lasso_coef[i] = lasso_model.coef_  # coefficients come from the last fold's fit
    lasso_mse_list.append(lasso_mse_fold)
    # BUG FIX: average only this lambda's fold errors.  The original averaged
    # np.array(lasso_mse_list) — every fold of every lambda seen so far — a
    # cumulative mean that biases the arg-min used to pick the best lambda.
    lasso_mse_mean.append(np.array(lasso_mse_fold).mean())
# best lasso parameter
min_index_lasso = lasso_mse_mean.index(min(lasso_mse_mean))
print "Best Lasso: ", lambda_lasso[min_index_lasso]
# plot lasso coefficients
for coef in lasso_coef.T:
plt.plot(lambda_lasso, coef)
plt.xlabel('Lambda')
plt.ylabel('Coefficients')
plt.xlim(min(lambda_lasso),max(lambda_lasso))
plt.title('Lasso')
# plots of average lasso error across folds. Includes red line for best lambda
plt.figure(2)
plt.plot(-np.log(lambda_lasso),
np.sqrt(np.array(lasso_mse_list)).mean(axis=1))
plt.axvline(-np.log(lambda_lasso[min_index_lasso]), color = 'r')
plt.xlabel(r'-log(lambda)')
plt.ylabel('RMSE')
plt.title('Lasso')
# Now we fit our best Ridge model, similar to above steps.
plt.figure(3)
axis = plt.gca()
axis.set_color_cycle(2* ['y','r','g','c','k','b'])
ridge_coef = np.zeros((len(lambda_ridge), k))
ridge_mse_list = []
ridge_mse_mean = []
# 5-fold cross-validation of the ridge model over each candidate lambda.
for i,a in enumerate(lambda_ridge):
    ridge_model = Ridge(alpha=a, normalize=True)
    ridge_mse_fold = []  # MSE of each validation fold for THIS lambda only
    for j in folds:
        # Hold out fold j for validation; train on the remaining folds.
        X_fold_test = train[(train['fold'] == j)]
        X_fold_train = train[(train['fold'] != j)]
        y_fold_test = y[(y['fold'] == j)]
        y_fold_train= y[(y['fold'] != j)]
        # Drop the target and fold-id columns from the feature frames.
        del X_fold_train['mpg'],X_fold_test['mpg'], X_fold_test['fold'], X_fold_train['fold'], y_fold_train['fold'], y_fold_test['fold']
        ridge_model.fit(X_fold_train.as_matrix(), y_fold_train.as_matrix())
        mse = mean_squared_error(y_fold_test.as_matrix(), ridge_model.predict(X_fold_test.as_matrix()))
        ridge_mse_fold.append(mse)
    ridge_coef[i] = ridge_model.coef_[0]  # coefficients come from the last fold's fit
    ridge_mse_list.append(ridge_mse_fold)
    # BUG FIX: average only this lambda's fold errors (the original took a
    # cumulative mean over ridge_mse_list, biasing best-lambda selection).
    ridge_mse_mean.append(np.array(ridge_mse_fold).mean())
# Best Ridge Regularization parameter
min_index_ridge = ridge_mse_mean.index(min(ridge_mse_mean))
print "Best Ridge: ", lambda_ridge[min_index_ridge]
for coef in ridge_coef.T:
plt.plot(lambda_ridge, coef)
plt.xlabel('Regularization')
plt.ylabel('Coefficients')
plt.xlim(min(lambda_ridge),max(lambda_ridge))
plt.title('Ridge')
plt.figure(4)
plt.plot(-np.log(lambda_ridge),
np.sqrt(np.array(ridge_mse_list)).mean(axis=1))
plt.axvline(-np.log(lambda_ridge[min_index_ridge]), color = 'red')
plt.xlabel(r'-log(lambda)')
plt.ylabel('RMSE')
plt.title('CV Error')
plt.show()
# Using best reg. parameters from above to find prediction error on our test set
df2 = pd.read_csv("autos.csv")
train2 = df2[(df2["train"] == True)]
del train2["train"]
test2 = df2[(df2["train"] == False)]
del test2["train"]
names = train2.columns.tolist()
X_train = train2[names[1:len(names)]]
X_test = test2[names[1:len(names)]]
y_train = train2[names[0]]
y_test = test2[names[0]]
train = np.matrix(X_train)
test = np.matrix(X_test)
# Building lasso, ridge and basic linear model to compare results
lasso_model2 = Lasso(alpha=0.01, normalize=True)
ridge_model2= Ridge(alpha=0.01, normalize=True)
lm = LinearRegression()
lasso_model2.fit(X_train.as_matrix(), y_train.as_matrix())
ridge_model2.fit(X_train.as_matrix(), y_train.as_matrix())
lm.fit(train, y_train.as_matrix())
lasso_model2.predict(X_test.as_matrix())
ridge_model2.predict(X_test.as_matrix())
lm.predict(test)
mse_lasso = mean_squared_error(y_test.as_matrix(), lasso_model2.predict(X_test.as_matrix()))
mse_ridge = mean_squared_error(y_test.as_matrix(), ridge_model2.predict(X_test.as_matrix()))
mse_lm= mean_squared_error(y_test.as_matrix(), lm.predict(test))
# MSE for each model
print "Lasso MSE: ", mse_lasso
print "Ridge MSE: ", mse_ridge
print "Least Squares MSE: ", mse_lm
print 'Coefficients:'
| [
"jayantamajumdar@utexas.edu"
] | jayantamajumdar@utexas.edu |
ccb2a7c0e19b3cee1a6508c55da7aa14629e579e | 64f87ff419844b032e4fa1c0379e898a0df16ad4 | /MinutesToYearsConversion.py | 95efc83bf75d975c8ebd687458f3cf22d6345e49 | [] | no_license | SushmaBR/FirstPython | cff075954b97379fe75ef3d46e58a785d64b8e32 | e836d86f3477f356503f140e73a0b523d059a5cc | refs/heads/master | 2021-05-14T21:46:49.355566 | 2017-11-01T06:03:55 | 2017-11-01T06:03:55 | 109,092,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | min=float(input("Enter the minutes"))
hrs=min/60
days=hrs/24
years=days/365.25
yrs=min/60/24/365.25
print("hrs:",hrs)
print("days:",days)
print("years:",years)
print("yrs:",yrs) | [
"noreply@github.com"
] | noreply@github.com |
dd5b6cd1dcfa4e17a583abb6f173703c237f4563 | 0982fd61707a8f6242c9fe87bf70be13e443ba95 | /tools/dcgan.py | 532dad781e39116d6c144dd1ba0d595ad4a83a07 | [] | no_license | shinoairisu/MyM1-M2 | 5b0b58743a84e0828c77c6cc17672a211128160d | 5e97bf6b4e82f58b7a24ef275f6e468fbee91357 | refs/heads/main | 2023-06-24T02:27:45.663583 | 2021-07-29T04:09:05 | 2021-07-29T04:09:05 | 361,403,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,098 | py | # -*- coding: utf-8 -*-
from collections import OrderedDict
import torch
import torch.nn as nn
class Generator(nn.Module):
    """DCGAN generator: maps a (N, nz, 1, 1) latent tensor to a (N, nc, 64, 64)
    image whose values lie in [-1, 1] (final Tanh)."""

    def __init__(self, nz=100, ngf=128, nc=3):
        super(Generator, self).__init__()
        # (in_channels, out_channels) for the four ConvTranspose+BN+ReLU
        # stages: 1x1 -> 4x4 -> 8x8 -> 16x16 -> 32x32.
        widths = [(nz, ngf * 8), (ngf * 8, ngf * 4),
                  (ngf * 4, ngf * 2), (ngf * 2, ngf)]
        layers = []
        for stage, (cin, cout) in enumerate(widths):
            # The first stage projects the 1x1 latent to 4x4 (stride 1, no
            # padding); subsequent stages double the spatial size.
            stride, pad = (1, 0) if stage == 0 else (2, 1)
            layers.append(nn.ConvTranspose2d(cin, cout, 4, stride, pad, bias=False))
            layers.append(nn.BatchNorm2d(cout))
            layers.append(nn.ReLU(True))
        # Final stage: (ngf) x 32 x 32 -> (nc) x 64 x 64, squashed to [-1, 1].
        layers.append(nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False))
        layers.append(nn.Tanh())
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        """Run the latent batch through the transposed-conv stack."""
        return self.main(input)

    def initialize_weights(self, w_mean=0., w_std=0.02, b_mean=1, b_std=0.02):
        """DCGAN weight init: conv weights ~ N(w_mean, w_std); batch-norm
        weights ~ N(b_mean, b_std) with zero bias."""
        for module in self.modules():
            kind = type(module).__name__
            if 'Conv' in kind:
                nn.init.normal_(module.weight.data, w_mean, w_std)
            elif 'BatchNorm' in kind:
                nn.init.normal_(module.weight.data, b_mean, b_std)
                nn.init.constant_(module.bias.data, 0)
class Discriminator(nn.Module):
    """DCGAN discriminator: maps a (N, nc, 64, 64) image to a (N, 1, 1, 1)
    probability of being real (final Sigmoid)."""

    def __init__(self, nc=3, ndf=128):
        super(Discriminator, self).__init__()
        # (in_channels, out_channels) for the four downsampling conv stages:
        # 64x64 -> 32x32 -> 16x16 -> 8x8 -> 4x4.
        widths = [(nc, ndf), (ndf, ndf * 2),
                  (ndf * 2, ndf * 4), (ndf * 4, ndf * 8)]
        layers = []
        for stage, (cin, cout) in enumerate(widths):
            layers.append(nn.Conv2d(cin, cout, 4, 2, 1, bias=False))
            # Per the DCGAN recipe the very first stage has no batch norm.
            if stage > 0:
                layers.append(nn.BatchNorm2d(cout))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
        # Collapse the 4x4 map to a single probability per image.
        layers.append(nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False))
        layers.append(nn.Sigmoid())
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        """Run the image batch through the conv stack."""
        return self.main(input)

    def initialize_weights(self, w_mean=0., w_std=0.02, b_mean=1, b_std=0.02):
        """DCGAN weight init: conv weights ~ N(w_mean, w_std); batch-norm
        weights ~ N(b_mean, b_std) with zero bias."""
        for module in self.modules():
            kind = type(module).__name__
            if 'Conv' in kind:
                nn.init.normal_(module.weight.data, w_mean, w_std)
            elif 'BatchNorm' in kind:
                nn.init.normal_(module.weight.data, b_mean, b_std)
                nn.init.constant_(module.bias.data, 0)
| [
"noreply@github.com"
] | noreply@github.com |
0a202c67557ee336c4c55dc9270adb7cd16ef075 | d02eef71621fe1b08bc091de1892cd68c340373f | /python-syntax/any7.py | e4e0c420f0d520f0959051de6ce31b28c041c47b | [] | no_license | lexsac/python-syntax | c50578414a7fc3bd51737654a0bbb8758f6b65ac | f8e1ee325dff94092f82fba6b4cb94e07a2535d2 | refs/heads/main | 2023-07-27T22:22:28.603918 | 2021-09-12T00:14:58 | 2021-09-12T00:14:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | def any7(nums):
"""Are any of these numbers a 7? (True/False)"""
# YOUR CODE HERE
for num in nums:
if num == 7:
return True
return False
print("should be true", any7([1, 2, 7, 4, 5]))
print("should be false", any7([1, 2, 4, 5]))
| [
"noreply@github.com"
] | noreply@github.com |
d72a2b7583ce54f89d3993c352ac10befa76bbb8 | 880847e3820aa18b9f255932634bc13e5aca63e8 | /python/SearchImages.py | f69794b791f7fd1a77ca23a7f57a57fc843b73fd | [
"MIT"
] | permissive | indeshan/Curiosity | bda26210027fb2cce7b5f8d78a6fcb23c46a1f83 | eb76edf54982d68ecec96664291ce3ce038e30c7 | refs/heads/master | 2023-06-23T12:27:35.825041 | 2021-07-28T04:05:35 | 2021-07-28T04:05:35 | 206,609,352 | 0 | 0 | MIT | 2020-10-07T11:19:43 | 2019-09-05T16:27:55 | Jupyter Notebook | UTF-8 | Python | false | false | 296 | py | import fnmatch
import os
images = ['*.jpg', '*.jpeg', '*.png', '*.tif', '*.tiff']
matches = []
for root, dirnames, filenames in os.walk("C:\"):
for extensions in images:
for filename in fnmatch.filter(filenames, extensions):
matches.append(os.path.join(root, filename))
| [
"noreply@github.com"
] | noreply@github.com |
b83c96cbe21aa2990f4a43514a5718fc6fc0a492 | 494893aa6efb8e81ba31e7241277632bb25e6d56 | /eig1.py | 710233c0ccf44176147b55901d5f36180379ed9d | [] | no_license | ZEYINWU/Singel-Cell | c7663c76302860314d6ad30e8658e4d8a4ba155c | 4d0e2ab54237b03443aa35cc3e40ff3cc1872c09 | refs/heads/master | 2020-06-10T12:47:08.043801 | 2016-12-08T17:38:22 | 2016-12-08T17:38:22 | 75,960,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | from numpy import linalg as LA
def eig1(A):
    """Eigendecompose A and sort the eigenpairs by descending eigenvalue.

    Returns a list [eigval, eigvec, eigval_full]:
      eigval      -- eigenvalues, largest first
      eigvec      -- real parts of the eigenvectors, columns reordered to match
      eigval_full -- full eigenvalue array in the same descending order

    Fixes in this revision: `v[,idx1]` and a stray `sa` token were syntax
    errors in the original, `np` was used without being imported, and the
    unused `d1` intermediate has been dropped.
    """
    d, v = LA.eig(A)
    idx = np.argsort(-d)          # indices of eigenvalues, largest first
    eigval = d[idx]
    eigvec = v[:, idx].real       # reorder columns to match sorted eigenvalues
    eigval_full = d[idx]
    res = [eigval, eigvec, eigval_full]
    return res
| [
"zeyinwu1992@gmail.com"
] | zeyinwu1992@gmail.com |
26ce3d24da31b82492985aea44d7b6fa84c31074 | 23d01d942c97a31e46529c4371e98aa0c757ecd1 | /hll/cnc/tools/cncframework/inverse.py | ae6f6df42ae6edf7f7466ed31751310500ed9ae1 | [
"BSD-2-Clause"
] | permissive | ModeladoFoundation/ocr-apps | f538bc31282f56d43a952610a8f4ec6bacd88e67 | c0179d63574e7bb01f940aceaa7fe1c85fea5902 | refs/heads/master | 2021-09-02T23:41:54.190248 | 2017-08-30T01:48:39 | 2017-08-30T01:49:30 | 116,198,341 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,183 | py | from itertools import chain
from sympy import Symbol, solve, Piecewise
from sympy.core import sympify
import cncframework.events.actions as actions
def tag_expr(tag, out_var):
"""Return out_var = tag as a SymPy expression."""
# since sympify will automatically equate to zero, we convert it to:
# tag_expr - o_n, and solve for s, some variable in the tagspace
return sympify(str.format("{} - {}", tag.expr, out_var))
def piecewise_tag_expr(tag, out_var, condition):
"""Return out_var = tag as a piecewise function defined where condition
evaluates to True."""
expr = tag_expr(tag, out_var)
# replace '#' and '@' from condition
condition = condition.replace('@', 'arg').replace('#', 'ctx')
cond = sympify(condition)
return Piecewise((expr, cond))
def find_collNames(output_list):
"""
Return list of collection names collected from refs in output_list.
"""
colls = []
for out in output_list:
if out.kind in {"STEP", "ITEM"}:
colls.append(out.collName)
elif out.kind == "IF":
colls.append(out.refs[0].collName)
return colls
def find_step_inverses(stepFunction):
"""
Given a StepFunction, read the expressions for each output and return a map
{c: [f: tagspace -> t for each output tag t] for each output collection or
step c} where the tagspace is enumerated (t1,t2,...,tn).
"""
tag_space = [Symbol(t) for t in stepFunction.tag]
outputs = {coll: [] for coll in find_collNames(stepFunction.outputs)}
def solve_for(tag, tag_space, out_var, cond=None):
expr = (piecewise_tag_expr(tag, out_var, cond) if cond else
tag_expr(tag, out_var))
solution = solve(expr, tag_space, dict=True)
return solution[0] if solution else {}
for output in stepFunction.outputs:
if output.kind in {"STEP", "ITEM"}:
tag_list = output.key if output.kind == "ITEM" else output.tag
outputs[output.collName].extend(
solve_for(t, tag_space, "t{}".format(i + 1))
for (i, t) in enumerate(t for t in tag_list if not t.isRanged))
elif output.kind == "IF":
out_ref = output.refs[0]
tag_list = out_ref.key if out_ref.kind == "ITEM" else out_ref.tag
outputs[out_ref.collName].extend(
solve_for(t, tag_space, "t{}".format(i + 1), output.rawCond)
for (i, t) in enumerate(t for t in tag_list if not t.isRanged))
return outputs
def find_blame_candidates(arg_blame, graph_data):
"""
Given arg_blame in format coll@tag and graph_data from specfile, find the
possible steps@tag that could be responsible for putting or prescribing
arg_blame.
"""
coll_name, coll_tag = arg_blame.split("@")
# turn coll_tag into a tuple representing a point in tagspace
coll_tag = tuple(coll_tag.split(","))
# turn coll_tag into dict of substitutions tk: coll_tag[k]
coll_tag_system = {
Symbol("t{}".format(i + 1)): v for i, v in enumerate(coll_tag)}
# {s: {in_tag: value for each input tag of s} for each step s}
candidates = {}
# steps that contain the collection in output but have no valid solution
rejected_steps = set()
for (step, func) in graph_data.stepFunctions.iteritems():
func_inverses = find_step_inverses(func)
if coll_name in func_inverses:
candidates[step] = {}
for out_tag in func_inverses[coll_name]:
for (in_tag, expr) in out_tag.iteritems():
in_tag = str(in_tag)
# evaluate inv_p(t)
inv = expr.subs(coll_tag_system)
if in_tag in candidates[step]:
if inv != candidates[step][in_tag]:
# then the solution is inconsistent, reject
rejected_steps.add(step)
else:
candidates[step][in_tag] = inv
for s in rejected_steps:
del candidates[s]
return candidates
def _node_to_name(node, event_graph):
"""Create a name string for a given node in the event graph.
"""
return "{}@{}".format(event_graph.property(node, "name", ""),
event_graph.property(node, "tag", ""))
def blame_deadlocks(graph_ast, event_graph):
"""Blame candidates for deadlock given the execution graph of a program by
attempting to remove all steps that depend on blocked steps.
"""
step_functions = graph_ast.stepFunctions
potentially_deadlocked = event_graph.gotten_without_put()
# Map step/item@tag to node in graph
tags_to_nodes = {_node_to_name(node, event_graph):node for node in event_graph}
blame_candidateses = [find_blame_candidates(_node_to_name(blame_node,
event_graph), graph_ast) for blame_node in potentially_deadlocked]
# Fill in all blamed nodes as having "run" (by adding the node if not
# present and add put/prescribe edges).
for step_name, tags in chain.from_iterable(map(dict.iteritems, blame_candidateses)):
# Put the tag tuple in canonical order (same as in spec file)
tag_tuple = tuple([int(tags[tag]) for tag in step_functions[step_name].tag])
tag_tuple_string = ", ".join(map(str, tag_tuple))
step_tag_label = "{}@{}".format(step_name, tag_tuple_string)
if step_tag_label in tags_to_nodes:
step_tag_id = tags_to_nodes[step_tag_label]
else:
step_tag_id = event_graph.create_node_id(actions.RUNNING, step_name, tag_tuple_string)
tags_to_nodes[step_tag_label] = step_tag_id
event_graph.add_node(step_tag_id)
event_graph.style_step(step_tag_id, step_name, tag_tuple_string)
for output in step_functions[step_name].outputs:
if output.kind in {"STEP", "ITEM"}:
tag_list = output.key if output.kind == "ITEM" else output.tag
coll = output.collName
elif output.kind == "IF":
out_ref = output.refs[0]
tag_list = out_ref.key if out_ref.kind == "ITEM" else out_ref.tag
coll = out_ref.collName
# Substitute, and add edge in graph
output_node = "{}@{}".format(coll, ", ".join(
[str(sympify(t.expr).subs(tags)) for t in tag_list]))
if output_node in tags_to_nodes:
event_graph.add_node_with_children(step_tag_id, [tags_to_nodes[output_node]])
# Now we re-traverse the graph and only blame step nodes with indegree = 0.
filtered = {}
for blame_node, blame_candidate in zip(potentially_deadlocked, blame_candidateses):
for step_name, tags in blame_candidate.iteritems():
tag_tuple = tuple([int(tags[tag]) for tag in step_functions[step_name].tag])
tag_tuple_string = ", ".join(map(str, tag_tuple))
step_tag_label = "{}@{}".format(step_name, tag_tuple_string)
if event_graph.in_degree(tags_to_nodes[step_tag_label]) == 0:
filtered.setdefault(_node_to_name(blame_node, event_graph), {})[step_name] = tags
return filtered
| [
"romain.e.cledat@intel.com"
] | romain.e.cledat@intel.com |
70ec64d86ed5c0c271e847db24f5640fba8b206c | d6f95f4347c2bd934393603819787acf70aaf4eb | /2018年11月15日福建省/gg.py | af58924762c19caec1070c8e9830667202198a39 | [] | no_license | moto-faith/work | 531804bca7b6ecb6d9776ed2086bbf9952e2043b | e77e40dbbb7dbb80bd2bc2584a6d1d020f92d2b4 | refs/heads/master | 2020-04-08T11:20:37.533419 | 2019-03-18T08:09:27 | 2019-03-18T08:09:27 | 159,302,505 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,942 | py | #!/usr/bin/env python
#coding=utf-8
import time
import datetime
import re
import json
import requests
import time
import redis
import sys
from urlparse import urljoin
from db import DB
reload (sys)
import copy
import MySQLdb
sys.setdefaultencoding ("utf-8")
import htmlparser
from PIL import Image
def handle_post(post):
    """Normalise a record dict for MySQL insertion (Python 2 code).

    Returns a deep copy in which every value is a byte string, an int or a
    float: unicode is UTF-8 encoded, any other type is JSON-serialised, and
    string values are SQL-escaped via MySQLdb.
    """
    # Deep-copy so the caller's dict is not mutated while values are normalised.
    post = copy.deepcopy(post)
    for k,v in post.iteritems():  # Python 2: dict.iteritems()
        print k,v
        if isinstance(v, unicode):
            v = v.encode("utf8")
        if not isinstance(v,str) and not isinstance(v, int) and not isinstance(v, float):
            v = json.dumps(v)
        # NOTE(review): bare except silently skips escaping for non-string
        # values (ints/floats); presumably intentional best-effort — confirm.
        try:v = MySQLdb.escape_string(v)
        except:pass
        post.update({k:v})
    return post
# Copy rows for one site from `list_info` into `list_model_filter`, tagging
# each record with fixed classification fields first (Python 2 script).
# NOTE(review): database credentials are hard-coded in the URL below — move
# them to configuration/environment before sharing this code.
db = DB ().create ('mysql://zhxg:ZHxg2017!@192.168.1.19:3306/sjk')
table = "list_info"
result1 = "list_model_filter"
urls = db.table(table).where('''siteName = "福建省公共资源交易中心"''').find()
dict_page_info = [url for url in urls if url is not None]
print "********-->", len (dict_page_info)
for str_urls in dict_page_info:
    dict_post = str_urls
    # print isinstance(dict_post,dict)
    # dict_post = json.loads(dict_post)
    # for k,v in dict_post.items():
    #     print k,v
    # dd = dict_post.get("detailUrl")
    # Tag the record with fixed classification fields before insertion.
    dict_post["tf"]="1"
    dict_post["irepeat"]="1"
    dict_post["service"]="勘察设计"
    dict_post["industry"]="industry"
    dic = handle_post (dict_post)
    # Insert failures (e.g. duplicates) are logged and skipped.
    try:
        db.table (result1).add (dic)
    except Exception as e:
        print e
    # for k,v in dict_post.items():
    #     print k,v
    # detailUrl = dict_post.get ("detailUrl")

if __name__ == "__main__":
    pass
"noreply@github.com"
] | noreply@github.com |
bff52e00b4a04a210acca40c9976bc005d73aee2 | bcd696ad0ad74b092dcb1c2de66d497854a6b040 | /login/serializers.py | d9092a41471e012c80dfbe2002e139e5690bf5f1 | [] | no_license | gopalgoyal1999/login-logout-mongodb | df1d20f0197931f9c29e87e5d7acef84b0067314 | 225543e28e521947e81163d9cc7da72302d721f5 | refs/heads/master | 2022-10-28T10:25:16.707939 | 2020-06-19T10:44:02 | 2020-06-19T10:44:02 | 270,811,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,512 | py | from rest_framework_mongoengine import serializers,generics
#from django.contrib.auth import get_user_model
from mongoengine.queryset.visitor import Q
from django_mongoengine import fields
from django_mongoengine.mongo_auth.models import MongoUser
from .models import User
#User = get_user_model()
class UserCreateSerializer(serializers.DocumentSerializer):
    """Registration serializer for the mongoengine User document.

    NOTE(review): `email2` is declared but absent from Meta.fields, so the
    confirmation value is never deserialized; as written, validate_email
    compares the submitted email against itself and the "Emails must match"
    branch can never fire — confirm intent.
    """
    email2 = fields.EmailField(label='Confirm Email')
    class Meta:
        model=User
        fields = ('username','email','password')
        extra_kwargs={
            "password":{"write_only": True}
        }
    def validate_email(self,value):
        # Cross-check against the raw submitted data and reject duplicates.
        data = self.get_initial()
        email1 = data.get("email")
        email2 = value
        if email1 != email2:
            # NOTE(review): `generics` has no ValidationError attribute in DRF;
            # this likely should be serializers.ValidationError — confirm.
            raise generics.ValidationError("Emails must match.")
        user_qs = User.objects.filter(email=email2)
        if user_qs:
            raise generics.ValidationError("This user has already registered.")
        return value
    def create(self,validated_data):
        """Persist the new user and echo back the validated payload."""
        username = validated_data['username']
        email = validated_data['email']
        password = validated_data['password']
        # NOTE(review): the password is stored in plaintext; the hashing call
        # below is commented out — re-enable before production use.
        user_obj = User(
            username = username,
            email = email,
            password=password
        )
        #user_obj.set_password(password)
        user_obj.save()
        return validated_data
class UserLoginSerializer(serializers.DocumentSerializer):
    """Serializer for logging in with either a username or an email."""
    # Each identifier is optional on its own; at least one must be given
    # (checked below).
    username = fields.StringField(max_length=50,blank=True)
    email = fields.EmailField(label="Email Address",blank=True)
    class Meta:
        model = User
        fields = ('username','email','password')
        extra_kwargs={
            "password":{"write_only": True}
        }
    def validated_data(self,data):
        """Check the submitted credentials and return ``data`` unchanged.

        NOTE(review): the DRF validation hook is normally named
        ``validate``; this method is named ``validated_data`` and may
        therefore never be invoked by the framework -- confirm.
        """
        user_obj = None
        email = data.get("email",None)
        username = data.get("username",None)
        password = data["password"]
        if not email and not username:
            raise generics.ValidationError("A Username or email is required to login.")
        # Look the user up by either identifier.
        user = User.objects.filter(
            Q(email=email) |
            Q(username=username)
        )
        if user and user.count() == 1:
            user_obj=user.first()
        else:
            raise generics.ValidationError("This Username/Email is not valid!")
        if user_obj:
            # NOTE(review): plain-text password comparison -- presumably the
            # stored password is unhashed; verify against the User model.
            if not user_obj.password==password:
                raise generics.ValidationError("Incorrect credentials please try again.")
        return data
| [
"44091979+gopalgoyal1999@users.noreply.github.com"
] | 44091979+gopalgoyal1999@users.noreply.github.com |
6edd2272d0f7f9743514ba61f57c5843e4987222 | 8aab35fe94aac25d2315572bc16235016674de96 | /crawler/instagram.py | 3886aed5ceb6f847b933c6370c26c180800e50b5 | [
"MIT"
] | permissive | fablabnbg/nichtparasoup | ebe64b81ad16df6f1552048e27b1304ac6921984 | d66d80b02fe4c83868e5bd0639a77be965d37602 | refs/heads/master | 2020-12-06T17:20:55.114636 | 2015-04-11T00:31:06 | 2015-04-11T00:31:06 | 33,756,676 | 0 | 0 | null | 2015-04-11T00:30:27 | 2015-04-11T00:30:27 | null | UTF-8 | Python | false | false | 1,491 | py |
try:
import urllib.request as urllib2 # py3
except:
import urllib2 # py2
try:
import urllib.parse as urlparse # py3
except:
import urlparse # py2
import re
import json
from bs4 import BeautifulSoup
from . import Crawler, CrawlerError
class Instagram(Crawler):
    """ instagram image provider """
    # base URI ("<profile>/media/") and pagination cursor (last image id)
    __uri = ""
    __last = ""
    ## class methods
    @classmethod
    def __build_uri(cls, uri):
        # Append the JSON media endpoint to the profile URI.
        return uri +"/media/"
    ## instance methods
    def _restart_at_front(self):
        # An empty cursor makes the next crawl start at the newest media.
        self.__last = ""
    def __init__(self, uri):
        self.__uri = self.__class__.__build_uri(uri)
        self._restart_at_front()
    def _crawl(self):
        """Fetch one page of media and queue every image found.

        Advances the pagination cursor (__last) to the id of the last
        image seen so the next call fetches the following page.
        Raises CrawlerError when the API does not report status "ok".
        """
        uri = urlparse.urljoin(self.__uri, "?max_id="+self.__last)
        self.__class__._log("debug", "%s crawls url: %s" % (self.__class__.__name__, uri))
        request = urllib2.Request(uri, headers=self.__class__.headers())
        response = urllib2.urlopen(request, timeout=self.__class__.timeout())
        charset = 'utf8'
        try: # py3
            charset = response.info().get_param('charset', charset)
        except:
            # py2 responses lack get_param; fall back to utf8
            pass
        data = json.loads(response.read().decode(charset))
        if data["status"] != "ok":
            raise CrawlerError()
        for item in data['items']:
            if item["type"] == "image":
                self.__last = item['id']
                image = item['images']['standard_resolution']['url']
                self._add_image(image)
| [
"jan.kowalleck@gmail.com"
] | jan.kowalleck@gmail.com |
SUFFIXES = {1000: ['KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'],
            1024: ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']}

def approximate_size(size, a_kilobyte_is_1024_bytes=True):
    """Render a byte count as a human-readable string.

    size -- file size in bytes (must be non-negative)
    a_kilobyte_is_1024_bytes -- when True (default) use binary units
        (KiB, MiB, ...); when False use decimal units (KB, MB, ...)

    Returns a string such as '1.0 KiB'.
    Raises ValueError for negative input or a size beyond the largest unit.
    """
    if size < 0:
        raise ValueError('number must be non-negative')
    base = 1024 if a_kilobyte_is_1024_bytes else 1000
    remaining = size
    for unit in SUFFIXES[base]:
        remaining /= base
        if remaining < base:
            return '{0:.1f} {1}'.format(remaining, unit)
    raise ValueError('number too large')
if __name__ == '__main__':
print(approximate_size(1000000000000, False))
print(approximate_size(1000000000000))
print(approximate_size.__doc__)
| [
"marc.hines@cdk.com"
] | marc.hines@cdk.com |
a9fa1f05a49145676d8d384b3c7e7cc8f4b16897 | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2016_09_26_polycrystal_FIP_allpoint/plot_evd.py | d523d88b853904fc3267a94e0c6fc19be735c236 | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from constants import const
import h5py
import sys
def pltevd(H):
    """Plot the extreme-value distributions (CDF vs ln(FIP)) for every
    sample id in the configuration and save the figure as
    'evd_orig_L<H>.png'.

    H -- identifier spliced into the input/output file names.
    """
    C = const()
    """define the colors of interest"""
    n_col = len(C['sid'])
    colormat = cm.rainbow(np.linspace(0, 1, n_col))
    # NOTE(review): f_reg is opened but never read -- confirm it is needed.
    f_reg = h5py.File("regression_results_L%s.hdf5" % H, 'r')
    fig = plt.figure(figsize=[5.5, 4])
    f = h5py.File("responses.hdf5", 'r')
    for ii in xrange(n_col):
        sid = C['sid'][ii]
        """get the x, y data for plotting the evd"""
        x = f.get('evd_%s' % sid)[...]
        # Track the global log-range across all sample ids for the x axis.
        if ii == 0:
            xmin = np.log(x).min()
            xmax = np.log(x).max()
        else:
            xmin = np.min([xmin, np.log(x).min()])
            xmax = np.max([xmax, np.log(x).max()])
        # Empirical CDF value for each sample: (i+1)/n.
        y = (np.arange(x.size)+1)/np.float32(x.size)
        """plot the original data and the fits"""
        # plt.plot(np.log(x), y, '.', markersize=2, color=colormat[ii, :],
        #          label=sid)
        plt.plot(np.log(x), y, '-', color=colormat[ii, :],
                 label=sid)
    f.close()
    f_reg.close()
    plt.xlabel("ln(FIP)")
    plt.ylabel("CDF")
    plt.legend(loc='lower right', shadow=True, fontsize='small')
    # Pad the x limits by 1% of the data range on each side.
    rng = np.abs(xmax - xmin)
    xmin += -0.01*rng
    xmax += 0.01*rng
    plt.xlim((xmin, xmax))
    # y limits: start at 0, pad the top by 1% of the last curve's range.
    ymin = y.min()
    ymax = y.max()
    rng = ymax - ymin
    ymin = 0
    ymax += 0.01*rng
    plt.ylim((ymin, ymax))
    plt.tight_layout()
    fig_name = 'evd_orig_L%s.png' % H
    fig.canvas.set_window_title(fig_name)
    plt.savefig(fig_name)
if __name__ == '__main__':
sid = sys.argv[1]
pltevd(sid)
plt.show()
| [
"noahhpaulson@gmail.com"
] | noahhpaulson@gmail.com |
52c71e113f95af5f97bc153819bd4e1a1df443ec | b35133adbff1dc6f586c95b959b08c88d9426a47 | /pythonTest/beforeBegin/3countWords.py | 782f6ecd8ef35eebfd942ae0707234333140144a | [] | no_license | honeysss/2019-04-20 | 276296b8669af8b2b44aeadceb9eefd0defb78e3 | d565471bc68b795f4bdc63f76ab2d7abb6bfbb35 | refs/heads/master | 2020-05-15T15:55:53.985883 | 2019-04-21T04:16:11 | 2019-04-21T04:16:11 | 182,381,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # 输入一个字符串 统计出字母 空格 数字 其他字符各有多少个
import string
# Count letters, spaces, digits and other characters in one input string.
s = input('input a string:')
letter = 0
space = 0
digit = 0
other = 0
# iterate over the characters of the string
for c in s:
    if c.isalpha():
        letter += 1
    elif c.isspace():
        space += 1
    elif c.isdigit():
        digit += 1
    else:
        other += 1
# no comma separator needed here (the backslash continues the string literal)
print('字母有%d个,空格有%d个,数字有%d个,\
其他字符有%d个' %(letter, space, digit, other))
| [
"907641898@qq.com"
] | 907641898@qq.com |
def imprimir_tablero(tablero):
    """Print the board: cells of each row separated by single spaces,
    one row per output line."""
    for fila in tablero:
        ultimo = len(fila) - 1
        for indice, celda in enumerate(fila):
            print(celda, end='\n' if indice == ultimo else ' ')
def cambiar_tablero(tablero, posicion, jugador):
    """Place the current player's mark at board position 1-9.

    Positions follow numeric-keypad layout (1 = bottom-left, 9 = top-right)
    on the 5x5 drawing grid, whose playable cells sit at rows 0/2/4 and
    columns 0/2/4.  ``jugador`` True places 'x', False places 'o'.

    Returns 0 on success, or an error-message string when the position is
    occupied or does not exist.  (Replaces a nine-way duplicated if/elif
    chain with a single coordinate lookup.)
    """
    simbolo = 'x' if jugador else 'o'
    # keypad position -> (row, col) in the drawing grid
    coordenadas = {
        1: (4, 0), 2: (4, 2), 3: (4, 4),
        4: (2, 0), 5: (2, 2), 6: (2, 4),
        7: (0, 0), 8: (0, 2), 9: (0, 4),
    }
    if posicion not in coordenadas:
        return 'Esa posicion no existe.'
    fila, columna = coordenadas[posicion]
    if tablero[fila][columna] != ' ':
        return 'Esa posicion ya esta ocupada.'
    tablero[fila][columna] = simbolo
    return 0
def hay_ganador(tablero):
    """Return 1 if 'x' has three in a row, 2 if 'o' does, else None.

    Playable cells live at rows/cols 0, 2 and 4 of the drawing grid.
    (Replaces eight duplicated boolean expressions and an unreachable
    ``break`` with a table of winning lines.)
    """
    lineas = [
        [(0, 0), (0, 2), (0, 4)],   # top row
        [(2, 0), (2, 2), (2, 4)],   # middle row
        [(4, 0), (4, 2), (4, 4)],   # bottom row
        [(0, 0), (2, 0), (4, 0)],   # left column
        [(0, 2), (2, 2), (4, 2)],   # middle column
        [(0, 4), (2, 4), (4, 4)],   # right column
        [(0, 0), (2, 2), (4, 4)],   # main diagonal
        [(4, 0), (2, 2), (0, 4)],   # anti-diagonal
    ]
    # 'x' is checked first, matching the original player order.
    for simbolo, codigo in (('x', 1), ('o', 2)):
        for linea in lineas:
            if all(tablero[f][c] == simbolo for f, c in linea):
                return codigo
    return None
# 5x5 drawing grid: playable cells are at rows/cols 0, 2 and 4; the
# in-between rows/columns hold the separator characters.
tablero = [
    [' ', '|', ' ', '|', ' '],
    ['-', '+', '-', '+', '-'],
    [' ', '|', ' ', '|', ' '],
    ['-', '+', '-', '+', '-'],
    [' ', '|', ' ', '|', ' ']
]
turno_1 = True      # True -> player 1 ('x') moves next
jugador_1 = ''
jugador_2 = ''
turno = 0           # number of marks placed so far (9 fills the board)
imprimir_tablero(tablero)
while turno < 9:
    # First pass: ask for both player names instead of playing a move.
    if jugador_1 == '':
        print('Nombre de jugador 1 (x)')
        jugador_1 = input()
        print('Nombre de jugador 2 (o)')
        jugador_2 = input()
    else:
        if turno_1:
            print(jugador_1 + ', elegi una posicion')
        else:
            print(jugador_2 + ', elegi una posicion')
        jugada = int(input())
        # cambiar_tablero returns 0 on success, an error string otherwise.
        valor = cambiar_tablero(tablero, jugada, turno_1)
        if valor == 0:
            turno_1 = not turno_1
            turno += 1
            imprimir_tablero(tablero)
            if hay_ganador(tablero) == 1:
                print(jugador_1 + " gano!")
                break
            elif hay_ganador(tablero) == 2:
                print(jugador_2 + " gano!")
                break
        else:
            print(valor)
# NOTE(review): if the winning move is the 9th one, turno == 9 here and
# "Empate..." is printed right after the win message -- confirm intended.
if turno == 9:
    print("Empate...")
| [
"drianketing@gmail.com"
] | drianketing@gmail.com |
6a48d215e7e64e2bf3e5b6d0e175a107b0de42fc | 6ddaecc7720aaf29af41fa251f256683882d7a98 | /CodeAcademy-Python/hurricane_analysis_ending/script.py | d390280e73743fa318ed871457a9fa96ea6f817a | [] | no_license | ShivaKakarla512/CodecademyProjects | 114b579139195da0b99b040f1226450d63eee746 | cd728792e4da03969e51b3c69fb36aed47457654 | refs/heads/master | 2022-06-23T09:49:06.180408 | 2020-05-11T09:28:10 | 2020-05-11T09:28:10 | 262,997,123 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,957 | py | # names of hurricanes
names = ['Cuba I', 'San Felipe II Okeechobee', 'Bahamas', 'Cuba II', 'CubaBrownsville', 'Tampico', 'Labor Day', 'New England', 'Carol', 'Janet', 'Carla', 'Hattie', 'Beulah', 'Camille', 'Edith', 'Anita', 'David', 'Allen', 'Gilbert', 'Hugo', 'Andrew', 'Mitch', 'Isabel', 'Ivan', 'Emily', 'Katrina', 'Rita', 'Wilma', 'Dean', 'Felix', 'Matthew', 'Irma', 'Maria', 'Michael']
# months of hurricanes
months = ['October', 'September', 'September', 'November', 'August', 'September', 'September', 'September', 'September', 'September', 'September', 'October', 'September', 'August', 'September', 'September', 'August', 'August', 'September', 'September', 'August', 'October', 'September', 'September', 'July', 'August', 'September', 'October', 'August', 'September', 'October', 'September', 'September', 'October']
# years of hurricanes
years = [1924, 1928, 1932, 1932, 1933, 1933, 1935, 1938, 1953, 1955, 1961, 1961, 1967, 1969, 1971, 1977, 1979, 1980, 1988, 1989, 1992, 1998, 2003, 2004, 2005, 2005, 2005, 2005, 2007, 2007, 2016, 2017, 2017, 2018]
# maximum sustained winds (mph) of hurricanes
max_sustained_winds = [165, 160, 160, 175, 160, 160, 185, 160, 160, 175, 175, 160, 160, 175, 160, 175, 175, 190, 185, 160, 175, 180, 165, 165, 160, 175, 180, 185, 175, 175, 165, 180, 175, 160]
# areas affected by each hurricane
areas_affected = [['Central America', 'Mexico', 'Cuba', 'Florida', 'The Bahamas'], ['Lesser Antilles', 'The Bahamas', 'United States East Coast', 'Atlantic Canada'], ['The Bahamas', 'Northeastern United States'], ['Lesser Antilles', 'Jamaica', 'Cayman Islands', 'Cuba', 'The Bahamas', 'Bermuda'], ['The Bahamas', 'Cuba', 'Florida', 'Texas', 'Tamaulipas'], ['Jamaica', 'Yucatn Peninsula'], ['The Bahamas', 'Florida', 'Georgia', 'The Carolinas', 'Virginia'], ['Southeastern United States', 'Northeastern United States', 'Southwestern Quebec'], ['Bermuda', 'New England', 'Atlantic Canada'], ['Lesser Antilles', 'Central America'], ['Texas', 'Louisiana', 'Midwestern United States'], ['Central America'], ['The Caribbean', 'Mexico', 'Texas'], ['Cuba', 'United States Gulf Coast'], ['The Caribbean', 'Central America', 'Mexico', 'United States Gulf Coast'], ['Mexico'], ['The Caribbean', 'United States East coast'], ['The Caribbean', 'Yucatn Peninsula', 'Mexico', 'South Texas'], ['Jamaica', 'Venezuela', 'Central America', 'Hispaniola', 'Mexico'], ['The Caribbean', 'United States East Coast'], ['The Bahamas', 'Florida', 'United States Gulf Coast'], ['Central America', 'Yucatn Peninsula', 'South Florida'], ['Greater Antilles', 'Bahamas', 'Eastern United States', 'Ontario'], ['The Caribbean', 'Venezuela', 'United States Gulf Coast'], ['Windward Islands', 'Jamaica', 'Mexico', 'Texas'], ['Bahamas', 'United States Gulf Coast'], ['Cuba', 'United States Gulf Coast'], ['Greater Antilles', 'Central America', 'Florida'], ['The Caribbean', 'Central America'], ['Nicaragua', 'Honduras'], ['Antilles', 'Venezuela', 'Colombia', 'United States East Coast', 'Atlantic Canada'], ['Cape Verde', 'The Caribbean', 'British Virgin Islands', 'U.S. Virgin Islands', 'Cuba', 'Florida'], ['Lesser Antilles', 'Virgin Islands', 'Puerto Rico', 'Dominican Republic', 'Turks and Caicos Islands'], ['Central America', 'United States Gulf Coast (especially Florida Panhandle)']]
# damages (USD($)) of hurricanes
damages = ['Damages not recorded', '100M', 'Damages not recorded', '40M', '27.9M', '5M', 'Damages not recorded', '306M', '2M', '65.8M', '326M', '60.3M', '208M', '1.42B', '25.4M', 'Damages not recorded', '1.54B', '1.24B', '7.1B', '10B', '26.5B', '6.2B', '5.37B', '23.3B', '1.01B', '125B', '12B', '29.4B', '1.76B', '720M', '15.1B', '64.8B', '91.6B', '25.1B']
# deaths for each hurricane
deaths = [90,4000,16,3103,179,184,408,682,5,1023,43,319,688,259,37,11,2068,269,318,107,65,19325,51,124,17,1836,125,87,45,133,603,138,3057,74]
def convert_damages_data(damages):
    """Convert damages data from string to float and return converted data as a list.

    Entries such as '100M' or '1.42B' become floats (x1e6 / x1e9); the
    sentinel string 'Damages not recorded' is passed through unchanged.
    Uses an exclusive elif chain: the original non-exclusive ifs could
    append one entry twice if it contained both 'M' and 'B'.
    """
    conversion = {"M": 1000000,
                  "B": 1000000000}
    updated_damages = []
    for damage in damages:
        if damage == "Damages not recorded":
            updated_damages.append(damage)
        elif damage.endswith('M'):
            updated_damages.append(float(damage[:-1]) * conversion["M"])
        elif damage.endswith('B'):
            updated_damages.append(float(damage[:-1]) * conversion["B"])
        # Unrecognized entries are silently skipped, as before.
    return updated_damages
# update damages data
updated_damages = convert_damages_data(damages)
def create_dictionary(names, months, years, max_sustained_winds, areas_affected, updated_damages, deaths):
    """Build a dict of hurricane records keyed by hurricane name.

    Each value is a dict with the keys Name, Month, Year,
    Max Sustained Wind, Areas Affected, Damage and Deaths.
    """
    keys = ("Name", "Month", "Year", "Max Sustained Wind",
            "Areas Affected", "Damage", "Deaths")
    hurricanes = {}
    for i in range(len(names)):
        values = (names[i], months[i], years[i], max_sustained_winds[i],
                  areas_affected[i], updated_damages[i], deaths[i])
        hurricanes[names[i]] = dict(zip(keys, values))
    return hurricanes
# create hurricanes dictionary
hurricanes = create_dictionary(names, months, years, max_sustained_winds, areas_affected, updated_damages, deaths)
def create_year_dictionary(hurricanes):
    """Regroup hurricane records into a dict keyed by year.

    Each value is the list of record dicts for that year, in the
    original insertion order.
    """
    hurricanes_by_year = {}
    for record in hurricanes.values():
        hurricanes_by_year.setdefault(record['Year'], []).append(record)
    return hurricanes_by_year
# create a new dictionary of hurricanes with year and key
hurricanes_by_year = create_year_dictionary(hurricanes)
print(hurricanes_by_year[1932])
def count_affected_areas(hurricanes):
    """Count how many hurricanes touched each area.

    Returns a dict mapping area name -> number of hurricanes whose
    'Areas Affected' list contains it.
    """
    affected_areas_count = {}
    for record in hurricanes.values():
        for area in record['Areas Affected']:
            affected_areas_count[area] = affected_areas_count.get(area, 0) + 1
    return affected_areas_count
# create dictionary of areas to store the number of hurricanes involved in
affected_areas_count = count_affected_areas(hurricanes)
def most_affected_area(affected_areas_count):
    """Return (area, count) for the most frequently affected area.

    Ties keep the first area seen; an empty mapping yields the seeded
    default ('Central America', 0), as before.
    """
    best_area, best_count = 'Central America', 0
    for area, count in affected_areas_count.items():
        if count > best_count:
            best_area, best_count = area, count
    return best_area, best_count
# find most frequently affected area and the number of hurricanes involved in
max_area, max_area_count = most_affected_area(affected_areas_count)
print(max_area,max_area_count)
def highest_mortality(hurricanes):
    """Return (name, deaths) for the deadliest hurricane.

    Ties keep the first record seen; an empty mapping yields the seeded
    default ('Cuba I', 0), as before.
    """
    deadliest, most_deaths = 'Cuba I', 0
    for name, record in hurricanes.items():
        if record['Deaths'] > most_deaths:
            deadliest, most_deaths = name, record['Deaths']
    return deadliest, most_deaths
# find highest mortality hurricane and the number of deaths
max_mortality_cane, max_mortality = highest_mortality(hurricanes)
print(max_mortality_cane, max_mortality)
def categorize_by_mortality(hurricanes):
    """Categorize hurricanes by mortality and return a dictionary.

    Bucket keys 0-5: 0 deaths -> 0; (0, 100] -> 1; (100, 500] -> 2;
    (500, 1000] -> 3; (1000, 10000] -> 4; above 10000 -> 5.
    Replaces the six-way elif ladder with a bisect bucket lookup.
    (Negative death counts, which the old chain silently dropped, now
    land in bucket 0.)
    """
    import bisect
    # Upper bounds (inclusive) of buckets 0-4; above the last -> bucket 5.
    bounds = [0, 100, 500, 1000, 10000]
    hurricanes_by_mortality = {rating: [] for rating in range(6)}
    for cane in hurricanes:
        num_deaths = hurricanes[cane]['Deaths']
        rating = bisect.bisect_left(bounds, num_deaths)
        hurricanes_by_mortality[rating].append(hurricanes[cane])
    return hurricanes_by_mortality
# categorize hurricanes in new dictionary with mortality severity as key
hurricanes_by_mortality = categorize_by_mortality(hurricanes)
print(hurricanes_by_mortality[5])
def highest_damage(hurricanes):
    """Return (name, damage) for the costliest hurricane.

    Records whose Damage is the string 'Damages not recorded' are
    ignored; an empty/unrecorded-only mapping yields the seeded
    default ('Cuba I', 0), as before.
    """
    costliest, highest_cost = 'Cuba I', 0
    for name, record in hurricanes.items():
        damage = record['Damage']
        if damage == "Damages not recorded":
            continue
        if damage > highest_cost:
            costliest, highest_cost = name, damage
    return costliest, highest_cost
# find highest damage inducing hurricane and its total cost
max_damage_cane, max_damage = highest_damage(hurricanes)
print(max_damage_cane, max_damage)
def categorize_by_damage(hurricanes):
    """Categorize hurricanes by damage and return a dictionary.

    Bucket keys 0-5: unrecorded or 0 -> 0; (0, 1e8] -> 1; (1e8, 1e9] -> 2;
    (1e9, 1e10] -> 3; (1e10, 5e10] -> 4; above 5e10 -> 5.
    Replaces the seven-way elif ladder with a bisect bucket lookup.
    """
    import bisect
    # Upper bounds (inclusive) of buckets 0-4; above the last -> bucket 5.
    bounds = [0, 100000000, 1000000000, 10000000000, 50000000000]
    hurricanes_by_damage = {rating: [] for rating in range(6)}
    for cane in hurricanes:
        total_damage = hurricanes[cane]['Damage']
        if total_damage == "Damages not recorded":
            rating = 0
        else:
            rating = bisect.bisect_left(bounds, total_damage)
        hurricanes_by_damage[rating].append(hurricanes[cane])
    return hurricanes_by_damage
# categorize hurricanes in new dictionary with damage severity as key
hurricanes_by_damage = categorize_by_damage(hurricanes)
print(hurricanes_by_damage[5])
| [
"noreply@github.com"
] | noreply@github.com |
ab6333b26ca5c5e92c98730f02f2f883ba820907 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_frizzing.py | 1ed7c522d5ae15fc66c9c2b646ba67fb89ea4cfa | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py |
from xai.brain.wordbase.verbs._frizz import _FRIZZ
#calss header
class _FRIZZING(_FRIZZ, ):
    """Word entry for the inflected form "FRIZZING" (verb).

    Inherits all behavior from the base form _FRIZZ and only sets the
    identifying metadata.
    """
    def __init__(self,):
        _FRIZZ.__init__(self)
        self.name = "FRIZZING"   # surface form
        self.specie = 'verbs'    # part-of-speech bucket
        self.basic = "frizz"     # lemma / base form
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
0ba11892cfabb4de64c8d0762f24f627263354a8 | 491401e64e372cb1ae2d650a00e79c3e074a5005 | /neural_networks/rnn.py | 56fd46a4baa74a39bcf16a744491bca37c2ff259 | [] | no_license | alexeytra/textAnalization | a913e8434ff3496edc7b0bda6f95fe49075cb5b8 | 64cd3fc5fa343f70e1dd2239e2896dbd88986f16 | refs/heads/master | 2020-05-15T11:37:42.632988 | 2019-08-27T14:32:07 | 2019-08-27T14:32:07 | 182,238,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,943 | py | from keras.models import Sequential
from keras.layers import Dense, Dropout, SpatialDropout1D
from keras.layers import Embedding
from keras.layers import LSTM
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
import pandas as pd
import voсabulary as voc
from neural_networks.service import Service
class RNNModel:
    """Builds, trains and evaluates three LSTM text-classifier variants.

    The corpus comes from ``voc.getSample()``; sentences are tokenized and
    padded to a fixed length, labels are one-hot encoded with a
    LabelBinarizer, and each ``activate_rnn_model_v*`` method trains one
    Keras model and hands it to ``Service`` for plotting/prediction.
    """
    def __init__(self):
        # All state is populated lazily by __text_preproccessing();
        # attributes are only declared here.
        self.__sentences_train = None
        self.__action_train = None
        self.__sentences_test = None
        self.__action_test = None
        self.__actions = None
        self.__max_len = None
        self.__X_train = None
        self.__y_train = None
        self.__X_test = None
        self.__y_test = None
        self.__num_actions = None
        self.__tokenizer = None
        self.__encoder = None
        self.__num_samples = None
    def __text_preproccessing(self):
        """Load the sample data, tokenize/pad the sentences and one-hot
        encode the labels; populates the private training attributes."""
        self.__sentences_train, self.__action_train, self.__sentences_test, self.__action_test, self.__actions = voc.getSample()
        self.__num_actions = pd.Series(self.__actions, name='A').unique()
        self.__max_len = 100
        tokenizer = Tokenizer()
        tokenizer.fit_on_texts(self.__sentences_train)
        cnn_texts_seq = tokenizer.texts_to_sequences(self.__sentences_train)
        self.__X_train = sequence.pad_sequences(cnn_texts_seq, maxlen=self.__max_len)
        encoder = LabelBinarizer()
        encoder.fit(self.__action_train)
        self.__y_train = encoder.transform(self.__action_train)
        self.__y_test = encoder.transform(self.__action_test)
        self.__tokenizer = tokenizer
        self.__encoder = encoder
        # Unique label set; its size determines the softmax output width.
        self.__num_samples = pd.Series(self.__actions, name='A').unique()
    def activate_rnn_model_v1(self):
        """Variant 1: Embedding(1000, 32) + LSTM(32), adam, 20 epochs."""
        self.__text_preproccessing()
        model = Sequential()
        model.add(Embedding(1000, 32, input_length=self.__max_len))
        model.add(LSTM(32))
        model.add(Dense(self.__num_samples.size, activation='softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        print(model.summary())
        history = model.fit(self.__X_train, self.__y_train, epochs=20, batch_size=10, verbose=1, validation_split=0.1)
        service = Service(self.__encoder, self.__tokenizer, model, "RNN V1")
        service.plot_history(history)
        service.prediction_cnn(self.__max_len)
    def activate_rnn_model_v2(self):
        """Variant 2: Embedding(1000, 64) + dropout-regularized LSTM(100),
        rmsprop, 30 epochs.  NOTE(review): the Service label says
        "CNN V2" although this is an RNN -- confirm intended."""
        self.__text_preproccessing()
        model = Sequential()
        model.add(Embedding(1000, 64, input_length=self.__max_len))
        model.add(SpatialDropout1D(0.2))
        model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
        model.add(Dense(self.__num_samples.size, activation='softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
        print(model.summary())
        history = model.fit(self.__X_train, self.__y_train, epochs=30, batch_size=10, verbose=1, validation_split=0.20)
        service = Service(self.__encoder, self.__tokenizer, model, "CNN V2")
        service.plot_history(history)
        service.prediction_cnn(self.__max_len)
    def activate_rnn_model_v3(self):
        """Variant 3: large vocabulary Embedding(50000, 100) with no fixed
        input length, adam, smaller batches (5)."""
        self.__text_preproccessing()
        model = Sequential()
        model.add(Embedding(50000, 100))
        model.add(SpatialDropout1D(0.2))
        model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
        model.add(Dense(self.__num_samples.size, activation='softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        print(model.summary())
        history = model.fit(self.__X_train, self.__y_train, epochs=30, batch_size=5, verbose=1, validation_split=0.20)
        service = Service(self.__encoder, self.__tokenizer, model, "CNN V3")
        service.plot_history(history)
        service.prediction_cnn(self.__max_len)
| [
"alekceytr@mail.ru"
] | alekceytr@mail.ru |
e35c47d421a21cc2c02d82e0af606b3cd2c09b18 | f7d11d0d0061199c85aee31bda43e14f20bed921 | /pestle/pestle/sigtools/SigSubtract/__init__.py | 8cd8814acb0ab099197e38f8c0c62877d34d2f5c | [] | no_license | NoopDawg/pestle_tools | 632199586775196a6d5a5573608f90b5659f59fd | 7edec01dd2f270c4bd4f6918e9d9ac37b7c63b17 | refs/heads/master | 2022-07-26T09:41:36.168181 | 2020-05-11T19:13:45 | 2020-05-11T19:13:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | import os
from pestle.pestle.base.SigTool import SigTool
from pestle.utils import pestlepath
class SigSubtract(SigTool):
    """SigTool implementation for the "SigSubtract" analysis.

    The three pipeline stages are pulled in from sibling modules as
    methods of this class.
    """
    from ._checkArgs import _checkArgs
    from ._runAnalysis import _runAnalysis
    from ._saveResults import _saveResults
    def __init__(self, *argv):
        # Config file: <pestle>/resources/pestle.sigtools.SigSubtract.arg
        sigName = "SigSubtract"
        configFile = os.path.join(
            pestlepath(), "resources", ".".join(["pestle", "sigtools", sigName, "arg"])
        )
        super().__init__(sigName, configFile, *argv)
| [
"APJonchhe@gmail.com"
] | APJonchhe@gmail.com |
16b69ed33c7b2ded52714eaba17e201c2f861ee0 | 0066a5657bcaabd536f716c55b7b110b3ff5126c | /tests/conftest.py | 0ed63ae40ddf4fe4bfbb46eaf42231bc1bece811 | [
"MIT"
] | permissive | Windact/classification_model | f684bd3b3040c63a1dd0af41ea015930bd6b37dc | e42e8099069467eeb21f94f6777eb34e68906500 | refs/heads/main | 2023-05-27T21:08:29.238115 | 2021-06-08T22:46:12 | 2021-06-08T22:46:12 | 362,130,332 | 0 | 0 | MIT | 2021-06-08T22:46:12 | 2021-04-27T13:53:33 | Python | UTF-8 | Python | false | false | 1,390 | py | import pytest
import numpy as np
from sklearn.model_selection import train_test_split
from classification_model.config import core
from classification_model.processing import utils
@pytest.fixture(scope="session")
def pipeline_inputs():
    """Session-scoped train/test split of the training dataset.

    The multi-class target is collapsed to two classes ("functional" vs
    everything else) before splitting.  Returns
    (X_train, X_test, y_train, y_test).
    """
    # read the data
    data = utils.load_dataset(core.config.app_config.TRAINING_DATA_FILE)
    # Split in X and y
    X = data.drop(labels=core.config.model_config.TARGET_FEATURE_NAME, axis=1)
    y = data[core.config.model_config.TARGET_FEATURE_NAME]
    # For the 2 classes classification
    y = np.where(
        y == "functional", "functional", "non functional or functional needs repair"
    )
    # Train test split (seed and test size come from the model config)
    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        random_state=core.config.model_config.SEED,
        test_size=core.config.model_config.TEST_SIZE,
    )
    return X_train, X_test, y_train, y_test
@pytest.fixture(scope="session")
def pipeline_inputs_tests():
    """Session-scoped (X, y) pair from the held-out testing dataset.

    Same two-class target collapse as ``pipeline_inputs`` but with no
    further train/test split.
    """
    # read the data
    data = utils.load_dataset(core.config.app_config.TESTING_DATA_FILE)
    # Split in X and y
    X = data.drop(labels=core.config.model_config.TARGET_FEATURE_NAME, axis=1)
    y = data[core.config.model_config.TARGET_FEATURE_NAME]
    # For the 2 classes classification
    y = np.where(
        y == "functional", "functional", "non functional or functional needs repair"
    )
    return X, y
| [
"52325204+Windact@users.noreply.github.com"
] | 52325204+Windact@users.noreply.github.com |
b6f490deab8b0d16a1adff8b3c97ecf942ab4482 | 9908dc07233b4025425dc212b5e4acb3b087971e | /Medium/findRedundantConnection.py | c3fd9af33cb44dbda9d4c81e96ae23b61cd0a8ad | [] | no_license | Abdelhamid-bouzid/problem-Sovling- | 15769da71d19186947607574860462ad81f34e40 | fa0eecab8a94d1ad20b5aa129973f59eddd5678d | refs/heads/main | 2023-08-27T21:49:32.337979 | 2021-10-23T21:57:55 | 2021-10-23T21:57:55 | 317,097,388 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | class Solution:
def findRedundantConnection(self, edges: List[List[int]]) -> List[int]:
self.g = collections.defaultdict(list)
for u,v in edges:
self.g[u].append(v)
self.g[v].append(u)
for u,v in edges[::-1]:
self.vis=set()
self.dfs(1,u,v)
if len(self.vis)==len(self.g):
return [u,v]
def dfs(self,node,u,v):
if node in self.vis:
return True
self.vis.add(node)
for adj in self.g[node]:
if [node,adj]!=[u,v] and [adj,node]!=[u,v]:
self.dfs(adj,u,v)
| [
"noreply@github.com"
] | noreply@github.com |
b2987bc90d057d88982c7203de4a1b0a7c0ff006 | 26ba2331378b2315a599106f6a9f1a774e957e06 | /binary tree/No116_node_next.py | 885db67e4da23daf8868792ebc33a6715bcd418d | [] | no_license | riCoYanG-byte/leetcode | 353e22714355524b93581ac993f16fb79774226a | b2c0d058197e8f1822eb2519aa46f6586c7c4758 | refs/heads/master | 2023-02-13T17:24:41.351621 | 2021-01-07T12:39:05 | 2021-01-07T12:39:05 | 299,176,772 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | import collections
# similar question like zigzag
class Solution:
    def connect(self, root: 'Node') -> 'Node':
        """Wire each node's ``next`` pointer to its right neighbour on the
        same level (level-order traversal) and return the root.

        The last node of each level keeps its existing ``next`` value
        (normally None).
        """
        if not root:
            return root
        level = [root]
        while level:
            # Link horizontal neighbours within the current level.
            for left_node, right_node in zip(level, level[1:]):
                left_node.next = right_node
            # Collect the next level, left to right.
            level = [child
                     for node in level
                     for child in (node.left, node.right)
                     if child]
        return root
| [
"839891341@qq.com"
] | 839891341@qq.com |
9a8c303ddd1cc2644a61c45509904c05e637617c | f6898848211786f2e949be5e1171f8221ab2bff4 | /tools/package.py | 5e8ac65886343c3d9e756ff60abba4817f1a1e9c | [] | no_license | jmaack24/VortexSim | cfbeb21d56d4a64b8b2ed49e26ce7b4cb4d95c1f | 9ec5f3a4b9d7c2bc8c6a734764807f35e262668f | refs/heads/master | 2020-03-19T10:14:57.123815 | 2018-06-06T17:35:35 | 2018-06-06T17:35:35 | 136,354,788 | 0 | 0 | null | 2018-06-06T17:35:36 | 2018-06-06T16:14:51 | null | UTF-8 | Python | false | false | 2,710 | py | #!/usr/bin/python
import argparse
import os
import subprocess
# Set locations for various things
home = os.path.expanduser("~")
VORTEX = home + "/Vortex/"
SCRIPTS = home + "/Vortex/tools/"
RESULTS = home + "/Vortex/output/"
CLOSE = home + "/Vortex/results/"
LOG = home + "/Vortex/log/"
DATA = home + "/Vortex/data/"
BIN = home + "/Vortex/bin/"
MOVIE = home + "/Vortex/movies/"
# Useful lists
dirs = [DATA, RESULTS, MOVIE, CLOSE]
app = ["_in", "_out", "_pos", "_oc"]
# Create command line argument parser
parser = argparse.ArgumentParser()
# Define accepted arguments
parser.add_argument("root", help="root name for files produced by these runs")
parser.add_argument("-u", "--unpack", help="unpack the given archives",
action="store_true")
parser.add_argument("-s", "--skip-position", action="store_true",
help="skip the position files in packing or unpacking")
# Parse arguments
args = parser.parse_args()
bname = args.root
if args.unpack:
targs = ["tar", "-xf"]
gargs = ["gzip", "-d"]
for i in range(len(dirs)):
os.chdir(dirs[i])
arc = VORTEX + bname + app[i]
target = dirs[i] + bname
if os.path.exists(arc + ".tar.gz"):
if os.path.exists(target):
print target, "already exists"
else:
if dirs[i] == MOVIE and args.skip_position:
print "Moving", arc + ".tar.gz", "to", MOVIE
os.rename(arc + ".tar.gz", MOVIE + arc + ".tar.gz")
else:
print "Unpacking", arc + ".tar.gz"
pargs = gargs + [arc + ".tar.gz"]
retcode = subprocess.call(pargs)
pargs = targs + [arc + ".tar"]
retcode = subprocess.call(pargs)
os.remove(arc + ".tar")
else:
print "No archive for", target
else:
targs = ["tar", "-cf"]
gargs = ["gzip", "--best"]
for i in range(len(dirs)):
os.chdir(dirs[i])
aname = bname + app[i] + ".tar"
tname = bname
if os.path.exists(tname):
if dirs[i] == MOVIE and args.skip_position:
print "Skipping", dirs[i] + bname + app[i] + ".tar.gz"
continue
else:
print "Archiving", dirs[i] + tname
pargs = targs + [aname, tname]
retcode = subprocess.call(pargs)
pargs = gargs + [aname]
retcode = subprocess.call(pargs)
os.rename(dirs[i] + bname + app[i] + ".tar.gz",
VORTEX + bname + app[i] + ".tar.gz")
else:
print "Not found in", dirs[i]
| [
"noreply@github.com"
] | noreply@github.com |
a8c9784d544998ce1bf6a21ee7527612137dfd25 | f398f6914329be8f6368206c124107d46d88f051 | /datacleaning/remove_duplicate_980_HEP.py | 5f927121566b8cbe22108db900fae69cc37ee423 | [] | no_license | robk5uj/inspire-scripts | ecb0f9d8730a9227a3efedc45c1a30f7941f0256 | 58d0548a6f6364cec693b85eb52d297d4da6607e | refs/heads/master | 2021-01-15T17:45:44.470206 | 2014-12-27T21:54:54 | 2014-12-27T21:54:54 | 28,985,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,134 | py | from tempfile import mkstemp
import os
import time
from invenio.search_engine import perform_request_search
from invenio.config import CFG_TMPDIR
from invenio.dbquery import run_sql
from invenio.bibtask import task_low_level_submission
from invenio.search_engine_utils import get_fieldvalues
from invenio.search_engine import get_record
from invenio.bibrecord import print_rec, \
record_add_field, \
record_add_fields, \
field_get_subfield_instances, \
record_get_field_instances
SCRIPT_NAME = '980-spires-clean'
def submit_task(to_update):
    # Dump the MARCXML snippets into a temp file wrapped in a <collection>
    # element, then queue a bibupload task for it; returns the task id.
    (temp_fd, temp_path) = mkstemp(prefix=SCRIPT_NAME,
                                   dir=CFG_TMPDIR)
    handle = os.fdopen(temp_fd, 'w')
    pieces = ['<?xml version="1.0" encoding="UTF-8"?>', '<collection>']
    pieces.extend(to_update)
    pieces.append('</collection>')
    handle.write(''.join(pieces))
    handle.close()
    return task_low_level_submission('bibupload', SCRIPT_NAME, '-P', '5',
                                     '-c', temp_path, '--notimechange')
def submit_bibindex_task(to_update):
    # Queue a bibindex run restricted to the given record ids; returns the
    # scheduler task id.
    recid_csv = ','.join(str(r) for r in to_update)
    return task_low_level_submission('bibindex', SCRIPT_NAME, '-w', 'collection',
                                     '-i', recid_csv)
def wait_for_task(task_id):
    # Block until the scheduler marks the task DONE, polling every 5 seconds.
    sql = 'SELECT status FROM schTASK WHERE id = %s'
    status = run_sql(sql, [task_id])[0][0]
    while status != 'DONE':
        time.sleep(5)
        status = run_sql(sql, [task_id])[0][0]
class OurInstance(object):
    """Wrapper that makes a 980 field hashable by its tuple of subfield
    instances, so exact duplicates can be removed with a set."""
    def __init__(self, field):
        self.field = field
    def __hash__(self):
        return hash(tuple(field_get_subfield_instances(self.field)))
    def __eq__(self, other):
        # Two wrappers are equal when their subfield contents hash alike.
        return hash(self) == hash(other)
def create_our_record(recid):
    # Build a minimal MARCXML record carrying recid's 980 fields with exact
    # duplicates (identical subfield sets) removed.
    old_record = get_record(recid)
    unique = set(OurInstance(inst)
                 for inst in record_get_field_instances(old_record, '980'))
    deduped = [wrapper.field for wrapper in unique]
    record = {}
    record_add_field(record, '001', controlfield_value=str(recid))
    record_add_fields(record, '980', deduped)
    return print_rec(record)
def main():
    # Scan every record matching 980:HEP, and for records carrying the 'HEP'
    # value more than once in 980__a, upload a deduplicated copy.  Uploads go
    # out in batches of up to 1000 records, each followed by a reindex.
    to_update = []          # MARCXML snippets waiting to be uploaded
    to_update_recids = []   # record ids matching to_update, for bibindex
    recids = perform_request_search(p="980:HEP")
    for done, recid in enumerate(recids):
        # Progress line every 50 records.
        if done % 50 == 0:
            print 'done %s of %s' % (done + 1, len(recids))
        count = get_fieldvalues(recid, '980__a').count('HEP')
        if count > 1:
            # Duplicate 'HEP' value found -- queue a cleaned-up record.
            print recid, count
            xml = create_our_record(recid)
            to_update.append(xml)
            to_update_recids.append(recid)
        # Flush when the batch reaches 1000, or on the last record if anything
        # is still pending.  Note operator precedence: A or (B and C).
        if len(to_update) == 1000 or done + 1 == len(recids) and len(to_update) > 0:
            task_id = submit_task(to_update)
            print 'submitted task id %s' % task_id
            wait_for_task(task_id)
            task_id = submit_bibindex_task(to_update_recids)
            print 'submitted task id %s' % task_id
            wait_for_task(task_id)
            to_update = []
            to_update_recids = []
if __name__ == '__main__':
    main()
| [
"alessio.deiana@cern.ch"
] | alessio.deiana@cern.ch |
d016ea701a2d78e5839118e6dee223ab0a6da631 | 9962d61f6a4f1bc9c61070e5cdfb15d16a577580 | /calcWinRate.py | 644676bd64d5bd407a4e8fa3e1d614a256fcb7ab | [
"MIT"
] | permissive | timwuu/AnaPoker | d03171d2082b3053b186a52d5e2f020952d41fb1 | 7cb125c4639a5cd557a6b45c92b5793dcc39def8 | refs/heads/main | 2023-09-01T19:14:10.989765 | 2021-10-04T03:44:54 | 2021-10-04T03:44:54 | 404,658,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,998 | py | import sys
import random
import ranks
gCOMBIN_7_5 = []
def card(num, pattern):
    """Map a rank number and suit pattern to a card id (arithmetic:
    num * 4 + pattern - 7)."""
    base = num * 4
    return base + pattern - 7
def card_pattern(n):
    """Return a short label like '2s' / 'Ah' for card id n (suit = n mod 4)."""
    suit = ('s', 'c', 'd', 'h')[n % 4]
    return card_no(n) + suit
def card_no(n):
    """Return the rank character ('2'..'9','T','J','Q','K','A') of card id n."""
    return '123456789TJQKA'[(n + 3) // 4]
def card_is_same_suit(lst):
    """True when the five cards in lst all share one suit (card id mod 4)."""
    suit = lst[0] % 4
    return (lst[1] % 4 == suit and lst[2] % 4 == suit
            and lst[3] % 4 == suit and lst[4] % 4 == suit)
def card_key(lst):
    """Rank-string key for a 5-card hand, suffixed with 's' for a flush."""
    key = ''.join(card_no(c) for c in lst)
    if card_is_same_suit(lst):
        return key + 's'
    return key
def card_lst(lst):
    """Human-readable labels for a list of card ids."""
    return [card_pattern(c) for c in lst]
def get_rank(hand):
    """Return (best_rank, best_key, best_comb) for the strongest 5-card
    combination of the 7-card `hand`, using the string-keyed `ranks.ranks`
    table (lower rank value is picked as best).

    Fix: `best_key`/`best_comb` are initialised to None, so the function
    returns (ranks.RANK_MAX, None, None) instead of raising NameError in the
    corner case where no combination scores strictly below ranks.RANK_MAX.
    """
    best_rank = ranks.RANK_MAX
    best_key = None
    best_comb = None
    for comb in gCOMBIN_7_5:
        # The 5 cards selected by this index combination.
        lst = (hand[comb[0]], hand[comb[1]], hand[comb[2]],
               hand[comb[3]], hand[comb[4]])
        key = card_key(lst)
        tmp = ranks.ranks[key]
        if tmp < best_rank:
            best_rank = tmp
            best_key = key
            best_comb = comb
    return best_rank, best_key, best_comb
def get_rank_b(hand):
    """Variant of get_rank using the nested `ranks.rank_tree_b` lookup.

    Cards are reduced to zero-based rank numbers ((c - 1) // 4); the leaf
    holds a pair where index 1 is used for a flush and index 0 otherwise.

    Fix: `best_key`/`best_comb` start as None instead of being left unbound
    (previously a NameError if nothing scored below ranks.RANK_MAX).
    """
    best_rank = ranks.RANK_MAX
    best_key = None
    best_comb = None
    for comb in gCOMBIN_7_5:
        lst = (hand[comb[0]], hand[comb[1]], hand[comb[2]],
               hand[comb[3]], hand[comb[4]])
        key = [(c - 1) // 4 for c in lst]
        leaf = ranks.rank_tree_b[key[0]][key[1]][key[2]][key[3]][key[4]]
        tmp = leaf[1] if card_is_same_suit(lst) else leaf[0]
        if tmp < best_rank:
            best_rank = tmp
            best_key = key
            best_comb = comb
    return best_rank, best_key, best_comb
def calc_win_rate( player_a, player_b, table_cards, k=10000):
    """Monte-Carlo estimate of (win_rate_a, win_rate_b) over k random deals.

    player_a: hole cards (card ids 1..52).
    player_b: opponent's hole cards, or [] to play against a random opponent.
    table_cards: 0..5 known community cards.
    Ties count for neither player, so the two rates need not sum to 1.
    """
    # Remaining deck: every card not already held or on the table.
    cards = [ x for x in range(1,53)]
    for i in player_a:
        cards.remove(i)
    for i in table_cards:
        cards.remove(i)
    if player_b == []:
        # Also deal the random opponent's 2 hole cards each round.
        n = 7 - len(table_cards)
    else:
        n = 5 - len(table_cards)
        for i in player_b:
            cards.remove(i)
    win_a = 0
    win_b = 0
    for i in range(k):
        s = random.sample( cards, k=n)
        if player_b == []:
            # s[:2] are the opponent's hole cards, s[2:] completes the board.
            pl_a = player_a + table_cards + s[2:]
            pl_b = s[:2] + table_cards + s[2:]
        else:
            pl_a = player_a + table_cards + s
            pl_b = player_b + table_cards + s
        pl_a.sort() # 2021.10.02 not using "reverse=True"
        pl_b.sort()
        # get_rank returns (rank, key, comb); a lower rank value wins.
        rnk_a = get_rank( pl_a)
        rnk_b = get_rank( pl_b)
        if(rnk_a[0] < rnk_b[0]): # 2021.09.30 bug fix
            win_a += 1
        elif( rnk_b[0] < rnk_a[0]):
            win_b += 1
    return ( win_a/k, win_b/k)
############## setup combination array ##################
# Enumerate all C(7,5) = 21 ways to choose 5 indices out of 0..6, each stored
# as a strictly descending [m, n, p, q, r] list; i records how many were made.
tmp = [[m, n, p, q, r]
       for m in range(6, -1, -1)
       for n in range(m - 1, -1, -1)
       for p in range(n - 1, -1, -1)
       for q in range(p - 1, -1, -1)
       for r in range(q - 1, -1, -1)]
i = len(tmp)
gCOMBIN_7_5 = tuple(tmp) | [
"timwuu@gmail.com"
] | timwuu@gmail.com |
61e3e3ae436a1ef9bc2ce4e44b7ecf8735fa75ae | b6996fe334a4d2e21a7a360ca3b75439a1509b20 | /Check_IP/Check_IP.py | f6b6564395cf3869e99ab688d1d8662ac728106c | [] | no_license | TarampikosAndreas/Python-Exercises | d979c2faec70585e02a2e66ce470c6256514aa2e | 1fa6bc5575e0027b520c0e75ccd2f5becac788c2 | refs/heads/master | 2020-03-25T11:38:36.899431 | 2018-08-07T09:24:47 | 2018-08-07T09:24:47 | 141,704,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | import socket
# Prompt for an address and report whether socket.inet_aton accepts it.
# NOTE(review): inet_aton also accepts short forms like '1.2.3'; use
# inet_pton(AF_INET, ...) if strict dotted-quad validation is wanted.
ip = input('Enter an IP Address :')
try:
    socket.inet_aton(ip)
except socket.error:
    print("Not Valid IP")
else:
    print("Valid IP")
| [
"TarampikosAndreas@users.noreply.github.com"
] | TarampikosAndreas@users.noreply.github.com |
e21060ab2adb49b9091ed1b2891b907ec09d1ca1 | e25e3be8ec84f351537b4ec848a3d23544997f63 | /time_display/settings.py | f204540b041f4db9ea0b2df72723f0e6e91c7fa9 | [] | no_license | GodwinYeboah1/time_display | e6b5f7022a0b47f3c3c51389510d0d1c0ed6207f | 31854efecb205a41e7e65df9f2c705ceb86e3fb6 | refs/heads/master | 2021-05-10T14:49:18.893870 | 2018-01-23T00:00:28 | 2018-01-23T00:00:28 | 118,531,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,133 | py | """
Django settings for time_display project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'u1@w&1va_q#ojulpku*8gn=l^sjn*+8e66pky$^@6g1-(-fr#e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hostnames this site may serve; must be populated when DEBUG is False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'apps.time_app',  # project-local app
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'time_display.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'time_display.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# Development default: file-based SQLite stored next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"godwinyeboah95@gmail.com"
] | godwinyeboah95@gmail.com |
e64b09a0dfc3298c8150eac7cb5c0abb33f81534 | 52794dc6a97afe04e4237c7298bd6984d92b6e1e | /venv/Scripts/pip3-script.py | 0f607a6b5eeda1ec7597fdb04298694f605c444f | [] | no_license | namitmohale/HumanDetection | 15635792b92acee00bb22ff12387ec5360290b08 | bd69c4271a518f312bc117716c019d316a37b53e | refs/heads/master | 2020-04-09T05:55:22.226721 | 2019-05-22T21:49:53 | 2019-05-22T21:49:53 | 160,087,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | #!C:\Users\mohal\PycharmProjects\CVProject2\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
# NOTE: auto-generated setuptools console-script shim; it resolves and runs
# the 'pip3' entry point of the pinned pip distribution.  Do not edit by hand.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py(w)' / '.exe' wrapper suffix so argv[0] names the
    # plain command.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
| [
"mohale.namit95@gmail.com"
] | mohale.namit95@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.