blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0e79ac12c90e53fce66fab9dc0f08bf6bae60219 | f694182e6b7d1281cacdbe716fcbce0d7419c2b3 | /system_communication_home/urls.py | 1898512992218e47a30b1cfc36ef2f184adc0968 | [] | no_license | Afollower/MiniProjectCommunication | 15cf07926095c7e39a4f0341832ae2a813619d83 | bdc5f8edc3637f1e80d1706de39281879290819b | refs/heads/master | 2022-09-20T08:14:43.971772 | 2020-06-04T09:09:46 | 2020-06-04T09:09:46 | 263,805,545 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | from django.urls import path, include
from system_communication_home import views
urlpatterns = [
path('', views.index),
path('problem_proposed/', views.problem_proposed),
path('my_problem/', views.my_problem_list),
path('problem_solve/', views.problem_solve),
path('problem_resolved/', views.problem_resolved),
] | [
"rencheng_310813@qq.com"
] | rencheng_310813@qq.com |
184895f8be106c50e7efd39383cec128bad28d48 | 8780bc7f252f14ff5406ce965733c099034920b7 | /pyCode/pagesize/pagesize/wsgi.py | dfa911c708e95d9a1ec3be3f2f82bcdcfb628314 | [] | no_license | 13661892653/workspace | 5e4e458d31b9355c67d67ba7d9faccbcc1ac9f6b | 17960becabb3b4f0fc30009c71a11c4f7a5f8330 | refs/heads/master | 2020-12-24T20:00:15.541432 | 2018-08-14T13:56:15 | 2018-08-14T13:56:15 | 86,225,975 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | """
WSGI config for pagesize project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pagesize.settings")
application = get_wsgi_application()
| [
"470563152@qq.com"
] | 470563152@qq.com |
b0dba4e0631197008ca13f2d1a976ebad5e037cd | 1af735a8dcd2b2bd0e0144714390da6c8393b12a | /strip_ansi_escapes.py | 081a4c22ecb3196aa0a71069171c7840dc929741 | [
"MIT"
] | permissive | liona24/snipppets | 4e05fac55e7abc821fdf2687c7a4780dfb968327 | 08dc9164f9fa9f89176ad71c9b4dc24e72cb4c83 | refs/heads/master | 2022-12-06T12:30:26.795349 | 2022-11-20T20:54:00 | 2022-11-20T20:54:00 | 174,822,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | def strip_ansi_b(s: bytes):
# 7-bit and 8-bit C1 ANSI sequences
ansi_escape_8bit = re.compile(br'''
(?: # either 7-bit C1, two bytes, ESC Fe (omitting CSI)
\x1B
[@-Z\\-_]
| # or a single 8-bit byte Fe (omitting CSI)
[\x80-\x9A\x9C-\x9F]
| # or CSI + control codes
(?: # 7-bit CSI, ESC [
\x1B\[
| # 8-bit CSI, 9B
\x9B
)
[0-?]* # Parameter bytes
[ -/]* # Intermediate bytes
[@-~] # Final byte
)
''', re.VERBOSE)
return ansi_escape_8bit.sub(b'', s)
| [
"noreply@github.com"
] | liona24.noreply@github.com |
a745700dbf79b2aef262496bd6e89b09681f349d | b82f502c14327bbecec273519ec73e4f44cf1be3 | /revision_v1_2.py | 6b38f8df4fd727813ea46dd3750d50197b3a5f2c | [] | no_license | cereus/PadArq | 166707a25b1fd7920e19cb5c75aa7a2240dcbab3 | f6d00d8ff947f783297e09be6ac10bd86d508c11 | refs/heads/master | 2021-01-01T05:10:35.318226 | 2016-05-17T17:32:42 | 2016-05-17T17:32:42 | 59,039,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,319 | py | """
INFORMACIÓN DEL PROGRAMA:
Módulo: revision_v1_2.py
Propósito: Revisa factores de diseño arquitectónico para trámite de permisos de construcción
Versión: 1.2 Windows 10
Autor: Angel Flores Chávez
Fecha: 06-05-2016
"""
from tkinter import * # Importar funciones para crear ventanas
from tkinter import ttk
from tkinter import font
from tkinter import messagebox
from tabulate import tabulate
import datetime
import subprocess # Incluir para abrir archivos desde su aplicación
def infobox():  # "Info..." button callback
    """Show the 'about' dialog: module/version/author data plus the
    rule used to compare each design coefficient."""
    about_text = ('Módulo:\t\tPadArq Win\n'
                  'Versión:\t\t1.2 Windows\n'
                  'Autor:\t\tAngel Flores Chávez\n'
                  'Fecha:\t\t03-05-2016\n'
                  '\nDescripción:\n'
                  'Revisa factores de diseño arquitectónico para trámite de permisos de construcción '
                  'en la cuidad de Saltillo, Coah. Mx.\n\n'
                  '\nLos parámetros de diseño se comparan de la siguiente manera:\n\n'
                  ' COS --> Calculado >= Proyecto = Correcto\n'
                  ' CAS --> Calculado <= Proyecto = Correcto\n'
                  ' CUS --> Calculado >= Proyecto = Correcto')
    messagebox.showinfo(title='Acerca de PadARQ Win', message=about_text)
def rep(*args):  # "Reporte" button callback
    """Build the plain-text report 'reporte.txt' from the current GUI
    values, tell the user where it was written and open it in Notepad.

    Reads the module-level tkinter variables (n_proy, n_cli, s_*, dsp_*,
    stat_*) that calculos() fills in; it does not modify any of them.
    """
    np = n_proy.get()  # project name
    PryTxt = '\tNombre del proyecto --> ' + np + '\n'
    nc = n_cli.get()  # client name
    PryCli = '\tNombre del cliente --> ' + nc + '\n'
    hoy = datetime.date.today()  # read the date once so day/month/year agree
    yy = hoy.year
    mm = hoy.month
    dd = hoy.day
    PryHoy = '\tFecha --> ' + str(dd) + '/' + str(mm) + '/' + str(yy) + '\n'
    # Results table: one row per coefficient.
    # BUGFIX: the CAS row used stat_cos.get() for its "Estado" column, so
    # the report printed the COS verdict twice; it now uses stat_cas,
    # matching the on-screen results table.
    # NOTE(review): the CUS row shows dsp_f_cus in both the "Factor" and
    # "Calculado" columns; this mirrors the on-screen labels, but confirm
    # it is intended rather than a second copy/paste slip.
    coeficientes = [['Ocupación de Superficie (COS)', dsp_f_cos.get(), dsp_cos.get(),
                     dsp_s_cont.get(), stat_cos.get()],
                    ['Absorción de Superficie (CAS)', dsp_f_cas.get(), dsp_cas.get(),
                     dsp_s_abso.get(), stat_cas.get()],
                    ['Utilización de Superficie (CUS)', dsp_f_cus.get(), dsp_f_cus.get(),
                     dsp_cus.get(), stat_cus.get()]]
    tabla = tabulate(coeficientes, headers=['Coeficiente', 'Factor', 'Calculado', 'Proyecto', 'Estado'],
                     tablefmt='orgtbl', numalign='right')
    datos_pry = [['Sup. de Terreno', s_ter.get()], ['Sup. de Construcción', s_cons.get()],
                 ['Sup. de Contacto', s_cont.get()], ['Sup. de Absorción', s_abso.get()]]
    tablad = tabulate(datos_pry, tablefmt='orgtbl', numalign='right', floatfmt='8.2f')
    # The context manager guarantees the file is closed even if a write
    # fails (the original opened and closed the handle manually).
    with open('reporte.txt', 'w+') as f:
        f.write('\n'
                '------------------------------------------------------------------------------------\n'
                'Revisión de parámetros de diseño de proyectos arquitectónicos para factores COS,\n'
                'CUS y CAS utilizados en trámites de permisos de construcción para el municipio de\n'
                'Saltillo, Coah. Mx.\n'
                '------------------------------------------------------------------------------------\n'
                '\tVersión: 1.2 Win\n'
                '\tFecha : 03-05-2016\n'
                '\tAutor : Angel Flores Chávez\n'
                '------------------------------------------------------------------------------------\n')
        f.write('\n\n')
        f.write('------------------------------------------------------------------------------------\n')
        f.write('\tDATOS GENERALES\n')
        f.write('------------------------------------------------------------------------------------\n')
        f.write('\n')
        f.write(PryTxt)
        f.write(PryCli)
        f.write(PryHoy)
        f.write('\n\n')
        f.write('------------------------------------------------------------------------------------\n')
        f.write('\tPARÁMETROS DE DISEÑO\n')
        f.write('------------------------------------------------------------------------------------\n')
        f.write(tablad)
        f.write('\n\n\n')
        f.write('+----------------------------------------------------------------------------------+\n')
        f.write('|\tTABLA DE RESULTADOS |\n')
        f.write('+----------------------------------------------------------------------------------+\n')
        f.write(tabla)
        f.write('\n')
        f.write('+----------------------------------------------------------------------------------+\n')
        f.write('\n')
        f.write('\tNOTA:\n'
                '\tLos parámetros de diseño se comparan de la siguiente manera:\n'
                '\tCOS --> Calculado >= Proyecto = Correcto\n'
                '\tCAS --> Calculado <= Proyecto = Correcto\n'
                '\tCUS --> Calculado >= Proyecto = Correcto\n'
                )
        f.write('\n\n')
        f.write('\t<-- Fin del Reporte -->')
        f.write('\n')
    messagebox.showinfo(message='Se creó el archivo "reporte.txt" dentro de la carpeta del programa.\n\n'
                                'Cada vez que se crea un archivo de reporte, el nuevo sustituye al anterior.\n\n'
                                'Para conservar el actual, renombra el archivo o cámbialo de ubicación.',
                        title='¡ A V I S O !'
                        )
    # Open the report with Notepad.
    txtfile = "reporte.txt"
    notepadPath = r'C:\WINDOWS\system32\notepad.exe'
    # Passing an argument list avoids hand-building a command string.
    subprocess.Popen([notepadPath, txtfile])
def calculos(*args):  # "Calcular" button callback (also bound to <Return>)
    """Read the entry fields, compute the three coefficients and update
    the display and verdict variables shown in the results table.

    Computed values:
        res_cos = f_cos * lot surface        (compared against s_cont)
        res_cas = f_cas * lot surface        (compared against s_abso)
        res_cus = built surface / lot surface (compared against f_cus)

    Verdicts follow the rule documented in the info box and in the
    report's NOTA section:
        COS --> Calculado >= Proyecto = Correcto
        CAS --> Calculado <= Proyecto = Correcto
        CUS --> Calculado >= Proyecto = Correcto
    """
    try:
        var1 = float(s_ter.get())   # lot (terrain) surface
        var2 = float(s_cons.get())  # built surface
        vcos = float(f_cos.get())
        vcas = float(f_cas.get())
        vcus = float(f_cus.get())
        vabs = float(s_abso.get())  # project absorption surface
        vctk = float(s_cont.get())  # project ground-contact surface
        res_cos.set(vcos * var1)
        res_cas.set(vcas * var1)
        res_cus.set(var2 / var1)
        flt_cos = float(res_cos.get())
        dsp_cos.set(round(flt_cos, 2))   # display values use two decimals
        dsp_f_cos.set(round(vcos, 2))
        flt_cas = float(res_cas.get())
        dsp_cas.set(round(flt_cas, 2))
        dsp_f_cas.set(round(vcas, 2))
        flt_cus = float(res_cus.get())
        dsp_cus.set(round(flt_cus, 2))
        dsp_f_cus.set(round(vcus, 2))
        dsp_s_abso.set(round(vabs, 2))
        dsp_s_cont.set(round(vctk, 2))
        # BUGFIX: the original comparisons used <= / >=, so the boundary
        # case (calculated value exactly equal to the project value) was
        # reported as 'Ajustar'.  The rule stated in the info box and in
        # the report NOTA says equality counts as 'Correcto', so strict
        # comparisons are used here instead.
        if flt_cos < vctk:
            stat_cos.set('Ajustar')
        else:
            stat_cos.set('Correcto')
        if flt_cas > vabs:
            stat_cas.set('Ajustar')
        else:
            stat_cas.set('Correcto')
        if flt_cus > vcus:
            stat_cus.set('Ajustar')
        else:
            stat_cus.set('Correcto')
    except ValueError:
        # Fields may be empty or hold partial text while the user types;
        # deliberately leave the previous results on screen in that case.
        pass
    except ZeroDivisionError:
        # A lot surface of 0 made var2 / var1 raise and crash the
        # callback; treat it like invalid input instead.
        pass
# --- Main window -------------------------------------------------------
root = Tk()  # create the main application window
root.title('PadARQ - Revisión de Parámetros de Diseño Arquitectónico')
# Minimum and maximum sizes are identical, so the window cannot be resized.
root.minsize(790, 410)
root.maxsize(790, 410)
# Bold font used for the section headings in the layout below.
appHighlightFont = font.Font(family='Arial', size=10, weight='bold')
# --- Input variables (bound to the Entry widgets) ----------------------
n_proy = StringVar()  # project name
n_cli = StringVar()   # client name
f_cos = StringVar()   # COS factor (Coeficiente de Ocupación de Superficie)
f_cas = StringVar()   # CAS factor (Coeficiente de Absorción de Superficie)
f_cus = StringVar()   # CUS factor (Coeficiente de Utilización de Superficie)
s_ter = StringVar()   # lot (terrain) surface
s_cons = StringVar()  # built surface
s_cont = StringVar()  # ground-contact surface
s_abso = StringVar()  # absorption surface
# --- Raw computed values, filled in by calculos() ----------------------
res_cos = StringVar()
res_cas = StringVar()
res_cus = StringVar()
# Verdict per coefficient: 'Correcto' or 'Ajustar' (set by calculos()).
stat_cos = StringVar()
stat_cas = StringVar()
stat_cus = StringVar()
# --- Display variables: values rounded to 2 decimals for the table -----
dsp_f_cos = DoubleVar()
dsp_f_cas = DoubleVar()
dsp_f_cus = DoubleVar()
dsp_s_abso = DoubleVar()
dsp_s_cont = DoubleVar()
dsp_cos = DoubleVar()
dsp_cas = DoubleVar()
dsp_cus = DoubleVar()
content = ttk.Frame(root, padding=(3, 3, 12, 12)) # Definición de widgets
datoslbl = ttk.Label(content, text='DATOS DEL PROYECTO', font=appHighlightFont)
proylbl = ttk.Label(content, text='Nombre del Proyecto')
proy = ttk.Entry(content, textvariable=n_proy)
clilbl = ttk.Label(content, text='Nombre del Cliente')
cli = ttk.Entry(content, textvariable=n_cli)
coslbl = ttk.Label(content, text='Factor COS')
cos = ttk.Entry(content, textvariable=f_cos)
caslbl = ttk.Label(content, text='Factor CAS')
cas = ttk.Entry(content, textvariable=f_cas)
cuslbl = ttk.Label(content, text='Factor CUS')
cus = ttk.Entry(content, textvariable=f_cus)
terlbl = ttk.Label(content, text='Sup. de Terreno')
ter = ttk.Entry(content, textvariable=s_ter)
conslbl = ttk.Label(content, text='Sup. de Construcción')
cons = ttk.Entry(content, textvariable=s_cons)
contlbl = ttk.Label(content, text='Sup. de Contacto')
cont = ttk.Entry(content, textvariable=s_cont)
abslbl = ttk.Label(content, text='Sup. de Absorción')
abso = ttk.Entry(content, textvariable=s_abso)
tbllbl = ttk.Label(content, text='\nTABLA DE RESULTADOS', font=appHighlightFont)
tblsep = ttk.Separator(content, orient=HORIZONTAL)
parlbl = ttk.Label(content, text='Parámetro', font=appHighlightFont)
faclbl = ttk.Label(content, text='Factor', font=appHighlightFont)
calclbl = ttk.Label(content, text='Calculado', font=appHighlightFont)
reallbl = ttk.Label(content, text='Proyecto', font=appHighlightFont)
resultllbl = ttk.Label(content, text='Resultado', font=appHighlightFont)
tbcoslbl = ttk.Label(content, text='Coeficiente de Ocupación de Superficie (COS)')
tbcaslbl = ttk.Label(content, text='Coeficiente de Absorción de Superficie (CAS)')
tbcuslbl = ttk.Label(content, text='Coeficiente de Utilización de Superficie (CUS)')
content.grid(column=0, row=0, sticky=(N, S, E, W)) # Distribución de widgets
datoslbl.grid(column=0, row=0, columnspan=5, sticky=(N, W), pady=5, padx=5)
proylbl.grid(column=0, row=1, columnspan=1, sticky=(N, E), pady=5, padx=5)
proy.grid(column=1, row=1, columnspan=6, sticky=(N, E, W), pady=5, padx=5)
clilbl.grid(column=0, row=2, columnspan=1, sticky=(N, E), pady=5, padx=5)
cli.grid(column=1, row=2, columnspan=6, sticky=(N, E, W), pady=5, padx=5)
coslbl.grid(column=0, row=3, columnspan=1, sticky=(N, E), pady=5, padx=5)
cos.grid(column=1, row=3, columnspan=1, sticky=(N, E, W), pady=5, padx=5)
caslbl.grid(column=2, row=3, columnspan=1, sticky=(N, E), pady=5, padx=5)
cas.grid(column=3, row=3, columnspan=1, sticky=(N, E, W), pady=5, padx=5)
cuslbl.grid(column=4, row=3, columnspan=1, sticky=(N, E), pady=5, padx=5)
cus.grid(column=5, row=3, columnspan=1, sticky=(N, E, W), pady=5, padx=5)
terlbl.grid(column=0, row=4, columnspan=1, sticky=(N, E), pady=5, padx=5)
ter.grid(column=1, row=4, columnspan=1, sticky=(N, E, W), pady=5, padx=5)
conslbl.grid(column=2, row=4, columnspan=1, sticky=(N, E), pady=5, padx=5)
cons.grid(column=3, row=4, columnspan=1, sticky=(N, E, W), pady=5, padx=5)
contlbl.grid(column=4, row=4, columnspan=1, sticky=(N, E), pady=5, padx=5)
cont.grid(column=5, row=4, columnspan=1, sticky=(N, E, W), pady=5, padx=5)
abslbl.grid(column=0, row=5, columnspan=1, sticky=(N, E), pady=5, padx=5)
abso.grid(column=1, row=5, columnspan=1, sticky=(N, E, W), pady=5, padx=5)
tbllbl.grid(column=0, row=6, columnspan=5, sticky=(N, W), pady=5, padx=5)
parlbl.grid(column=0, row=7, columnspan=2, sticky=(N, W), pady=5, padx=5)
faclbl.grid(column=2, row=7, columnspan=1, sticky=(N, E), pady=5, padx=5)
calclbl.grid(column=3, row=7, columnspan=1, sticky=(N, E), pady=5, padx=5)
reallbl.grid(column=4, row=7, columnspan=1, sticky=(N, E), pady=5, padx=5)
resultllbl.grid(column=5, row=7, columnspan=1, sticky=(N, E), pady=5, padx=5)
tblsep.grid(column=0, row=8, columnspan=6, sticky=(E, W))
tbcoslbl.grid(column=0, row=9, columnspan=2, sticky=(N, W), pady=5, padx=5)
tbcaslbl.grid(column=0, row=10, columnspan=2, sticky=(N, W), pady=5, padx=5)
tbcuslbl.grid(column=0, row=11, columnspan=2, sticky=(N, W), pady=5, padx=5)
ttk.Label(content, textvariable=dsp_f_cos).grid(column=2, row=9, columnspan=1, sticky=(N, E), pady=5, padx=5)
ttk.Label(content, textvariable=dsp_f_cas).grid(column=2, row=10, columnspan=1, sticky=(N, E), pady=5, padx=5)
ttk.Label(content, textvariable=dsp_f_cus).grid(column=2, row=11, columnspan=1, sticky=(N, E), pady=5, padx=5)
ttk.Label(content, textvariable=dsp_cos).grid(column=3, row=9, columnspan=1, sticky=(N, E), pady=5, padx=5)
ttk.Label(content, textvariable=dsp_cas).grid(column=3, row=10, columnspan=1, sticky=(N, E), pady=5, padx=5)
ttk.Label(content, textvariable=dsp_f_cus).grid(column=3, row=11, columnspan=1, sticky=(N, E), pady=5, padx=5)
ttk.Label(content, textvariable=dsp_s_cont).grid(column=4, row=9, columnspan=1, sticky=(N, E), pady=5, padx=5)
ttk.Label(content, textvariable=dsp_s_abso).grid(column=4, row=10, columnspan=1, sticky=(N, E), pady=5, padx=5)
ttk.Label(content, textvariable=dsp_cus).grid(column=4, row=11, columnspan=1, sticky=(N, E), pady=5, padx=5)
ttk.Label(content, textvariable=stat_cos).grid(column=5, row=9, columnspan=1, sticky=(N, E), pady=5, padx=5)
ttk.Label(content, textvariable=stat_cas).grid(column=5, row=10, columnspan=1, sticky=(N, E), pady=5, padx=5)
ttk.Label(content, textvariable=stat_cus).grid(column=5, row=11, columnspan=1, sticky=(N, E), pady=5, padx=5)
ttk.Label(content, text='').grid(column=0, row=12) # Botones
ttk.Button(content, text="Calcular", command=calculos).grid(column=5, row=13, columnspan=1, sticky=(E, W), padx=5)
ttk.Button(content, text="Reporte", command=rep).grid(column=4, row=13, columnspan=1, sticky=(E, W))
ttk.Button(content, text="Info...", command=infobox).grid(column=0, row=13, columnspan=1, sticky=(E, W), padx=5)
root.columnconfigure(0, weight=1) # Configuración general de filas y columnas
root.rowconfigure(0, weight=1)
content.columnconfigure(1, weight=1)
content.columnconfigure(2, weight=1)
content.columnconfigure(3, weight=1)
content.columnconfigure(4, weight=1)
content.columnconfigure(5, weight=1)
proy.focus() # Enfoque y comandos de ejecución
root.bind('<Return>', calculos)
root.mainloop()
| [
"cadops.dc@gmail.com"
] | cadops.dc@gmail.com |
d56d2f9f8cdeb13510cc3e3fcd4c33f8bdd32d90 | 32337949b8b216c7ae621ab094fc2576b8031a85 | /blockstore/lib/blockstore/BlockStoreService-remote | ab68ff1c4d919609dd1267535aa1bae164a2b8fb | [] | no_license | jianlinl/openblockchain | e2c82ca0688b3319272bea9bcdea7413ed27c95c | c90df4d65907d5444729b877482cf545b9c9ef47 | refs/heads/master | 2020-05-20T22:26:07.144202 | 2016-06-23T09:23:21 | 2016-06-23T09:23:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,171 | #!/usr/bin/env python
#
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
import sys
import pprint
from urlparse import urlparse
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.transport import TSSLSocket
from thrift.transport import THttpClient
from thrift.protocol import TBinaryProtocol
from blockstore import BlockStoreService
from blockstore.ttypes import *
if len(sys.argv) <= 1 or sys.argv[1] == '--help':
print('')
print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] [-s[sl]] function [arg1 [arg2...]]')
print('')
print('Functions:')
print(' Block getBlock(Network network, string blockhash)')
print(' Block getTipBlock(Network network)')
print(' getTailBlockList(Network network, i32 n)')
print(' Verification verifyBlock(Network network, Block block)')
print(' void addBlock(Network network, Block block, txIds)')
print(' void rewindTip(Network network, i32 height)')
print(' Tx getTx(Network network, string txid)')
print(' getTxList(Network network, txids)')
print(' getMissingTxIdList(Network network, txids)')
print(' Verification verifyTx(Network network, Tx tx, bool mempool)')
print(' void addTxList(Network network, txes, bool mempool)')
print(' void removeTx(Network network, string txid)')
print(' getTxListSince(Network network, string objId, i32 n)')
print(' getTailTxList(Network network, i32 n)')
print(' getRelatedTxList(Network network, addresses)')
print(' getRelatedTxIdList(Network network, addresses)')
print(' getSendingTxList(Network network)')
print(' getSendTxList(Network network, txids)')
print(' void sendTx(Network network, SendTx sendTx)')
print(' getUnspent(Network network, addresses)')
print(' getMissingInvList(Network network, invs)')
print(' void pushPeers(Network network, peers)')
print(' popPeers(Network network, i32 n)')
print('')
sys.exit(0)
pp = pprint.PrettyPrinter(indent = 2)
host = 'localhost'
port = 9090
uri = ''
framed = False
ssl = False
http = False
argi = 1
if sys.argv[argi] == '-h':
parts = sys.argv[argi+1].split(':')
host = parts[0]
if len(parts) > 1:
port = int(parts[1])
argi += 2
if sys.argv[argi] == '-u':
url = urlparse(sys.argv[argi+1])
parts = url[1].split(':')
host = parts[0]
if len(parts) > 1:
port = int(parts[1])
else:
port = 80
uri = url[2]
if url[4]:
uri += '?%s' % url[4]
http = True
argi += 2
if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed':
framed = True
argi += 1
if sys.argv[argi] == '-s' or sys.argv[argi] == '-ssl':
ssl = True
argi += 1
cmd = sys.argv[argi]
args = sys.argv[argi+1:]
if http:
transport = THttpClient.THttpClient(host, port, uri)
else:
socket = TSSLSocket.TSSLSocket(host, port, validate=False) if ssl else TSocket.TSocket(host, port)
if framed:
transport = TTransport.TFramedTransport(socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = BlockStoreService.Client(protocol)
transport.open()
if cmd == 'getBlock':
if len(args) != 2:
print('getBlock requires 2 args')
sys.exit(1)
pp.pprint(client.getBlock(eval(args[0]),args[1],))
elif cmd == 'getTipBlock':
if len(args) != 1:
print('getTipBlock requires 1 args')
sys.exit(1)
pp.pprint(client.getTipBlock(eval(args[0]),))
elif cmd == 'getTailBlockList':
if len(args) != 2:
print('getTailBlockList requires 2 args')
sys.exit(1)
pp.pprint(client.getTailBlockList(eval(args[0]),eval(args[1]),))
elif cmd == 'verifyBlock':
if len(args) != 2:
print('verifyBlock requires 2 args')
sys.exit(1)
pp.pprint(client.verifyBlock(eval(args[0]),eval(args[1]),))
elif cmd == 'addBlock':
if len(args) != 3:
print('addBlock requires 3 args')
sys.exit(1)
pp.pprint(client.addBlock(eval(args[0]),eval(args[1]),eval(args[2]),))
elif cmd == 'rewindTip':
if len(args) != 2:
print('rewindTip requires 2 args')
sys.exit(1)
pp.pprint(client.rewindTip(eval(args[0]),eval(args[1]),))
elif cmd == 'getTx':
if len(args) != 2:
print('getTx requires 2 args')
sys.exit(1)
pp.pprint(client.getTx(eval(args[0]),args[1],))
elif cmd == 'getTxList':
if len(args) != 2:
print('getTxList requires 2 args')
sys.exit(1)
pp.pprint(client.getTxList(eval(args[0]),eval(args[1]),))
elif cmd == 'getMissingTxIdList':
if len(args) != 2:
print('getMissingTxIdList requires 2 args')
sys.exit(1)
pp.pprint(client.getMissingTxIdList(eval(args[0]),eval(args[1]),))
elif cmd == 'verifyTx':
if len(args) != 3:
print('verifyTx requires 3 args')
sys.exit(1)
pp.pprint(client.verifyTx(eval(args[0]),eval(args[1]),eval(args[2]),))
elif cmd == 'addTxList':
if len(args) != 3:
print('addTxList requires 3 args')
sys.exit(1)
pp.pprint(client.addTxList(eval(args[0]),eval(args[1]),eval(args[2]),))
elif cmd == 'removeTx':
if len(args) != 2:
print('removeTx requires 2 args')
sys.exit(1)
pp.pprint(client.removeTx(eval(args[0]),args[1],))
elif cmd == 'getTxListSince':
if len(args) != 3:
print('getTxListSince requires 3 args')
sys.exit(1)
pp.pprint(client.getTxListSince(eval(args[0]),args[1],eval(args[2]),))
elif cmd == 'getTailTxList':
if len(args) != 2:
print('getTailTxList requires 2 args')
sys.exit(1)
pp.pprint(client.getTailTxList(eval(args[0]),eval(args[1]),))
elif cmd == 'getRelatedTxList':
if len(args) != 2:
print('getRelatedTxList requires 2 args')
sys.exit(1)
pp.pprint(client.getRelatedTxList(eval(args[0]),eval(args[1]),))
elif cmd == 'getRelatedTxIdList':
if len(args) != 2:
print('getRelatedTxIdList requires 2 args')
sys.exit(1)
pp.pprint(client.getRelatedTxIdList(eval(args[0]),eval(args[1]),))
elif cmd == 'getSendingTxList':
if len(args) != 1:
print('getSendingTxList requires 1 args')
sys.exit(1)
pp.pprint(client.getSendingTxList(eval(args[0]),))
elif cmd == 'getSendTxList':
if len(args) != 2:
print('getSendTxList requires 2 args')
sys.exit(1)
pp.pprint(client.getSendTxList(eval(args[0]),eval(args[1]),))
elif cmd == 'sendTx':
if len(args) != 2:
print('sendTx requires 2 args')
sys.exit(1)
pp.pprint(client.sendTx(eval(args[0]),eval(args[1]),))
elif cmd == 'getUnspent':
if len(args) != 2:
print('getUnspent requires 2 args')
sys.exit(1)
pp.pprint(client.getUnspent(eval(args[0]),eval(args[1]),))
elif cmd == 'getMissingInvList':
if len(args) != 2:
print('getMissingInvList requires 2 args')
sys.exit(1)
pp.pprint(client.getMissingInvList(eval(args[0]),eval(args[1]),))
elif cmd == 'pushPeers':
if len(args) != 2:
print('pushPeers requires 2 args')
sys.exit(1)
pp.pprint(client.pushPeers(eval(args[0]),eval(args[1]),))
elif cmd == 'popPeers':
if len(args) != 2:
print('popPeers requires 2 args')
sys.exit(1)
pp.pprint(client.popPeers(eval(args[0]),eval(args[1]),))
else:
print('Unrecognized method %s' % cmd)
sys.exit(1)
transport.close()
| [
"daniel.socials@gmail.com"
] | daniel.socials@gmail.com | |
3ee0d75b0a776e0534a549a5b773ee1bc50c7ac1 | 01b244cef33b0299ae6337da12c7a4a493128d07 | /debug_doc/test_logging.py | 84888703ce1b2a4a35b26186e873cb078d87fd17 | [] | no_license | Lee-1024/AutoTest | 84d40275be35954d0dd6c125fe4a8031f13e49e4 | 9ed0079ebbaed637d65032cba359c4d279345653 | refs/heads/master | 2021-07-08T09:02:48.773178 | 2019-04-09T08:41:44 | 2019-04-09T08:41:44 | 148,727,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,626 | py | #_*_ coding:utf-8 _*_
__author__ = 'Lee'
import logging
# Create the loggers used by the demo.
# NOTE: logging.getLogger(name) returns the SAME object for the same name,
# so logger1 and logger2 are aliases of one logger; the second setLevel
# call wins and 'mylogger' ends up at INFO, not DEBUG.
logger = logging.getLogger()          # root logger
logger1 = logging.getLogger('mylogger')
logger1.setLevel(logging.DEBUG)
logger2 = logging.getLogger('mylogger')   # same object as logger1
logger2.setLevel(logging.INFO)            # overrides the DEBUG level above
# Dotted names form a hierarchy: records propagate from child to ancestors.
logger3 = logging.getLogger('mylogger.child1')
logger3.setLevel(logging.WARNING)
logger4 = logging.getLogger('mylogger.child1.child2')
logger4.setLevel(logging.DEBUG)
logger5 = logging.getLogger('mylogger.child1.child2.child3')
logger5.setLevel(logging.DEBUG)
# Handler that writes records to a log file.
fh = logging.FileHandler('D:/AutoTest/debug_doc/test.log')
# Second handler that writes records to the console.
ch = logging.StreamHandler()
# Output format shared by both handlers.
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# Optional filter (disabled): would limit fh to 'mylogger.child1.child2'.
#filter = logging.Filter('mylogger.child1.child2')
#fh.addFilter(filter)
# Attach the handlers to every logger.
# NOTE: logger1 and logger2 are the same logger, so fh/ch get added to it
# twice; combined with propagation to ancestors (which also hold fh/ch),
# each record is emitted multiple times — this demo shows that effect.
#logger.addFilter(filter)
logger.addHandler(fh)
logger.addHandler(ch)
#logger1.addFilter(filter)
logger1.addHandler(fh)
logger1.addHandler(ch)
logger2.addHandler(fh)
logger2.addHandler(ch)
#logger3.addFilter(filter)
logger3.addHandler(fh)
logger3.addHandler(ch)
#logger4.addFilter(filter)
logger4.addHandler(fh)
logger4.addHandler(ch)
logger5.addHandler(fh)
logger5.addHandler(ch)
# 记录一条日志
logger.debug('logger debug message')
logger.info('logger info message')
logger.warning('logger warning message')
logger.error('logger error message')
logger.critical('logger critical message')
logger1.debug('logger1 debug message')
logger1.info('logger1 info message')
logger1.warning('logger1 warning message')
logger1.error('logger1 error message')
logger1.critical('logger1 critical message')
logger2.debug('logger2 debug message')
logger2.info('logger2 info message')
logger2.warning('logger2 warning message')
logger2.error('logger2 error message')
logger2.critical('logger2 critical message')
logger3.debug('logger3 debug message')
logger3.info('logger3 info message')
logger3.warning('logger3 warning message')
logger3.error('logger3 error message')
logger3.critical('logger3 critical message')
logger4.debug('logger4 debug message')
logger4.info('logger4 info message')
logger4.warning('logger4 warning message')
logger4.error('logger4 error message')
logger4.critical('logger4 critical message')
logger5.debug('logger5 debug message')
logger5.info('logger5 info message')
logger5.warning('logger5 warning message')
logger5.error('logger5 error message')
logger5.critical('logger5 critical message') | [
"lee_1024@yeah.net"
] | lee_1024@yeah.net |
72400fc6b6ffce55fbd9162fc62cecddf26120d2 | d8169f7c2efdeb40fe9dcdd59ce040138804d2af | /2nd/mysite/settings.py | b80fdf7c56fd69ac13acd7925dd80038a10abed8 | [] | no_license | KimDoKy/pyDjango | d9ab67b6da6541ebd04658945922d9924a85b107 | 53ef776dd20488f0dfda6b7e3fd5281e8f3e98fd | refs/heads/master | 2020-12-30T13:08:15.951633 | 2017-10-04T10:01:15 | 2017-10-04T10:01:15 | 91,325,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,604 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*kr)qf=3+unt7*9chabk@bc#(esu0cs8_o)nqgg8!e%crpv@5+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bookmark.apps.BookmarkConfig',
'blog.apps.BlogConfig',
'tagging.apps.TaggingConfig',
'disqus',
'django.contrib.sites',
'photo.apps.PhotoConfig',
]
DISQUS_WEBSITE_SHORTNAME = 'dokys-blog'
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
# Media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#LOGIN_URL = '/accounts/login/'
#LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/' | [
"makingfunk0@gmail.com"
] | makingfunk0@gmail.com |
a9ae2fb2f78a15607a4b525541fcba1d493dd20b | 18e8e0150f97ffc75c414b501c9da1d5f4c6914b | /Chapter_2/name_cases.py | d0e2fa881357e98d8427fc50e9ae57a7e9ac8445 | [] | no_license | fredsteam/python_work | 5460bb11bd0d80865f509241a290f47bf1ab7f2f | 315d942cad8158deae645307b0b6b00d480a5283 | refs/heads/main | 2023-07-14T06:38:05.008973 | 2021-07-12T19:04:02 | 2021-07-12T19:04:02 | 315,713,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | my_name = "Fred Russell"
# Print the same name in lowercase, UPPERCASE and Title Case, in that order.
for styled_name in (my_name.lower(), my_name.upper(), my_name.title()):
    print(styled_name)
| [
"freddie2test@gmail.com"
] | freddie2test@gmail.com |
52bc7f128792a60754a8768605b64ec973f3a0b1 | c61f41a8655b39098ffa257fb994979d17dfb10c | /cremilda/parser.py | f9fd5a4c5ff50d4043307e03b5cbf47de4a5c04b | [] | no_license | luizpaulosilva/compiladores-1 | 48f09085c0f61b2f1bea0507adde9a03473b2d23 | f553d9de0b6cd764d11bd533cec6bde9877d6587 | refs/heads/master | 2020-03-18T10:50:01.200756 | 2018-05-03T17:13:41 | 2018-05-03T17:13:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | import ox
from .lexer import tokens
from .ast import BinOp, FCall, Atom, Assign
def make_parser():
return ox.make_parser([
('module : statement SEMICOLON', lambda x, _: [x]),
('module : statement SEMICOLON module', statements),
('statement : NAME EQ expr', var_def),
('expr : atom OP expr', op_call),
('expr : atom', identity),
('atom : NUMBER', lambda x: Atom(float(x))),
('atom : STRING', lambda x: Atom(x[1:-1])),
('atom : BOOL', lambda x: Atom(x == 'true')),
('atom : LPAR expr RPAR', lambda x, y, z: y),
('atom : fcall', identity),
('fcall : NAME LPAR RPAR', lambda x, y, z: FCall(x, [])),
('fcall : NAME LPAR args RPAR', fcall),
('args : expr COMMA args', lambda x, _, xs: [x, *xs]),
('args : expr', lambda x: [x]),
], tokens=tokens)
# Funçoes auxiliares
identity = (lambda x: x)
op_call = (lambda x, op, y: BinOp(op, x, y))
fcall = (lambda x, y, z, w: FCall(x, z))
statements = (lambda x, _, xs: [x, *xs])
var_def = (lambda name, eq, expr: Assign(name, expr))
# Cria parser
parser = make_parser()
| [
"fabiomacedomendes@gmail.com"
] | fabiomacedomendes@gmail.com |
5c6ce2a9506d583b28bb577a5d0244721e0ec5d6 | 16fef38a28227a743517b9a4d75adc85e6a82b2d | /setup-py2exe.py | 8ef466c0a962b464f5b4b5889e9868898cb027f4 | [] | no_license | atmouse-/operapass-git | c6223d5dade40db809d9def96da5ad4f7d42042c | d763b48ff1ae04fd96740fb662e49c5232a24651 | refs/heads/master | 2020-04-12T07:07:23.045802 | 2014-05-19T09:48:06 | 2014-05-19T09:48:06 | 5,193,684 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | ####
#### Note: mailtray runs fine with Windows. I just can't get py2exe to work
####
import glob
import sys
from distutils.core import setup
import py2exe
ver = '%s%s' % (sys.version_info.major, sys.version_info.minor)
setup(
name = 'operapass',
version = '1.2',
author = 'atmouse',
author_email = 'xxx@gmail.com',
url = 'https://github.com/atmouse-',
scripts = ('operapass-dump',),
packages = ('operapass',),
#data_files = (),
options = {
'py2exe': {
#'compressed': True,
#'optimize': 2,
#'unbuffered': True,
# Turning this one causes issues with pygtk
#'bundle_files': 1,
#'dll_excludes': ['POWRPROF.DLL', 'MSWSOCK.DLL', ],
#'excludes': ['_ssl', 'doctest', 'pdb', 'unittest', 'difflib', 'inspect',
# 'pyreadline', 'optparse', 'calendar', 'pyexpat', 'bz2',],
}
}
)
| [
"at86mouse@gmail.com"
] | at86mouse@gmail.com |
2f2fab27e81dac73c1f3c5ab03f887a2515dc33c | d060db4eed9de0e87b462b1694d30e1c0810394d | /CompScience/2b2012/DMT/DMT-assignment3/Naive_Count_01.py | 48785f55b57f5db143047d4a065995302273b8e4 | [] | no_license | jrnhofman/MSc-Computational-Science | d6d5e9bc434c19d977544b0ef860057a95186db7 | 7f2ee6eccf91e0704d14265f47715a6ca2be392f | refs/heads/master | 2022-11-29T16:02:21.742533 | 2020-08-03T09:09:35 | 2020-08-03T09:09:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,672 | py |
from pandas import *
from numpy import *
from random import *
import datetime
from matplotlib.pyplot import *
import sys
TRAIN_ON_EVERYTHING = True
#Read dates
def readdate(s):
return datetime.datetime.strptime(s, '%d/%m/%Y')
#Read in file
data = read_csv('FIFA0212.csv',parse_dates=True)
#Throw away unplayed matches and unused attributes
if TRAIN_ON_EVERYTHING:
unplayed = data[(data['Score1'] == '?')]
unplayed.Date = map(readdate, unplayed.Date)
unplayed = unplayed[unplayed.Date <= datetime.datetime(2012,06,05)]
unplayed = unplayed.join(DataFrame(range(len(unplayed)),columns=['Outcome']))
data = data[(data['Score1'] != '?')]
data = data[['Date','Team1','Team2','Score1','Score2','HomeTeam']]
#Throw away irrelevant countries
data = data[data['Team1'] != 'Yugoslavia']
data = data[data['Team2'] != 'Yugoslavia']
data = data[data['Team1'] != 'NetherlandsAntilles']
data = data[data['Team2'] != 'NetherlandsAntilles']
data = data[data['Team1'] != 'SerbiaandMontenegro']
data = data[data['Team2'] != 'SerbiaandMontenegro'].reset_index()
#Set datatypes
data['Score1'] = map(int,data['Score1'])
data['Score2'] = map(int,data['Score2'])
data['HomeTeam'] = map(int,data['HomeTeam'])
#Read in FIFA ranking list
rank = read_csv('Ranking.txt',sep='\t',header=None,names=['ActualRank','Team','Rank','bla2', 'bla3', 'bla4'] )
rank = rank[['Rank','Team']]
rank['Rank'] = map(float,rank['Rank'])
rank['Rank'] = zeros(len(rank))
#Checking whether spelling is the same in both lists
for i in range(len(rank)):
for j in range(len(data)):
if str(data.get_value(j,'Team1')) == rank.get_value(i,'Team') or data.get_value(j,'Team2') == rank.get_value(i,'Team'):
break
if j==len(data)-1:
print 'Country not found!',rank.get_value(i,'Team')
# print 'All country names checked!'
data = data.join(DataFrame(zeros(len(data)),columns=['Outcome']))
data.Outcome[data.Score1 > data.Score2] = 1
data.Outcome[data.Score1 == data.Score2] = 0.5
resultcsv = []
for t1,t2 in zip(unplayed.Team1, unplayed.Team2):
matches = data[((data.Team1 == t1) & (data.Team2 == t2))
| ((data.Team1 == t2) & (data.Team2 == t1))]
if len(matches) == 0:
resultcsv.append([1/3.0]*3)
else:
w = matches[((matches.Outcome == 0) & (matches.Team1 == t2))
|((matches.Outcome == 1) & (matches.Team1 == t1))]
t = matches[matches.Outcome == 0.5]
w = len(w) / float(len(matches))
t = len(t) / float(len(matches))
l = 1 - w - t
resultcsv.append([w,t,l])
with open('result.csv', 'w') as f:
for w,t,l in resultcsv:
f.write("%s,%s,%s\n" % (w,t,l))
| [
"jrn.hofman@gmail.com"
] | jrn.hofman@gmail.com |
d6c10f9ac176af80bb48c67f0250ed3c3d5b9909 | 134253ff4e4db236bcb1b9be413cef4341d58f61 | /naivebayes.py | 1f9ea214de70f11c23cb7d9ecf4f79d6c72b865a | [] | no_license | ashish-2409/Machine-Learning- | 2c424cd1d4d4e4abe7132d7f1b724fa41b02ae2d | 6543880cd1129b2b130d7d1c108756acf64217ce | refs/heads/main | 2023-06-16T08:16:29.830857 | 2021-07-19T16:50:41 | 2021-07-19T16:50:41 | 387,533,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | # -*- coding: utf-8 -*-
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
import pandas as pd
import numpy as np
import missingno as msno
f=datasets.load_iris()
x=f.data
y=f.target
data=pd.DataFrame(x,columns=f.feature_names)
data
msno.matrix(data)
data.isnull().sum()
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.25,random_state=1)
model=GaussianNB()
model.fit(x_train,y_train)
y_pred=model.predict(x_test)
print('Accuracy ',accuracy_score(y_pred,y_test)*100)
print(confusion_matrix(y_pred,y_test))
print(classification_report(y_test,y_pred))
| [
"noreply@github.com"
] | ashish-2409.noreply@github.com |
1582104914b9a7325ea2769e5689176e12bbf4fb | ef957026ab9da2e574477bb6ea706fd16b536d82 | /motor/sbp_main.py | 68af634c974fa4a5171d55a82164b7f920d0ae4c | [] | no_license | Akisazaki/af-sbp | b04a4ebcd8098eff2ace68384bf3bf1654955a61 | e28f320a0141d7f3b5214a4c559efbc6c11cd9b3 | refs/heads/master | 2021-10-13T06:32:15.886339 | 2021-10-07T14:26:53 | 2021-10-07T14:26:53 | 203,739,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,842 | py | #!/usr/bin/env python
import rospy
import roslib
import numpy
import math
import tf
from std_msgs.msg import String, Float32, UInt8
from geometry_msgs.msg import Twist, Point, Quaternion
from nav_msgs.msg import Odometry
import time
import threading
import sys
import serial
import parameter
import controller
from utils import getch
arming_flag = False
exit_flag = False
rs485_lock = threading.Lock()
command_lock = threading.Lock()
control_lock = threading.Lock()
vel_spatial = 0.0
vel_angular = 0.0
vel_left = 0
vel_right = 0
vel_fan = 0.0
WHEEL_CIRCUMFERENCE = parameter.WHEEL_RADIUS * 2.0 * math.pi
WHEEL_ARC_PER_DEGREE = WHEEL_CIRCUMFERENCE / 360.0
INV_WHEEL_ARC_PER_DEGREE = 1.0 / WHEEL_ARC_PER_DEGREE
# DRONE_CIRCUMFERENCE = WHEEL_DISTANCE * math.pi
# DRONE_ARC_PER_DEGREE = DRONE_CIRCUMFERENCE / 360.0
def mps2dps(meterPerSec):
'''
Meter per second to Degrees per second
arguments:
[float] meterPerSec
return:
[float] degreePerSec
'''
return meterPerSec * INV_WHEEL_ARC_PER_DEGREE
def angular2mps(angularPerSec):
'''
Angular per second to Meter per second
arguments:
[float] angularPerSec
return:
[float] meterPerSec
'''
return angularPerSec * parameter.WHEEL_DISTANCE
sDrvFan = serial.Serial(
port = parameter.SER_DRV_FAN,
baudrate = parameter.SER_BAUDRATE,
parity = serial.PARITY_NONE,
stopbits = serial.STOPBITS_ONE,
bytesize = serial.EIGHTBITS,
timeout = parameter.SER_TIMEOUT
)
cmdSubscriber = None
fanSubscriber = None
if sDrvFan.isOpen():
print("Succeeded to connect to drive fan controller")
else:
print("Failed to connect to the drive fan controller")
utils.getch()
quit()
def cmd_vel_callback(data):
global vel_spatial
global vel_angular
global vel_left
global vel_right
# rospy.loginfo(rospy.get_caller_id() + " I heard %s", data.linear.x)
if vel_spatial != data.linear.x or vel_angular != data.angular.z:
vel_spatial = data.linear.x
vel_angular = data.angular.z
vel_diff = angular2mps(vel_angular) * 0.5
left_val = int(mps2dps(-vel_spatial + vel_diff))
right_val = int(mps2dps(vel_spatial + vel_diff))
command_lock.acquire()
vel_left = left_val
vel_right = right_val
command_lock.release()
print("Cmd: %f %f\r\n" % (vel_spatial, vel_angular))
def cmd_vel_listener():
global cmdSubscriber
cmdSubscriber = rospy.Subscriber("cmd_vel", Twist, callback=cmd_vel_callback, queue_size=10)
# rospy.init_node('odom_node', anonymous=True)
# odom_pub = rospy.Publisher('odom', Odometry, queue_size=10)
# rospy.spin()
def fan_vel_callback(data):
global vel_fan
vel_fan = data.data
print("Fan: %f\r\n" % vel_fan)
def fan_vel_listener():
global fanSubscriber
fanSubscriber = rospy.Subscriber("fan_vel", Float32, callback=fan_vel_callback, queue_size=3)
def init_listener():
rospy.init_node('motor_controllers', anonymous=False)
cmd_vel_listener()
fan_vel_listener()
def stop_listener():
global cmdSubscriber
global fanSubscriber
if None != cmdSubscriber:
cmdSubscriber.unregister()
cmdSubscriber = None
if None != fanSubscriber:
fanSubscriber.unregister()
fanSubscriber = None
class Subscriber(threading.Thread):
'''
subscribe and read the motor's state data
'''
def __init__(self, threadID, name, controller, time_step):
'''
set up the subscriber
arguments:
int [thread ID]
str [thread name]
command.controller [target controller]
float [time step]
'''
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.controller = controller
self.time_step = time_step
def run(self):
'''
run the subscriber thread
returns:
bool [terhimation without an error]
'''
while arming_flag:
rs485_lock.acquire()
self.controller.request()
rs485_lock.release()
time.sleep(self.time_step)
printout = 'Thread ' + str(self.threadID) + ' disabled\r\n'
sys.stdout.write(printout)
return True
class Publisher(threading.Thread):
'''
publish the motor's state data
'''
def __init__(self, threadID, name, controller, time_step):
'''
set up the publisher
arguments:
int [thread ID]
str [thread name]
command.controller [target controller]
float [time step]
'''
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.time_step = max(time_step - 0.05, 0)
self.controller = controller
def run(self):
'''
run the publisher thread
returns:
bool [terhimation without an error]
'''
while arming_flag:
rs485_lock.acquire()
control_lock.acquire()
data = self.controller.control()
control_lock.release()
time.sleep(0.05) # 0.005 with onboard
rs485_lock.release()
time.sleep(self.time_step)
printout = 'Thread ' + str(self.threadID) + ' disabled\r\n'
sys.stdout.write(printout)
return True
class Key(threading.Thread):
def __init__(self, time_step):
threading.Thread.__init__(self)
self.threadID = 0
self.name = "emergency_key"
self.time_step = time_step
self.time_init = time.time()
def run(self):
'''
run the subscriber thread
returns:
bool [terhimation without an error]
'''
global arming_flag
while arming_flag:
# time_past = time.time()- self.time_init
# if time_past >10:
# arming_flag = False
ch = getch()
if ch == 'q':
arming_flag = False
break
time.sleep(self.time_step)
printout = 'Thread ' + str(self.threadID) + ' disabled\r\n'
sys.stdout.write(printout)
return True
if __name__ == "__main__":
queue = []
try:
port = serial.Serial(
port = parameter.SER_DRV_FAN,
baudrate = parameter.SER_BAUDRATE,
parity = serial.PARITY_NONE,
stopbits = serial.STOPBITS_ONE,
bytesize = serial.EIGHTBITS,
timeout = parameter.SER_TIMEOUT
)
driving_controller = controller.Controller(port, controller.TYPE_DEV_DRV)
fan_controller = controller.Controller(port, controller.TYPE_DEV_FAN)
init_listener()
set_key = Key(0.005)
# observer_thread = Subscriber(1, "observer", driving_controller, 0.5)
controller_thread = Publisher(2, "controller", driving_controller, 0.5)
fan_controller_thread = Publisher(3, "controller", fan_controller, 0.7)
# queue.append(observer_thread)
queue.append(controller_thread)
queue.append(fan_controller_thread)
try:
arming_flag = True
set_key.start()
driving_controller.enable()
fan_controller.enable()
sys.stdout.write('Motor controller enabled\r\n')
time.sleep(5.0e-1)
# set default value
driving_controller.control()
time.sleep(1.0e-2)
# observer_thread.start()
controller_thread.start()
time.sleep(1.0e-2)
fan_controller_thread.start()
time_init = time.time()
time.sleep(1.0)
control_lock.acquire()
driving_controller.refer((0, 0))
fan_controller.refer((0, 0))
control_lock.release()
driving_controller.control()
time.sleep(5.0e-2)
fan_controller.control()
time.sleep(5.0e-2)
fan_val = 0
while True:
time_past = time.time() - time_init
command_lock.acquire()
left_val = vel_left
right_val = vel_right
command_lock.release()
control_lock.acquire()
driving_controller.refer((left_val, right_val))
control_lock.release()
time.sleep(5.0e-2)
command_lock.acquire()
if 0.0 >= vel_fan:
fan_val = 0
elif 1.0 <= vel_fan:
fan_val = parameter.FAN_MAX
else:
fan_val = max(parameter.FAN_MIN, int(parameter.FAN_MAX * vel_fan))
command_lock.release()
control_lock.acquire()
fan_controller.refer((fan_val, 0))
control_lock.release()
if arming_flag != True:
for s in queue:
s.join
sys.stdout.write("Exiting main thread\r\n")
control_lock.acquire()
driving_controller.refer((0, 0))
control_lock.release()
driving_controller.control()
time.sleep(5.0e-2)
driving_controller.disable()
sys.stdout.write("Motor controller disabled\r\n")
time.sleep(1.0e-1)
break
except Exception as err1:
print ("Error: unable to start thread")
print (err1)
stop_listener()
except Exception as err2:
print ("Error: unable to generate thread")
print (err2) | [
"akisazaki@gmail.com"
] | akisazaki@gmail.com |
d35ed28ae71ab53f059762d90843dca65d29ded6 | acd25aba4c957b8996f9b2f61199aba7b12649e0 | /Day 64/HBT.py | f86f86210deca0df8dd62ff67874aa477bba6dd6 | [
"MIT"
] | permissive | IamBikramPurkait/100DaysOfAlgo | 2abb3591ccdbb7922bc7e737ea78c9c97ee0084f | e5f775b24fe93ed30d854558756218a295d2486e | refs/heads/master | 2023-04-15T07:08:06.069471 | 2020-11-10T13:17:53 | 2020-11-10T13:17:53 | 311,734,442 | 1 | 0 | MIT | 2021-04-25T00:30:42 | 2020-11-10T17:25:31 | null | UTF-8 | Python | false | false | 2,214 | py | '''Given Inorder traversal and Level Order traversal of a Binary Tree. The task is to calculate the height of the tree without constructing it.
Example:
Input : Input: Two arrays that represent Inorder
and level order traversals of a
Binary Tree
in[] = {4, 8, 10, 12, 14, 20, 22};
level[] = {20, 8, 22, 4, 12, 10, 14};
Output : 4
'''
# Python3 program to calculate height of Binary Tree from InOrder and LevelOrder Traversals
# Function to find the index of value in the InOrder Traversal list
def search(arr, start, end, value):
for i in range(start, end + 1):
if arr[i] == value:
return i
return -1
# Function to calculate the height of the Binary Tree
def getHeight(inOrder, levelOrder,
start, end, height, n):
# Base Case
if start > end:
return 0
# Get Index of current root in InOrder Traversal
getIndex = search(inOrder, start, end, levelOrder[0])
if getIndex == -1:
return 0
# Count elements in Left Subtree
leftCount = getIndex - start
# Count elements in Right Subtree
rightCount = end - getIndex
# Declare two lists for left and right subtrees
newLeftLevel = [None for _ in range(leftCount)]
newRightLevel = [None for _ in range(rightCount)]
lheight, rheight, k = 0, 0, 0
# Extract values from level order traversal list
# for current left subtree
for i in range(n):
for j in range(start, getIndex):
if levelOrder[i] == inOrder[j]:
newLeftLevel[k] = levelOrder[i]
k += 1
break
k = 0
# Extract values from level order traversal list
# for current right subtree
for i in range(n):
for j in range(getIndex + 1, end + 1):
if levelOrder[i] == inOrder[j]:
newRightLevel[k] = levelOrder[i]
k += 1
break
# Recursively call to calculate height
# of left subtree
if leftCount > 0:
lheight = getHeight(inOrder, newLeftLevel, start,
getIndex - 1, height, leftCount)
# Recursively call to calculate height
# of right subtree
if rightCount > 0:
rheight = getHeight(inOrder, newRightLevel,
getIndex + 1, end, height, rightCount)
# current height
height = max(lheight + 1, rheight + 1)
# return height
return height
| [
"noreply@github.com"
] | IamBikramPurkait.noreply@github.com |
de523ce13bb8af7b596e93936e787c96da860b82 | 6786dcdfbf0ab426e04c29fd1094ec198ae4b120 | /crmapp/subscribers/migrations/0001_initial.py | dac6c66df27a6e1b754cbd9d92ee0576edf8f65c | [] | no_license | cicekhayri/CRM-app-built-with-Django | f705d63e0b687dcc128bf3304236b85dbc7d5b89 | 24931d5888c269ccc9a8a72043bb6a46cb02a5af | refs/heads/master | 2021-01-20T19:30:34.050088 | 2016-07-27T12:20:31 | 2016-07-27T12:20:31 | 64,306,055 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-26 20:52
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Subscriber',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address_one', models.CharField(max_length=100)),
('address_two', models.CharField(blank=True, max_length=100)),
('city', models.CharField(max_length=50)),
('state', models.CharField(max_length=2)),
('stripe_id', models.CharField(blank=True, max_length=30)),
('user_rec', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'subscribers',
},
),
]
| [
"hello@hayricicek.se"
] | hello@hayricicek.se |
51ed26d155d3ac70a5b01ef59f20d79a642bf07f | ef6229d281edecbea3faad37830cb1d452d03e5b | /ucsmsdk/mometa/adaptor/AdaptorIscsiAuth.py | 85653c123f5847d9bf6701d752efdd160c69cfe0 | [
"Apache-2.0"
] | permissive | anoop1984/python_sdk | 0809be78de32350acc40701d6207631322851010 | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | refs/heads/master | 2020-12-31T00:18:57.415950 | 2016-04-26T17:39:38 | 2016-04-26T17:39:38 | 57,148,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,402 | py | """This module contains the general information for AdaptorIscsiAuth ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class AdaptorIscsiAuthConsts():
pass
class AdaptorIscsiAuth(ManagedObject):
"""This is AdaptorIscsiAuth class."""
consts = AdaptorIscsiAuthConsts()
naming_props = set([])
mo_meta = MoMeta("AdaptorIscsiAuth", "adaptorIscsiAuth", "iscsi-auth", VersionMeta.Version201m, "InputOutput", 0x1f, [], ["read-only"], [], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version201m, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"password": MoPropertyMeta("password", "password", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version201m, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"user_id": MoPropertyMeta("user_id", "userId", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"password": "password",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"userId": "user_id",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.password = None
self.sacl = None
self.status = None
self.user_id = None
ManagedObject.__init__(self, "AdaptorIscsiAuth", parent_mo_or_dn, **kwargs)
| [
"test@cisco.com"
] | test@cisco.com |
c58289fbd95b5d04ab6dde3e38e1d8e6d3e2388d | c9081f0d087a7103062ed741fec7c93450cbede6 | /scrape/scraper.py | 0a797841649537a73f8bba6890679e65505c3463 | [] | no_license | alexseong/capstone_project | f3391dd917fdf281120e8a79725633e819dc4e42 | 354bf85ee3aea356ed915dc26c809554179ebf6a | refs/heads/master | 2020-06-16T07:09:01.355326 | 2016-11-28T07:34:49 | 2016-11-28T07:34:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,226 | py | import pandas as pd
import numpy as np
import random
import time
import project
import founder
from bs4 import BeautifulSoup
from project import get_project, get_rewards
from founder import get_profile, get_bio
from featurizer1 import featurize
from pymongo import MongoClient
import urllib2
# Set user_agent to Mozilla browser
user_agent = {'User-Agent': 'Mozilla/5.0'}
# Initialize MongoDB database and collection
client = MongoClient()
db = client['ksdb']
collection = db['ksdata']
# Load array of project_id, founder_id
# url_list = np.loadtxt('url_list.csv', delimiter=',')
df = pd.read_csv('id_url_list.csv', dtype=object, header=None)
id_url_list = df.values
#
# def timeit(method):
# '''
# Wrapper for timing functions.
# '''
# def timed(*args, **kw):
# ts = time.time()
# result = method(*args, **kw)
# te = time.time()
#
# print '%r %2.2f sec' % \
# (method.__name__, te-ts)
#
# return result
#
# return timed
def subsample(arr, p=1):
'''
Returns random subsample of 2D array. Default sample
size is 100.
'''
mask = np.random.choice([True, False], arr.shape[0],
p = [p, 1 - p])
sub = arr[mask]
return sub
def id_generator(arr):
'''
Create new generator.
'''
return (x for x in arr)
# @timeit
def extract(generator):
'''
Scrapes Kickstarter pages and parses features into
MongoDB database. This function calls the featurize
function from the featurizer module to insert data
into the MongoDB database.
'''
go = True
progress = 0
skipped = 0
failed = 0
while go:
block_size = random.randint(5, 10)
wait_time = random.randint(2, 4)
wait = False
print '\n'
print 'Block size: {0}'.format(block_size)
for i in xrange(0, block_size):
# founder_id, project_id = (int(x) for x in generator.next())
project_id,founder_id, project_url, founder_url, rewards_url = (x for x in generator.next())
collection_check = set(db.ksdata.distinct('project_id', {}))
if project_id in collection_check:
print "Already scraped"
skipped += 1
wait = False
else:
try:
project_soup, project_url, status1 = get_project(project_url)
founder_soup, founder_url, status2 = get_profile(founder_url)
rewards_soup, rewards_url, status3 = get_rewards(rewards_url)
if (status1 & status2 & status3) == 200:
featurize(project_id, founder_id, project_url, founder_url, rewards_url, project_soup, founder_soup, rewards_soup, collection)
progress += 1
wait = True
except requests.ConnectionError:
failed +=1
print '\n'
print 'Scraped: {}'.format(progress)
print 'Skipped: {}'.format(skipped)
print 'Failed: {}'.format(failed)
print 'Next block: {}s'.format(wait_time)
if wait:
time.sleep(wait_time)
else:
pass
| [
"lizsia@Elizabeths-MacBook-Pro.local"
] | lizsia@Elizabeths-MacBook-Pro.local |
b07068e53d5ceac86d2431c09b775cdc9a8e872a | 159aed4755e47623d0aa7b652e178296be5c9604 | /data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_insecthill_small_fog_gray.py | 7b3c9de5103bfa24a894b44ac8e8c59d5f7349ac | [
"MIT"
] | permissive | anhstudios/swganh | fb67d42776864b1371e95f769f6864d0784061a3 | 41c519f6cdef5a1c68b369e760781652ece7fec9 | refs/heads/develop | 2020-12-24T16:15:31.813207 | 2016-03-08T03:54:32 | 2016-03-08T03:54:32 | 1,380,891 | 33 | 44 | null | 2016-03-08T03:54:32 | 2011-02-18T02:32:45 | Python | UTF-8 | Python | false | false | 469 | py | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/base/shared_poi_all_lair_insecthill_small_fog_gray.iff"
result.attribute_template_id = -1
result.stfName("lair_n","insecthill")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | [
"rwl3564@rit.edu"
] | rwl3564@rit.edu |
8f87991b0988e7ee32771e0a5721ecd45ceed216 | e12b3da8286fa7171ee0bd3f451839c15e16cf96 | /lib/galaxy/model/migrate/versions/0179_drop_transferjob_table.py | acd6c76744b6a88a7b446fc6de0864ea59d7fef8 | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | yvanlebras/galaxy | 973b66c22a5d908acfec6248ee3687f5a3c1ce32 | 6b8489ca866825bcdf033523120a8b24ea6e6342 | refs/heads/dev | 2023-07-24T12:09:50.312573 | 2022-03-09T08:23:52 | 2022-03-09T08:23:52 | 85,213,472 | 0 | 0 | NOASSERTION | 2022-03-08T10:20:07 | 2017-03-16T15:45:16 | Python | UTF-8 | Python | false | false | 1,663 | py | """
Drop unused TransferJob table and foreign key column on genome_index_tool_data.
"""
import logging
from sqlalchemy import (
Column,
DateTime,
ForeignKey,
Integer,
MetaData,
String,
Table,
)
from galaxy.model.custom_types import JSONType
from galaxy.model.migrate.versions.util import (
add_column,
create_table,
drop_column,
drop_table,
)
from galaxy.model.orm.now import now
log = logging.getLogger(__name__)
metadata = MetaData()
TransferJob_table = Table(
"transfer_job",
metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("state", String(64), index=True),
Column("path", String(1024)),
Column("params", JSONType),
Column("pid", Integer),
Column("socket", Integer),
)
transfer_job_id = Column("transfer_job_id", Integer, ForeignKey("transfer_job.id"), index=True)
def upgrade(migrate_engine):
print(__doc__)
metadata.bind = migrate_engine
metadata.reflect()
try:
drop_column(transfer_job_id.name, "genome_index_tool_data", metadata)
drop_table(TransferJob_table)
except Exception:
log.exception("Dropping transfer_job table failed")
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
try:
create_table(TransferJob_table)
add_column(
transfer_job_id, "genome_index_tool_data", metadata, index_name="ix_genome_index_tool_data_transfer_job_id"
)
except Exception:
log.exception("Creating transfer_job table failed")
| [
"sgolitsynskiy@gmail.com"
] | sgolitsynskiy@gmail.com |
13d3c9ff2fe421a4621b9dbf60e8c25f2103de18 | 17f3237b827e9a6a51ba6302aff16ee99f789886 | /loginRegistration/loginRegistration/urls.py | 594d5e656a5d849c635751064b30cf3c1e915d2a | [] | no_license | pjl4/DjangoWork | f7d8b49d382e3a7481bb3aa9edd651af772f86a1 | 0cb1508783145e88ccc0ee81f532fec7b5b8f3bd | refs/heads/master | 2020-04-17T19:23:22.148956 | 2019-01-31T15:39:20 | 2019-01-31T15:39:20 | 166,863,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | """loginRegistration URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
urlpatterns = [
url(r'^',include('apps.login.urls',namespace='login')),
url(r'^admin/', admin.site.urls),
]
| [
"pj.lutz17@gmail.com"
] | pj.lutz17@gmail.com |
cb3ed87c192e9365120dc2428d692024d5ff3cbf | e34c0256056bc1cde9a0743ffa333775ad598aa7 | /pandas/lesson2.py | cf5777277d96a034fcbbb0c74fe01c2fdff78bb4 | [] | no_license | toanbui1991/machineLearning | 3f96f13c138a0823be92d24386d6ce4f83ca7218 | afdf0482ee392a30ceae2407bba197e6697adf72 | refs/heads/master | 2021-04-28T23:38:39.574582 | 2017-05-20T09:01:41 | 2017-05-20T09:01:41 | 77,728,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,758 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 17 16:55:58 2017
@author: Bui Xuan Toan
"""
# Import all libraries needed for the tutorial
import pandas as pd
import os
from numpy import random
import matplotlib.pyplot as plt
import sys #only needed to determine Python version number
import matplotlib #only needed to determine Matplotlib version number
# Enable inline plotting
#%matplotlib inline
#test the version.
print('Python version ' + sys.version)
print('Pandas version ' + pd.__version__)
print('Matplotlib version ' + matplotlib.__version__)
# The inital set of baby names
names = ['Bob','Jessica','Mary','John','Mel']
#you want to generate list of names randomly from a set of name.
random.seed(300)
randomNames = [names[random.randint(0, len(names))] for i in range(1000)]
print(len(randomNames))
print(randomNames[:10])
#you want to craete a list of birthRate randomly.
random.seed(300)
randomBirthRate = [random.randint(0,1000) for i in range(1000)]
print(len(randomBirthRate))
print(randomBirthRate[:10])
#you want to create a tempData is a lisf of tuple.
#tempData[('name', birthRate)]
tempData = list(zip(randomNames, randomBirthRate))
print(len(tempData))
print(tempData[:10])
#you want to create Data Frame from tempData.
df = pd.DataFrame(tempData, columns = ['name', 'birthRate'])
print(df.head())
print(df.info())
#now you want to write the data into a csv file.
df.to_csv('birthData.csv', index=False, header=False)
#now you want to read the data again.
location = os.path.join(os.getcwd(), 'birthData.csv')
df = pd.read_csv(location)
print(df.head(6))
#you you want the df and assign names as the same time.
#now the df dont have header you want to assign name to it.
df = pd.read_csv(location, names = ['name', 'birthRate'])
print(df.head(6))
#you want to get the information about the df.
df.info()
#you want to see the tail data.
df.tail(10)
#you want to fine the set of name or unique().
df['name'].unique()
#you want to print unique name in the data set.
for n in df['name'].unique():
print(n)
#now you want more information about name series with decribe()
df['name'].describe()
#you want to group the data point and sum birthRate by name.
name = df.groupby('name') #name is like a df with group index by name column.
groupData = name.sum() #after you index how to group the data now you apply function.
print(groupData)
groupData2 = df.groupby('name').sum()
print(groupData2)
#you want to find the largest data point by sort_values().
sortedData = groupData.sort_values(['birthRate'], ascending=False)
print(sortedData.head(1)) #get the first data point.
#get the largest value of birthRate (evidence).
print(groupData['birthRate'].max())
#plot the sortedData.
sortedData['birthRate'].plot.bar()
| [
"toanbui1991@gmail.com"
] | toanbui1991@gmail.com |
88d8503427ae624abd293d7caa81510121c4e040 | dbc8ab296e2c07b7b5386335df57187834203a1f | /St. Happy Days Pub.py | d4609e3d5644923399380ff4db106d6c8db63e43 | [] | no_license | TheOneOtherCoder/St.-Happy-Day-Pub | 0b5dc7c72af9b759ddeb3cb1a72086a9d8bbbb34 | bf68427fece25f0bef0d19a3c61d63a59e20d10b | refs/heads/master | 2021-03-19T19:08:57.424345 | 2020-03-13T18:39:43 | 2020-03-13T18:39:43 | 247,139,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,121 | py | def start():
AGE_LIMIT = 18
StHappyDaySpecialBrew = float(40)
ClassicGuiness = float(25)
OldFashionedBrew = float(10)
QuickShotGlass = float(5)
FriendStat = 0
HP = 100
AlchoholLVL = 0
age = int(raw_input("Welcome to the St. Happy Days Pub, how old are ya? >> "))
if age < AGE_LIMIT:
print ("Sorry kid, but " + str(age) + " is below the age limit. Come back when ya older and start counting, cause you got " + str(18 - age) + " years left.")
else:
print ("Welp, " + str(age) + " is old enough. Come on in and have a good time.")
print ("You enter the pub and walk up to the bar, you look at the menue to see what you can get.")
print ("1 = St. Happy Day Special Brew")
print ("2 = Classic Guiness")
print ("3 = Old Fashioned Brew")
print ("4 = The Quick Shot Glass")
Beverage = (raw_input("Hello there, what would you like to have today sir? >> "))
if Beverage in (str("1")):
print ("The St. Happy Day Special Brew, a man in great taste I see. That will be $" + "%.2f" %(StHappyDaySpecialBrew))
AlchoholLVL = (AlchoholLVL + 50)
print ("Your AlchoholLVL raised by 50. Reach 100 and times won't be good. AlchoholLVL = " + str(AlchoholLVL))
elif Beverage in (str("2")):
print ("A classic guiness, an amazing standard drink. That will be $" + "%.2f" %(ClassicGuiness))
AlchoholLVL = (AlchoholLVL + 30)
print ("Your AlchoholLVL raised by 30. Reach 100 and times won't be good. AlchoholLVL = " + str(AlchoholLVL))
elif Beverage in (str("3")):
print ("The Old Fashioned Brew, quite the simple looking beverage, but it sure packs a kick. That will be $" + "%.2f" %(OldFashionedBrew))
AlchoholLVL = (AlchoholLVL + 70)
print ("Your AlchoholLVL raised by 70. Reach 100 and times won't be good. AlchoholLVL = " + str(AlchoholLVL))
elif Beverage in (str("4")):
print ("Ah, the quick shot, a perfect choice for people new to drinking. That will be $" + "%.2f" %(QuickShotGlass))
AlchoholLVL = (AlchoholLVL + 10)
print ("Your AlchoholLVL raised by 10. Reach 100 and times won't be good. AlchoholLVL = " + str(AlchoholLVL))
print ("You pull out you wallet and give the bartender the money. You take the drink from the bartender and look around the pub.")
print ("You see some areas you could check out")
print ("1 = Pool Tables")
print ("2 = Group of Men")
print ("3 = Bowling Alley")
print ("4 = Juke Box")
AreaFirst = raw_input("Which area would you like to check out first? >> ")
if AreaFirst in (str("1")):
print ("There are some men playing a game. They appear to not know what they're doing. You would make fun of them to yourself, but you also don't know how to play pool.")
elif AreaFirst in (str("2")):
print ("They have leather jackets and matching tattoos, I wouldn't mess with them.")
elif AreaFirst in (str("3")):
print ("You have no idea why a bar would have a bowling alley, but drunk bowling could be fun to watch.")
elif AreaFirst in (str("4")):
print ("You wonder over to the juke box. It's broken, but the sticker on it says it's a 'Harper's Juke Box'.")
print ("You head back to the bar and figure out what you want to check out now")
AreaSecond = raw_input("Which area would you like to check out now >> ")
if AreaSecond in (str("1")):
print ("There are some men playing a game. They appear to not know what they're doing. You would make fun of them to yourself, but you also don't know how to play pool.")
elif AreaSecond in (str("2")):
print ("They have leather jackets and matching tattoos, I wouldn't mess with them.")
elif AreaSecond in (str("3")):
print ("You have no idea why a bar would have a bowling alley, but drunk bowling could be fun to watch.")
elif AreaSecond in (str("4")):
print ("You wonder over to the juke box. It's broken, but the sticker on it says it's a 'Harper's Juke Box'.")
print ("You head back to the bar and figure out what you want to check out now")
AreaThird = raw_input("What's the third thing you would like to look at? >> ")
if AreaThird in (str("1")):
print ("There are some men playing a game. They appear to not know what they're doing. You would make fun of them to yourself, but you also don't know how to play pool.")
elif AreaThird in (str("2")):
print ("They have leather jackets and matching tattoos, I wouldn't mess with them.")
elif AreaThird in (str("3")):
print ("You have no idea why a bar would have a bowling alley, but drunk bowling could be fun to watch.")
elif AreaThird in (str("4")):
print ("You wonder over to the juke box. It's broken, but the sticker on it says it's a 'Harper's Juke Box'.")
print ("You head back to the bar and figure out what you want to check out now")
AreaFourth = raw_input("What's the final thing you would like to look at? >> ")
if AreaFourth in (str("1")):
print ("There are some men playing a game. They appear to not know what they're doing. You would make fun of them to yourself, but you also don't know how to play pool.")
elif AreaFourth in (str("2")):
print ("They have leather jackets and matching tattoos, I wouldn't mess with them.")
elif AreaFourth in (str("3")):
print ("You have no idea why a bar would have a bowling alley, but drunk bowling could be fun to watch.")
elif AreaFourth in (str("4")):
print ("You wonder over to the juke box. It's broken, but the sticker on it says it's a 'Harper's Juke Box'.")
print ("You head back to the bar and figure out what to do after looking at what you wanted to.")
LeaveBar = raw_input("Would you like to leave the bar right now? (1 = yes 2 = no) >> ")
if LeaveBar in (str("1")):
print ("You leave the bar and will come back another day. ENDING 1 of 7: EARLY DEPARTING")
else:
print ("After some thinking, you decide that the St. Happy Day Pub is good enough to stay for a little bit longer.")
print
print ("You notice one of your friends at a table. You think about going to greet him")
FriendChoice = raw_input("Do you greet your friend? (1 = yes 2 = no) >> ")
if FriendChoice in (str("2")):
print ("You decide to not greet your friend.")
if FriendChoice in (str("1")):
print ("You decided to greet your friend. You had a nice chat and seemed to have raised his spirits. Your Friend stat has increased by 1.")
FriendStat = (FriendStat + 1)
print
print ("You went to the bowling alley to see if anyone way playing a game. You spotted a group of people playing, (Well, as close as playing possible while being drunk), and you question whether or not to join them")
BowlingGame = raw_input("Would you like to play a game with the drunk group? (1 = yes 2 = no) >> ")
if BowlingGame in (str("1")) and FriendStat > 0:
print ("You played a game with the drunken group. You laughed a lot because almost every time they went to roll they almost fell over. You even got your friend to join you. Both of you had a lot of fun")
elif BowlingGame in (str("1")):
print ("You played a game with the drunken group. You laughed a lot because almost every time they went to roll they almost fell over.")
if BowlingGame in (str("2")):
print ("You decided to not join them in fear that they might throw the bowling ball at you by mistake.")
print
print ("After a bit you get bored. You want to do something before you are overtaken by boredom.")
print ("You look over towards the bar and find a group of men and think about fighting them.")
Fightthem = raw_input("Would you like to attempt to fight the group of men? (1 = yes 2 = no) >> ")
if Fightthem in (str("1")):
print "You attempted to fight the men and ended up getting clobbered (AKA: you lost the fight, real bad). Your HP dropped down by 100."
HP = (HP - 100)
print ("Your HP is now " + str(HP))
if HP <= 0:
print ("You ran out of health. You were sent to the hospital. ENDING 2 of 7: Beaten Down")
if Fightthem in (str("2")):
print "You made the right choice and decided to not fight the group of men. Instead, you decided to play some solo pool."
elif Fightthem in (str("2")) and FriendStat > 0:
print "You made the right choice and decided to not fight the group of men. Instead, you decided to play some pool with your friend."
print
print ("After playing a game of pool you found out it just took that game to clear the boredom out of your mind.")
print ("You head to the bar. Thirsty, you go to a drink. Be careful though, your AlchoholLVL limits your choices.")
print ("As you head to the bar counter, the bartender says the same thing as before.")
print ("1 = St. Happy Day Special Brew")
print ("2 = Classic Guiness")
print ("3 = Old Fashioned Brew")
print ("4 = The Quick Shot Glass")
Beverage = (raw_input("Hello there, what would you like to have today sir? >> "))
if Beverage in (str("1")):
print ("The St. Happy Day Special Brew, a man in great taste I see. That will be $" + "%.2f" %(StHappyDaySpecialBrew))
AlchoholLVL = (AlchoholLVL + 50)
print ("Your AlchoholLVL raised by 50. Reach 100 and times won't be good. AlchoholLVL = " + str(AlchoholLVL))
if AlchoholLVL >= 100:
print ("Oh no, your AlchoholLVL became to high. You were taken to the hospital to be treated. ENDING 3 of 7: A Bit Too Much To Drink")
if Beverage in (str("2")):
print ("A classic guiness, an amazing standard drink. That will be $" + "%.2f" %(ClassicGuiness))
AlchoholLVL = (AlchoholLVL + 30)
print ("Your AlchoholLVL raised by 30. Reach 100 and times won't be good. AlchoholLVL = " + str(AlchoholLVL))
if AlchoholLVL >= 100:
print ("Oh no, your AlchoholLVL became to high. You were taken to the hospital to be treated. ENDING 3 of 7: A Bit Too Much To Drink")
if Beverage in (str("3")):
print ("The Old Fashioned Brew, quite the simple looking beverage, but it sure packs a kick. That will be $" + "%.2f" %(OldFashionedBrew))
AlchoholLVL = (AlchoholLVL + 70)
print ("Your AlchoholLVL raised by 70. Reach 100 and times won't be good. AlchoholLVL = " + str(AlchoholLVL))
if AlchoholLVL >= 100:
print ("Oh no, your AlchoholLVL became to high. You were taken to the hospital to be treated. ENDING 3 of 7: A Bit Too Much To Drink")
if Beverage in (str("4")):
print ("Ah, the quick shot, a perfect choice for newcomers. That will be $" + "%.2f" %(QuickShotGlass))
AlchoholLVL = (AlchoholLVL + 10)
print ("Your AlchoholLVL raised by 10. Reach 100 and times won't be good. AlchoholLVL = " + str(AlchoholLVL))
if AlchoholLVL >= 100:
print ("Oh no, your AlchoholLVL became to high. You were taken to the hospital to be treated. ENDING 3 of 7: A Bit Too Much To Drink")
if AlchoholLVL < 100:
print
print ("After a nice drink, you check your watch and see that it's 9:57 PM. You want to stay at the bar a little longer but going home now would be a good idea.")
print ("1 = yes 2 = no")
leavenow = raw_input("Do you want to leave the bar now? >> ")
if leavenow in (str("1")):
print ("You decided to head home. Now the big question, how do you get home?")
print ("You know that you can get your car in the morning and you can get an Uber ride, but you could also just drive your own car.")
print ("Be careful, AlchoholLVL can affect your choices")
print ("Your AlchoholLVL is " + str(AlchoholLVL))
howtogo = raw_input("How do you want to go home? 1 = drive your own car 2 = Uber >> ")
if howtogo in (str("1")) and AlchoholLVL >80:
print ("You drove your own car but crashed your car on the way home. You were sent to the hospital. ENDING 4 of 7: Crash Ending")
elif howtogo in (str("1")):
print ("You drove your own car and made it safely home. ENDING 5 of 7: Safe Way Back")
if howtogo in (str("2")):
print ("You took an Uber home. Luckily they were not creepy or the type who tries to socialize when no one wants to. ENDING 5 of 7: Safe Way Back")
if leavenow in (str("2")):
print "You decided that you can stay longer. You are thirsty, so you head to the bar again to get another drink."
if AlchoholLVL > 80:
print ("As you head over to the bar, the bartender says that you have had too much to drink")
print ("The bartender was kind enough to pay for an Uber to send you back home. ENDING 5 of 7: Safe Way Back")
else:
print ("I think you already know how this will work. Just do not go over 100. Got it, good")
print ("1 = St. Happy Day Special Brew")
print ("2 = Classic Guiness")
print ("3 = Old Fashioned Brew")
print ("4 = The Quick Shot Glass")
Beverage2 = (raw_input("Hello there, what would you like to have today sir? >> "))
if Beverage2 in (str("1")):
print ("The St. Happy Day Special Brew, a man in great taste I see. That will be $" + "%.2f" %(StHappyDaySpecialBrew))
AlchoholLVL = (AlchoholLVL + 50)
print ("Your AlchoholLVL raised by 50. Reach 100 and times won't be good. AlchoholLVL = " + str(AlchoholLVL))
if AlchoholLVL >= 100:
print ("Oh no, your AlchoholLVL became to high. You were taken to the hospital to be treated. ENDING 3 of 7: A Bit Too Much To Drink")
if Beverage2 in (str("2")):
print ("A classic guiness, an amazing standard drink. That will be $" + "%.2f" %(ClassicGuiness))
AlchoholLVL = (AlchoholLVL + 30)
print ("Your AlchoholLVL raised by 30. Reach 100 and times won't be good. AlchoholLVL = " + str(AlchoholLVL))
if AlchoholLVL >= 100:
print ("Oh no, your AlchoholLVL became to high. You were taken to the hospital to be treated. ENDING 3 of 7: A Bit Too Much To Drink")
if Beverage2 in (str("3")):
print ("The Old Fashioned Brew, quite the simple looking beverage, but it sure packs a kick. That will be $" + "%.2f" %(OldFashionedBrew))
AlchoholLVL = (AlchoholLVL + 70)
print ("Your AlchoholLVL raised by 70. Reach 100 and times won't be good. AlchoholLVL = " + str(AlchoholLVL))
if AlchoholLVL >= 100:
print ("Oh no, your AlchoholLVL became to high. You were taken to the hospital to be treated. ENDING 3 of 7: A Bit Too Much To Drink")
if Beverage2 in (str("4")):
print ("Ah, the quick shot, a perfect choice for newcomers. That will be $" + "%.2f" %(QuickShotGlass))
AlchoholLVL = (AlchoholLVL + 10)
print ("Your AlchoholLVL raised by 10. Reach 100 and times won't be good. AlchoholLVL = " + str(AlchoholLVL))
if AlchoholLVL >= 100:
print ("Oh no, your AlchoholLVL became to high. You were taken to the hospital to be treated. ENDING 3 of 7: A Bit Too Much To Drink")
if AlchoholLVL < 100:
print
print ("You are most likely drunk at this point and I am running out of ideas of how to continue this game.")
print ("So, I will ask you this one question for the final 2 endings")
print ("Do you want to leave now 1 = yes 2 = no")
leaveattheend = raw_input("Do you want to leave? >> ")
if leaveattheend in (str("1")):
print ("Finally, you have come to your senses. You headed home via Uber and everything ended well. ENDING 6 of 7: GO HOME YOU'RE DRUNK")
if leaveattheend in (str("2")):
print ("Ok then fine. *Sigh*. You decided to stay even though you are about dead from alchohol posioning and you may get knocked out because you decided to fight the group of men. ENDING 7 of 7: You Stupid Drunken Idiot")
endscreen = raw_input("Continue? 1 = yes 2 = no >> ")
if endscreen in (str("1")):
print ("There is nothing left")
endscreen1 = raw_input("Continue? >> ")
if endscreen1 in (str("1")):
print ("There is nothing left")
endscreen2 = raw_input("Continue? >> ")
if endscreen2 in (str("1")):
print ("There is nothing left")
endscreen3 = raw_input("Continue? >> ")
if endscreen3 in (str("1")):
print ("You really wanna see how far you can push me, don't you. I told you already, nothing is left. The game is over and you got your final ending. Stop trying to continue")
endscreen4 = raw_input("Continue? >> ")
if endscreen4 in (str("1")):
print ("What did I just say? You went through the same screen for the 5th time now. You can see nothing is here. Give up now")
endscreen5 = raw_input("Continue? >> ")
if endscreen5 in (str("1")):
print ("You know what. I'm tired of you trying to push a completed game past it's limit. I spent so much time to make this and now you want even more. Do you KNOW how many variables I had to make for this game to work? TOO MANY. So please, stop")
endscreen6 = raw_input("Continue? 2 = no >> ")
if endscreen6 in (str("1")):
print ("Fine, if you're going to be this determined to get something new from this program, then I'll give you a secret ending (OUT OF PITY). Enjoy your useless ending. ENDING 8 of 7: A PITY SECRET")
else:
print ("Thanks for finally listening")
else:
print ("Thanks for understanding, goodbye")
else:
print ("Thanks for not pushing my buttons any further")
else:
print ("Thanks for playing, goodbye")
else:
print ("Thanks for playing, goodbye")
else:
print ("Thanks for playing, goodbye")
else:
print ("Thanks for playing, goodbye") | [
"noreply@github.com"
] | TheOneOtherCoder.noreply@github.com |
130ef5bf035cfe1aef3773e786611bf9dfed9cb4 | 0821a712ebac2f7cfd9051f8ee00fcd94f5daf48 | /selection.py | 014edfa8424d2e01231a405ade510c68cfaa4693 | [] | no_license | AlexCline/problems | da4138e7880e4faa5858e56ece13ed0d04dd94a6 | 3ada5d4fd09c85f62b93e9bad552ee3523130e2f | refs/heads/master | 2021-03-12T19:53:11.919762 | 2013-06-10T17:12:04 | 2013-06-10T17:12:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | def selection(array):
for index in range(0, len(array)):
min = index
for i in range(index, len(array)):
if array[min] > array[i]:
min = i
array[index], array[min] = array[min], array[index]
return array
array = [13,2,5,1,21,8,3,1]
print selection(array)
| [
"acline@us.ibm.com"
] | acline@us.ibm.com |
0616835aa6b4dc3096a756417a13a9598b00533b | 8293100233180d77431ffb2dcfbf4ba38dd3a7c6 | /HW1/code/plotTest.py | dc57eddd8f94ebba92e57201b742a20a535801f3 | [] | no_license | MarcusBlaisdell/MachineLearning | 0d1c08f92ebc7c355f0fcffe7675da0c7e822e72 | b63824f6f2e7200b9fadd688eb2fd6324e473682 | refs/heads/main | 2023-06-01T15:26:04.485132 | 2021-06-17T20:56:19 | 2021-06-17T20:56:19 | 377,914,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 20 12:00:56 2018
@author: Marcus
"""
import matplotlib.pyplot as plt
import numpy as np
# Data for plotting
t = np.arange(0.0, 2.0, 0.01)
s = 1 + np.sin(2 * np.pi * t)
# Note that using plt.subplots below is equivalent to using
# fig = plt.figure and then ax = fig.add_subplot(111)
fig, ax = plt.subplots()
ax.plot(t, s)
ax.plot(t, 2 + np.sin(t) )
ax.plot ([1,2])
ax.abline (intercept=0, slope=1)
ax.set(xlabel='time (s)', ylabel='voltage (mV)',
title='2 Times Pi Times time')
ax.grid()
#fig.savefig("test.png")
plt.show() | [
"noreply@github.com"
] | MarcusBlaisdell.noreply@github.com |
123bd91eada7ece3d6f864c35413bb2c53b6a044 | 156d054848b211fd4ca75057b9b448c9260fdd7d | /python-data-analysis/python_data_analysis/ch06/ch06-6.py | 9c509a860d71e83883edf980f1ddaa56f8617c1d | [] | no_license | wwxFromTju/Python-datascience | adfc06030dc785901b5fd33824529f86fcf41c54 | 7c58526ef54a6f10cbe1d4c7e5e024ddc423908a | refs/heads/master | 2021-01-20T17:36:51.701638 | 2016-09-04T11:21:56 | 2016-09-04T11:21:56 | 58,730,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,564 | py | #!/usr/bin/env python
# encoding=utf-8
from urllib2 import urlopen
from lxml.html import parse
import pandas as pd
import numpy as np
from pandas.io.parsers import TextParser
from pandas import Series, DataFrame
# XML和HTML
# 通过指定kind来获得列名或数据
def _unpack(row, kind='td'):
elts = row.findall('.//%s' % kind)
return [val.text_content() for val in elts]
# 从一个table获得列名和数据
def parse_options_data(table):
rows = table.findall('.//tr')
header = _unpack(rows[0], kind='th')
data = [_unpack(r) for r in rows[1:]]
return TextParser(data, names=header).get_chunk()
# 使用urlopen打开网页,然后使用lxml解析得到数据流
parsed = parse(urlopen('http://finance.yahoo.com/q/op?s=APPL+Options'))
print parsed
doc = parsed.getroot()
print doc
# 使用XPath来访问各个标签
# 访问所有的URL链接
links = doc.findall('.//a')
# 为HTML元素的对象,要得到URL和链接文本,必须使用各对象的get(URL)和text_content(针对显示的文本)
print links[15:20]
lnk = links[28]
print lnk
print lnk.get('href')
print lnk.text_content()
# 使用list comprehension列表推导式来获得所有的URL
urls = [lnk.get('href') for lnk in doc.findall('.//a')]
print urls[-10:]
# tables = doc.findall('.//table')
# calls = tables[0]
# puts = tables[1]
# rows = calls.findall('.//tr')
# 标题行
# print _unpack(rows[0], kind='th')
# 数据
# print _unpack(rows[1], kind='td')
# call_data = parse_options_data(calls)
# put_data = parse_options_data(puts)
# print call_data[:10]
| [
"wxwang@tju.edu.cn"
] | wxwang@tju.edu.cn |
b46e0e668e30a66d48ea56a29cb075d75dbedd6f | 9978ac967caa323d86a808fc707dc0958b37d49c | /final_draw.py | 4f74b36c41f478993ef09fbae88b2cbba76ec71a | [] | no_license | jaredywhip/Robot_Escape | 0498e09b335fdb3b5ce27925bd416f3684de6fba | 39ec8aa71e0d8b742e4c6f07f4ae5208260273a1 | refs/heads/master | 2021-01-10T10:35:21.459015 | 2016-01-06T19:15:01 | 2016-01-06T19:15:01 | 47,234,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,212 | py | '''
/* =======================================================================
Description:
This file contains a virtual robot class and a virtual world class.
========================================================================*/
'''
import math
import Queue
import time
import Tkinter as tk
class virtual_robot:
def __init__(self):
#self.robot = None
self.l = 20*math.sqrt(2) # half diagonal - robot is 40 mm square
self.x = 0 # starting x coordinate
self.y = 20 # starting y coordinate
self.a = 0 # starting angle of the robot, 0 when aligned with verticle axis
self.dist_l = False
self.dist_r = False #distance
self.floor_l = False
self.floor_r = False
self.sl = 0 # speed of left wheel
self.sr = 0 # speed of right wheel
self.t = 0 # last update time
def reset_robot(self):
self.x = 0 # x coordinate
self.y = 20 # y coordinate
self.a = 0# angle of the robot, 0 when aligned with verticle axis
self.dist_l = False
self.dist_r = False #
self.floor_l = False
self.floor_r = False
self.sl = 0 # speed of left wheel
self.sr = 0 # speed of right wheel
self.t = 0 # last update time
def set_robot_speed(self, w_l, w_r):
self.sl = w_l
self.sr = w_r
def set_robot_a_pos(self, a, x, y):
self.a = a
self.x = x
self.y = y
def set_robot_prox_dist(self, dist_l, dist_r):
self.dist_l = dist_l
self.dist_r = dist_r
def set_robot_floor (self, floor_l, floor_r):
self.floor_l = floor_l
self.floor_r = floor_r
class virtual_world:
def __init__(self, drawQueue, pvrobot=None, gvrobot=None, canvas=None, canvas_width=0,
canvas_height=0, mp=None, trace=False, prox_dots=False,
floor_dots=False):
self.drawQueue = drawQueue
self.pvrobot = pvrobot #set prisoner robot
self.gvrobot = gvrobot #set guard robot
self.canvas = canvas
self.canvas_width = canvas_width
self.canvas_height = canvas_height
self.map = mp if mp is not None else []
self.boundary = []
self.decoy = []
self.decoy_obj = None
self.countdown = []
self.countdown_obj = None
self.motionpath = []
self.trace = trace #leave trace of robot
self.prox_dots = prox_dots # draw obstacles detected as dots on map
self.floor_dots = floor_dots
#define a bounding box where the decoy can be placed
def add_boundary(self,rect):
self.boundary.append(rect)
def draw_boundary(self):
canvas_width = self.canvas_width
canvas_height = self.canvas_height
for rect in self.boundary:
x1 = canvas_width + rect[0]
y1 = canvas_height - rect[1]
x2 = canvas_width + rect[2]
y2 = canvas_height - rect[3]
self.canvas.create_rectangle([x1,y1,x2,y2], dash=(3,5))
#define a decoy box
def add_decoy(self,rect):
self.decoy = rect
def draw_decoy(self):
canvas_width = self.canvas_width
canvas_height = self.canvas_height
x1 = canvas_width + self.decoy[0]
y1 = canvas_height - self.decoy[1]
x2 = canvas_width + self.decoy[2]
y2 = canvas_height - self.decoy[3]
if self.decoy_obj is None:
self.decoy_obj = self.canvas.create_rectangle([x1,y1,x2,y2], fill = 'blue')
else:
self.canvas.coords(self.decoy_obj, (x1,y1,x2,y2))
#create a countdown timer for the guard
def add_countdown(self,timer): #timer = [xcoord, ycoord, 'time']
self.countdown = timer
def draw_countdown(self):
canvas_width = self.canvas_width
canvas_height = self.canvas_height
x = canvas_width + self.countdown[0]
y = canvas_height - self.countdown[1]
if self.countdown_obj is None:
self.canvas.create_text(x, y - 27 , font=("Purisa"), text= 'Time Until Guard Returns:')
self.countdown_obj = self.canvas.create_text(x, y, font=("Purisa", 40), fill = 'cyan', text=self.countdown[2])
else:
self.canvas.itemconfig(self.countdown_obj, text = self.countdown[2])
def add_obstacle(self,rect):
self.map.append(rect)
def draw_map(self, fill_color):
canvas_width = self.canvas_width
canvas_height = self.canvas_height
for rect in self.map:
x1 = canvas_width + rect[0]
y1 = canvas_height - rect[1]
x2 = canvas_width + rect[2]
y2 = canvas_height - rect[3]
self.canvas.create_rectangle([x1,y1,x2,y2], fill=fill_color)
def add_waypoint(self,waypoint):
self.motionpath.append(waypoint)
def draw_motionpath(self):
canvas_width = self.canvas_width
canvas_height = self.canvas_height
wp_num = 1
for waypoint in self.motionpath:
x = canvas_width + waypoint[0]
y = canvas_height - waypoint[1]
x1 = x + 3
y1 = y + 3
x2 = x - 3
y2 = y - 3
self.canvas.create_oval(x1, y1, x2, y2, outline='green', fill = 'green')
self.canvas.create_text(x, y + 10, font="Purisa", text=wp_num)
wp_num += 1
# Draw functions for prisoner robot
def draw_pris_robot(self):
canvas_width = self.canvas_width
canvas_height = self.canvas_height
pi4 = 3.1415 / 4 # quarter pi
vrobot = self.pvrobot
a1 = vrobot.a + pi4
a2 = vrobot.a + 3*pi4
a3 = vrobot.a + 5*pi4
a4 = vrobot.a + 7*pi4
x1 = canvas_width + vrobot.l * math.sin(a1) + vrobot.x
x2 = canvas_width + vrobot.l * math.sin(a2) + vrobot.x
x3 = canvas_width + vrobot.l * math.sin(a3) + vrobot.x
x4 = canvas_width + vrobot.l * math.sin(a4) + vrobot.x
y1 = canvas_height - vrobot.l * math.cos(a1) - vrobot.y
y2 = canvas_height - vrobot.l * math.cos(a2) - vrobot.y
y3 = canvas_height - vrobot.l * math.cos(a3) - vrobot.y
y4 = canvas_height - vrobot.l * math.cos(a4) - vrobot.y
points = (x1,y1,x2,y2,x3,y3,x4,y4)
poly_id = vrobot.poly_id
self.drawQueue.put([poly_id, points])
if (self.trace):
pi3 = 3.1415/3
a1 = vrobot.a
a2 = a1 + 2*pi3
a3 = a1 + 4*pi3
x1 = canvas_width + 3 * math.sin(a1) + vrobot.x
x2 = canvas_width + 3 * math.sin(a2) + vrobot.x
x3 = canvas_width + 3 * math.sin(a3) + vrobot.x
y1 = canvas_height - 3 * math.cos(a1) - vrobot.y
y2 = canvas_height - 3 * math.cos(a2) - vrobot.y
y3 = canvas_height - 3 * math.cos(a3) - vrobot.y
self.canvas.create_polygon([x1,y1,x2,y2,x3,y3], outline="blue")
def draw_pris_prox(self, side):
canvas_width = self.canvas_width
canvas_height = self.canvas_height
vrobot = self.pvrobot
if (side == "left"):
a_e = vrobot.a - 3.1415/5 #emitter location
prox_dis = vrobot.dist_l
prox_l_id = vrobot.prox_l_id
else:
a_e = vrobot.a + 3.1415/5 #emitter location
prox_dis = vrobot.dist_r
prox_l_id = vrobot.prox_r_id
if (prox_dis):
x_e = (vrobot.l-4) * math.sin(a_e) + vrobot.x #emiter pos of left sensor
y_e = (vrobot.l-4) * math.cos(a_e) + vrobot.y #emiter pos of right sensor
x_p = prox_dis * math.sin(vrobot.a) + x_e
y_p = prox_dis * math.cos(vrobot.a) + y_e
if (self.prox_dots):
self.canvas.create_oval(canvas_width+x_p-1, canvas_height-y_p-1, canvas_width+x_p+1, canvas_height-y_p+1, outline='red')
points = (canvas_width+x_e, canvas_height-y_e, canvas_width+x_p, canvas_height-y_p)
self.drawQueue.put([prox_l_id, points])
else:
points = (0,0,0,0)
self.drawQueue.put([prox_l_id, points])
def draw_pris_floor(self, side):
canvas_width = self.canvas_width
canvas_height = self.canvas_height
vrobot = self.pvrobot
if (side == "left"):
border = vrobot.floor_l
floor_id = vrobot.floor_l_id
a = vrobot.a - 3.1415/7 #rough position of the left floor sensor
else:
border = vrobot.floor_r
floor_id = vrobot.floor_r_id
a = vrobot.a + 3.1415/7 #rough position of the left floor sensor
x_f = (vrobot.l - 12) * math.sin(a) + vrobot.x
y_f = (vrobot.l - 12) * math.cos(a) + vrobot.y
points = (canvas_width+x_f-2, canvas_height-y_f-2, canvas_width+x_f+2, canvas_height-y_f+2)
self.drawQueue.put([floor_id, points])
if (border):
self.canvas.itemconfig(floor_id, outline = "black", fill="black")
if (self.floor_dots):
self.canvas.create_oval(canvas_width+x_f-2, canvas_height-y_f-2, canvas_width+x_f+2, canvas_height-y_f+2, fill='black')
else:
self.canvas.itemconfig(floor_id, outline = "white", fill="white")
# Draw functions for guard robot
def draw_guard_robot(self):
canvas_width = self.canvas_width
canvas_height = self.canvas_height
pi4 = 3.1415 / 4 # quarter pi
vrobot = self.gvrobot
a1 = vrobot.a + pi4
a2 = vrobot.a + 3*pi4
a3 = vrobot.a + 5*pi4
a4 = vrobot.a + 7*pi4
x1 = canvas_width + vrobot.l * math.sin(a1) + vrobot.x
x2 = canvas_width + vrobot.l * math.sin(a2) + vrobot.x
x3 = canvas_width + vrobot.l * math.sin(a3) + vrobot.x
x4 = canvas_width + vrobot.l * math.sin(a4) + vrobot.x
y1 = canvas_height - vrobot.l * math.cos(a1) - vrobot.y
y2 = canvas_height - vrobot.l * math.cos(a2) - vrobot.y
y3 = canvas_height - vrobot.l * math.cos(a3) - vrobot.y
y4 = canvas_height - vrobot.l * math.cos(a4) - vrobot.y
points = (x1,y1,x2,y2,x3,y3,x4,y4)
poly_id = vrobot.poly_id
self.drawQueue.put([poly_id, points])
if (self.trace):
pi3 = 3.1415/3
a1 = vrobot.a
a2 = a1 + 2*pi3
a3 = a1 + 4*pi3
x1 = canvas_width + 3 * math.sin(a1) + vrobot.x
x2 = canvas_width + 3 * math.sin(a2) + vrobot.x
x3 = canvas_width + 3 * math.sin(a3) + vrobot.x
y1 = canvas_height - 3 * math.cos(a1) - vrobot.y
y2 = canvas_height - 3 * math.cos(a2) - vrobot.y
y3 = canvas_height - 3 * math.cos(a3) - vrobot.y
self.canvas.create_polygon([x1,y1,x2,y2,x3,y3], outline="blue")
def draw_guard_prox(self, side):
canvas_width = self.canvas_width
canvas_height = self.canvas_height
vrobot = self.gvrobot
if (side == "left"):
a_e = vrobot.a - 3.1415/5 #emitter location
prox_dis = vrobot.dist_l
prox_l_id = vrobot.prox_l_id
else:
a_e = vrobot.a + 3.1415/5 #emitter location
prox_dis = vrobot.dist_r
prox_l_id = vrobot.prox_r_id
if (prox_dis):
x_e = (vrobot.l-4) * math.sin(a_e) + vrobot.x #emiter pos of left sensor
y_e = (vrobot.l-4) * math.cos(a_e) + vrobot.y #emiter pos of right sensor
x_p = prox_dis * math.sin(vrobot.a) + x_e
y_p = prox_dis * math.cos(vrobot.a) + y_e
if (self.prox_dots):
self.canvas.create_oval(canvas_width+x_p-1, canvas_height-y_p-1, canvas_width+x_p+1, canvas_height-y_p+1, outline='red')
points = (canvas_width+x_e, canvas_height-y_e, canvas_width+x_p, canvas_height-y_p)
self.drawQueue.put([prox_l_id, points])
else:
points = (0,0,0,0)
self.drawQueue.put([prox_l_id, points])
def draw_guard_floor(self, side):
canvas_width = self.canvas_width
canvas_height = self.canvas_height
vrobot = self.gvrobot
if (side == "left"):
border = vrobot.floor_l
floor_id = vrobot.floor_l_id
a = vrobot.a - 3.1415/7 #rough position of the left floor sensor
else:
border = vrobot.floor_r
floor_id = vrobot.floor_r_id
a = vrobot.a + 3.1415/7 #rough position of the left floor sensor
x_f = (vrobot.l - 12) * math.sin(a) + vrobot.x
y_f = (vrobot.l - 12) * math.cos(a) + vrobot.y
points = (canvas_width+x_f-2, canvas_height-y_f-2, canvas_width+x_f+2, canvas_height-y_f+2)
self.drawQueue.put([floor_id, points])
if (border):
self.canvas.itemconfig(floor_id, outline = "black", fill="black")
if (self.floor_dots):
self.canvas.create_oval(canvas_width+x_f-2, canvas_height-y_f-2, canvas_width+x_f+2, canvas_height-y_f+2, fill='black')
else:
self.canvas.itemconfig(floor_id, outline = "white", fill="white") | [
"root@rescomp-14-285844.stanford.edu"
] | root@rescomp-14-285844.stanford.edu |
dbf1df70714398eb0bb053335bd7a5f660ec9d93 | c011c7d70fbceeac679b69f3cc809e3ad955e078 | /dsp_python_imp/Ch12/IIR_impulse_response.py | 47b9b82d36deb6a42550fecc3a5d7b2f5dd704ca | [
"MIT"
] | permissive | xrick/Lcj-DSP-in-Python | b6e6624133754a2dc0c6be506017aec6177cb9f8 | f27ee7036dc0df41b96e0b06ed13bb8fd874a714 | refs/heads/master | 2020-12-01T08:26:32.230538 | 2019-12-28T09:55:28 | 2019-12-28T09:55:28 | 230,591,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
n = np.array( [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ] )
x = np.array( [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] )
b = np.array( [ 1 ] )
a = np.array( [ 1, -0.8 ] )
y = signal.lfilter( b, a, x )
print( "x =", x )
print( "y =", y )
plt.figure( 1 )
plt.stem( n, x )
plt.xlabel( 'n' )
plt.ylabel( 'x[n]' )
plt.figure( 2 )
plt.stem( n, y )
plt.xlabel( 'n' )
plt.ylabel( 'y[n]' )
plt.show( ) | [
"xrickliao@gmail.com"
] | xrickliao@gmail.com |
0243fca320209c2522051b1b89d64c9a349e4937 | 7160f0637ba4fdd85feeb43aca2125c3479c474c | /config/spec.py | b1b021a751d98f14b75db187ffcdf0c648646468 | [
"MIT"
] | permissive | RENCI/pdspi-mapper-parallex-example | 86a39e513f1e07f73be1281c81b2b143ed7e5d80 | 1c99fa42b7b9bc2c09e9cad2f1c55ea10549814a | refs/heads/master | 2023-05-11T04:29:58.354329 | 2021-03-03T23:14:21 | 2021-03-03T23:14:21 | 260,721,734 | 0 | 2 | MIT | 2023-05-01T21:42:44 | 2020-05-02T15:54:12 | Python | UTF-8 | Python | false | false | 5,561 | py | from pdsphenotypemapping.clinical_feature import *
from tx.dateutils.utils import strtodate
from dateutil.relativedelta import relativedelta
requested_patient_variable_ids = get_patient_variable_ids(patientVariables)
timestamp_datetime = strtodate(timestamp)
for patient_id in patientIds:
patient_data = deref(data, patient_id)
patient = get_patient_patient(patient_data)
pid = patient["id"]
yield {
"patientId": pid
}
condition = get_condition_patient(fhir=patient_data)
observation = get_observation_patient(fhir=patient_data)
if "LOINC:2160-0" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:2160-0",
**serum_creatinine(observation, "mg/dL", timestamp_datetime)
}]
}
if "LOINC:82810-3" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:82810-3",
**pregnancy(condition, None, timestamp_datetime)
}]
}
if "HP:0001892" in requested_patient_variable_ids:
yield {
"values": [{
"id": "HP:0001892",
**bleeding(condition, None, timestamp_datetime)
}]
}
if "HP:0000077" in requested_patient_variable_ids:
yield {
"values": [{
"id": "HP:0000077",
**kidney_dysfunction(condition, None, timestamp_datetime)
}]
}
if "LOINC:30525-0" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:30525-0",
**age(patient, "year", timestamp_datetime)
}]
}
if "LOINC:54134-2" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:54134-2",
**race(patient, None, timestamp_datetime)
}]
}
if "LOINC:54120-1" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:54120-1",
**ethnicity(patient, None, timestamp_datetime)
}]
}
if "LOINC:21840-4" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:21840-4",
**sex(patient, None, timestamp_datetime)
}]
}
if "LOINC:8302-2" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:8302-2",
**height(observation, "m", timestamp_datetime)
}]
}
if "LOINC:29463-7" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:29463-7",
**weight(observation, "kg", timestamp_datetime)
}]
}
if "LOINC:39156-5" in requested_patient_variable_ids:
height = height(observation, "m", timestamp_datetime)
weight = weight(observation, "kg", timestamp_datetime)
yield {
"values": [{
"id": "LOINC:39156-5",
**bmi(height, weight, observation, "kg/m^2", timestamp_datetime)
}]
}
if "LOINC:45701-0" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:45701-0",
**fever(condition, None, timestamp_datetime)
}]
}
if "LOINC:LP212175-6" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:LP212175-6",
**date_of_fever_onset(condition, None, timestamp_datetime)
}]
}
if "LOINC:64145-6" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:64145-6",
**cough(condition, None, timestamp_datetime)
}]
}
if "LOINC:85932-2" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:85932-2",
**date_of_cough_onset(condition, None, timestamp_datetime)
}]
}
if "LOINC:54564-0" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:54564-0",
**shortness_of_breath(condition, None, timestamp_datetime)
}]
}
if "LOINC:LP128504-0" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:LP128504-0",
**autoimmune_disease(condition, None, timestamp_datetime)
}]
}
if "LOINC:54542-6" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:54542-6",
**pulmonary_disease(condition, None, timestamp_datetime)
}]
}
if "LOINC:LP172921-1" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:LP172921-1",
**cardiovascular_disease(condition, None, timestamp_datetime)
}]
}
if "LOINC:56799-0" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:56799-0",
**address(patient, None, timestamp_datetime)
}]
}
if "LOINC:LP21258-6" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:54542-6",
**oxygen_saturation(observation, None, timestamp_datetime)
}]
}
| [
"xuh@cs.unc.edu"
] | xuh@cs.unc.edu |
4fe591f6d4fbdb0935739c7108d585826f7958fa | 09ecdb0d81cfb2a47f6829b34583553a3730065b | /ABC/141/C.py | 80e48c4e86ba1fd742052bae4878a705c9148390 | [] | no_license | ucho0303/AtCoder | fd757a5efe6f124281d51a9d190f4e1eb407f51c | 0d37265c4a53187ed34f7aaf5a9415a5d9b19517 | refs/heads/master | 2021-05-25T18:40:55.010475 | 2021-02-16T04:17:07 | 2021-02-16T04:17:07 | 253,874,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | n,k,q =map(int,input().split())
a = [int(input()) for i in range(q)]
b = [k-q]*n
for i in a :
b[i-1] += 1
for i in b :
if i > 0 :
print('Yes')
else :
print('No') | [
"ucho.0303.ryo@gmail.com"
] | ucho.0303.ryo@gmail.com |
9b9f5a6aa2b7a29dac86f4a812a20703f94b5222 | a41997010915d726971c0694fbe2e5da1e8204c0 | /framework/linalg/tests/testUHDU.py | 7ff427254a620dac016f1015f5edabd1cadbd225 | [
"MIT"
] | permissive | fquivira/cos | 744f17f58833f4752096b60d1c63c304e8a340bb | 89a26aa13df3b8cc7f451f4e74661943f235a11e | refs/heads/master | 2020-03-16T04:34:22.181032 | 2014-04-28T19:27:28 | 2014-04-28T19:27:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | '''
Created on 14. sep. 2011
@author: Hybel
'''
import unittest
import testMatrixOperations as tmo
import framework.linalg.matrixDecomp as md
class TestUHDU(tmo.TestMatrixOperations):
def testUHDUDecomposition(self):
UD = md.uhdu(self.complexA4x4, 4)
self.assertMatrixAlmosteEqual(self.U4x4.tolist(), UD[0].tolist(), 4)
self.assertMatrixAlmosteEqual(self.D4x4.tolist(), UD[1].tolist(), 4)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(TestUHDU)
| [
"jpaasen@ifi.uio.no"
] | jpaasen@ifi.uio.no |
3cddb569d9024b700990ce6cc8f33593395a2037 | 9a4798d927866f11df835517ae1035c60b1ab4c9 | /algoritimos4.py | 7c5b9cc7d722c28ef0c9f3980c3ee246953a524c | [] | no_license | luizgnjunior/Impacta | ce09cb2e6f5dff022df2cdbdd78532cdb7386506 | cb8c442bec28ff1b5bef021a4ac095efaa703fd0 | refs/heads/master | 2021-05-15T11:26:05.804298 | 2020-02-17T19:01:53 | 2020-02-17T19:01:53 | 108,325,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | print("Bem vindo ao Aumento de Salário")
SALARIO=float(input("Por favor, insira o valor do seu atual sálario"))
if SALARIO > 900:
print("Infelizmente você não pode receber o beneficio.")
elif SALARIO <= 300:
CREDITO = float (SALARIO/100*15)
SALDOAUMENTO= SALARIO+CREDITO
print("Devido ao aumento, seu salário de:",SALARIO, "você recebera um acrescimo de 15% e passa a receber:" ,SALDOAUMENTO,)
elif SALARIO > 300 and SALARIO <= 600:
CREDITO = float (SALARIO/100*10)
SALDOAUMENTO= SALARIO+CREDITO
print("Devido ao aumento, seu salário de:",SALARIO, "você recebera um acrescimo de 10% e passa a receber:" ,SALDOAUMENTO,)
elif SALARIO > 600 and SALARIO <= 900:
CREDITO = float (SALARIO/100*5)
SALDOAUMENTO= SALARIO+CREDITO
print("Devido ao aumento, seu salário de:",SALARIO, "você recebera um acrescimo de 5% e passa a receber:" ,SALDOAUMENTO,)
| [
"noreply@github.com"
] | luizgnjunior.noreply@github.com |
206494e27a4e33018dcfbb23b90c5fa250dea24c | eda67cc12434d1b661da46771ce4280842798bf9 | /google-cloud-sdk/lib/googlecloudsdk/api_lib/compute/instance_template_utils.py | d86efba15894b77281d3fcbb6bb137c11602be46 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | hexingren/data-pipeline | b48870618bbbcb428dd060b64f91e049815e9980 | ea1eda9977bb21b1bf58df4e74655640c50cb080 | refs/heads/master | 2021-01-11T16:48:46.202556 | 2017-05-14T23:10:38 | 2017-05-14T23:10:38 | 79,674,630 | 0 | 3 | null | 2020-07-25T05:43:03 | 2017-01-21T21:40:38 | Python | UTF-8 | Python | false | false | 8,248 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convenience functions for dealing with instance templates."""
from googlecloudsdk.api_lib.compute import alias_ip_range_utils
from googlecloudsdk.api_lib.compute import constants
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.networks.subnets import flags as subnet_flags
EPHEMERAL_ADDRESS = object()
# TODO(user): Add unit tests for utilities
def CreateNetworkInterfaceMessage(
resources, scope_lister, messages, network, region, subnet, address,
alias_ip_ranges_string=None):
"""Creates and returns a new NetworkInterface message.
Args:
resources: generates resource references,
scope_lister: function, provides scopes for prompting subnet region,
messages: GCE API messages,
network: network,
region: region for subnetwork,
subnet: regional subnetwork,
address: specify static address for instance template
* None - no address,
* EPHEMERAL_ADDRESS - ephemeral address,
* string - address name to be fetched from GCE API.
alias_ip_ranges_string: command line string specifying a list of alias
IP ranges.
Returns:
network_interface: a NetworkInterface message object
"""
# By default interface is attached to default network. If network or subnet
# are specified they're used instead.
network_interface = messages.NetworkInterface()
if subnet is not None:
subnet_ref = subnet_flags.SubnetworkResolver().ResolveResources(
[subnet], compute_scope.ScopeEnum.REGION, region, resources,
scope_lister=scope_lister)[0]
network_interface.subnetwork = subnet_ref.SelfLink()
if network is not None:
network_ref = resources.Parse(network, collection='compute.networks')
network_interface.network = network_ref.SelfLink()
elif subnet is None:
network_ref = resources.Parse(
constants.DEFAULT_NETWORK, collection='compute.networks')
network_interface.network = network_ref.SelfLink()
if address:
access_config = messages.AccessConfig(
name=constants.DEFAULT_ACCESS_CONFIG_NAME,
type=messages.AccessConfig.TypeValueValuesEnum.ONE_TO_ONE_NAT)
# If the user provided an external IP, populate the access
# config with it.
if address != EPHEMERAL_ADDRESS:
access_config.natIP = address
network_interface.accessConfigs = [access_config]
if alias_ip_ranges_string:
network_interface.aliasIpRanges = (
alias_ip_range_utils.CreateAliasIpRangeMessagesFromString(
messages, False, alias_ip_ranges_string))
return network_interface
def CreateNetworkInterfaceMessages(
resources, scope_lister, messages, network_interface_arg, region):
"""Create network interface messages.
Args:
resources: generates resource references,
scope_lister: function, provides scopes for prompting subnet region,
messages: creates resources.
network_interface_arg: CLI argument specifying network interfaces.
region: region of the subnetwork.
Returns:
list, items are NetworkInterfaceMessages.
"""
result = []
if network_interface_arg:
for interface in network_interface_arg:
address = interface.get('address', None)
# pylint: disable=g-explicit-bool-comparison
if address == '':
address = EPHEMERAL_ADDRESS
result.append(CreateNetworkInterfaceMessage(
resources, scope_lister, messages, interface.get('network', None),
region,
interface.get('subnet', None),
address,
interface.get('aliases', None)))
return result
def CreatePersistentAttachedDiskMessages(messages, disks):
"""Returns a list of AttachedDisk messages and the boot disk's reference.
Args:
messages: GCE API messages,
disks: disk objects - contains following properties
* name - the name of disk,
* mode - 'rw' (R/W), 'ro' (R/O) access mode,
* boot - whether it is a boot disk,
* autodelete - whether disks is deleted when VM is deleted,
* device-name - device name on VM.
Returns:
list of API messages for attached disks
"""
disks_messages = []
for disk in disks:
name = disk['name']
# Resolves the mode.
mode_value = disk.get('mode', 'rw')
if mode_value == 'rw':
mode = messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE
else:
mode = messages.AttachedDisk.ModeValueValuesEnum.READ_ONLY
boot = disk.get('boot') == 'yes'
auto_delete = disk.get('auto-delete') == 'yes'
attached_disk = messages.AttachedDisk(
autoDelete=auto_delete,
boot=boot,
deviceName=disk.get('device-name'),
mode=mode,
source=name,
type=messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT)
# The boot disk must end up at index 0.
if boot:
disks_messages = [attached_disk] + disks_messages
else:
disks_messages.append(attached_disk)
return disks_messages
def CreatePersistentCreateDiskMessages(scope_prompter, messages, create_disks):
"""Returns a list of AttachedDisk messages.
Args:
scope_prompter: Scope prompter object,
messages: GCE API messages,
create_disks: disk objects - contains following properties
* name - the name of disk,
* mode - 'rw' (R/W), 'ro' (R/O) access mode,
* disk-size - the size of the disk,
* disk-type - the type of the disk (HDD or SSD),
* image - the name of the image to initialize from,
* image-family - the image family name,
* image-project - the project name that has the image,
* auto-delete - whether disks is deleted when VM is deleted,
* device-name - device name on VM.
Returns:
list of API messages for attached disks
"""
disks_messages = []
for disk in create_disks or []:
name = disk.get('name')
# Resolves the mode.
mode_value = disk.get('mode', 'rw')
if mode_value == 'rw':
mode = messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE
else:
mode = messages.AttachedDisk.ModeValueValuesEnum.READ_ONLY
auto_delete = disk.get('auto-delete') == 'yes'
disk_size_gb = utils.BytesToGb(disk.get('size'))
image_uri, _ = scope_prompter.ExpandImageFlag(
image=disk.get('image'),
image_family=disk.get('image-family'),
image_project=disk.get('image-project'),
return_image_resource=False)
create_disk = messages.AttachedDisk(
autoDelete=auto_delete,
boot=False,
deviceName=disk.get('device-name'),
initializeParams=messages.AttachedDiskInitializeParams(
diskName=name,
sourceImage=image_uri,
diskSizeGb=disk_size_gb,
diskType=disk.get('type')),
mode=mode,
type=messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT)
disks_messages.append(create_disk)
return disks_messages
def CreateDefaultBootAttachedDiskMessage(
messages, disk_type, disk_device_name, disk_auto_delete, disk_size_gb,
image_uri):
"""Returns an AttachedDisk message for creating a new boot disk."""
return messages.AttachedDisk(
autoDelete=disk_auto_delete,
boot=True,
deviceName=disk_device_name,
initializeParams=messages.AttachedDiskInitializeParams(
sourceImage=image_uri,
diskSizeGb=disk_size_gb,
diskType=disk_type),
mode=messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE,
type=messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT)
| [
"hexingren@gmail.com"
] | hexingren@gmail.com |
33df19f351ae1e38a5fef7a942b3eaaee767871b | 6e46a850cc4ece73476a350e676ea55ce72b200a | /aliyun-python-sdk-reid/aliyunsdkreid/request/v20190928/ImportSpecialPersonnelRequest.py | fb397d0415577135ad8be89532374fbb0d1edd62 | [
"Apache-2.0"
] | permissive | zhxfei/aliyun-openapi-python-sdk | fb3f22ca149988d91f07ba7ca3f6a7a4edf46c82 | 15890bf2b81ce852983f807e21b78a97bcc26c36 | refs/heads/master | 2022-07-31T06:31:24.471357 | 2020-05-22T17:00:17 | 2020-05-22T17:00:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,552 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkreid.endpoint import endpoint_data
class ImportSpecialPersonnelRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'reid', '2019-09-28', 'ImportSpecialPersonnel')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_UkId(self):
return self.get_body_params().get('UkId')
def set_UkId(self,UkId):
self.add_body_params('UkId', UkId)
def get_Description(self):
return self.get_body_params().get('Description')
def set_Description(self,Description):
self.add_body_params('Description', Description)
def get_ExternalId(self):
return self.get_body_params().get('ExternalId')
def set_ExternalId(self,ExternalId):
self.add_body_params('ExternalId', ExternalId)
def get_PersonType(self):
return self.get_body_params().get('PersonType')
def set_PersonType(self,PersonType):
self.add_body_params('PersonType', PersonType)
def get_Urls(self):
return self.get_body_params().get('Urls')
def set_Urls(self,Urls):
self.add_body_params('Urls', Urls)
def get_PersonName(self):
return self.get_body_params().get('PersonName')
def set_PersonName(self,PersonName):
self.add_body_params('PersonName', PersonName)
def get_StoreIds(self):
return self.get_body_params().get('StoreIds')
def set_StoreIds(self,StoreIds):
self.add_body_params('StoreIds', StoreIds)
def get_Status(self):
return self.get_body_params().get('Status')
def set_Status(self,Status):
self.add_body_params('Status', Status) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
08682a0dcd0e887979ab96c3fdde4d911d2d0eba | e2ea6a08b1b086fbc1cf1410bf8d6cfa793e56ea | /scripts/deploy.py | 8e37c988b26ec92ab3126bc93564051f03ce14a8 | [] | no_license | Cesarioo/brownie_fund_me | 0b4e3bd0e762913c00ab602675173cc598a6aca4 | 77d7968e23da11cf7f385fe3c2ab4eef4ae301d2 | refs/heads/main | 2023-08-29T00:02:38.153318 | 2021-10-29T15:00:27 | 2021-10-29T15:00:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | from brownie import FundMe, MockV3Aggregator, network, config
from scripts.helpful_scripts import (
get_account,
deploy_mocks,
LOCAL_BLOCKCHAIN_ENVIRONMENTS,
FORKED_LOCAL_ENVIRONNEMENTS,
)
def deploy_fund_me():
account = get_account()
if (
network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS
or network.show_active() in FORKED_LOCAL_ENVIRONNEMENTS
):
price_feed_address = config["networks"][network.show_active()][
"eth_usd_price_feed"
]
else:
deploy_mocks()
price_feed_address = MockV3Aggregator[-1].address
fund_me = FundMe.deploy(
price_feed_address,
{"from": account},
publish_source=config["networks"][network.show_active()].get("verify"),
)
print(f"Contract deployed to {fund_me.address}")
return fund_me
def main():
deploy_fund_me()
| [
"o@mairey.net"
] | o@mairey.net |
f3a74e7d4d43f3e7ec0607a6c8f4bae06b4c4c9a | 180932542c405e70d665075e183171bab12b3ee2 | /HackIDE_project/wsgi.py | 814b6fd6712b38b58a6ce4ee2ec6ed566c20634d | [] | no_license | vnkmr/onlineide | e2ad6c75cb2119e106e48995b362bcad9c7d3a19 | 599efa41806258410a67e3b120aa8f0a8ea1ae0c | refs/heads/master | 2023-01-03T18:42:17.212702 | 2016-03-22T07:33:45 | 2016-03-22T07:33:45 | 54,453,390 | 0 | 0 | null | 2022-12-26T20:05:17 | 2016-03-22T07:13:38 | JavaScript | UTF-8 | Python | false | false | 407 | py | """
WSGI config for HackIDE_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "HackIDE_project.settings")
application = get_wsgi_application()
| [
"kanv.k13@gmail.com"
] | kanv.k13@gmail.com |
c06cbcd32edb287bacb24be40665226d14edf0e7 | d2017e2e5fc96317cdbafaad985d4fb645d08c1d | /tests/system/conftest.py | cc2c2a4dcbaf5666a5b9258603ab9e883b76204b | [
"Apache-2.0"
] | permissive | gianghdo/python-bigquery | c3f2cbce6ca221c0c83fc5c1f5df21e649299206 | 4f229cb973dff6a71b322806a85e950a57aa6582 | refs/heads/main | 2023-07-16T03:28:15.157799 | 2021-08-27T22:20:50 | 2021-08-27T22:20:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,371 | py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import pytest
import test_utils.prefixer
from google.cloud import bigquery
from google.cloud.bigquery import enums
from . import helpers
prefixer = test_utils.prefixer.Prefixer("python-bigquery", "tests/system")
DATA_DIR = pathlib.Path(__file__).parent.parent / "data"
@pytest.fixture(scope="session", autouse=True)
def cleanup_datasets(bigquery_client: bigquery.Client):
for dataset in bigquery_client.list_datasets():
if prefixer.should_cleanup(dataset.dataset_id):
bigquery_client.delete_dataset(
dataset, delete_contents=True, not_found_ok=True
)
@pytest.fixture(scope="session")
def bigquery_client():
return bigquery.Client()
@pytest.fixture(scope="session")
def project_id(bigquery_client: bigquery.Client):
return bigquery_client.project
@pytest.fixture(scope="session")
def bqstorage_client(bigquery_client):
from google.cloud import bigquery_storage
return bigquery_storage.BigQueryReadClient(credentials=bigquery_client._credentials)
@pytest.fixture(scope="session")
def dataset_id(bigquery_client):
dataset_id = prefixer.create_prefix()
bigquery_client.create_dataset(dataset_id)
yield dataset_id
bigquery_client.delete_dataset(dataset_id, delete_contents=True, not_found_ok=True)
@pytest.fixture
def table_id(dataset_id):
return f"{dataset_id}.table_{helpers.temp_suffix()}"
@pytest.fixture(scope="session")
def scalars_table(bigquery_client: bigquery.Client, project_id: str, dataset_id: str):
schema = bigquery_client.schema_from_json(DATA_DIR / "scalars_schema.json")
job_config = bigquery.LoadJobConfig()
job_config.schema = schema
job_config.source_format = enums.SourceFormat.NEWLINE_DELIMITED_JSON
full_table_id = f"{project_id}.{dataset_id}.scalars"
with open(DATA_DIR / "scalars.jsonl", "rb") as data_file:
job = bigquery_client.load_table_from_file(
data_file, full_table_id, job_config=job_config
)
job.result()
yield full_table_id
bigquery_client.delete_table(full_table_id)
@pytest.fixture(scope="session")
def scalars_extreme_table(
bigquery_client: bigquery.Client, project_id: str, dataset_id: str
):
schema = bigquery_client.schema_from_json(DATA_DIR / "scalars_schema.json")
job_config = bigquery.LoadJobConfig()
job_config.schema = schema
job_config.source_format = enums.SourceFormat.NEWLINE_DELIMITED_JSON
full_table_id = f"{project_id}.{dataset_id}.scalars_extreme"
with open(DATA_DIR / "scalars_extreme.jsonl", "rb") as data_file:
job = bigquery_client.load_table_from_file(
data_file, full_table_id, job_config=job_config
)
job.result()
yield full_table_id
bigquery_client.delete_table(full_table_id)
| [
"noreply@github.com"
] | gianghdo.noreply@github.com |
1e36da1718b8730cae933e9ea4fa3a24588040fc | 43860dacf21982839a67d415704ef696e7faf3f3 | /osr/apps/registry/models/__init__.py | 007bb9dcfd628f67db7c98f9ae8eae79b26ae5e5 | [
"MIT"
] | permissive | offurface/osr | 1cfa1b28feb376d9c32ade92fa8e823e7a702b9b | 83ebcc11f0d9d8050ac2139bc6e9e560e93e8b9e | refs/heads/master | 2022-12-08T11:07:34.535257 | 2020-03-10T15:23:24 | 2020-03-10T15:23:24 | 215,564,914 | 0 | 0 | MIT | 2022-12-05T12:42:31 | 2019-10-16T14:11:33 | Python | UTF-8 | Python | false | false | 49 | py | from .objects import *
from .directories import * | [
"KiselevVN@rsue.ru"
] | KiselevVN@rsue.ru |
d061099cc86109debb7207b9a2b119224794223e | e2fa60686c8b45e1c2ca560e08a4c23cf4953e1d | /twitter.py | 36aad03711f6077cb1974cb11f429dc690d3fc52 | [] | no_license | HarishArikatla/PythonProjects | 32cc5ce2cced672c9e4141f180da2886fff5c5ad | a3a91cad2c022fa175630a937ecd49cf5dde57f2 | refs/heads/main | 2023-01-19T14:52:35.358024 | 2020-12-05T13:11:43 | 2020-12-05T13:11:43 | 318,456,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py |
from selenium import webdriver
# For using sleep function because selenium
# works only when the all the elements of the
# page is loaded.
import time
from selenium.webdriver.common.keys import Keys
# Creating an instance webdriver
browser = webdriver.chrome()
browser.get('https://www.twitter.com')
# Let's the user see and also load the element
time.sleep(2)
login = browser.find_elements_by_xpath('//*[@id="doc"]/div[1]/div/div[1]/div[2]/a[3]')
# using the click function which is similar to a click in the mouse.
login[0].click()
print("Login in Twitter")
user = browser.find_elements_by_xpath('//*[@id="login-dialog-dialog"]/div[2]/div[2]/div[2]/form/div[1]/input')
# Enter User Name
user[0].send_keys('USER-NAME')
user = browser.find_element_by_xpath('//*[@id="login-dialog-dialog"]/div[2]/div[2]/div[2]/form/div[2]/input')
# Reads password from a text file because
# saving the password in a script is just silly.
with open('test.txt', 'r') as myfile:
Password = myfile.read().replace('\n', '')
user.send_keys(Password)
LOG = browser.find_elements_by_xpath('//*[@id="login-dialog-dialog"]/div[2]/div[2]/div[2]/form/input[1]')
LOG[0].click()
print("Login Successful")
time.sleep(5)
elem = browser.find_element_by_name("q")
elem.click()
elem.clear()
elem.send_keys("Geeks for geeks ")
# using keys to send special KEYS
elem.send_keys(Keys.RETURN)
print("Search Successful")
# closing the browser
browser.close()
| [
"noreply@github.com"
] | HarishArikatla.noreply@github.com |
4145bee524cb93a8f4134d17be3190b7891be2ae | c36e0f0e7a1fc3e47e478568ed41a0f69600553a | /manage.py | 7952430f89f9f5c5840711e098f69b554fb16bb6 | [] | no_license | pavanreddy22/models | 9936988d6f29ec63f2c4bf21dc21b5a0bc1b7f27 | b89a7f5882d7cc4a2ba30b5fe7a2fdebcd2a79b9 | refs/heads/master | 2023-06-21T06:34:59.802461 | 2021-07-07T09:14:02 | 2021-07-07T09:14:02 | 383,679,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project44.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"harshadvali14331@gmail.com"
] | harshadvali14331@gmail.com |
67691d6f2106e5040498c338cc37dcec9c7e04d9 | 9a05706921899064efa16bad5853e862209588f4 | /multipleoften.py | f9b1322b035b8a137f8c4cfdb31be8672a9e95e7 | [] | no_license | wildbill2/python_projects | 66b2a5803db934b7db7e50985d624c9db6cce5a2 | ea9ecf89b69cd86b2a83782416c5dd84dde9df93 | refs/heads/master | 2023-01-28T10:52:58.472146 | 2020-12-05T18:52:37 | 2020-12-05T18:52:37 | 267,709,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | number = input("Give me a number and I will tell you if it is a multiple of ten.")
number = int(number)
if number % 10 == 0:
print("\nYour number is a multiple of 10!")
else:
print("\nThis number is not a multiple of 10.")
| [
"bcbillock@gmail.com"
] | bcbillock@gmail.com |
6ecb651a0ab8bab22ad2ac01bfa864c8084d539e | 089a970703643698ba2414dc8d9513ba6f8e4239 | /train_text.py | 00baf178cbfe2add62892f465340ed9ff9bbbbc5 | [] | no_license | qiangz520/ChatBot | 45dfe5e738e93e8b66e88e22fd6206213d49fa6b | 4480965bd86c0649d0bdbf5d22272500e2708f2f | refs/heads/master | 2020-04-04T09:54:28.020628 | 2018-12-14T12:18:05 | 2018-12-14T12:18:05 | 154,253,313 | 0 | 0 | null | 2018-10-23T03:03:20 | 2018-10-23T03:03:20 | null | UTF-8 | Python | false | false | 7,295 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : train.py
# @Author : ZJianbo
# @Date : 2018/10/13
# @Function : 开始训练
import random
from predata import *
"""训练单条数据。
train(input_tensors, target_tensors, input_sizes, target_sizes, batchsize)
Args:
*_tensor: 输入/输出的tensor类型
*_size: 输入/输出的语句的实际长度
use_teacher_forcing: 是否使用“教师强迫”,即在decoder时,
以一定概率将target,而不是预测的输出,作为下一个的输入
******************************
Creat:@ZJianbo @2018.10.15
Update:@ZJianbo @2018.10.21 添加了进行batch_size批量训练
"""
def train(tensor_text, tensor_face, batchsize):
    """Run one optimizer step over a batch of text pairs and return the mean loss.

    Encodes each source sentence token-by-token with the global text encoder,
    then decodes greedily (teacher forcing is currently disabled), accumulating
    the criterion loss over every target token. A single backward pass and one
    step of both global optimizers are performed for the whole batch.

    Args:
        tensor_text: nested batch structure; tensor_text[0] holds source
            (tensors, lengths) and tensor_text[1] holds target (tensors, lengths).
        tensor_face: face-feature tensors for the batch (not used here).
        batchsize: number of samples in the batch.

    Returns:
        Accumulated loss divided by the total number of target tokens.
    """
    EnOptimizer_text_glo.zero_grad()
    DeOptimizer_text_glo.zero_grad()
    total_target_len = 0
    batch_loss = 0
    for b in range(batchsize):
        src_tokens, src_len = tensor_text[0][0][b], tensor_text[0][1][b]
        tgt_tokens, tgt_len = tensor_text[1][0][b], tensor_text[1][1][b]
        total_target_len += tgt_len.numpy()
        # Fresh hidden state per sample; feed the source one token at a time.
        hidden = Encoder_text_glo.init_hidden()
        for step in range(src_len):
            _, hidden = Encoder_text_glo(src_tokens[step], hidden)
        # NOTE(review): a history (HST) encoder pass was sketched here but is
        # disabled — entextHST_hidden / enHST_hidden were never wired in.
        decoder_token = torch.tensor([[SOS_token]], device=device)
        decoder_hidden = hidden[:Decoder_text_glo.n_layers]
        teacher_forcing_ratio = 0.5  # kept for the (disabled) random toggle below
        # use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
        use_teacher_forcing = False
        if use_teacher_forcing:
            # Teacher forcing: feed the ground-truth target as the next input.
            for step in range(tgt_len):
                dec_out, decoder_hidden = Decoder_text_glo(decoder_token, decoder_hidden)
                batch_loss += Criterion_text_glo(dec_out, tgt_tokens[step])
                decoder_token = tgt_tokens[step]  # Teacher forcing
        else:
            # Greedy decoding: feed the model's own prediction as the next input.
            for step in range(tgt_len):
                dec_out, decoder_hidden = Decoder_text_glo(decoder_token, decoder_hidden)
                _, best_idx = dec_out.topk(1)
                decoder_token = best_idx.squeeze().detach()  # detach from history as input
                batch_loss += Criterion_text_glo(dec_out, tgt_tokens[step])
                if decoder_token.item() == EOS_token:
                    break
    batch_loss.backward()
    EnOptimizer_text_glo.step()
    DeOptimizer_text_glo.step()
    return batch_loss.item() / total_target_len
"""训练数据集。
train_iters(train_dataloader, n_iters, print_every, plot_every)
Args:
train_dataloader: 训练样本的dataloader
n_iters: 训练次数
print_every: 打印相关信息的间隔
plot_every: 展示loss变化的间隔
******************************
Creat:@ZJianbo @2018.10.15
Update:
"""
def train_iters(train_dataloader, n_iters=10, print_every=100, plot_every=10):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
for i_iter in range(1, n_iters + 1):
for num, training_data in enumerate(train_dataloader):
tensor_text = training_data[0]
tensor_face = training_data[1]
loss = train(tensor_text, tensor_face, train_dataloader.batch_size)
print_loss_total += loss
plot_loss_total += loss
if i_iter % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (CalculateTime().calc_time(start, i_iter / n_iters),
i_iter, i_iter / n_iters * 100, print_loss_avg))
evaluate_randomly(Encoder_text_glo, Decoder_text_glo, trainData, n=1)
if i_iter % plot_every == 0:
plot_loss_avg = plot_loss_total / plot_every
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
print(plot_losses)
#showPlot(plot_losses)
"""数据集某一条数据的测试
跟训练时一样的步骤,但是不需要反向传播,只是用来测试
@ZJianbo @2018.10.15
"""
def evaluate(encoder, decoder, sentence):
input_tensor, input_size = Batch2Tensor().tensor_from_sentence(allDataWords, sentence)
encoder_outputs = torch.zeros(MAX_LENGTH, Encoder_text_glo.hidden_size, device=device)
encoder_hidden = encoder.init_hidden()
for ei in range(input_size):
encoder_output, encoder_hidden = encoder(input_tensor[ei], encoder_hidden)
encoder_outputs[ei] += encoder_output[0,0]
# print(encoder_hidden)
detext_input = torch.tensor([[SOS_token]], device=device) # SOS
detext_hidden = encoder_hidden[:Decoder_text_glo.n_layers]
decoded_words = []
for di in range(MAX_LENGTH):
detext_output, detext_hidden = decoder(detext_input, detext_hidden)
# detext_output, detext_hidden, detext_attention = Decoder_text_glo(
# detext_input, detext_hidden, encoder_outputs)
topv, topi = detext_output.data.topk(1)
if topi.item() == EOS_token:
decoded_words.append('<EOS>')
break
else:
decoded_words.append(allDataWords.index2word[topi.item()])
detext_input = topi.squeeze().detach()
return decoded_words
"""数据集中随机语句的测试,可选择数目n
@ZJianbo @2018.10.15
"""
def evaluate_randomly(encoder, decoder, batches, n=3):
for i in range(n):
batch = random.choice(batches)
print('> ', batch['text'])
print('= ', batch['text_next'])
output_words= evaluate(encoder, decoder, batch['text'])
output_sentence = ' '.join(output_words)
print('< ', output_sentence)
print('')
"""对数据集外未知语句的测试
@ZJianbo @2018.10.15
"""
def test_sentence(encoder, decoder, sentences):
print("***** Test *****")
for stc in sentences:
print('> ', stc)
output_words = evaluate(encoder, decoder, stc)
output_sentence = ' '.join(output_words)
print('< ', output_sentence)
print('')
if __name__ == '__main__':
# 进行训练,Train_Iters和Print_Every可在配置文件中设置
train_iters(trainDataloader, n_iters=Train_Iters, print_every=Print_Every)
# 保存模型
if IsSaveModel:
torch.save(Encoder_text_glo.state_dict(), 'entext.pkl')
torch.save(Decoder_text_glo.state_dict(), 'detext.pkl')
# 从测试集中随机取n条数据进行测试
print("***** Training Evaluate *****")
evaluate_randomly(Encoder_text_glo, Decoder_text_glo, trainData, n=1)
# 从测试集中随机取n条数据进行测试
print("***** Evaluate *****")
evaluate_randomly(Encoder_text_glo, Decoder_text_glo, testData, n=1)
# 随机输入语句进行测试,TestSentence可在配置文件中设置
test_sentence(Encoder_text_glo, Decoder_text_glo, TestSentence)
| [
"1357977299@qq.com"
] | 1357977299@qq.com |
da53e6b40eaac380b601b01f05b2e28e0e0ea875 | 7461f1bd1df09bf5e721ade156f88b2b62da4ae1 | /app/email.py | c1130963454ed4072daa624afc5f67eb47cf00c8 | [] | no_license | shubao5612/flasky1 | d0c9fbbe5f1b1ccceb2b372aa20baee8e65b456d | 1acc459b5500cf78b34d82687db2edce6b91e398 | refs/heads/master | 2022-12-07T09:10:48.559371 | 2017-11-18T08:45:16 | 2017-11-18T08:45:16 | 111,189,934 | 0 | 1 | null | 2022-11-26T23:35:53 | 2017-11-18T08:42:05 | Python | UTF-8 | Python | false | false | 617 | py | from flask import current_app,render_template
from flask_mail import Message
from threading import Thread
from . import mail
def send_async_email(app,msg):
with app.app_context():
mail.send(msg)
def send_email(to,subject,template,**kwargs):
app=current_app._get_current_object()
msg=Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX']+subject,sender=app.config['FLASKY_MAIL_SENDER'],recipients=[to])
msg.body=render_template(template+'.txt',**kwargs)
msg.html=render_template(template+'.html',**kwargs)
thr=Thread(target=send_async_email,args=[app,msg])
thr.start()
return thr
| [
"wangshen5612@163.com"
] | wangshen5612@163.com |
0565407cdfc58de77957fbefb50611a9c24c4748 | d9720a7b4bfe713426f766547062aaeacdfa2566 | /models/city.py | 44ade30d3614b334699e4c1bc26318d31b39b2b7 | [
"MIT"
] | permissive | AlisonQuinter17/AirBnB_clone | b90a96bc2256e32f648bb2b9a8e1dbdba90ca4eb | c890e3b4f9eb7a3ded96ac756387109351e6b13f | refs/heads/main | 2023-01-19T05:10:39.635975 | 2020-11-18T17:36:59 | 2020-11-18T17:36:59 | 308,370,255 | 1 | 2 | MIT | 2020-11-03T16:03:36 | 2020-10-29T15:27:01 | Python | UTF-8 | Python | false | false | 144 | py | #!/usr/bin/python3
from models.base_model import BaseModel
class City(BaseModel):
""" city attributes """
state_id = ""
name = ""
| [
"afarizap@gmail.com"
] | afarizap@gmail.com |
978244eb1927da42b120cde439a93e82048646cb | 111cc7174b0ec938d5221bdf823073021577f6dc | /setup_test.py | 5cc6f66495087c98125837cab434559f4fcdceda | [
"MIT"
] | permissive | AkiraDemenech/Postimpressionism | 1e9ab33ed1b6ffeb0208a95f451b5afc57de62aa | 2a88ed95d2fb108bdddeb1803b4aad8ab6b7d0da | refs/heads/master | 2021-09-25T06:19:25.111919 | 2021-09-22T13:14:33 | 2021-09-22T13:14:33 | 162,750,921 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | # -*- coding: utf-8 -*-
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='postimpressionism',
version='0.1.7.1',
url='https://github.com/AkiraDemenech/Postimpressionism',
license='MIT License',
author='Akira Demenech',
author_email='akira.demenech@gmail.com',
keywords='art image postproduction postimpressionism',
description="An Art Exploration Package based on Modernist Avant-garde as a Post-Production of Digital Contemporary Art;",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
install_requires=['matplotlib', 'numpy', 'Pillow'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
) | [
"guiakira.demenechmori@gmail.com"
] | guiakira.demenechmori@gmail.com |
2b117cb43b2993dc5748ae809156750eb0e3a3f7 | 6bf005128fb95ea21994325ace59cf0664d0159e | /U3DAutomatorClient/script/windows/PPT3DTestCase/FirstStageTestCase/InsertWordArtTestCase.py | 101fe499da4ad6957383cee6f9715c1d14d63a4c | [] | no_license | Bigfishisbig/U3DAutomatorTest | 5ab4214fc6cda678a5f266fb013f7dd7c52fcaf8 | 93a73d8995f526f998ff50b51a77ef0bbf1b4ff8 | refs/heads/master | 2023-01-07T11:59:19.025497 | 2019-09-20T06:06:55 | 2019-09-20T06:06:55 | 209,458,914 | 0 | 0 | null | 2022-12-27T15:35:30 | 2019-09-19T03:58:43 | Python | UTF-8 | Python | false | false | 1,796 | py | #!/usr/bin/env python
# coding=utf-8
"""
文件名称:InsertWordArtTestCase.py
作者:ycy
版本:PPTPro
创建时间:2019/1/18 15:51
修改时间:
软件:PyCharm
"""
from script.windows.Operation import *
from script.windows.SystemDialog import SystemDiaglog
from script.windows.PPT3DTestCase.Action import Action
from script.windows.PPT3DSetting.SourcePath import SourcePath
reload(sys)
sys.setdefaultencoding('UTF-8') # 将脚本编码格式转化未置顶的编码格式
class InsertWordArtTestCase(Action, Operation, SystemDiaglog):
'''插入艺术字'''
def test_main(self):
'''插入艺术字'''
self.OperationSetting()
self.Init3DPPT()
self.SetTag("插入艺术字", time.time())
tag = (self.__class__.__doc__ or u"测试") + "_" + self.__class__.__name__
self.startScene(tag)
self.InputPara()
self.InputStr(u"黑夜给了你黑色的眼睛,你却用它来寻找光明。")
wordart = [SourcePath.File_Img_WordArt_Text_1, SourcePath.File_Img_WordArt_Text_2, SourcePath.File_Img_WordArt_Text_3]
for i in range(3):
self.OneClick("BtnFormat")
path = self.getText()
# self.OneClickL(path, 50)
self.ListClick("RotateByZAxisNor")
self.ListClick("WordArtStyle", i)
self.s_witForImg(wordart[i], 10, "艺术字插入失败", None, 0.4)
self.OneClick("BtnStart")
self.OneClick("BtnRevert")
self.s_waitForImgVanish(wordart[i], 10, "撤销艺术字失败", 0.4)
self.OneClick("BtnStart")
self.OneClick("BtnRecover")
self.s_witForImg(wordart[i], 10, "艺术字插入失败")
self.endScene(tag)
time.sleep(1)
self.EndTag()
| [
"1227479796@qq.com"
] | 1227479796@qq.com |
741d349ff26d60d025fc1acff2fe842d0596e5f5 | f850c9ed463c9d12e1333979b6196ba8ac83d4f5 | /cleaning/urls.py | 70e6b3bf61d66c27fa1bb112ce2b493806db46ff | [] | no_license | AlekseyDatsenko/cleaning | 2be26ec004074ac658b765cebadec0d2966bfba7 | 7a4d8b95c3e9ba15dc14a16d7ea0c95ed0c12efa | refs/heads/master | 2020-08-15T18:45:38.215824 | 2019-10-16T11:39:12 | 2019-10-16T11:39:12 | 215,390,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | """cleaning URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('', include('www.urls')),
path('admin/', admin.site.urls),
]
| [
"d.a.s.82@ukr.net"
] | d.a.s.82@ukr.net |
e9e5d9560e3538cf8acd44dda5426de0b90d8434 | 2d7237e1b35a7feb659c34c04da7e3069b1ed1ee | /virtual/bin/easy_install | ccf28af20e903607266746a93b5af7a4f57b5faa | [
"MIT"
] | permissive | Juru-10/STA | c204dfc6d58f5322eece46e84ad038ba51bc6f88 | df9fdef0c70dece49cca09018ad4f57583d05b73 | refs/heads/master | 2020-04-21T06:20:39.004444 | 2019-05-08T10:20:35 | 2019-05-08T10:20:35 | 169,363,398 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 267 | #!/home/wecode/Desktop/DJANGO/STA/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"jurassu10@gmail.com"
] | jurassu10@gmail.com | |
a0a6a76adcf4ee4a434581e752de8fa223d50582 | ee9d05aff388629d2d356cbdfb1ffcbc32b091fa | /HTML_Form/urls.py | 3625ced5abe2d73dabb388b772ab174edc1100eb | [] | no_license | CHIINGTING/HTML_Form | 487bd56debf49e98812e7b9dfa5ea03b4f6f2833 | 2838a6ae18e3cdf9e42ff82990e4df6493875f27 | refs/heads/master | 2023-08-14T05:00:18.124294 | 2021-09-15T19:06:24 | 2021-09-15T19:06:24 | 401,056,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | """HTML_Form URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from mysite import views
from django.conf.urls import include
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.index),
path('get_example/', views.get_example),
path('<int:pid>/<str:del_pass>', views.index),
path('list/', views.listing),
path('post/', views.posting),
path('contact/', views.contact),
path('post2db/', views.post2db),
path('captcha/', include('captcha.urls')),
]
| [
"tad20077@gmail.com"
] | tad20077@gmail.com |
6c3521f7f8735e45cd7fa0cd7ff651fbf0bf0d51 | 717171ed7a14ad60dd42d62fe0dd217a0c0c50fd | /19年7月/7.02/base64处理图形验证码.py | bd54377e9b458358c19bb62f61375ac74e346fcc | [] | no_license | friedlich/python | 6e9513193227e4e9ee3e30429f173b55b9cdb85d | 1654ef4f616fe7cb9fffe79d1e6e7d7721c861ac | refs/heads/master | 2020-09-04T14:34:48.237404 | 2019-11-18T14:54:44 | 2019-11-18T14:54:44 | 219,756,451 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,693 | py | import requests,base64,sys,csv
from PIL import Image
address_url = 'https://www.ele.me/restapi/bgs/poi/search_poi_nearby?'
place = input('请输入你的收货地址:')
params = {
'geohash': 'wtw3sjq6n6um',
'keyword': place,
'latitude': '31.23037',
'limit': '20',
'longitude': '121.473701',
'type': 'nearby'
}
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'}
address_res = requests.get(address_url,headers=headers,params=params)
address_json = address_res.json()
print('以下,是与'+place+'相关的位置信息:\n')
n=0
for address in address_json:
print(str(n)+'. '+address['name']+':'+address['short_address']+'\n')
n = n+1
address_num = int(input('请输入您选择位置的序号:'))
final_address = address_json[address_num]
session = requests.session()
url_1 = 'https://h5.ele.me/restapi/eus/login/mobile_send_code'
tel = input('请输入手机号:')
data_1 = {
'captcha_hash':'',
'captcha_value':'',
'mobile': tel,
'scf': "ms"
}
login = session.post(url_1,headers=headers,data=data_1)
code = login.status_code
print(type(login))
print(login.text)
print('status code of login:' + str(code))
if code == 200: #前三次登录没有图片验证过程
token = login.json()['validate_token']
url_2 = 'https://h5.ele.me/restapi/eus/login/login_by_mobile'
code = input('请输入手机验证码:')
data_2 = {
'mobile': tel,
'scf': 'ms',
'validate_code': code,
'validate_token': token
}
session.post(url_2,headers=headers,data=data_2)
elif code == 400: #登录超过3次,网站会要求图片验证
print('有图形验证码')
url_3 = 'https://h5.ele.me/restapi/eus/v3/captchas'
data_3 = {'captcha_str': tel}
# 提取验证码。
cap =session.post(url_3,headers=headers,data=data_3)
hash = cap.json()['captcha_hash']
value = cap.json()['captcha_image'].replace('data:image/jpeg;base64,','')
# 验证码字符串转图形文件保存到本地
x = base64.b64decode(value)
file = open(sys.path[0]+'\\captcha.jpg','wb')
file.write(x)
file.close()
im = Image.open(sys.path[0]+'\\captcha.jpg')
im.show() #展示验证码图形
captche_value = input('请输入验证码:')
#将图片验证码作为参数post到饿了吗服务器登录
url_1 = 'https://h5.ele.me/restapi/eus/login/mobile_send_code'
data_4 = {
'captcha_hash': hash,
'captcha_value': captche_value,
'mobile': tel,
'scf': "ms"
}
# 将验证码发送到服务器。
login = session.post(url_1,headers=headers,data=data_4)
print(login.json())
token = login.json()['validate_token']
url_2 = 'https://h5.ele.me/restapi/eus/login/login_by_mobile'
code = input('请输入手机验证码:')
data_2 = {
'mobile': tel,
'scf': 'ms',
'validate_code': code,
'validate_token': token
}
session.post(url_2,headers=headers,data=data_2)
restaurants_url = 'https://www.ele.me/restapi/shopping/restaurants'
params={
'extras[]': 'activities',
'geohash': final_address['geohash'],
'latitude': final_address['latitude'],
'limit': '24',
'longitude': final_address['longitude'],
'offset': '0',
'terminal': 'web'
}
restaurants_res = session.get(restaurants_url,headers=headers,params=params)
restaurants_json = restaurants_res.json()
with open(sys.path[0]+'\\restaurants.csv','w',newline='',encoding='utf_8_sig') as f:
writer = csv.writer(f)
for restaurant in restaurants_json:
writer.writerow(restaurant['name'])
| [
"1164166295@qq.com"
] | 1164166295@qq.com |
088996042a93ae238b711567e033ad1c4c183dea | 043c582ae5e96c47baafd898f9266046a63f3f72 | /pyloadutils/merger.py | f7c6584688a4c224dc615147f08cf1e0d5e3f8fc | [] | no_license | thammi/pyload-utils | cbe67ba794df6b1ebe05fa07a96b0c2223d6ee6c | 3f24a40a909dd68e087611e61db6b30ad2411ea5 | refs/heads/master | 2016-09-06T11:29:52.140554 | 2013-07-17T12:46:26 | 2013-07-17T12:46:26 | 4,619,178 | 2 | 2 | null | 2016-01-07T19:07:34 | 2012-06-10T23:06:15 | Python | UTF-8 | Python | false | false | 2,092 | py | #!/usr/bin/env python3
import re
import sys
from pyloadutils.pyload import PyloadConnection
def main():
con = PyloadConnection()
collected = con.getCollectorData()
bags = {}
regex = re.compile(sys.argv[1] if len(sys.argv) > 1 else '.*')
for package in collected:
match = regex.match(package['name'])
if match:
if match.groups():
part = match.group(1)
else:
part = match.group(0)
if part in bags:
bags[part].append(package)
else:
bags[part] = [package]
for key, packages in list(bags.items()):
if len(packages) > 1:
def count_finished(package):
count = 0
for link in package['links']:
if link['status'] == 0:
count += 1
return count
packages.sort(key=count_finished, reverse=True)
else:
del bags[key]
for key, packages in sorted(bags.items()):
print(key)
for package in packages:
files = len(package['links'])
hosters = set(link['plugin'] for link in package['links'])
print("- %s (%i files; %s)" % (package['name'], files, ', '.join(hosters)))
print()
if not bags:
print("Nothing to merge")
return
accept = input("Do you want to merge these packages? ")
if accept not in ['y', 'j']:
print("OK, aborting!")
return
print()
for key, packages in bags.items():
print("Merging", key, "...")
root = packages[0]
rest = packages[1:]
pid = root['pid']
if root['name'] != key:
con.setPackageName(pid=pid, name=key)
links = []
for package in rest:
links += (link['url'] for link in package['links'])
con.addFiles(pid=pid, links=links)
pids = [package['pid'] for package in rest]
con.deletePackages(pids=pids)
if __name__ == '__main__':
sys.exit(main())
| [
"thammi@chaossource.net"
] | thammi@chaossource.net |
f3ac75934f5a622e8283021758aa0f5d3b350d95 | 7479bfd8c4f8c52f9fa667c5f8f044128e79b38d | /hungry.py | 742b8f74bf719dee6348ff9291c986b2c0d3cdba | [] | no_license | ojaswi8827/test | d7a132f2243383ab6228feabcc47578dcd39f6b9 | 788adff0b2ef5edfc2bfe702f9980e24c0266091 | refs/heads/master | 2020-08-24T11:52:51.648837 | 2019-10-24T18:20:41 | 2019-10-24T18:20:41 | 216,820,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | print("I'm hungry...")
| [
"ojaswi.athghara@gmail.com"
] | ojaswi.athghara@gmail.com |
00ba35084fd85f86371304ec7b21410b8b24b17a | 38c75ea38cab758b96236fb01e77ae504491de37 | /PGML-BL/OpenFoam_Laminar/laminarFlatPlate/Different_Reynolds_Number/ml_v2.py | 99d5f0f94aa579b7adf1f55b5ba97a949567321f | [
"Apache-2.0"
] | permissive | zuokuijun/PGML | afa4ea49cc2d6e3249fada1000b813730ea5e6e6 | 2b919d65e6467b2afdd9f58b9a72f1d5ec74132f | refs/heads/main | 2023-07-16T18:17:29.166672 | 2021-08-23T01:39:42 | 2021-08-23T01:39:42 | 435,793,580 | 1 | 0 | Apache-2.0 | 2021-12-07T08:05:32 | 2021-12-07T08:05:31 | null | UTF-8 | Python | false | false | 4,247 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 9 15:26:04 2021
@author: suraj
"""
import pyvista as pv
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Dense, Dropout, Flatten
from tensorflow.keras.layers import concatenate
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.callbacks import TensorBoard, EarlyStopping
from keras import backend as kb
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from keras.regularizers import l2
from tensorflow.keras import regularizers
#from helpers import get_lift_drag, preprocess_image, read_csv_file, save_model, load_model
# font for plotting
import matplotlib as mpl
font = {'family' : 'normal',
'size' : 14}
mpl.rc('font', **font)
#%%
font = {'size' : 14}
plt.rc('font', **font)
filename = f'../../../Similarity_Solution/su2.csv'
data_blasisus = np.genfromtxt(filename, delimiter=',')
data = np.load('./Re_0.5e+5/data.npz')
u = data['u']
v = data['v']
p = data['p']
x = data['x']
y = data['y']
X = data['X']
Y = data['Y']
nx = x.shape[0]
ny = y.shape[0]
Xf = X.flatten()
Yf = Y.flatten()
uf = u.flatten()
vf = v.flatten()
pf = p.flatten()
#%%
features = np.vstack((Xf, Yf)).T
labels = np.array([uf,vf,pf]).T
sampling = 2 # [1] half-x, [2] random 50%,
if sampling == 1:
train_slice = features[:,0] <= 0.05
features_train = features[train_slice]
labels_train = labels[train_slice]
elif sampling == 2:
num_samples = features.shape[0]
fraction = 0.5
train_slice = np.random.randint(num_samples, size=int(fraction*num_samples))
features_train = features[train_slice]
labels_train = labels[train_slice]
#%%
sc_input = MinMaxScaler(feature_range=(-1,1))
sc_input = sc_input.fit(features_train)
xtrain = sc_input.transform(features_train)
sc_output = MinMaxScaler(feature_range=(-1,1))
sc_output = sc_output.fit(labels_train)
ytrain = sc_output.transform(labels_train)
#%%
nf1 = 2
nl = 3
n_layers = 2
n_neurons = [20,20]
lr = 0.001
epochs = 600
batch_size = 64
x1 = Input(shape=(nf1,))
x = Dense(n_neurons[0],activation='relu')(x1)
x = Dense(n_neurons[0],activation='relu')(x)
output = Dense(nl,activation='linear')(x)
model = Model(inputs=x1, outputs=output)
print(model.summary())
opt = tf.keras.optimizers.Adam(learning_rate=lr)
"""## compile the model"""
model.compile(loss='mean_squared_error', optimizer=opt)
history_callback = model.fit(x=xtrain,
y=ytrain,
batch_size=batch_size,
epochs=epochs,
validation_split=0.3,
shuffle=True,
callbacks=[])
#%%
loss = history_callback.history["loss"]
val_loss = history_callback.history["val_loss"]
plt.figure()
epochs = range(1, len(loss) + 1)
plt.semilogy(epochs, loss, 'b', label='Training loss')
plt.semilogy(epochs, val_loss, 'r', label='Validationloss')
plt.title('Training and validation loss')
plt.legend()
filename = f'loss.png'
plt.savefig(filename, dpi = 300)
plt.show()
#%%
xtest = sc_input.transform(features)
ytest = np.copy(labels)
ypred_sc = model.predict(xtest)
ypred = sc_output.inverse_transform(ypred_sc)
#%%
x = data['x']
y = data['y']
n_slice = 200
x_slice = features[:,0] == x[n_slice]
x_005 = features[x_slice]
#%%
print(f'Prediction for x = {x[n_slice]}')
ytest_x = ytest[x_slice]
ypred_x = ypred[x_slice]
#%%
fig, ax = plt.subplots(1,1,figsize=(6,5),sharey=True,)
ax.plot(ytest_x[:,0],y,'ro-',label='True')
ax.plot(ypred_x[:,0],y,'bs-',label='Pred')
ax.set_ylim([0,0.003])
ax.legend()
ax.grid()
plt.show()
#%%
fig, ax = plt.subplots(3,1,figsize=(8,12),sharey=True,)
cs = ax[0].contourf(X,Y, np.reshape(ytest[:,2],[ny,nx]),120)
fig.colorbar(cs, ax=ax[0])
cs = ax[1].contourf(X,Y, np.reshape(ypred[:,2],[ny,nx]),120)
fig.colorbar(cs, ax=ax[1])
diff = ytest - ypred
cs = ax[2].contourf(X,Y, np.reshape(diff[:,2],[ny,nx]),120, cmap='jet')
fig.colorbar(cs, ax=ax[2])
for i in range(3):
ax[i].set_ylim([0,0.004])
plt.show() | [
"pawarsuraj92@gmail.com"
] | pawarsuraj92@gmail.com |
f3802d6e821566c96f6be21f5a4bb5af03dd2beb | ca123d832c2d3f327d6036d0d799600f10d44275 | /src/StateEstimator/uuvEkfEstimator copy.py | 8d2dd85939370ead7080b43d6a3329f0652a0d98 | [] | no_license | alliWong/AuvEstimator | e1c5449b7fd3dfcce63a52956ce62f9247f0f0b6 | ea9d89da14c58f28c4dd7b77377b65465c36d867 | refs/heads/main | 2023-04-11T20:58:29.641881 | 2021-05-05T05:02:37 | 2021-05-05T05:02:37 | 361,925,020 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 20,630 | py | #! /usr/bin/env python
# """
# The purpose of this file is to estimate the state of the vehicle with EKF
# using DVL, IMU, and barometer
# """
""" Import libraries """
import sys
import numpy as np
from numpy import array, zeros, reshape, matmul, eye, sqrt, cos, sin
# from tf.transformations import quaternion_from_euler, euler_from_quaternion
from transformations import quaternion_from_euler, euler_from_quaternion
from ekf import ExtendedKalmanFilter
from commons import EuclideanDistance, SkewSymmetric, Rot, TrapIntegrate, MapAngVelTrans, PressureToDepth
class EkfEstimator(object):
def __init__(self, dt, enuFrame, pseudoLinPos):
""" Initial Parameters """
self.est_dt = dt
self.enuFrame = enuFrame # frame flag (1 to set ENU, 0 to set NED)
# Decide which sensor to calculate pseudo position estimate
if pseudoLinPos == 0:
self.est_useDvlLinPos = 1
if pseudoLinPos == 1:
self.est_useImuLinPos = 1
if pseudoLinPos == 2:
self.est_useDvlLinPos = 1
self.est_useImuLinPos = 1
""" Sensor setup """
# Instantiate DVL variables and arrays
self.sen_dvl_update = 0 # dvl update flag (0 to set 0, 1 to set 1)
self.sen_dvl_time = 0 # current dvl sensor time
self.sen_dvl_previousTime = 0 # last time since dvl updated
self.sen_dvl_rbtLinVel = zeros(shape=(3,1)) # robot frame linear velocity from dvl
self.sen_dvl_mapLinVel = zeros(shape=(3,1)) # NED map frame linear velocity from dvl
self.sen_dvl_aprxMapLinPos = zeros(shape=(3,1)) # map frame approximated linear position from dvl
self.sen_dvl_offset = zeros(shape=(3,1)) # dvl physical offset from vehicle COM
# Instantiate IMU variables and arrays
self.sen_imu_update = 0 # imu update flag (0 to set 0, 1 to set 1)
self.sen_imu_time = 0 # current imu sensor time
self.sen_imu_previousTime = 0 # last time since imu updated
self.sen_imu_rbtAngVel = zeros(shape=(3,1)) # robot frame angular velocity from IMU
self.sen_imu_rbtLinAcc = zeros(shape=(3,1)) # robot frame linear acceleration from IMU
self.sen_imu_mapLinAcc = zeros(shape=(3,1)) # map frame linear acceleration from IMU
self.sen_imu_mapLinAccNoGrav = zeros(shape=(3,1)) # map frame linear acceleration without gravity
self.sen_imu_mapAngPos = zeros(shape=(3,3)) # orientation rotation matrix
self.sen_imu_mapEulAngPos = zeros(shape=(3,1)) # orientation euler angle matrix
self.sen_imu_mapAngVel = zeros(shape=(3,1)) # map frame angular velocity from IMU
self.sen_imu_aprxMapLinPos = zeros(shape=(3,1)) # map frame approximated linear position from integrated linear acceleration IMU
self.sen_imu_aprxMapLinVel = zeros(shape=(3,1)) # map frame approximated linear velocity from integrated linear acceleration IMU
self.sen_imu_lastMapAprxLinVel = zeros(shape=(3,1)) # map frame approximated last linear velocity from integrated linear acceleration IMU
# Instantiate Barometer variables and arrays
self.sen_bar_update = 0 # barometer update flag (0 to set 0, 1 to set 1)
self.sen_bar_previousTime = 0 # last time since bar updated
self.sen_bar_mapLinPos = 0 # map frame barometer linear position
# Sensor frame setup
# Configure DVL frame to ENU (x-forward, z-upwards)
self.sen_dvl_enuFrameRoll = np.deg2rad(0)
self.sen_dvl_enuFramePitch = np.deg2rad(90) # -90
self.sen_dvl_enuFrameYaw = np.deg2rad(0)
# Configure DVL frame to NED (x-forward, z-downwards)
self.sen_dvl_nedFrameRoll = np.deg2rad(0)
self.sen_dvl_nedFramePitch = np.deg2rad(90)
self.sen_dvl_nedFrameYaw = np.deg2rad(180)
# Configure IMU frame to NED (x-forward, z-downwards)
self.sen_imu_nedFrameRoll = np.deg2rad(180)
self.sen_imu_nedFramePitch = np.deg2rad(0)
self.sen_imu_nedFrameYaw = np.deg2rad(0)
self.frameTrans = Rot(self.sen_dvl_enuFrameRoll, self.sen_dvl_enuFramePitch, self.sen_dvl_enuFrameYaw)
""" Estimator setup """
# Instantiate estimator measurement variables and arrays
self.meas_update = 0
self.est_mapLinPos = zeros(shape=(3,1)) # map frame estimator linear position array
self.est_mapLinVel = zeros(shape=(3,1)) # map frame estimator linear velocity array
self.est_mapPrevLinVel = zeros(shape=(3,1)) # previous map frame estimator linear velocity array
self.est_mapAngPos = zeros(shape=(3,3)) # map frame angular position array
self.est_mapEulAngPos = zeros(shape=(3,1)) # orientation euler angle matrix
self.est_mapAngVel = zeros(shape=(3,1)) # map frame angular velocity array
self.est_mapLinAcc = zeros(shape=(3,1)) # map frame linear acceleration
self.est_mapPrevLinAcc = zeros(shape=(3,1)) # previous map frame linear acceleration
self.est_rbtLinVel = zeros(shape=(3,1)) # robot frame estimator linear velocity array
self.est_rbtAngVel = zeros(shape=(3,1)) # robot frame angular velocity array
self.est_rbtLinAcc = zeros(shape=(3,1)) # robot frame linear acceleration
self.est_rbtAccBias = zeros(shape=(3,1)) # robot frame acceleration bias
self.est_rbtGyrBias = zeros(shape=(3,1)) # robot frame gyroscope bias
# Instantiate estimator matrices
est_inputDimState = 6 # number of estimator inputs
est_measDimState = 13 # number of measurements being taken
self.est_dimState = 15 # number of estimate states
self.gravity = array([[0], [0], [9.80665]]) # gravity vector [m/s^2]
self.est_A = eye(self.est_dimState) # jacobian state matrix
self.est_H = zeros(shape=(est_measDimState,self.est_dimState)) # jacobian observation matrix
self.est_u = zeros(shape=(est_inputDimState,1)) # input matrix
self.est_m = zeros(shape=(self.est_dimState,1)) # frame estimator measurement matrix
self.est_x = zeros(shape=(self.est_dimState,1)) # frame estimator state estimate
self.est_L = zeros(shape=(self.est_dimState,self.est_dimState)) # frame estimator kalman gain matrix
self.est_P = zeros(shape=(self.est_dimState,self.est_dimState)) # frame estimator covariance matrix
self.est_prev_x = zeros(shape=(self.est_dimState,1)) # frame estimator state estimate
def ComputeQ(self, est_Q_linPos, est_Q_angPos, est_Q_linVel, est_Q_acBias, est_Q_gyBias):
self.est_Q = array([
[np.power(est_Q_linPos,2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, np.power(est_Q_linPos,2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, np.power(est_Q_linPos,2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, np.power(est_Q_angPos,2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, np.power(est_Q_angPos,2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, np.power(est_Q_angPos,2), 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, np.power(est_Q_linVel,2), 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, np.power(est_Q_linVel,2), 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, np.power(est_Q_linVel,2), 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, np.power(est_Q_acBias,2), 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, np.power(est_Q_acBias,2), 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, np.power(est_Q_acBias,2), 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, np.power(est_Q_gyBias,2), 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, np.power(est_Q_gyBias,2), 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, np.power(est_Q_gyBias,2)]]) # process noise covariance matrix
def ComputeR(self, est_R_linPosZ, est_R_angPos, est_R_linVel, est_R_acBias, est_R_gyBias):
self.est_R = array([
[np.power(est_R_linPosZ,2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, np.power(est_R_angPos,2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, np.power(est_R_angPos,2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, np.power(est_R_angPos,2), 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, np.power(est_R_linVel,2), 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, np.power(est_R_linVel,2), 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, np.power(est_R_linVel,2), 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, np.power(est_R_acBias,2), 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, np.power(est_R_acBias,2), 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, np.power(est_R_acBias,2), 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, np.power(est_R_gyBias,2), 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, np.power(est_R_gyBias,2), 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, np.power(est_R_gyBias,2)]]) # measurement noise covariance matrix
""" Raw sensor measurements """
def DvlCallback(self, rbtLinVel, offset, t):
# Instantiate dvl variables
self.sen_dvl_update = 1
self.sen_dvl_time = t
# Raw measurements
self.sen_dvl_rbtLinVel = rbtLinVel # robot linear velocity array
self.sen_dvl_offset = offset # offset
# Perform measurement update
self.meas_update = 1
self.RunEst()
self.meas_update = 0
# Update sensor flag
self.sen_dvl_update = 0
    def ImuCallback(self, rbtLinAcc, rbtAngVel, mapAngPos, mapEulAngPos, rbtAccBias, rbtGyrBias, t):
        """Handle one IMU reading and trigger an EKF time (prediction) update.

        rbtLinAcc: robot-frame linear acceleration
        rbtAngVel: robot-frame angular velocity
        mapAngPos: map-frame attitude as a rotation matrix
        mapEulAngPos: map-frame attitude as euler angles
        rbtAccBias / rbtGyrBias: accelerometer / gyroscope bias estimates
        t: timestamp of this measurement
        """
        # Instantiate imu variables
        self.sen_imu_update = 1
        self.sen_imu_time = t
        # Raw measurements
        self.sen_imu_rbtLinAcc = rbtLinAcc # robot linear acceleration array
        self.sen_imu_rbtAngVel = rbtAngVel # robot angular velocity array
        self.sen_imu_mapAngPos = mapAngPos # map angular position rotation matrix array
        self.sen_imu_mapEulAngPos = mapEulAngPos # map angular position euler angle matrix array
        self.sen_imu_accBias = rbtAccBias # robot accel bias array
        self.sen_imu_gyrBias = rbtGyrBias # robot gyro bias array
        # Perform time update
        # meas_update=0 makes RunEst take the prediction branch
        self.meas_update = 0
        self.RunEst()
        # Update sensor flag
        self.sen_imu_update = 0
    def DepthCallback(self, rbtLinPos, t):
        """Handle one barometer/depth reading and run an EKF prediction step.

        rbtLinPos: map-frame linear position along z (depth)
        t: timestamp of this measurement

        NOTE(review): despite being a sensor reading, this sets meas_update=0
        (time update); the depth enters the filter via the observation matrix
        on the next measurement update — confirm this is intended.
        """
        # Instantiate barometer variables
        self.sen_bar_update = 1
        self.sen_bar_time = t
        # Raw measurements
        self.sen_bar_mapLinPos = rbtLinPos # map linear position in z-direction
        # Perform time update
        self.meas_update = 0
        self.RunEst()
        # Update sensor flag
        self.sen_bar_update = 0
""" Convert sensor measurements from robot body frame to map frame """
    def RbtToMap(self):
        """Rotate the raw robot-frame sensor readings into the map frame.

        Only runs when enuFrame == 1 (ENU convention). Produces:
        sen_imu_mapLinAcc, sen_imu_mapLinAccNoGrav, sen_imu_mapAngVel,
        sen_dvl_mapLinVel.
        """
        if self.enuFrame == 1: # ENU (x-forward, z-upwards)
            # IMU
            # Convert linear acceleration from robot frame into map frame
            self.sen_imu_mapLinAcc = np.matmul(self.sen_imu_mapAngPos, self.sen_imu_rbtLinAcc)
            # NOTE(review): gravity is removed from the *previous* control input
            # est_u[0:3], not from the freshly rotated sample above — confirm.
            self.sen_imu_mapLinAccNoGrav = self.est_u[0:3] - self.gravity
            # Convert angular velocity from robot frame into map frame
            # (MapAngVelTrans maps body rates to euler-angle rates)
            self.sen_imu_mapAngVel = np.matmul(MapAngVelTrans(self.sen_imu_mapEulAngPos[0], self.sen_imu_mapEulAngPos[1], self.sen_imu_mapEulAngPos[2]), self.sen_imu_rbtAngVel)
            # DVL
            # Correct DVL coordinate frame wrt to ENU
            dvl_enuTransRbtLinVel = np.matmul(self.frameTrans, self.sen_dvl_rbtLinVel)
            # remove the lever-arm velocity induced by body rotation at the DVL offset
            dvl_enuTransRbtLinVel -= np.cross(self.sen_imu_rbtAngVel.T, self.sen_dvl_offset.T).T
            # Convert velocity from robot frame into map frame
            self.sen_dvl_mapLinVel = np.matmul(self.sen_imu_mapAngPos, dvl_enuTransRbtLinVel)
""" Estimator measurement array """
def SenMeasArrays(self):
# Update DVL sensor array if a sensor update occurs
if self.sen_dvl_update == 1:
# Set estimator last known sensor update to last known sensor update
self.sen_dvl_previousTime = self.sen_dvl_time
# Update the linear velocity in map and robot frame measurement array
self.est_mapLinVel = self.sen_dvl_mapLinVel
self.est_rbtLinVel = self.sen_dvl_rbtLinVel
# Update barometer sensor array if a sensor update occurs
if self.sen_bar_update == 1:
# Set estimator last known sensor update to last known sensor update
self.sen_bar_previousTime = self.sen_bar_time
# Update the linear position (map frame) measurement array
self.est_mapLinPos[2] = self.sen_bar_mapLinPos
# Update IMU sensor array if a sensor update occurs
if self.sen_imu_update == 1:
# Set estimator last known sensor update to last known sensor update
self.sen_imu_previousTime = self.sen_imu_time
# Update the linear acceleration in map and robot frame measurement array
self.est_mapLinAcc = self.sen_imu_mapLinAcc
self.est_rbtLinAcc = self.sen_imu_rbtLinAcc
# Update the angular position in map frame measurement array
self.est_mapAngPos = self.sen_imu_mapAngPos # rotation matrix
self.est_mapEulAngPos = self.sen_imu_mapEulAngPos # euler angle
# Update the angular velocity in map and robot frame measurement array
self.est_mapAngVel = self.sen_imu_mapAngVel
self.est_rbtAngVel = self.sen_imu_rbtAngVel
# Update the acceleration and gyro bias in robot frame measurement array
self.est_rbtAccBias = self.sen_imu_accBias
self.est_rbtGyrBias = self.sen_imu_gyrBias
""" Compute for map frame linear position """
    def MapLinPos(self):
        """Dead-reckon the map-frame x/y position by trapezoidal integration.

        Depending on the est_useDvlLinPos / est_useImuLinPos flags, position is
        integrated from DVL velocity, from IMU acceleration (double integral),
        or from the average of both. Only x/y ([0:2]) are integrated here; z
        comes from the barometer. Assumes TrapIntegrate(t, f, y0, t_prev, f_prev)
        performs one trapezoidal step — TODO confirm its signature.
        """
        # Use only DVL
        if self.est_useDvlLinPos == 1 and self.est_useImuLinPos == 0:
            # Integrate velocity using the trapezoidal method to compute for position
            self.est_mapLinPos[0:2] = TrapIntegrate(self.sen_dvl_time, self.est_mapLinVel[0:2], self.est_x[0:2], self.sen_dvl_previousTime, self.est_mapPrevLinVel[0:2])
        # Use only IMU
        if self.est_useDvlLinPos == 0 and self.est_useImuLinPos == 1:
            # Integrate acceleration using the trapezoidal method to compute for velocity
            self.sen_imu_aprxMapLinVel[0:2] = TrapIntegrate(self.sen_imu_time, self.est_mapLinAcc[0:2], self.est_x[6:8], self.sen_imu_previousTime, self.est_mapPrevLinAcc[0:2])
            self.est_mapLinPos[0:2] = TrapIntegrate(self.sen_imu_time, self.sen_imu_aprxMapLinVel[0:2], self.est_x[0:2], self.sen_imu_previousTime, self.sen_imu_lastMapAprxLinVel[0:2])
        # Use both DVL and IMU
        if self.est_useDvlLinPos == 1 and self.est_useImuLinPos == 1:
            # Integrate velocity using the trapezoidal method to compute for position
            self.sen_dvl_aprxMapLinPos[0:2] = TrapIntegrate(self.sen_dvl_time, self.est_mapLinVel[0:2], self.est_x[0:2], self.sen_dvl_previousTime, self.est_mapPrevLinVel[0:2])
            # Integrate acceleration using the trapezoidal method to compute for velocity then position
            self.sen_imu_aprxMapLinVel[0:2] = TrapIntegrate(self.sen_imu_time, self.est_mapLinAcc[0:2], self.est_x[6:8], self.sen_imu_previousTime, self.est_mapPrevLinAcc[0:2])
            self.sen_imu_aprxMapLinPos[0:2] = TrapIntegrate(self.sen_imu_time, self.sen_imu_aprxMapLinVel[0:2], self.est_x[0:2], self.sen_imu_previousTime, self.sen_imu_lastMapAprxLinVel[0:2])
            # Combine linear position calculated from DVL and IMU
            # simple unweighted average of the two dead-reckoned positions
            self.est_mapLinPos[0:2] = (self.sen_dvl_aprxMapLinPos[0:2] + self.sen_imu_aprxMapLinPos[0:2])/2
        # Update parameters
        # remember current samples as "previous" for the next trapezoid step
        self.est_mapPrevLinVel = self.est_mapLinVel
        self.est_mapPrevLinAcc = self.est_mapLinAcc
        self.sen_imu_lastMapAprxLinVel = self.sen_imu_aprxMapLinVel
""" Prediction step """
    def TimePropagation(self):
        """EKF prediction: propagate the state one est_dt step forward.

        State layout (from the slices used here): [0:3] position, [3:6]
        attitude (euler), [6:9] velocity; est_u[3:6] carries angular rates.
        """
        # Position update
        self.est_x[0:3] = self.est_x[0:3] + self.est_x[6:9]*self.est_dt
        # Rotation update (constant angular velocity)
        self.est_x[3:6] = self.est_x[3:6] + self.est_u[3:6]*self.est_dt
        # Velocity update
        # gravity-compensated map-frame acceleration computed in RbtToMap
        self.est_x[6:9] = self.est_x[6:9] + self.sen_imu_mapLinAccNoGrav*self.est_dt
        # Compute state transition jacobian matrix
        self.ComputeJacobianA()
    def ComputeJacobianA(self):
        """Build est_A, the 15x15 Jacobian of the state-transition function.

        J1-J3 are d(position)/d(roll,pitch,yaw) terms; J4-J6 are
        d(attitude)/d(roll,pitch,yaw) terms from the euler-rate kinematics.
        NOTE(review): mat1's third row duplicates its first row, and several
        bottom rows of mat3/mat5/mat6 are all-zero — verify against the
        analytic derivation; this looks like a possible copy-paste slip.
        """
        # Rotation matrix transform
        Rzyx = Rot(self.est_x[3], self.est_x[4], self.est_x[5])
        # Compute jacobian of state transition matrix
        # shorthand trig terms: r=roll (x[3]), p=pitch (x[4]), y=yaw (x[5])
        cr = cos(self.est_x[3])
        sr = sin(self.est_x[3])
        tp = np.tan(self.est_x[4])
        secp = 1/cos(self.est_x[4])
        cp = cos(self.est_x[4])
        sp = sin(self.est_x[4])
        cy = cos(self.est_x[5])
        sy = sin(self.est_x[5])
        # partial of position propagation w.r.t. roll
        mat1 = np.array([
            [0, cr*sp*cy+sr*sy, -sr*sp*cy+cr*sy],
            [-sp*cy, sr*cp*cy-cr*sy, cr*cp*cy+sr*sy],
            [0, cr*sp*cy+sr*sy, -sr*sp*cy+cr*sy]
            ])
        J1 = matmul(mat1, self.est_x[6:9]) * self.est_dt
        # partial of position propagation w.r.t. pitch
        mat2 = np.array([
            [0, cr*sp*sy-sr*cy, sr*sp*sy-cr*cy],
            [-sp*sy, sr*cp*sy+cr*cy, cr*cp*sy-sr*cy],
            [cp*cy, sr*sp*cy-cr*sy, cr*sp*cy+sr*sy]
            ])
        J2 = matmul(mat2, self.est_x[6:9]) * self.est_dt
        # partial of position propagation w.r.t. yaw
        mat3 = np.array([
            [0, cr*cp, -sr*cp],
            [-cp, -sr*sp, -cr*sp],
            [0, 0, 0]
            ])
        J3 = matmul(mat3, self.est_x[6:9]) * self.est_dt
        # euler-rate kinematics partials: p/q/r unit vectors plus rate terms
        p = np.array([[1], [0], [0]])
        mat4 = np.array([
            [0, cr*tp, -sr*tp],
            [0, sr*(tp**2+1), -cr*cp],
            [0, 0, 0]
            ])
        J4 = p+matmul(mat4, self.est_u[3:6]) * self.est_dt
        q = np.array([[0], [1], [0]])
        mat5 = np.array([
            [0, -sr, cr],
            [0, 0, 0],
            [0, 0, 0]
            ])
        J5 = q+matmul(mat5, self.est_u[3:6]) * self.est_dt
        r = np.array([[0], [0], [1]])
        mat6 = np.array([
            [0, cr*secp, -sr*secp],
            [0, sr*secp*tp, cr*secp*tp],
            [0, 0, 0]
            ])
        J6 = r+matmul(mat6, self.est_u[3:6]) * self.est_dt
        # assemble the full 15x15 Jacobian: rows are position (0-2),
        # attitude (3-5), velocity (6-8), accel bias (9-11), gyro bias (12-14)
        self.est_A = array([
            [1, 0, 0, J1[0], J1[1], J1[2], Rzyx[0,0]*self.est_dt, Rzyx[0,1]*self.est_dt, Rzyx[0,2]*self.est_dt, 0, 0, 0, 0, 0, 0],
            [0, 1, 0, J2[0], J2[1], J2[2], Rzyx[1,0]*self.est_dt, Rzyx[1,1]*self.est_dt, Rzyx[1,2]*self.est_dt, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, J3[0], J3[1], J3[2], Rzyx[2,0]*self.est_dt, Rzyx[2,1]*self.est_dt, Rzyx[2,2]*self.est_dt, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, J4[0], J4[1], J4[2], 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, J5[0], J5[1], J5[2], 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, J6[0], J6[1], J6[2], 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], # dx
            [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], # dy
            [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], # dz
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype = float) # jacobian state matrix
""" Observation measurement array """
def EstObsArray(self):
if self.sen_bar_update == 1:
self.est_H[0,2] = 1
if self.sen_dvl_update == 1:
self.est_H[4,6] = 1
self.est_H[5,7] = 1
self.est_H[6,8] = 1
self.est_H[7,9] = 1
self.est_H[8,10] = 1
self.est_H[9,11] = 1
self.est_H[10,12] = 1
self.est_H[11,13] = 1
self.est_H[12,14] = 1
if self.sen_imu_update == 1:
self.est_H[1,3] = 1
self.est_H[2,4] = 1
self.est_H[3,5] = 1
""" Estimator measurement array """
def EstMeasArray(self):
# Position measurements
self.est_m[0:3] = self.est_mapLinPos # linear position in map frame
# Rotation measurements
self.est_m[3:6] = self.est_mapEulAngPos # angular position in map frame
# Linear velocity measurements
self.est_m[6:9] = self.est_mapLinVel # linear velocity in map frame
# Linear acceleration bias measurements
self.est_m[9:12] = self.est_rbtAccBias # linear accel bias in robot frame
# Gyroscope bias measurements
self.est_m[12:15] = self.est_rbtGyrBias # linear gyro bias in robot frame
""" Run Estimator """
    def RunEst(self):
        """One estimator cycle: gather inputs, then run either the EKF
        prediction (meas_update == 0) or the EKF measurement update.

        NOTE(review): z = est_m[2:15] drops the x/y position entries, so the
        measurement vector is 13-long to match the 13x15 H — confirm intended.
        """
        # Assign inputs
        self.est_u[0:3] = self.sen_imu_mapLinAcc # linear acceleration input
        self.est_u[3:6] = self.sen_imu_mapAngVel # gyroscope input
        self.RbtToMap() # convert sensor measurements from robot frame into map frame
        self.SenMeasArrays() # collect sensor measurements
        self.MapLinPos() # compute map frame linear position
        self.EstObsArray() # update observation matrix
        # Instantiate EKF
        xDim = self.est_dimState # number of states
        x = self.est_x # last estimate from robot frame
        P = self.est_P # last covariance matrix
        Q = self.est_Q # process noise covariance
        R = self.est_R # measurement noise covariance
        H = self.est_H # measurement matrix
        z = self.est_m[2:15] # measurement
        u = self.est_u # control input vector
        A = self.est_A # jacobian state matrix
        state = ExtendedKalmanFilter(xDim, x, P, z, u, A, Q, R, H)
        # Perform time update if condition is 0
        if self.meas_update == 0:
            self.TimePropagation()
            self.est_P = state.predict(P)
        # Perform measurement update if condition is 1
        else:
            self.EstMeasArray() # estimator measurement array
            self.est_x, self.est_L, self.est_P = state.update(x, P, z)
        # Output state, covariance, and control input
        self.OutputEst()
def OutputEst(self):
return self.est_x, self.est_P, self.est_u | [
"awong3@andrew.cmu.edu"
] | awong3@andrew.cmu.edu |
5dde43f8c468ef838be4f628df95f0cfb6d4d045 | 8d52a48c55688393538560a0d57913283228a25e | /files_Othello3/main1.py | d8fa8824aaf7d4ef69d0998f573622b30eaeef94 | [] | no_license | Miondine/Othello | 458d925447e9ee3bd1c18586e82a6fcb25ce94d8 | 068faa8999a63835d0953c5f3f2269a162e39775 | refs/heads/master | 2023-07-12T02:24:45.247246 | 2021-08-22T13:01:23 | 2021-08-22T13:01:23 | 365,510,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | import tournament as tournament
# players_simple = ['GREEDY','DYNAMIC_ROXANNE','ROXANNE','GAMBLER']
# players_minimax = ['ALPHA_BETA','NEGAMAX', 'STATIC_BOARD','DYNAMIC_BOARD','EDAX']
# players_mcts = ['MCTS_MAX_ITER','MCTS_REM_MAX_ITER']'
def main():
    """Run a multi-cycle tournament: a DYNAMIC_BOARD player (depth 5)
    against DYNAMIC_ROXANNE, 100 games per cycle for 10 cycles."""
    games_per_cycle = 100  # number of games per cycle
    cycles = 10
    p1_type = 'DYNAMIC_BOARD'
    p1_name = 'DynamicBoard'
    p1_depth = 5
    opponents = ['DYNAMIC_ROXANNE']
    contest = tournament.Tournament(
        f'Tournament_simple1',
        p1_type,
        p1_name,
        depth_player1=p1_depth,
        opponent_types=opponents,
        num_games=games_per_cycle,
        num_cycles=cycles,
        max_depth=None,
    )
    contest.play()
# Standard script entry point: run the tournament when executed directly.
if __name__ == "__main__":
    main()
"paula.b.hofm@gmail.com"
] | paula.b.hofm@gmail.com |
89f0777ca39336e4662f2fbb1d3ad93e077a83ea | 70712151044f7b4f7c101a409e41e0b634249ec6 | /Anti_Drift_Thing.py | 03bbceecd23143b3005207b8761310b3fc092b7b | [
"MIT"
] | permissive | Atomatrons/CityShaper | 6dfcb61f7c459ef6fd23f41098a56247e9d35691 | b1e6ae01e53becb41a21eb99c1e767e948544085 | refs/heads/master | 2020-07-21T13:17:22.856786 | 2020-02-27T01:30:03 | 2020-02-27T01:30:03 | 206,877,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | #!/usr/bin/env micropython
import Robot
import My_block
def Anti_Drift_Thing():
    """
    Detect gyro drift by comparing two readings a few seconds apart and keep
    waiting until two consecutive readings agree (i.e. the gyro has settled).

    Relies on the project-local Robot module for gyro access and sleeping.
    NOTE(review): when the while loop runs, "Gyro is not drifting" is printed
    (and sleep(1) called) twice — once inside the loop before break and once
    after it; the trailing print also fires when no drift was ever detected.
    """
    print("Checking Gyro...")
    # take two readings 5 seconds apart; a drifting gyro will disagree
    old_angle = Robot.gyro.angle
    Robot.sleep(5)
    new_angle = Robot.gyro.angle
    while old_angle != new_angle:
        # re-sample until two consecutive readings match
        old_angle = Robot.gyro.angle
        Robot.sleep(2)
        new_angle = Robot.gyro.angle
        if old_angle == new_angle:
            print("Gyro is not drifting")
            Robot.sleep(1)
            break
        else:
            print("Gyro is drifting")
            Robot.sleep(3)
    print("Gyro is not drifting")
    Robot.sleep(1)
"superastro33@gmail.com"
] | superastro33@gmail.com |
d51a4ed5ed57520dd94f9fd01474c61d284ae3e4 | 58ca8cb294168ef78db313e326ac97dda63d2ccc | /blog/migrations/0003_auto_20190904_2108.py | 9ac52b0ea83305c5f9e189025459f86ed6a2c788 | [] | no_license | RoLL1k/Simple-Blog | 4d4f8feba1a1e2db25b46649eb804778bb90914c | ac88aec690c4ec79a76308b3711f63a569742ce4 | refs/heads/master | 2022-07-24T01:00:41.403474 | 2020-05-16T20:21:58 | 2020-05-16T20:21:58 | 264,519,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | # Generated by Django 2.2.3 on 2019-09-04 18:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Index Post.title for faster lookups; make Tag.slug unique and optional."""

    dependencies = [
        ('blog', '0002_comment_post'),
    ]

    operations = [
        # Add a database index on the post title.
        migrations.AlterField(
            model_name='post',
            name='title',
            field=models.CharField(db_index=True, max_length=50),
        ),
        # Enforce uniqueness and allow blank values for the tag slug.
        migrations.AlterField(
            model_name='tag',
            name='slug',
            field=models.SlugField(blank=True, unique=True),
        ),
    ]
| [
"django232@gmail.com"
] | django232@gmail.com |
19acfeedde17e2b77e96900b01d98cd205499e10 | 35dbf8489dc1cb63087dd01ba9de57643e9b3aba | /ogb/io/save_dataset.py | c128812f1b2e3be2f34a5ec6971ef796d8831fd0 | [
"MIT"
] | permissive | edwardelson/ogb | 9c6c6fcfeb04ae042919c05b7a060c143c7f3d5c | c783060c5ada3641c0f08527acd1d53626f9f9c9 | refs/heads/master | 2023-06-02T05:12:08.056741 | 2021-06-16T17:55:42 | 2021-06-16T17:55:42 | 356,612,389 | 2 | 1 | MIT | 2021-06-16T17:55:43 | 2021-04-10T14:51:59 | Python | UTF-8 | Python | false | false | 28,886 | py | import torch
import pandas as pd
import os
import os.path as osp
from datetime import date
import shutil
from tqdm import tqdm
import numpy as np
from ogb.io.read_graph_raw import read_binary_graph_raw, read_binary_heterograph_raw
from ogb.utils.torch_util import all_numpy
class DatasetSaver(object):
'''
A class for saving graphs and split in OGB-compatible manner
Create submission_datasetname/ directory, and output the following two files:
- datasetname.zip (OGB-compatible zipped dataset folder)
- meta_dict.pt (torch files storing all the necessary dataset meta-information)
'''
def __init__(self, dataset_name, is_hetero, version, root = 'submission'):
# verify input
if not ('ogbn-' in dataset_name or 'ogbl-' in dataset_name or 'ogbg-' in dataset_name):
raise ValueError('Dataset name must have valid ogb prefix (e.g., ogbn-*).')
if not isinstance(is_hetero, bool):
raise ValueError('is_hetero must be of type bool.')
if not (isinstance(version, int) and version >= 0):
raise ValueError('version must be of type int and non-negative')
self.dataset_name = dataset_name
self.is_hetero = is_hetero
self.version = version
self.root = root
self.dataset_prefix = dataset_name.split('-')[0] # specify the task category
self.dataset_suffix = '_'.join(dataset_name.split('-')[1:])
self.submission_dir = self.root + '_' + self.dataset_prefix + '_' + self.dataset_suffix
self.dataset_dir = osp.join(self.submission_dir, self.dataset_suffix)
self.meta_dict_path = osp.join(self.submission_dir, 'meta_dict.pt')
if self.dataset_prefix == 'ogbg' and self.is_hetero:
raise NotImplementedError('Heterogeneous graph dataset object has not been implemented for graph property prediction yet.')
if osp.exists(self.dataset_dir):
if input(f'Found an existing submission directory at {self.submission_dir}/. \nWill you remove it? (y/N)\n').lower() == 'y':
shutil.rmtree(self.submission_dir)
print('Removed existing submission directory')
else:
print('Process stopped.')
exit(-1)
# make necessary dirs
self.raw_dir = osp.join(self.dataset_dir, 'raw')
os.makedirs(self.raw_dir, exist_ok=True)
os.makedirs(osp.join(self.dataset_dir, 'processed'), exist_ok=True)
# create release note
with open(osp.join(self.dataset_dir, f'RELEASE_v{version}.txt'), 'w') as fw:
fw.write(f'# Release note for {self.dataset_name}\n\n### v{version}: {date.today()}')
# check list
self._save_graph_list_done = False
self._save_split_done = False
self._copy_mapping_dir_done = False
if 'ogbl' == self.dataset_prefix:
self._save_target_labels_done = True # for ogbl, we do not need to give predicted labels
else:
self._save_target_labels_done = False # for ogbn and ogbg, need to give predicted labels
self._save_task_info_done = False
self._get_meta_dict_done = False
self._zip_done = False
def _save_graph_list_hetero(self, graph_list):
dict_keys = graph_list[0].keys()
# check necessary keys
if not 'edge_index_dict' in dict_keys:
raise RuntimeError('edge_index_dict needs to be provided in graph objects')
if not 'num_nodes_dict' in dict_keys:
raise RuntimeError('num_nodes_dict needs to be provided in graph objects')
print(dict_keys)
# Store the following files
# - edge_index_dict.npz (necessary)
# edge_index_dict
# - num_nodes_dict.npz (necessary)
# num_nodes_dict
# - num_edges_dict.npz (necessary)
# num_edges_dict
# - node_**.npz (optional, node_feat_dict is the default node features)
# - edge_**.npz (optional, edge_feat_dict the default edge features)
# extract entity types
ent_type_list = sorted([e for e in graph_list[0]['num_nodes_dict'].keys()])
# saving num_nodes_dict
print('Saving num_nodes_dict')
num_nodes_dict = {}
for ent_type in ent_type_list:
num_nodes_dict[ent_type] = np.array([graph['num_nodes_dict'][ent_type] for graph in graph_list]).astype(np.int64)
np.savez_compressed(osp.join(self.raw_dir, 'num_nodes_dict.npz'), **num_nodes_dict)
print(num_nodes_dict)
# extract triplet types
triplet_type_list = sorted([(h, r, t) for (h, r, t) in graph_list[0]['edge_index_dict'].keys()])
print(triplet_type_list)
# saving edge_index_dict
print('Saving edge_index_dict')
num_edges_dict = {}
edge_index_dict = {}
for triplet in triplet_type_list:
# representing triplet (head, rel, tail) as a single string 'head___rel___tail'
triplet_cat = '___'.join(triplet)
edge_index = np.concatenate([graph['edge_index_dict'][triplet] for graph in graph_list], axis = 1).astype(np.int64)
if edge_index.shape[0] != 2:
raise RuntimeError('edge_index must have shape (2, num_edges)')
num_edges = np.array([graph['edge_index_dict'][triplet].shape[1] for graph in graph_list]).astype(np.int64)
num_edges_dict[triplet_cat] = num_edges
edge_index_dict[triplet_cat] = edge_index
print(edge_index_dict)
print(num_edges_dict)
np.savez_compressed(osp.join(self.raw_dir, 'edge_index_dict.npz'), **edge_index_dict)
np.savez_compressed(osp.join(self.raw_dir, 'num_edges_dict.npz'), **num_edges_dict)
for key in dict_keys:
if key == 'edge_index_dict' or key == 'num_nodes_dict':
continue
if graph_list[0][key] is None:
continue
print(f'Saving {key}')
feat_dict = {}
if 'node_' in key:
# node feature dictionary
for ent_type in graph_list[0][key].keys():
if ent_type not in num_nodes_dict:
raise RuntimeError(f'Encountered unknown entity type called {ent_type}.')
# check num_nodes
for i in range(len(graph_list)):
if len(graph_list[i][key][ent_type]) != num_nodes_dict[ent_type][i]:
raise RuntimeError(f'num_nodes mistmatches with {key}[{ent_type}]')
# make sure saved in np.int64 or np.float32
dtype = np.int64 if 'int' in str(graph_list[0][key][ent_type].dtype) else np.float32
cat_feat = np.concatenate([graph[key][ent_type] for graph in graph_list], axis = 0).astype(dtype)
feat_dict[ent_type] = cat_feat
elif 'edge_' in key:
# edge feature dictionary
for triplet in graph_list[0][key].keys():
# representing triplet (head, rel, tail) as a single string 'head___rel___tail'
triplet_cat = '___'.join(triplet)
if triplet_cat not in num_edges_dict:
raise RuntimeError(f"Encountered unknown triplet type called ({','.join(triplet)}).")
# check num_edges
for i in range(len(graph_list)):
if len(graph_list[i][key][triplet]) != num_edges_dict[triplet_cat][i]:
raise RuntimeError(f"num_edges mismatches with {key}[({','.join(triplet)})]")
# make sure saved in np.int64 or np.float32
dtype = np.int64 if 'int' in str(graph_list[0][key][triplet].dtype) else np.float32
cat_feat = np.concatenate([graph[key][triplet] for graph in graph_list], axis = 0).astype(dtype)
feat_dict[triplet_cat] = cat_feat
else:
raise RuntimeError(f'Keys in graph object should start from either \'node_\' or \'edge_\', but \'{key}\' given.')
np.savez_compressed(osp.join(self.raw_dir, f'{key}.npz'), **feat_dict)
print('Validating...')
# testing
print('Reading saved files')
graph_list_read = read_binary_heterograph_raw(self.raw_dir, False)
print('Checking read graphs and given graphs are the same')
for i in tqdm(range(len(graph_list))):
for key0, value0 in graph_list[i].items():
if value0 is not None:
for key1, value1 in value0.items():
if isinstance(graph_list[i][key0][key1], np.ndarray):
assert(np.allclose(graph_list[i][key0][key1], graph_list_read[i][key0][key1], rtol=1e-04, atol=1e-04, equal_nan=True))
else:
assert(graph_list[i][key0][key1] == graph_list_read[i][key0][key1])
del graph_list_read
def _save_graph_list_homo(self, graph_list):
dict_keys = graph_list[0].keys()
# check necessary keys
if not 'edge_index' in dict_keys:
raise RuntimeError('edge_index needs to be provided in graph objects')
if not 'num_nodes' in dict_keys:
raise RuntimeError('num_nodes needs to be provided in graph objects')
print(dict_keys)
data_dict = {}
# Store the following keys
# - edge_index (necessary)
# - num_nodes_list (necessary)
# - num_edges_list (necessary)
# - node_** (optional, node_feat is the default node features)
# - edge_** (optional, edge_feat is the default edge features)
# saving num_nodes_list
num_nodes_list = np.array([graph['num_nodes'] for graph in graph_list]).astype(np.int64)
data_dict['num_nodes_list'] = num_nodes_list
# saving edge_index and num_edges_list
print('Saving edge_index')
edge_index = np.concatenate([graph['edge_index'] for graph in graph_list], axis = 1).astype(np.int64)
num_edges_list = np.array([graph['edge_index'].shape[1] for graph in graph_list]).astype(np.int64)
if edge_index.shape[0] != 2:
raise RuntimeError('edge_index must have shape (2, num_edges)')
data_dict['edge_index'] = edge_index
data_dict['num_edges_list'] = num_edges_list
for key in dict_keys:
if key == 'edge_index' or key == 'num_nodes':
continue
if graph_list[0][key] is None:
continue
if 'node_' == key[:5]:
# make sure saved in np.int64 or np.float32
dtype = np.int64 if 'int' in str(graph_list[0][key].dtype) else np.float32
# check num_nodes
for i in range(len(graph_list)):
if len(graph_list[i][key]) != num_nodes_list[i]:
raise RuntimeError(f'num_nodes mistmatches with {key}')
cat_feat = np.concatenate([graph[key] for graph in graph_list], axis = 0).astype(dtype)
data_dict[key] = cat_feat
elif 'edge_' == key[:5]:
# make sure saved in np.int64 or np.float32
dtype = np.int64 if 'int' in str(graph_list[0][key].dtype) else np.float32
# check num_edges
for i in range(len(graph_list)):
if len(graph_list[i][key]) != num_edges_list[i]:
raise RuntimeError(f'num_edges mistmatches with {key}')
cat_feat = np.concatenate([graph[key] for graph in graph_list], axis = 0).astype(dtype)
data_dict[key] = cat_feat
else:
raise RuntimeError(f'Keys in graph object should start from either \'node_\' or \'edge_\', but \'{key}\' given.')
print('Saving all the files!')
np.savez_compressed(osp.join(self.raw_dir, 'data.npz'), **data_dict)
print('Validating...')
# testing
print('Reading saved files')
graph_list_read = read_binary_graph_raw(self.raw_dir, False)
print('Checking read graphs and given graphs are the same')
for i in tqdm(range(len(graph_list))):
# assert(graph_list[i].keys() == graph_list_read[i].keys())
for key in graph_list[i].keys():
if graph_list[i][key] is not None:
if isinstance(graph_list[i][key], np.ndarray):
assert(np.allclose(graph_list[i][key], graph_list_read[i][key], rtol=1e-4, atol=1e-4, equal_nan=True))
else:
assert(graph_list[i][key] == graph_list_read[i][key])
del graph_list_read
def save_task_info(self, task_type, eval_metric, num_classes = None):
'''
task_type (str): For ogbg and ogbn, either classification or regression.
eval_metric (str): the metric
if task_type is 'classification', num_classes must be given.
'''
if self.dataset_prefix == 'ogbn' or self.dataset_prefix == 'ogbg':
if not ('classification' in task_type or 'regression' in task_type):
raise ValueError(f'task type must contain eighther classification or regression, but {task_type} given')
self.task_type = task_type
print(self.task_type)
print(num_classes)
if 'classification' in self.task_type:
if not (isinstance(num_classes, int) and num_classes > 1):
raise ValueError(f'num_classes must be an integer larger than 1, {num_classes} given.')
self.num_classes = num_classes
else:
self.num_classes = -1 # in the case of regression, just set to -1
self.eval_metric = eval_metric
self._save_task_info_done = True
def save_target_labels(self, target_labels):
'''
target_label (numpy.narray): storing target labels. Shape must be (num_data, num_tasks)
'''
if self.dataset_prefix == 'ogbl':
raise RuntimeError('ogbl link prediction dataset does not need to call save_target_labels')
if not self._save_graph_list_done:
raise RuntimeError('save_graph_list must be done beforehand.')
if self.is_hetero:
if not (isinstance(target_labels, dict) and len(target_labels) == 1):
raise ValueError(f'target label must be of dictionary type with single key')
key = list(target_labels.keys())[0]
if key not in self.num_data:
raise ValueError(f'Unknown entity type called {key}.')
if len(target_labels[key]) != self.num_data[key]:
raise RuntimeError(f'The length of target_labels ({len(target_labels[key])}) must be the same as the number of data points ({self.num_data[key]}).')
if self.dataset_prefix == 'ogbg':
raise NotImplementedError('hetero graph for graph-level prediction has not been implemented yet.')
elif self.dataset_prefix == 'ogbn':
np.savez_compressed(osp.join(self.raw_dir, 'node-label.npz'), **target_labels)
self.num_tasks = target_labels[key].shape[1]
else:
# check type and shape
if not isinstance(target_labels, np.ndarray):
raise ValueError(f'target label must be of type np.ndarray')
if len(target_labels) != self.num_data:
raise RuntimeError(f'The length of target_labels ({len(target_labels)}) must be the same as the number of data points ({self.num_data}).')
if self.dataset_prefix == 'ogbg':
np.savez_compressed(osp.join(self.raw_dir, 'graph-label.npz'), graph_label = target_labels)
elif self.dataset_prefix == 'ogbn':
np.savez_compressed(osp.join(self.raw_dir, 'node-label.npz'), node_label = target_labels)
self.num_tasks = target_labels.shape[1]
self._save_target_labels_done = True
def save_graph_list(self, graph_list):
if not all_numpy(graph_list):
raise RuntimeError('graph_list must only contain list/dict of numpy arrays, int, or float')
if self.dataset_prefix == 'ogbn' or self.dataset_prefix == 'ogbl':
if len(graph_list) > 1:
raise RuntimeError('Multiple graphs not supported for node/link property prediction.')
if self.is_hetero:
self._save_graph_list_hetero(graph_list)
self.has_node_attr = ('node_feat_dict' in graph_list[0]) and (graph_list[0]['node_feat_dict'] is not None)
self.has_edge_attr = ('edge_feat_dict' in graph_list[0]) and (graph_list[0]['edge_feat_dict'] is not None)
else:
self._save_graph_list_homo(graph_list)
self.has_node_attr = ('node_feat' in graph_list[0]) and (graph_list[0]['node_feat'] is not None)
self.has_edge_attr = ('edge_feat' in graph_list[0]) and (graph_list[0]['edge_feat'] is not None)
# later used for checking the shape of target_label
if self.dataset_prefix == 'ogbg':
self.num_data = len(graph_list) # number of graphs
elif self.dataset_prefix == 'ogbn':
if self.is_hetero:
self.num_data = graph_list[0]['num_nodes_dict'] # number of nodes
else:
self.num_data = graph_list[0]['num_nodes'] # number of nodes
else:
self.num_data = None
self._save_graph_list_done = True
def save_split(self, split_dict, split_name):
'''
Save dataset split
split_dict: must contain three keys: 'train', 'valid', 'test', where the values are the split indices stored in numpy.
split_name (str): the name of the split
'''
self.split_dir = osp.join(self.dataset_dir, 'split', split_name)
os.makedirs(self.split_dir, exist_ok=True)
# verify input
if not 'train' in split_dict:
raise ValueError('\'train\' needs to be given in save_split')
if not 'valid' in split_dict:
raise ValueError('\'valid\' needs to be given in save_split')
if not 'test' in split_dict:
raise ValueError('\'test\' needs to be given in save_split')
if not all_numpy(split_dict):
raise RuntimeError('split_dict must only contain list/dict of numpy arrays, int, or float')
## directly save split_dict
## compatible with ogb>=v1.2.3
torch.save(split_dict, osp.join(self.split_dir, 'split_dict.pt'))
self.split_name = split_name
self._save_split_done = True
def copy_mapping_dir(self, mapping_dir):
target_mapping_dir = osp.join(self.dataset_dir, 'mapping')
os.makedirs(target_mapping_dir, exist_ok=True)
file_list = [f for f in os.listdir(mapping_dir) if osp.isfile(osp.join(mapping_dir, f))]
if 'README.md' not in file_list:
raise RuntimeError(f'README.md must be included in mapping_dir {mapping_dir}')
# copy all the files in the mapping_dir to
for f in file_list:
shutil.copyfile(osp.join(mapping_dir, f), osp.join(target_mapping_dir, f))
self._copy_mapping_dir_done = True
def get_meta_dict(self):
'''
output:
meta_dict: a dictionary that stores meta-information about data, which can be directly passed to OGB dataset object.
Useful for debugging.
'''
# check everything is done before getting meta_dict
if not self._save_graph_list_done:
raise RuntimeError('save_graph_list not completed.')
if not self._save_split_done:
raise RuntimeError('save_split not completed.')
if not self._copy_mapping_dir_done:
raise RuntimeError('copy_mapping_dir not completed.')
if not self._save_target_labels_done:
raise RuntimeError('save_target_labels not completed.')
if not self._save_task_info_done:
raise RuntimeError('save_task_info not completed.')
meta_dict = {'version': self.version, 'dir_path': self.dataset_dir, 'binary': 'True'}
if not self.dataset_prefix == 'ogbl':
meta_dict['num tasks'] = self.num_tasks
meta_dict['num classes'] = self.num_classes
meta_dict['task type'] = self.task_type
meta_dict['eval metric'] = self.eval_metric
meta_dict['add_inverse_edge'] = 'False'
meta_dict['split'] = self.split_name
meta_dict['download_name'] = self.dataset_suffix
map_dict = {'ogbg': 'graphproppred', 'ogbn': 'nodeproppred', 'ogbl': 'linkproppred'}
meta_dict['url'] = f'https://snap.stanford.edu/ogb/data/{map_dict[self.dataset_prefix]}/' + meta_dict['download_name'] + '.zip'
meta_dict['add_inverse_edge'] = 'False'
meta_dict['has_node_attr'] = str(self.has_node_attr)
meta_dict['has_edge_attr'] = str(self.has_node_attr)
meta_dict['additional node files'] = 'None'
meta_dict['additional edge files'] = 'None'
meta_dict['is hetero'] = str(self.is_hetero)
# save meta-dict for submission
torch.save(meta_dict, self.meta_dict_path)
self._get_meta_dict_done = 'True'
return meta_dict
def zip(self):
# check everything is done before zipping
if not self._save_graph_list_done:
raise RuntimeError('save_graph_list not completed.')
if not self._save_split_done:
raise RuntimeError('save_split not completed.')
if not self._copy_mapping_dir_done:
raise RuntimeError('copy_mapping_dir not completed.')
if not self._save_target_labels_done:
raise RuntimeError('save_target_labels not completed.')
if not self._save_task_info_done:
raise RuntimeError('save_task_info not completed.')
if not self._get_meta_dict_done:
raise RuntimeError('get_meta_dict not completed.')
shutil.make_archive(self.dataset_dir, 'zip', self.dataset_dir)
self._zip_done = True
    def cleanup(self):
        """Delete the working dataset directory; only valid after zip().

        Raises RuntimeError if zip() has not been called, so the unarchived
        data cannot be destroyed by accident.
        """
        if self._zip_done:
            try:
                shutil.rmtree(self.dataset_dir)
            except FileNotFoundError:
                # Directory may already have been removed by an earlier call.
                print('Files already deleted.')
        else:
            raise RuntimeError('Clean up after calling zip()')
def test_datasetsaver():
    """End-to-end smoke test of DatasetSaver.

    Loads a reference OGB dataset for the category selected by ``test_task``
    ('graph', 'node', 'link', 'heteronode' or 'heterolink'), re-saves it with
    DatasetSaver, reloads the saved copy through every backend (agnostic,
    PyTorch Geometric, DGL), then zips and cleans up.  Requires the ``ogb``
    package and network access to download the reference datasets.
    """
    # test on graph classification
    # ogbg-molhiv
    test_task = 'link'
    # testing all the dataset objects are working.
    if test_task == 'graph':
        from ogb.graphproppred import PygGraphPropPredDataset, DglGraphPropPredDataset,GraphPropPredDataset
        dataset_name = 'ogbg-molhiv'
        dataset = PygGraphPropPredDataset(dataset_name)
        dataset.get_idx_split()
        dataset = DglGraphPropPredDataset(dataset_name)
        dataset.get_idx_split()
        dataset = GraphPropPredDataset(dataset_name)
        dataset.get_idx_split()
    elif test_task == 'node':
        from ogb.nodeproppred import NodePropPredDataset, PygNodePropPredDataset, DglNodePropPredDataset
        dataset_name = 'ogbn-arxiv' # test ogbn-proteins
        dataset = PygNodePropPredDataset(dataset_name)
        dataset.get_idx_split()
        dataset = DglNodePropPredDataset(dataset_name)
        dataset.get_idx_split()
        dataset = NodePropPredDataset(dataset_name)
        dataset.get_idx_split()
    elif test_task == 'link':
        from ogb.linkproppred import LinkPropPredDataset, PygLinkPropPredDataset, DglLinkPropPredDataset
        dataset_name = 'ogbl-collab'
        dataset = PygLinkPropPredDataset(dataset_name)
        dataset.get_edge_split()
        dataset = DglLinkPropPredDataset(dataset_name)
        dataset.get_edge_split()
        dataset = LinkPropPredDataset(dataset_name)
        dataset.get_edge_split()
    elif test_task == 'heteronode':
        from ogb.nodeproppred import NodePropPredDataset, PygNodePropPredDataset, DglNodePropPredDataset
        dataset_name = 'ogbn-mag'
        dataset = PygNodePropPredDataset(dataset_name)
        dataset.get_idx_split()
        dataset = DglNodePropPredDataset(dataset_name)
        dataset.get_idx_split()
        dataset = NodePropPredDataset(dataset_name)
        dataset.get_idx_split()
    elif test_task == 'heterolink':
        from ogb.linkproppred import LinkPropPredDataset, PygLinkPropPredDataset, DglLinkPropPredDataset
        dataset_name = 'ogbl-biokg'
        dataset = PygLinkPropPredDataset(dataset_name)
        dataset.get_edge_split()
        dataset = DglLinkPropPredDataset(dataset_name)
        dataset.get_edge_split()
        dataset = LinkPropPredDataset(dataset_name)
        dataset.get_edge_split()
    else:
        raise ValueError('Invalid task category')
    print(dataset[0])
    # Link-prediction datasets expose edge splits; the others use index splits.
    if 'link' in test_task:
        print(dataset.get_edge_split())
    else:
        print(dataset.get_idx_split())
    # Graph-level datasets hold a list of graphs; node/link datasets hold one.
    if 'graph' in test_task:
        graph_list = dataset.graphs
    else:
        graph_list = [dataset.graph]
    if 'link' not in test_task:
        labels = dataset.labels
    is_hetero = 'hetero' in test_task
    # ogbn-mag is the only dataset here published at version 2.
    version = 2 if dataset_name == 'ogbn-mag' else 1
    saver = DatasetSaver(dataset_name, is_hetero, version=version)
    # saving graph objects
    saver.save_graph_list(graph_list)
    # saving target labels (link-prediction datasets have no labels)
    if 'link' not in test_task:
        saver.save_target_labels(labels)
    # saving split
    if 'link' in test_task:
        split_idx = dataset.get_edge_split()
    else:
        split_idx = dataset.get_idx_split()
    # second argument must be the name of the split
    saver.save_split(split_idx, dataset.meta_info['split'])
    # copying mapping dir
    saver.copy_mapping_dir(f"dataset/{'_'.join(dataset_name.split('-'))}/mapping/")
    saver.save_task_info(dataset.task_type, dataset.eval_metric, dataset.num_classes if hasattr(dataset, 'num_classes') else None)
    meta_dict = saver.get_meta_dict()
    print(meta_dict)
    print('Now testing.')
    # Reload the freshly-saved dataset via every backend; each constructor is
    # called twice to exercise both the fresh-load and cached-load code paths.
    if 'graph' in test_task:
        print('library agnostic')
        dataset = GraphPropPredDataset(dataset_name, meta_dict = meta_dict)
        dataset = GraphPropPredDataset(dataset_name, meta_dict = meta_dict)
        print(dataset[0])
        print(dataset.get_idx_split())
        print('Pytorch Geometric')
        dataset = PygGraphPropPredDataset(dataset_name, meta_dict = meta_dict)
        dataset = PygGraphPropPredDataset(dataset_name, meta_dict = meta_dict)
        print(dataset[0])
        print(dataset.get_idx_split())
        print('DGL')
        dataset = DglGraphPropPredDataset(dataset_name, meta_dict = meta_dict)
        dataset = DglGraphPropPredDataset(dataset_name, meta_dict = meta_dict)
        print(dataset[0])
        print(dataset.get_idx_split())
    elif 'node' in test_task:
        print('library agnostic')
        dataset = NodePropPredDataset(dataset_name, meta_dict = meta_dict)
        dataset = NodePropPredDataset(dataset_name, meta_dict = meta_dict)
        print(dataset[0])
        print(dataset.get_idx_split())
        print('Pytorch Geometric')
        dataset = PygNodePropPredDataset(dataset_name, meta_dict = meta_dict)
        dataset = PygNodePropPredDataset(dataset_name, meta_dict = meta_dict)
        print(dataset[0])
        print(dataset.get_idx_split())
        print('DGL')
        dataset = DglNodePropPredDataset(dataset_name, meta_dict = meta_dict)
        dataset = DglNodePropPredDataset(dataset_name, meta_dict = meta_dict)
        print(dataset[0])
        print(dataset.get_idx_split())
    elif 'link' in test_task:
        print('library agnostic')
        dataset = LinkPropPredDataset(dataset_name, meta_dict = meta_dict)
        dataset = LinkPropPredDataset(dataset_name, meta_dict = meta_dict)
        print(dataset[0])
        # print(dataset.get_edge_split())
        print('Pytorch Geometric')
        dataset = PygLinkPropPredDataset(dataset_name, meta_dict = meta_dict)
        dataset = PygLinkPropPredDataset(dataset_name, meta_dict = meta_dict)
        print(dataset[0])
        # print(dataset.get_edge_split())
        print('DGL')
        dataset = DglLinkPropPredDataset(dataset_name, meta_dict = meta_dict)
        dataset = DglLinkPropPredDataset(dataset_name, meta_dict = meta_dict)
        print(dataset[0])
        # print(dataset.get_edge_split())
    else:
        raise ValueError('Invalid task category')
    # zip
    saver.zip()
    print('Finished zipping!')
    saver.cleanup()
if __name__ == '__main__':
test_datasetsaver()
| [
"weihua916@gmail.com"
] | weihua916@gmail.com |
3805c56766ebbc3631018518ef9d0618cd7fe7fd | 85a9ce9f74fe0c10fe5d90f5bb77036dbbee7d9d | /timpani/wsgi.py | b988c921a480cc839af7aa65636fe594328f973e | [
"MIT"
] | permissive | ollien/timpani | e648e7ca52d234f4d316613a4c7ac750962a11e9 | 0d1aac467e0bcbe2d1dadb4e6c025315d6be45cb | refs/heads/master | 2020-04-11T16:07:55.817571 | 2016-07-21T02:45:13 | 2016-07-21T02:45:13 | 40,745,863 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | from . import timpani
from . import database
import atexit
def shutdown():
    """atexit hook: close the application's main database connection on exit."""
    print("[Timpani] Closing database connection")
    database.ConnectionManager.closeConnection("main")
atexit.register(shutdown)
application = timpani.run(startServer=False)
| [
"njk828@gmail.com"
] | njk828@gmail.com |
0c6ba0d6d6ed40f71e1bcdbb1b5aeea04b710628 | e67ed585cf7212546e2314182cee34dd3fbda62f | /lesson2/task5/fizz-buzz.py | 9a3f7fd28e17c7f502d279d1198dc656ff4bd829 | [] | no_license | jsateja/nauka | b04228962cae037c0577625e3092a1d6181c8d7e | 8f2a4197b7d1572c44a1f6a2d996cda2ecbe65af | refs/heads/master | 2021-09-20T23:01:16.537839 | 2018-08-16T11:33:44 | 2018-08-16T11:33:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | # Your optional code here
# You can import some modules or create additional functions
def checkio(number: int) -> str:
    """Classic Fizz Buzz.

    Returns "Fizz Buzz" for multiples of both 3 and 5, "Fizz" for multiples
    of 3, "Buzz" for multiples of 5, and the number itself as a string
    otherwise.
    """
    by_three = number % 3 == 0
    by_five = number % 5 == 0
    if by_three and by_five:
        return "Fizz Buzz"
    if by_three:
        return "Fizz"
    if by_five:
        return "Buzz"
    return str(number)
# Some hints:
# Convert a number into a string with str(n)
# These asserts are used only for self-checking and are not necessary for auto-testing
if __name__ == '__main__':
    assert checkio(15) == "Fizz Buzz", "15 is divisible by 3 and 5"
    assert checkio(6) == "Fizz", "6 is divisible by 3"
    assert checkio(5) == "Buzz", "5 is divisible by 5"
    assert checkio(7) == "7", "7 is not divisible by 3 or 5"
    print("Coding complete? Click 'Check' to review your tests and earn cool rewards!")
| [
"jagoda@localhost.localdomain"
] | jagoda@localhost.localdomain |
9b5ed02ef8f9a97423d06e0b4104626a9ee863b9 | 7f87ff253e30b269abd618f9e1180b922674b7d5 | /app/models.py | 5c02f74a8c2185f3d415097b401c2ae48da4032c | [] | no_license | ksmsbou/Eventos | 0db1b159e93843e0723997103541674940481819 | b7b5b22e135509ce0d361d18524d9751dccb4626 | refs/heads/master | 2021-03-12T20:27:43.093108 | 2015-04-28T00:20:37 | 2015-04-28T00:20:37 | 34,625,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | from app import db
class User(db.Model):
    """SQLAlchemy model for a login account (nickname + password)."""
    id = db.Column(db.Integer, primary_key=True)
    nickname = db.Column(db.String(64), index=True, unique=True)
    # NOTE(review): unique=True on a password column means no two users may
    # share a password, and it is stored in plain text — confirm intent.
    contrasena = db.Column(db.String(16), index=True, unique=True)
    def __repr__(self):
        return '<User %r>' % (self.nickname)
class Event(db.Model):
    """SQLAlchemy model for an event (Spanish column names).

    descripccion=description, ubicacion=location, fecha=date,
    capacidad=capacity, disponibilidad=remaining seats, afiche=poster path.
    """
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128))
    descripccion = db.Column(db.String(256))
    ubicacion = db.Column(db.String(256))
    fecha = db.Column(db.DateTime)
    capacidad = db.Column(db.Integer)
    disponibilidad = db.Column(db.Integer)
    afiche = db.Column(db.String(256))
    def __repr__(self):
        return '<Event %r>' % (self.name)
class Person(db.Model):
    """SQLAlchemy model for a registered person (Spanish column names).

    nombres=first names, apellidos=surnames, contrasena=password, edad=age,
    telefono=phone, correo=e-mail; ``admin`` flags administrator accounts.
    """
    id = db.Column(db.Integer, primary_key=True)
    nombres = db.Column(db.String(128))
    apellidos = db.Column(db.String(128))
    nickname = db.Column(db.String(64), index=True, unique=True)
    contrasena = db.Column(db.String(16))
    edad = db.Column(db.Integer)
    telefono = db.Column(db.String(11))
    correo = db.Column(db.String(120), index=True, unique=True)
    admin = db.Column(db.Boolean)
    def __repr__(self):
        # Bug fix: the original chained two '%' applications
        # ('<Person %r>' % a % b), which raises TypeError at runtime because
        # the first substitution leaves no conversion specifier for the second.
        return '<Person %r %r>' % (self.nombres, self.apellidos)
| [
"alvaro92092@gmail.com"
] | alvaro92092@gmail.com |
6e69fdd4cf0482ff3e97388259b0d5080bf5145d | a6c8f35aa3920a0756834ebcc9acd778d5029b41 | /hangman/hangman.py | 82119f37f9325c31a961705b0526de091a2f51b5 | [] | no_license | roman4ello/100days_python_code | b9206598b578f066b7067d6a48734342abdd7967 | 891d2880e08d86b395ce36fbfc3d4f7a540d070b | refs/heads/main | 2023-04-14T07:38:04.864436 | 2021-04-29T16:51:44 | 2021-04-29T16:51:44 | 357,113,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py | import os
import random
from hangman_art import logo, stages
from hangman_words import word_list
def clear():
    """Clear the terminal screen, choosing the right shell command per OS."""
    # Windows reports os.name == 'nt'; mac/linux report 'posix'.
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
# Pick the secret word and show one '_' placeholder per letter.
chosen_word = random.choice(word_list)
word_length = len(chosen_word)
lives = 6
display = []
for i in range(word_length):
    display += "_"
endOfTheGame = False
print(logo)
# Main game loop: one guess per iteration until the word is solved or lives run out.
while not endOfTheGame:
    guess = input("Guess a letter: ").lower()
    # os.system('clear')
    clear()
    if guess in display:
        print(f'You already chose: {guess}. ')
    # Reveal every position where the guess matches the secret word.
    for position in range(word_length):
        letter = chosen_word[position]
        if letter == guess:
            display[position] = letter
    print(f"{' '.join(display)}")
    if "_" not in display:
        endOfTheGame = True
        print("You win.")
    # NOTE(review): repeating the same wrong letter costs a life each time —
    # confirm whether that is intended.
    if guess not in chosen_word:
        print(f'Letter >> {guess} << not in the word. ')
        lives -= 1
        if lives == 0:
            print("You lose.")
            endOfTheGame = True
    # Draw the gallows stage that matches the remaining lives.
    print(stages[lives])
| [
"rboyar@slice.com"
] | rboyar@slice.com |
f7faeb8a5859104c664cc4c0977e9a4ce88a14ee | 995fdc60bc7d7b5f41c5943450142ad7c9c477ac | /day16/part1/run.py | 97b527fcc5c88c78164a75b4415dfd07ef291913 | [] | no_license | stevecolby/advent-of-code-2020 | 61d4c0cfe275064c979e56eaea2d271c984d0178 | a0516f2ac31d78f2e6a0e083ee9d77e9bf65e5a4 | refs/heads/main | 2023-02-08T00:03:54.818392 | 2020-12-19T01:46:53 | 2020-12-19T01:46:53 | 318,047,498 | 0 | 0 | null | 2020-12-19T01:46:54 | 2020-12-03T02:09:13 | Python | UTF-8 | Python | false | false | 1,476 | py | def Run(lines):
section_count = 0
rules = []
nearby_tickets = []
for line in lines:
if line == '':
section_count += 1
else:
if section_count == 0:
#rules
index = line.index(':')
words = list(line[index:].split())
rule_name = line[0:index].replace(':', '')
range_1_min = int(words[1].split('-')[0])
range_1_max = int(words[1].split('-')[1])
range_2_min = int(words[3].split('-')[0])
range_2_max = int(words[3].split('-')[1])
rules.append([rule_name, range_1_min, range_1_max, range_2_min, range_2_max])
#elif section_count == 1:
#your ticket
elif section_count == 2:
#nearby tickets
if ':' not in line:
nearby_tickets.append(list(line.split(',')))
scanning_error_rate = 0
for ticket in nearby_tickets:
#check each number for validity
for ticket_number in ticket:
if not has_any_valid_match(int(ticket_number), rules):
scanning_error_rate += int(ticket_number)
print(scanning_error_rate)
def has_any_valid_match(ticket_number, rules):
for rule in rules:
if (rule[1] <= ticket_number <= rule[2]
or rule[3] <= ticket_number <= rule[4]):
return True
return False | [
"stevecolby@gmail.com"
] | stevecolby@gmail.com |
3a6fd257beb2a77595e3f8ef356410543cd8b839 | 983a0bbb32e486bf92df7dc0cc95892bdba3937d | /aws_glacier_manager/manager.py | c4f0d5a13375c03e0b2edcc788a02cb890f31f3e | [
"Apache-2.0"
] | permissive | matthiashuschle/aws_tools | 0b0068b8108900e997d9e8b1349657ac8d147f65 | c9f1fb5cc48cc531c71ba5bc912e262cb1606e53 | refs/heads/master | 2021-11-15T18:16:55.653934 | 2018-10-13T08:50:30 | 2018-10-13T08:50:30 | 133,557,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | from . import glacier_io, database, encryption
class VaultManager:
    """Coordinates one AWS Glacier vault with its local inventory database."""
    def __init__(self, vault_name):
        # Storage backend and inventory log are both keyed by the vault name.
        self.vault_name = vault_name
        self.storage = glacier_io.VaultStorage(vault_name)
        self.db = database.InventoryLog(vault_name)
    def get_inventory(self):
        """Return the latest stored inventory response from the database."""
        # get inventory from database
        inv = self.db.get_latest_response()
        # ToDo: extract inventory from job result
        return inv
    def request_inventory(self):
        """Start a Glacier inventory-retrieval job and record the request."""
        # ToDo: check whether there are open requests
        request = self.storage.request_inventory()
        self.db.store_request(request)
    def store_inventory_responses(self):
        """Fetch and persist results for every open inventory request."""
        for request in self.db.get_open_requests():
            response = self.storage.retrieve_inventory(request.job_id)
            self.db.store_response(request.request_id, response)
| [
"matthiashuschle@gmail.com"
] | matthiashuschle@gmail.com |
05be3d589bb0eef2a4cd064c43dcf7e93a68c7a2 | e8b38b8dfa348ff006eb197a7906ca8e491a23dc | /tests/conftest.py | 79144b69789b7a96597410997a9f0eea0252414d | [
"MIT"
] | permissive | pyccel/pyccel | d79a81dbdff1172839a6a1227abfcc1f97e6c97b | 1896b761ba662c90b14c195bbb6eb5cddc57cbfc | refs/heads/devel | 2023-08-30T12:15:25.244401 | 2023-08-28T09:31:32 | 2023-08-28T09:31:32 | 100,463,736 | 307 | 39 | MIT | 2023-09-14T19:29:26 | 2017-08-16T07:59:14 | Python | UTF-8 | Python | false | false | 2,423 | py | # pylint: disable=missing-function-docstring, missing-module-docstring
import logging
import os
import shutil
import pytest
from mpi4py import MPI
from pyccel.commands.pyccel_clean import pyccel_clean
github_debugging = 'DEBUG' in os.environ
if github_debugging:
import sys
sys.stdout = sys.stderr
@pytest.fixture( params=[
        pytest.param("fortran", marks = pytest.mark.fortran),
        pytest.param("c", marks = pytest.mark.c),
        pytest.param("python", marks = pytest.mark.python)
    ],
    scope = "session"
)
def language(request):
    """Session-scoped fixture: runs each test once per target language."""
    return request.param
def move_coverage(path_dir):
    """Copy every '.coverage*' data file found under *path_dir* into the
    current working directory, so coverage results are collected in one place."""
    destination = os.getcwd()
    for dirpath, _subdirs, filenames in os.walk(path_dir):
        for fname in filenames:
            if fname.startswith(".coverage"):
                shutil.copyfile(os.path.join(dirpath, fname),
                                os.path.join(destination, fname))
def pytest_runtest_teardown(item, nextitem):
    """pytest hook: after each test, collect coverage files and clean up
    generated build artifacts next to the test file.

    Serial runs clean immediately (skipping tests marked 'parallel');
    MPI-parallel runs synchronize so only rank 0 deletes files.
    """
    path_dir = os.path.dirname(os.path.realpath(item.fspath))
    move_coverage(path_dir)
    config = item.config
    xdist_plugin = config.pluginmanager.getplugin("xdist")
    # Bug fix: os.getenv returns a string (or None), so the original
    # comparison `== 1` was always False; compare against '1' instead.
    if xdist_plugin is None or "PYTEST_XDIST_WORKER_COUNT" not in os.environ \
            or os.getenv('PYTEST_XDIST_WORKER_COUNT') == '1':
        print("Tearing down!")
        marks = [m.name for m in item.own_markers ]
        if 'parallel' not in marks:
            pyccel_clean(path_dir, remove_shared_libs = True)
    else:
        comm = MPI.COMM_WORLD
        comm.Barrier()
        if comm.rank == 0:
            pyccel_clean(path_dir, remove_shared_libs = True)
        comm.Barrier()
def pytest_addoption(parser):
    """pytest hook: register the --developer-mode command-line flag."""
    parser.addoption("--developer-mode", action="store_true", default=github_debugging, help="Show tracebacks when pyccel errors are raised")
def pytest_sessionstart(session):
    """pytest hook: configure error/debug modes and clean stale artifacts.

    Cleaning only happens for non-xdist, non-parallel sessions so that
    concurrent workers never delete each other's files.
    """
    # setup_stuff
    if session.config.option.developer_mode:
        from pyccel.errors.errors import ErrorsMode
        ErrorsMode().set_mode('developer')
    if github_debugging:
        logging.basicConfig()
        logging.getLogger("filelock").setLevel(logging.DEBUG)
    # Clean path before beginning but never delete anything in parallel mode
    path_dir = os.path.dirname(os.path.realpath(__file__))
    config = session.config
    xdist_plugin = config.pluginmanager.getplugin("xdist")
    if xdist_plugin is None:
        marks = [m.name for m in session.own_markers ]
        if 'parallel' not in marks:
            pyccel_clean(path_dir)
| [
"noreply@github.com"
] | pyccel.noreply@github.com |
41a3917f248cec7eca19c81329335ccd0bd32c96 | 696799b824503429a3ac65ebdc28890bfbcaebe0 | /plugins/com.astra.ses.spell.gui.cots_4.0.2.201806070922/win32/spell/spell/lib/adapter/value.py | 4d84f6d6e65da20cf8515b2f88d5d692597a2fe7 | [] | no_license | CalypsoCubesat/SPELL_GUI_4.0.2_win32_x86 | a176886b48873b090ab270c189113a8b2c261a06 | 9275ecfff2195ca4d4c297f894d80c1bcfa609e3 | refs/heads/master | 2021-08-03T08:04:25.821703 | 2019-10-28T04:53:50 | 2019-10-28T04:53:50 | 217,968,357 | 0 | 0 | null | 2021-08-02T17:03:44 | 2019-10-28T04:50:59 | Python | UTF-8 | Python | false | false | 3,989 | py | ###############################################################################
"""
(c) SES-ASTRA 2008
PACKAGE
spell.lib.adapter.value
FILE
user.py
DESCRIPTION
Variant value helper class
COPYRIGHT
This software is the copyrighted work of SES ASTRA S.A.
All rights reserved.
PROJECT
UGCS/USL
AUTHOR
Rafael Chinchilla Camara (GMV)
DATE
02/10/2007
REVISION HISTORY
02/10/2007 10:30 Creation
"""
###############################################################################
from spell.lang.constants import *
from spell.lang.modifiers import *
###############################################################################
class ValueClass:
    """
    This class implements a variant value with the following characteristics:
        - value
        - vtype (long, double...)
        - radix (hex, dec, oct..)
        - format (eng, raw)
        - units (whatsoever)

    NOTE: this is legacy Python 2 code — evaluate() references the Py2
    ``long`` builtin and uses a bare ``except``.
    """

    #==========================================================================
    def __init__(self, value, format = ENG, radix = DEC, vtype = LONG, units = '', defCal = True):
        self._value = value
        self._vtype = vtype
        # Native numeric/string values override the vtype argument.
        if type(value)==int:
            self._vtype = LONG
        elif type(value)==float:
            self._vtype = FLOAT
        elif type(value)==str:
            self._vtype = STRING
        self._format = format
        self._radix = radix
        self._units = units
        self._defCal = defCal

    #==========================================================================
    def set(self, value):
        # Setter for the raw value.
        self._value = value

    #==========================================================================
    def get(self):
        # Getter for the raw value.
        return self._value

    #==========================================================================
    def format(self, fmt = None):
        # Combined getter/setter: no argument reads, an argument writes.
        if fmt is None:
            return self._format
        else:
            self._format = fmt

    #==========================================================================
    def vtype(self, vt = None):
        # Combined getter/setter for the value type.
        if vt is None:
            return self._vtype
        else:
            self._vtype = vt

    #==========================================================================
    def radix(self, rd = None):
        # Combined getter/setter for the radix.
        if rd is None:
            return self._radix
        else:
            self._radix = rd

    #==========================================================================
    def units(self, u = None):
        # Combined getter/setter for the units string.
        if u is None:
            return self._units
        else:
            self._units = u

    #==========================================================================
    def __repr__(self):
        return "[" + repr(self._value) + ",VType: " + self._vtype + ",Format: " +\
               self._format + ", Radix: " + self._radix + ", Units: " + self._units + "]"

    #==========================================================================
    def evaluate(self, radix = DEC):
        """Interpret the stored value in its own radix and render it in *radix*.

        Returns None if the value cannot be parsed.
        NOTE(review): string values are parsed with eval() — unsafe on
        untrusted input; consider int(value, base) instead.
        """
        # Prefixes that make eval() parse the string in the stored radix.
        cnv = { DEC: '', HEX: '0x', OCT: '0' }
        trns = { HEX: hex, OCT: oct }
        res = None
        try:
            if isinstance(self._value, str):
                if self._radix == BIN:
                    # Manual binary parse, most-significant bit first.
                    res = 0
                    for c in self._value:
                        res = res * 2 + eval(c)
                elif self._radix in cnv:
                    res = eval(cnv[self._radix] + self._value)
            elif isinstance(self._value, long) or isinstance(self._value, int) or isinstance(self._value, float):
                res = self._value
        except:
            # Any parse failure yields None below.
            res = None
        if res is None:
            return None
        # Render the numeric result in the requested output radix.
        if radix in trns:
            res = trns[radix](res)
        elif radix == BIN:
            v = ''
            while res > 0:
                if res % 2 == 1: v = '1' + v
                if res % 2 == 0: v = '0' + v
                res >>= 1
            res = '0b' + v
        return res
| [
"matthew.travis@aresinstitute.org"
] | matthew.travis@aresinstitute.org |
5fb66d3a6c647d4331c8be412f0738a3bc5ef776 | 70a5e9bd07fef97c50f3ba6863229c2f5777b2cc | /start/handle_cookie.py | 8c2cee0d041a3700da858ef9393f1670e917fec7 | [] | no_license | Anna-Hanabc/Interface | 56640caba136edc525f8c48b2f21a891c6feb4f6 | bf9209f42989f54c11eac09dc90dde957ecabca3 | refs/heads/master | 2023-02-02T01:15:53.507427 | 2020-12-09T15:01:30 | 2020-12-09T15:01:30 | 316,952,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | #coding=utf-8
from selenium import webdriver
import time
# Demo: log in to order.imooc.com by injecting a saved session cookie.
driver = webdriver.Chrome()
driver.get('https://order.imooc.com/myorder')
time.sleep(2)
# Drop any cookies set by the anonymous visit before injecting our own.
driver.delete_all_cookies()
time.sleep(2)
cookie_list = {
    'domain': '.imooc.com',
    # 'expiry': '1605051385' — if 'expiry' is present in the cookie it must be
    # removed, otherwise add_cookie raises an error.
    'httpOnly': False,
    'name': 'apsid',
    'path': '/',
    'secure': False,
    'value': 'MxOTk4ODQxZWEyZmQ2ODVmOGNkODA1OTM1OGE5ZmYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANDAyODAyNQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxODMwMjE2NjAzN0AxNzMuY29tAAAAAAAAAAAAAAAAADYwN2YxZWM1ODlmNjFmNmExNGU0NDEzYzQ1OGRmYzNkouihX6LooV8%3DNW'
}
driver.add_cookie(cookie_list)
time.sleep(2)
# Reload the page: with the session cookie in place it should show the account.
driver.get('https://order.imooc.com/myorder')
time.sleep(2)
driver.close()
| [
"18302166037@163.com"
] | 18302166037@163.com |
f43166a28921026d3d7239db0cfac8af4e594da9 | fc608e1463a2f9de36cea8cb31b519220a5a99fb | /Compiler/SymbolTable.py | c37727bd7529c39b5655b9c4a2f42005987e35e9 | [] | no_license | FelippeTeracini/Reverse-Language | 9b560299678e7aa01233e8f46da9d3f4faa2f823 | 122d4f840624c971db6d2bafbd2b4a674fb6992f | refs/heads/master | 2023-01-24T09:21:05.136502 | 2020-12-06T20:32:17 | 2020-12-06T20:32:17 | 297,170,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | class SymbolTable:
def __init__(self):
self.symbol_table = {'return': [None, None]}
self.func_table = {}
def set_symbol(self, symbol, symbol_value):
if symbol in self.symbol_table.keys():
self.symbol_table[symbol][0] = symbol_value
else:
raise ValueError(f'Symbol {symbol} not defined')
def get_symbol(self, symbol):
if symbol in self.symbol_table.keys():
if(self.symbol_table[symbol][0] is not None):
return self.symbol_table[symbol]
else:
raise ValueError(f'Symbol {symbol} has no value')
else:
raise ValueError(f'Symbol {symbol} not in symbol table')
def set_type(self, symbol, symbol_type):
if(symbol not in self.func_table.keys() and symbol not in self.symbol_table.keys()):
self.symbol_table[symbol] = [None, symbol_type]
else:
raise ValueError(f'Symbol {symbol} already in symbol/func table')
def get_type(self, symbol):
if symbol in self.symbol_table.keys():
return self.symbol_table[symbol][1]
else:
raise ValueError(f'Symbol {symbol} not in symbol table')
def set_func(self, func_symbol, func, _type):
if(func_symbol not in self.func_table.keys() and func_symbol not in self.symbol_table.keys()):
self.func_table[func_symbol] = [func, _type]
else:
raise ValueError(
f'Func Symbol {func_symbol} already in symbol/func table')
def get_func(self, func_symbol):
if(func_symbol in self.func_table.keys()):
return self.func_table[func_symbol]
else:
raise ValueError(f'Func Symbol {func_symbol} not in func table')
def set_return(self, value, _type):
self.symbol_table['return'] = [value, _type]
def get_return(self):
return self.symbol_table['return']
| [
"fe.n.teracini@hotmail.com"
] | fe.n.teracini@hotmail.com |
41db732b6e877d5e530ada682c9329dc7203ae06 | 53c4e96612642ffca74707cc68693bf62b113da7 | /env/Lib/site-packages/html5lib/filters/sanitizer.py | c9188eec407fcf10938cbfdf073188ce70bfb1c1 | [] | no_license | kevinitsDevaluado/PdfModelProject | aa5bb0358781fcbe315a4d1cb0f4446841750c73 | b6ab683f2bfc77f2fa77a3a37f820ff05f0955fc | refs/heads/master | 2023-05-04T02:58:29.374073 | 2021-05-19T23:59:24 | 2021-05-19T23:59:24 | 369,030,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,886 | py | """Deprecated from html5lib 1.1.
See `here <https://github.com/html5lib/html5lib-python/issues/443>`_ for
information about its deprecation; `Bleach <https://github.com/mozilla/bleach>`_
is recommended as a replacement. Please let us know in the aforementioned issue
if Bleach is unsuitable for your needs.
"""
from __future__ import absolute_import, division, unicode_literals
import re
import warnings
from xml.sax.saxutils import escape, unescape
from six.moves import urllib_parse as urlparse
from . import base
from ..constants import namespaces, prefixes
__all__ = ["Filter"]
_deprecation_msg = (
"html5lib's sanitizer is deprecated; see " +
"https://github.com/html5lib/html5lib-python/issues/443 and please let " +
"us know if Bleach is unsuitable for your needs"
)
warnings.warn(_deprecation_msg, DeprecationWarning)
allowed_elements = frozenset((
(namespaces['html'], 'a'),
(namespaces['html'], 'abbr'),
(namespaces['html'], 'acronym'),
(namespaces['html'], 'address'),
(namespaces['html'], 'area'),
(namespaces['html'], 'article'),
(namespaces['html'], 'aside'),
(namespaces['html'], 'audio'),
(namespaces['html'], 'b'),
(namespaces['html'], 'big'),
(namespaces['html'], 'blockquote'),
(namespaces['html'], 'br'),
(namespaces['html'], 'button'),
(namespaces['html'], 'canvas'),
(namespaces['html'], 'caption'),
(namespaces['html'], 'center'),
(namespaces['html'], 'cite'),
(namespaces['html'], 'code'),
(namespaces['html'], 'col'),
(namespaces['html'], 'colgroup'),
(namespaces['html'], 'command'),
(namespaces['html'], 'datagrid'),
(namespaces['html'], 'datalist'),
(namespaces['html'], 'dd'),
(namespaces['html'], 'del'),
(namespaces['html'], 'details'),
(namespaces['html'], 'dfn'),
(namespaces['html'], 'dialog'),
(namespaces['html'], 'dir'),
(namespaces['html'], 'div'),
(namespaces['html'], 'dl'),
(namespaces['html'], 'dt'),
(namespaces['html'], 'em'),
(namespaces['html'], 'event-source'),
(namespaces['html'], 'fieldset'),
(namespaces['html'], 'figcaption'),
(namespaces['html'], 'figure'),
(namespaces['html'], 'footer'),
(namespaces['html'], 'font'),
(namespaces['html'], 'form'),
(namespaces['html'], 'header'),
(namespaces['html'], 'h1'),
(namespaces['html'], 'h2'),
(namespaces['html'], 'h3'),
(namespaces['html'], 'h4'),
(namespaces['html'], 'h5'),
(namespaces['html'], 'h6'),
(namespaces['html'], 'hr'),
(namespaces['html'], 'i'),
(namespaces['html'], 'img'),
(namespaces['html'], 'input'),
(namespaces['html'], 'ins'),
(namespaces['html'], 'keygen'),
(namespaces['html'], 'kbd'),
(namespaces['html'], 'label'),
(namespaces['html'], 'legend'),
(namespaces['html'], 'li'),
(namespaces['html'], 'm'),
(namespaces['html'], 'map'),
(namespaces['html'], 'menu'),
(namespaces['html'], 'meter'),
(namespaces['html'], 'multicol'),
(namespaces['html'], 'nav'),
(namespaces['html'], 'nextid'),
(namespaces['html'], 'ol'),
(namespaces['html'], 'output'),
(namespaces['html'], 'optgroup'),
(namespaces['html'], 'option'),
(namespaces['html'], 'p'),
(namespaces['html'], 'pre'),
(namespaces['html'], 'progress'),
(namespaces['html'], 'q'),
(namespaces['html'], 's'),
(namespaces['html'], 'samp'),
(namespaces['html'], 'section'),
(namespaces['html'], 'select'),
(namespaces['html'], 'small'),
(namespaces['html'], 'sound'),
(namespaces['html'], 'source'),
(namespaces['html'], 'spacer'),
(namespaces['html'], 'span'),
(namespaces['html'], 'strike'),
(namespaces['html'], 'strong'),
(namespaces['html'], 'sub'),
(namespaces['html'], 'sup'),
(namespaces['html'], 'table'),
(namespaces['html'], 'tbody'),
(namespaces['html'], 'td'),
(namespaces['html'], 'textarea'),
(namespaces['html'], 'time'),
(namespaces['html'], 'tfoot'),
(namespaces['html'], 'th'),
(namespaces['html'], 'thead'),
(namespaces['html'], 'tr'),
(namespaces['html'], 'tt'),
(namespaces['html'], 'u'),
(namespaces['html'], 'ul'),
(namespaces['html'], 'var'),
(namespaces['html'], 'video'),
(namespaces['mathml'], 'maction'),
(namespaces['mathml'], 'math'),
(namespaces['mathml'], 'merror'),
(namespaces['mathml'], 'mfrac'),
(namespaces['mathml'], 'mi'),
(namespaces['mathml'], 'mmultiscripts'),
(namespaces['mathml'], 'mn'),
(namespaces['mathml'], 'mo'),
(namespaces['mathml'], 'mover'),
(namespaces['mathml'], 'mpadded'),
(namespaces['mathml'], 'mphantom'),
(namespaces['mathml'], 'mprescripts'),
(namespaces['mathml'], 'mroot'),
(namespaces['mathml'], 'mrow'),
(namespaces['mathml'], 'mspace'),
(namespaces['mathml'], 'msqrt'),
(namespaces['mathml'], 'mstyle'),
(namespaces['mathml'], 'msub'),
(namespaces['mathml'], 'msubsup'),
(namespaces['mathml'], 'msup'),
(namespaces['mathml'], 'mtable'),
(namespaces['mathml'], 'mtd'),
(namespaces['mathml'], 'mtext'),
(namespaces['mathml'], 'mtr'),
(namespaces['mathml'], 'munder'),
(namespaces['mathml'], 'munderover'),
(namespaces['mathml'], 'none'),
(namespaces['svg'], 'a'),
(namespaces['svg'], 'animate'),
(namespaces['svg'], 'animateColor'),
(namespaces['svg'], 'animateMotion'),
(namespaces['svg'], 'animateTransform'),
(namespaces['svg'], 'clipPath'),
(namespaces['svg'], 'circle'),
(namespaces['svg'], 'defs'),
(namespaces['svg'], 'desc'),
(namespaces['svg'], 'ellipse'),
(namespaces['svg'], 'font-face'),
(namespaces['svg'], 'font-face-name'),
(namespaces['svg'], 'font-face-src'),
(namespaces['svg'], 'g'),
(namespaces['svg'], 'glyph'),
(namespaces['svg'], 'hkern'),
(namespaces['svg'], 'linearGradient'),
(namespaces['svg'], 'line'),
(namespaces['svg'], 'marker'),
(namespaces['svg'], 'metadata'),
(namespaces['svg'], 'missing-glyph'),
(namespaces['svg'], 'mpath'),
(namespaces['svg'], 'path'),
(namespaces['svg'], 'polygon'),
(namespaces['svg'], 'polyline'),
(namespaces['svg'], 'radialGradient'),
(namespaces['svg'], 'rect'),
(namespaces['svg'], 'set'),
(namespaces['svg'], 'stop'),
(namespaces['svg'], 'svg'),
(namespaces['svg'], 'switch'),
(namespaces['svg'], 'text'),
(namespaces['svg'], 'title'),
(namespaces['svg'], 'tspan'),
(namespaces['svg'], 'use'),
))
allowed_attributes = frozenset((
# HTML attributes
(None, 'abbr'),
(None, 'accept'),
(None, 'accept-charset'),
(None, 'accesskey'),
(None, 'action'),
(None, 'align'),
(None, 'alt'),
(None, 'autocomplete'),
(None, 'autofocus'),
(None, 'axis'),
(None, 'background'),
(None, 'balance'),
(None, 'bgcolor'),
(None, 'bgproperties'),
(None, 'border'),
(None, 'bordercolor'),
(None, 'bordercolordark'),
(None, 'bordercolorlight'),
(None, 'bottompadding'),
(None, 'cellpadding'),
(None, 'cellspacing'),
(None, 'ch'),
(None, 'challenge'),
(None, 'char'),
(None, 'charoff'),
(None, 'choff'),
(None, 'charset'),
(None, 'checked'),
(None, 'cite'),
(None, 'class'),
(None, 'clear'),
(None, 'color'),
(None, 'cols'),
(None, 'colspan'),
(None, 'compact'),
(None, 'contenteditable'),
(None, 'controls'),
(None, 'coords'),
(None, 'data'),
(None, 'datafld'),
(None, 'datapagesize'),
(None, 'datasrc'),
(None, 'datetime'),
(None, 'default'),
(None, 'delay'),
(None, 'dir'),
(None, 'disabled'),
(None, 'draggable'),
(None, 'dynsrc'),
(None, 'enctype'),
(None, 'end'),
(None, 'face'),
(None, 'for'),
(None, 'form'),
(None, 'frame'),
(None, 'galleryimg'),
(None, 'gutter'),
(None, 'headers'),
(None, 'height'),
(None, 'hidefocus'),
(None, 'hidden'),
(None, 'high'),
(None, 'href'),
(None, 'hreflang'),
(None, 'hspace'),
(None, 'icon'),
(None, 'id'),
(None, 'inputmode'),
(None, 'ismap'),
(None, 'keytype'),
(None, 'label'),
(None, 'leftspacing'),
(None, 'lang'),
(None, 'list'),
(None, 'longdesc'),
(None, 'loop'),
(None, 'loopcount'),
(None, 'loopend'),
(None, 'loopstart'),
(None, 'low'),
(None, 'lowsrc'),
(None, 'max'),
(None, 'maxlength'),
(None, 'media'),
(None, 'method'),
(None, 'min'),
(None, 'multiple'),
(None, 'name'),
(None, 'nohref'),
(None, 'noshade'),
(None, 'nowrap'),
(None, 'open'),
(None, 'optimum'),
(None, 'pattern'),
(None, 'ping'),
(None, 'point-size'),
(None, 'poster'),
(None, 'pqg'),
(None, 'preload'),
(None, 'prompt'),
(None, 'radiogroup'),
(None, 'readonly'),
(None, 'rel'),
(None, 'repeat-max'),
(None, 'repeat-min'),
(None, 'replace'),
(None, 'required'),
(None, 'rev'),
(None, 'rightspacing'),
(None, 'rows'),
(None, 'rowspan'),
(None, 'rules'),
(None, 'scope'),
(None, 'selected'),
(None, 'shape'),
(None, 'size'),
(None, 'span'),
(None, 'src'),
(None, 'start'),
(None, 'step'),
(None, 'style'),
(None, 'summary'),
(None, 'suppress'),
(None, 'tabindex'),
(None, 'target'),
(None, 'templates'),
(None, 'title'),
(None, 'toppadding'),
(None, 'type'),
(None, 'unselectable'),
(None, 'usemap'),
(None, 'urn'),
(None, 'valign'),
(None, 'value'),
(None, 'variable'),
(None, 'volume'),
(None, 'vspace'),
(None, 'vrml'),
(None, 'width'),
(None, 'wrap'),
(namespaces['xml'], 'lang'),
# MathML attributes
(None, 'actiontype'),
(None, 'align'),
(None, 'columnalign'),
(None, 'columnalign'),
(None, 'columnalign'),
(None, 'columnlines'),
(None, 'columnspacing'),
(None, 'columnspan'),
(None, 'depth'),
(None, 'display'),
(None, 'displaystyle'),
(None, 'equalcolumns'),
(None, 'equalrows'),
(None, 'fence'),
(None, 'fontstyle'),
(None, 'fontweight'),
(None, 'frame'),
(None, 'height'),
(None, 'linethickness'),
(None, 'lspace'),
(None, 'mathbackground'),
(None, 'mathcolor'),
(None, 'mathvariant'),
(None, 'mathvariant'),
(None, 'maxsize'),
(None, 'minsize'),
(None, 'other'),
(None, 'rowalign'),
(None, 'rowalign'),
(None, 'rowalign'),
(None, 'rowlines'),
(None, 'rowspacing'),
(None, 'rowspan'),
(None, 'rspace'),
(None, 'scriptlevel'),
(None, 'selection'),
(None, 'separator'),
(None, 'stretchy'),
(None, 'width'),
(None, 'width'),
(namespaces['xlink'], 'href'),
(namespaces['xlink'], 'show'),
(namespaces['xlink'], 'type'),
# SVG attributes
(None, 'accent-height'),
(None, 'accumulate'),
(None, 'additive'),
(None, 'alphabetic'),
(None, 'arabic-form'),
(None, 'ascent'),
(None, 'attributeName'),
(None, 'attributeType'),
(None, 'baseProfile'),
(None, 'bbox'),
(None, 'begin'),
(None, 'by'),
(None, 'calcMode'),
(None, 'cap-height'),
(None, 'class'),
(None, 'clip-path'),
(None, 'color'),
(None, 'color-rendering'),
(None, 'content'),
(None, 'cx'),
(None, 'cy'),
(None, 'd'),
(None, 'dx'),
(None, 'dy'),
(None, 'descent'),
(None, 'display'),
(None, 'dur'),
(None, 'end'),
(None, 'fill'),
(None, 'fill-opacity'),
(None, 'fill-rule'),
(None, 'font-family'),
(None, 'font-size'),
(None, 'font-stretch'),
(None, 'font-style'),
(None, 'font-variant'),
(None, 'font-weight'),
(None, 'from'),
(None, 'fx'),
(None, 'fy'),
(None, 'g1'),
(None, 'g2'),
(None, 'glyph-name'),
(None, 'gradientUnits'),
(None, 'hanging'),
(None, 'height'),
(None, 'horiz-adv-x'),
(None, 'horiz-origin-x'),
(None, 'id'),
(None, 'ideographic'),
(None, 'k'),
(None, 'keyPoints'),
(None, 'keySplines'),
(None, 'keyTimes'),
(None, 'lang'),
(None, 'marker-end'),
(None, 'marker-mid'),
(None, 'marker-start'),
(None, 'markerHeight'),
(None, 'markerUnits'),
(None, 'markerWidth'),
(None, 'mathematical'),
(None, 'max'),
(None, 'min'),
(None, 'name'),
(None, 'offset'),
(None, 'opacity'),
(None, 'orient'),
(None, 'origin'),
(None, 'overline-position'),
(None, 'overline-thickness'),
(None, 'panose-1'),
(None, 'path'),
(None, 'pathLength'),
(None, 'points'),
(None, 'preserveAspectRatio'),
(None, 'r'),
(None, 'refX'),
(None, 'refY'),
(None, 'repeatCount'),
(None, 'repeatDur'),
(None, 'requiredExtensions'),
(None, 'requiredFeatures'),
(None, 'restart'),
(None, 'rotate'),
(None, 'rx'),
(None, 'ry'),
(None, 'slope'),
(None, 'stemh'),
(None, 'stemv'),
(None, 'stop-color'),
(None, 'stop-opacity'),
(None, 'strikethrough-position'),
(None, 'strikethrough-thickness'),
(None, 'stroke'),
(None, 'stroke-dasharray'),
(None, 'stroke-dashoffset'),
(None, 'stroke-linecap'),
(None, 'stroke-linejoin'),
(None, 'stroke-miterlimit'),
(None, 'stroke-opacity'),
(None, 'stroke-width'),
(None, 'systemLanguage'),
(None, 'target'),
(None, 'text-anchor'),
(None, 'to'),
(None, 'transform'),
(None, 'type'),
(None, 'u1'),
(None, 'u2'),
(None, 'underline-position'),
(None, 'underline-thickness'),
(None, 'unicode'),
(None, 'unicode-range'),
(None, 'units-per-em'),
(None, 'values'),
(None, 'version'),
(None, 'viewBox'),
(None, 'visibility'),
(None, 'width'),
(None, 'widths'),
(None, 'x'),
(None, 'x-height'),
(None, 'x1'),
(None, 'x2'),
(namespaces['xlink'], 'actuate'),
(namespaces['xlink'], 'arcrole'),
(namespaces['xlink'], 'href'),
(namespaces['xlink'], 'role'),
(namespaces['xlink'], 'show'),
(namespaces['xlink'], 'title'),
(namespaces['xlink'], 'type'),
(namespaces['xml'], 'base'),
(namespaces['xml'], 'lang'),
(namespaces['xml'], 'space'),
(None, 'y'),
(None, 'y1'),
(None, 'y2'),
(None, 'zoomAndPan'),
))
attr_val_is_uri = frozenset((
(None, 'href'),
(None, 'src'),
(None, 'cite'),
(None, 'action'),
(None, 'longdesc'),
(None, 'poster'),
(None, 'background'),
(None, 'datasrc'),
(None, 'dynsrc'),
(None, 'lowsrc'),
(None, 'ping'),
(namespaces['xlink'], 'href'),
(namespaces['xml'], 'base'),
))
svg_attr_val_allows_ref = frozenset((
(None, 'clip-path'),
(None, 'color-profile'),
(None, 'cursor'),
(None, 'fill'),
(None, 'filter'),
(None, 'marker'),
(None, 'marker-start'),
(None, 'marker-mid'),
(None, 'marker-end'),
(None, 'mask'),
(None, 'stroke'),
))
svg_allow_local_href = frozenset((
(None, 'altGlyph'),
(None, 'animate'),
(None, 'animateColor'),
(None, 'animateMotion'),
(None, 'animateTransform'),
(None, 'cursor'),
(None, 'feImage'),
(None, 'filter'),
(None, 'linearGradient'),
(None, 'pattern'),
(None, 'radialGradient'),
(None, 'textpath'),
(None, 'tref'),
(None, 'set'),
(None, 'use')
))
allowed_css_properties = frozenset((
'azimuth',
'background-color',
'border-bottom-color',
'border-collapse',
'border-color',
'border-left-color',
'border-right-color',
'border-top-color',
'clear',
'color',
'cursor',
'direction',
'display',
'elevation',
'float',
'font',
'font-family',
'font-size',
'font-style',
'font-variant',
'font-weight',
'height',
'letter-spacing',
'line-height',
'overflow',
'pause',
'pause-after',
'pause-before',
'pitch',
'pitch-range',
'richness',
'speak',
'speak-header',
'speak-numeral',
'speak-punctuation',
'speech-rate',
'stress',
'text-align',
'text-decoration',
'text-indent',
'unicode-bidi',
'vertical-align',
'voice-family',
'volume',
'white-space',
'width',
))
allowed_css_keywords = frozenset((
'auto',
'aqua',
'black',
'block',
'blue',
'bold',
'both',
'bottom',
'brown',
'center',
'collapse',
'dashed',
'dotted',
'fuchsia',
'gray',
'green',
'!important',
'italic',
'left',
'lime',
'maroon',
'medium',
'none',
'navy',
'normal',
'nowrap',
'olive',
'pointer',
'purple',
'red',
'right',
'solid',
'silver',
'teal',
'top',
'transparent',
'underline',
'white',
'yellow',
))
allowed_svg_properties = frozenset((
'fill',
'fill-opacity',
'fill-rule',
'stroke',
'stroke-width',
'stroke-linecap',
'stroke-linejoin',
'stroke-opacity',
))
allowed_protocols = frozenset((
'ed2k',
'ftp',
'http',
'https',
'irc',
'mailto',
'news',
'gopher',
'nntp',
'telnet',
'webcal',
'xmpp',
'callto',
'feed',
'urn',
'aim',
'rsync',
'tag',
'ssh',
'sftp',
'rtsp',
'afs',
'data',
))
allowed_content_types = frozenset((
'image/png',
'image/jpeg',
'image/gif',
'image/webp',
'image/bmp',
'text/plain',
))
data_content_type = re.compile(r'''
^
# Match a content type <application>/<type>
(?P<content_type>[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+)
# Match any character set and encoding
(?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?)
|(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?)
# Assume the rest is data
,.*
$
''',
re.VERBOSE)
class Filter(base.Filter):
    """Sanitizes token stream of XHTML+MathML+SVG and of inline style attributes"""
    def __init__(self,
                 source,
                 allowed_elements=allowed_elements,
                 allowed_attributes=allowed_attributes,
                 allowed_css_properties=allowed_css_properties,
                 allowed_css_keywords=allowed_css_keywords,
                 allowed_svg_properties=allowed_svg_properties,
                 allowed_protocols=allowed_protocols,
                 allowed_content_types=allowed_content_types,
                 attr_val_is_uri=attr_val_is_uri,
                 svg_attr_val_allows_ref=svg_attr_val_allows_ref,
                 svg_allow_local_href=svg_allow_local_href):
        """Creates a Filter

        :arg allowed_elements: set of elements to allow--everything else will
            be escaped

        :arg allowed_attributes: set of attributes to allow in
            elements--everything else will be stripped

        :arg allowed_css_properties: set of CSS properties to allow--everything
            else will be stripped

        :arg allowed_css_keywords: set of CSS keywords to allow--everything
            else will be stripped

        :arg allowed_svg_properties: set of SVG properties to allow--everything
            else will be removed

        :arg allowed_protocols: set of allowed protocols for URIs

        :arg allowed_content_types: set of allowed content types for ``data`` URIs.

        :arg attr_val_is_uri: set of attributes that have URI values--values
            that have a scheme not listed in ``allowed_protocols`` are removed

        :arg svg_attr_val_allows_ref: set of SVG attributes that can have
            references

        :arg svg_allow_local_href: set of SVG elements that can have local
            hrefs--these are removed

        """
        super(Filter, self).__init__(source)
        # Emits the module's deprecation warning on construction.
        warnings.warn(_deprecation_msg, DeprecationWarning)
        self.allowed_elements = allowed_elements
        self.allowed_attributes = allowed_attributes
        self.allowed_css_properties = allowed_css_properties
        self.allowed_css_keywords = allowed_css_keywords
        self.allowed_svg_properties = allowed_svg_properties
        self.allowed_protocols = allowed_protocols
        self.allowed_content_types = allowed_content_types
        self.attr_val_is_uri = attr_val_is_uri
        self.svg_attr_val_allows_ref = svg_attr_val_allows_ref
        self.svg_allow_local_href = svg_allow_local_href

    def __iter__(self):
        # Yield only tokens that survive sanitization (Comment tokens are
        # dropped because sanitize_token returns None for them).
        for token in base.Filter.__iter__(self):
            token = self.sanitize_token(token)
            if token:
                yield token

    # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
    # stripping out all attributes not in ALLOWED_ATTRIBUTES. Style attributes
    # are parsed, and a restricted set, specified by ALLOWED_CSS_PROPERTIES and
    # ALLOWED_CSS_KEYWORDS, are allowed through. attributes in ATTR_VAL_IS_URI
    # are scanned, and only URI schemes specified in ALLOWED_PROTOCOLS are
    # allowed.
    #
    #   sanitize_html('<script> do_nasty_stuff() </script>')
    #    => &lt;script> do_nasty_stuff() &lt;/script>
    #   sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
    #    => <a>Click here for $100</a>
    def sanitize_token(self, token):
        """Dispatch one token: clean allowed tags, escape disallowed ones."""
        # accommodate filters which use token_type differently
        token_type = token["type"]
        if token_type in ("StartTag", "EndTag", "EmptyTag"):
            name = token["name"]
            namespace = token["namespace"]
            # A namespace-less tag is also accepted if the HTML-namespaced
            # variant of the same name is allowed.
            if ((namespace, name) in self.allowed_elements or
                    (namespace is None and
                     (namespaces["html"], name) in self.allowed_elements)):
                return self.allowed_token(token)
            else:
                return self.disallowed_token(token)
        elif token_type == "Comment":
            # Comments are dropped entirely.
            pass
        else:
            return token

    def allowed_token(self, token):
        """Strip forbidden/unsafe attributes from an allowed tag token."""
        if "data" in token:
            attrs = token["data"]
            attr_names = set(attrs.keys())

            # Remove forbidden attributes
            for to_remove in (attr_names - self.allowed_attributes):
                del token["data"][to_remove]
                attr_names.remove(to_remove)

            # Remove attributes with disallowed URL values
            for attr in (attr_names & self.attr_val_is_uri):
                assert attr in attrs
                # I don't have a clue where this regexp comes from or why it matches those
                # characters, nor why we call unescape. I just know it's always been here.
                # Should you be worried by this comment in a sanitizer? Yes. On the other hand, all
                # this will do is remove *more* than it otherwise would.
                val_unescaped = re.sub("[`\x00-\x20\x7f-\xa0\\s]+", '',
                                       unescape(attrs[attr])).lower()
                # remove replacement characters from unescaped characters
                val_unescaped = val_unescaped.replace("\ufffd", "")
                try:
                    uri = urlparse.urlparse(val_unescaped)
                except ValueError:
                    uri = None
                    del attrs[attr]
                if uri and uri.scheme:
                    if uri.scheme not in self.allowed_protocols:
                        del attrs[attr]
                    if uri.scheme == 'data':
                        # data: URIs must declare one of the whitelisted
                        # content types.
                        m = data_content_type.match(uri.path)
                        if not m:
                            del attrs[attr]
                        elif m.group('content_type') not in self.allowed_content_types:
                            del attrs[attr]

            # Blank out non-fragment url(...) references in SVG attributes.
            for attr in self.svg_attr_val_allows_ref:
                if attr in attrs:
                    attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
                                         ' ',
                                         unescape(attrs[attr]))
            # Drop non-local xlink:href on SVG elements restricted to
            # fragment references.
            if (token["name"] in self.svg_allow_local_href and
                    (namespaces['xlink'], 'href') in attrs and re.search(r'^\s*[^#\s].*',
                                                                         attrs[(namespaces['xlink'], 'href')])):
                del attrs[(namespaces['xlink'], 'href')]
            if (None, 'style') in attrs:
                attrs[(None, 'style')] = self.sanitize_css(attrs[(None, 'style')])
            token["data"] = attrs
        return token

    def disallowed_token(self, token):
        """Turn a disallowed tag into plain escaped Characters text."""
        token_type = token["type"]
        if token_type == "EndTag":
            token["data"] = "</%s>" % token["name"]
        elif token["data"]:
            assert token_type in ("StartTag", "EmptyTag")
            attrs = []
            for (ns, name), v in token["data"].items():
                attrs.append(' %s="%s"' % (name if ns is None else "%s:%s" % (prefixes[ns], name), escape(v)))
            token["data"] = "<%s%s>" % (token["name"], ''.join(attrs))
        else:
            token["data"] = "<%s>" % token["name"]
        if token.get("selfClosing"):
            token["data"] = token["data"][:-1] + "/>"

        token["type"] = "Characters"

        del token["name"]
        return token

    def sanitize_css(self, style):
        """Return *style* with only whitelisted CSS declarations kept."""
        # disallow urls
        style = re.compile(r'url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)

        # gauntlet
        if not re.match(r"""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
            return ''
        if not re.match(r"^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
            return ''

        clean = []
        for prop, value in re.findall(r"([-\w]+)\s*:\s*([^:;]*)", style):
            if not value:
                continue
            if prop.lower() in self.allowed_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
                                               'padding']:
                # Shorthand properties: every keyword must be whitelisted or
                # look like a plain color/length value.
                for keyword in value.split():
                    if keyword not in self.allowed_css_keywords and \
                            not re.match(r"^(#[0-9a-fA-F]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):  # noqa
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif prop.lower() in self.allowed_svg_properties:
                clean.append(prop + ': ' + value + ';')

        return ' '.join(clean)
| [
"kevin.alvarado8502@utc.edu.ec"
] | kevin.alvarado8502@utc.edu.ec |
def get_ip(req):
    """
    Best-effort client IP lookup for a request.

    Prefers the first entry of the ``X-Forwarded-For`` header (the
    originating client when the request passed through proxies); otherwise
    falls back to ``REMOTE_ADDR``. Returns an empty string when neither
    can be read. Never raises.
    """
    try:
        forward = req.META.get('HTTP_X_FORWARDED_FOR')
        if forward:
            # The header may carry a comma-separated proxy chain; the first
            # entry is the original client.
            return forward.split(',')[0].strip()
        else:
            # Default to '' (not None) so the documented contract holds.
            return req.META.get('REMOTE_ADDR', '')
    except Exception:
        # Narrowed from a bare 'except:'; still never raises to callers.
        return ''
| [
"diafox2015@gmail.com"
] | diafox2015@gmail.com |
9ebb7bec3073e890ab5caf91b3114fbc458f6f26 | d7ed8bfe3c9b2e9a8c6336f0320146ecf7550613 | /CarsDetectionInAVideo/car_DetectionInAVideo.py | 31f7265ff585443e3b09924814b9f8c867d97f91 | [] | no_license | edohgoka/Computer_Vision | fedb658954b3ccd7191226b09c44ec4c5eaf2137 | 2999101c76098b7c7a9b4035701ecc71dc7aaca2 | refs/heads/main | 2023-07-15T02:06:35.180127 | 2021-08-23T18:10:36 | 2021-08-23T18:10:36 | 399,196,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 21 12:40:25 2020
@author: goka
"""
from __future__ import unicode_literals
# import youtube_dl
import os
# """
# This code is to detect a car in a video
# """
### Now, it's time to write the code to detect the cars in the video frames
# Import librairies of python OpenCV
import cv2
# Haar-cascade car detection: scan every frame of the highway clip and draw
# a red box around each detected car until Esc is pressed.
video = cv2.VideoCapture("relaxing-highway-traffic.mp4")

# Pre-trained cascade describing car features; cars.xml must sit in the
# same folder as this script.
detector = cv2.CascadeClassifier("cars.xml")

while True:
    # Grab the next frame (the success flag is intentionally ignored).
    _, frame = video.read()

    # The cascade operates on a single-channel image.
    grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect car candidates at multiple scales.
    found = detector.detectMultiScale(grayscale, 1.1, 1)

    # Outline every detection in red.
    for (x, y, w, h) in found:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

    cv2.imshow('video2', frame)

    # Esc (keycode 27) quits the loop.
    if cv2.waitKey(33) == 27:
        break

# Tear down any OpenCV windows.
cv2.destroyAllWindows()
| [
"gokengoke@gmail.com"
] | gokengoke@gmail.com |
dd7a749494730c6ae37858d35c460f0979313d67 | 1b1416fded6c689921f2448834d7bbb1096eee74 | /EntidadesGenerica.py | 0d9d8e9df7ebfdca5ed6f8bac404b4504bf2fca3 | [] | no_license | pclbusto/Imperdiel | d13badb48c38dc364de16ddcfeb79ae689bc5250 | 27c318f66e010c834f59a8455bd8e6ce1d397eb4 | refs/heads/master | 2021-01-09T23:37:55.622714 | 2016-11-08T18:52:04 | 2016-11-08T18:52:04 | 73,193,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | import sqlite3
class EntidadesGenerica:
    """Minimal SQLite helper base for entity classes.

    Opens a connection to *nombreBD* with mapping-style row access and
    exposes a generic statement runner.
    """

    def __init__(self, nombreBD):
        self.conexion = sqlite3.connect(nombreBD)
        # Rows behave like mappings (row['col']) instead of plain tuples.
        self.conexion.row_factory = sqlite3.Row
        self.status = 1
        self.autocomit = True

    def __ejecutarSQL__(self, script, parametros):
        """Execute *script*, optionally with *parametros*.

        Returns 0 on success and 1 on any sqlite3 error (the error is
        printed). Commits afterwards when ``self.autocomit`` is true.
        """
        status = 0
        c = self.conexion.cursor()
        try:
            if parametros:
                c.execute(script, parametros)
            else:
                c.execute(script)
        except sqlite3.Error as er:
            print(er)
            status = 1
        finally:
            # Fix: release the cursor even when execution fails.
            c.close()
        if self.autocomit:
            self.conexion.commit()
        return status
| [
"pclbusto@gmail.com"
] | pclbusto@gmail.com |
c71644f8eaab634fe0c94a10f524120904ee4f45 | 5f36ddfa8f3245f2557abbfe6a48f57a5816393c | /wheat/Scripts/CountfeaturesfromBed_tauschii.py | 4ea7da29d6814fc7760280a6e10ef6c7613d9561 | [] | no_license | riteshkrishna/cgr_scripts | 64d324955615102c2b3be68d3938bfddd364df24 | 0e073e119465201ac36928de32870c9da0766a38 | refs/heads/master | 2021-01-20T20:27:50.311438 | 2016-08-03T11:22:21 | 2016-08-03T11:22:21 | 64,840,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | __author__ = 'riteshk'
'''
bedtools intersect with options -wa and -wb result in a TSV file as noted in the onenote entry on March 9.
Example entry:
1A 100519 103705 ID=gene:Traes_1AS_BEE845715;biotype=protein_coding;version=1 1A 101600 102000 2 125 400 0.3125
1A 100519 103705 ID=gene:Traes_1AS_BEE845715;biotype=protein_coding;version=1 1A 102200 102600 2 125 400 0.3125
Need to count how many reads were aligned against the gene
'''
def count_features(bedfile, outfile):
    """Aggregate per-gene coverage from a ``bedtools intersect -wa -wb`` TSV.

    Each usable input row has at least 11 tab-separated columns where
    column 3 is the gene entry, column 7 the read count, column 8 the
    bases covered and column 9 the coverage range. Rows are summed per
    gene and written to *outfile* as::

        gene<TAB>reads<TAB>bases_covered<TAB>coverage_range

    Shorter (malformed) lines are skipped.
    """
    dict_gene_count = {}
    dict_gene_coveragerange = {}
    dict_gene_basecovered = {}
    # 'with' guarantees the handles are closed even if a line fails to parse.
    with open(bedfile) as in_file:
        for line in in_file:
            parts = line.split('\t')
            if len(parts) < 11:
                continue
            gene_entry = parts[3]
            read_count = int(parts[7])
            basecovered = int(parts[8])
            coveragerange = int(parts[9])
            # dict.get with default replaces the old if/else branches.
            dict_gene_count[gene_entry] = dict_gene_count.get(gene_entry, 0) + read_count
            dict_gene_coveragerange[gene_entry] = dict_gene_coveragerange.get(gene_entry, 0) + coveragerange
            dict_gene_basecovered[gene_entry] = dict_gene_basecovered.get(gene_entry, 0) + basecovered
    with open(outfile, 'w') as out_file:
        # Insertion order is preserved, matching the input's gene order.
        for key, count in dict_gene_count.items():
            out_file.write(key + '\t' + str(count) + '\t' + str(dict_gene_basecovered[key]) + '\t' + str(dict_gene_coveragerange[key]) + '\n')
if __name__ == "__main__":
    # Hard-coded input/output paths for the tauschii coverage run.
    intersect_bed = '/pub9/ritesh/wheat_renseq/Analysis/analysis_AT/analysis_at_pacbio/intersect_gene_coverage_tauschii.bed'
    count_out = '/pub9/ritesh/wheat_renseq/Analysis/analysis_AT/analysis_at_pacbio/intersect_gene_coverage_tauschii.count'
    count_features(intersect_bed, count_out)
| [
"riteshk@liv.ac.uk"
] | riteshk@liv.ac.uk |
e89a27d8e3b339944906864fd1b516b63e361eb5 | 63b082dce537834a306f9d071fee2fc1b9be0540 | /tfx_x/components/utils.py | 840e79e36ad3ec9a4b60e6ffeb4aed08ede6fa2b | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ssoudan/tfx_x | 66a4e1f7a890d4c15edb7d4c60544264aea55446 | 667e26358f41d63679b556ca7d28c0c5ed2d1d07 | refs/heads/main | 2023-07-09T21:36:05.471332 | 2021-08-17T22:58:40 | 2021-08-17T22:58:40 | 345,901,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,848 | py | # Lint as: python3
# Copyright 2021 ssoudan. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
import apache_beam as beam
import tensorflow as tf
from typing import Any, Dict, Mapping, List, Text
from tfx import types
from tfx.dsl.components.base import base_executor
from tfx.types import artifact_utils, Artifact
from tfx.utils import io_utils
def copy_over(input_artifact, output_artifact, splits_to_copy):
  """Copy the files of the given splits from one artifact to another.

  Args:
    input_artifact: location where the input splits are
    output_artifact: location where to copy them
    splits_to_copy: list of split names to copy

  Returns:
    None
  """
  # Resolve every source split URI up front.
  source_dirs = {name: artifact_utils.get_split_uri(input_artifact, name)
                 for name in splits_to_copy}
  for name, source_dir in source_dirs.items():
    target_dir = artifact_utils.get_split_uri([output_artifact], name)
    # Mirror each file of the split into the destination directory.
    for entry in tf.io.gfile.listdir(source_dir):
      io_utils.copy_file(src=os.path.join(source_dir, entry),
                         dst=os.path.join(target_dir, entry),
                         overwrite=True)
| [
"sebastien.soudan@gmail.com"
] | sebastien.soudan@gmail.com |
945a640e3ea6999a72a1be9da8a6b927723eaa0c | 2208abb9fce9b35e478e6b6f1d4cc07fa1d51acb | /mysite/itemstore/models.py | d61af5d795ab18c8a1ccce23cff3ef753c031ab5 | [] | no_license | giulio-dioguardi/django | 6748ba3ad559a812e4e433477cf88bcbf315ba2d | 3f6b23bc838c19e89b59d7b975e55b1b5380ba75 | refs/heads/master | 2021-01-19T23:40:08.040420 | 2019-02-13T07:19:51 | 2019-02-13T07:19:51 | 89,002,873 | 1 | 0 | null | 2018-11-16T09:18:50 | 2017-04-21T16:21:16 | JavaScript | UTF-8 | Python | false | false | 395 | py | from django.db import models
class Product(models.Model):
    """A stock-tracked item offered in the store."""
    # Display name of the product.
    name = models.CharField(max_length=200)
    # Units currently in stock; the string is the field's verbose name.
    stock = models.IntegerField('number in stock')
    description = models.TextField()

    def __str__(self):
        return self.name

    def is_out_of_stock(self):
        # True when no units remain (zero or negative stock).
        return self.stock <= 0
    # Admin display options: render as a boolean icon under this header.
    is_out_of_stock.boolean = True
    is_out_of_stock.short_description = 'Out of stock?'
| [
"giulio.dioguardi@alten.nl"
] | giulio.dioguardi@alten.nl |
9153d99142b829b4196222778f55cc3eaef093e8 | 396ca51170d69a63af3a43a15e16a76b69b27a96 | /CS60050-Machine-Learning/Assignment 1/Source Code/problems.py | 1eb08c4dd1512be06c8de0503376919ebb7549fb | [] | no_license | ysh-1-9/CSE-5th-Semester-IIT-KGP | 89d72085b12820ea9e0c782c0d1a543a153b5c63 | 92184c0f0ef891128a03efd78276422bc82793b8 | refs/heads/main | 2023-08-07T06:25:13.141471 | 2021-09-29T14:18:52 | 2021-09-29T14:18:52 | 411,685,899 | 0 | 0 | null | 2021-09-29T13:29:43 | 2021-09-29T13:29:43 | null | UTF-8 | Python | false | false | 5,721 | py | """
This python file reads the data from the PercentageIncreaseCOVIDWorldwide.csv
dataset and then forms regression tree out of it using the ID3 algorithm and
Variance as an measure of impurity
"""
# Authors: Debajyoti Dasgupta <debajyotidasgupta6@gmail.com>
# Siba Smarak Panigrahi <sibasmarak.p@gmail.com>
import argparse
import time
import random
import matplotlib.pyplot as plt
from utility import train_test_split, read_data, train_valid_split, get_accuracy, print_decision_tree
from model import construct_tree, predict, node
def randomize_select_best_tree(data, max_height, X_test):
    """Train 10 trees on random train/valid splits and keep the best one.

    Each of the 10 rounds draws a fresh train/validation split of *data*,
    builds a tree of at most *max_height* levels (-1 means effectively
    unbounded, capped at 300) and scores it on *X_test* by MSE.

    Returns:
        (best_tree, train_split_of_best, valid_split_of_best,
         mean_test_mse, mean_test_accuracy) over all 10 rounds.
    """
    if max_height == -1:
        max_height = 300
    attributes = ["Confirmed", "Recovered", "Deaths", "Date"]
    # set the local variables
    least_mse, tree, mse_avg, acc_avg = 10**18, None, 0, 0
    train, valid = None, None
    for _ in range(10):
        X_train, X_valid = train_valid_split(data)
        decision_tree = construct_tree(X_train, 0, max_height, attributes)
        test_mse, _ = predict(decision_tree, X_test)
        test_acc = get_accuracy(decision_tree, X_test)
        # Fix: accumulate every round so the values divided by 10 below are
        # genuine averages (previously only improving rounds were summed).
        mse_avg += test_mse
        acc_avg += test_acc
        if test_mse < least_mse:
            least_mse = test_mse
            tree = decision_tree
            train = X_train
            valid = X_valid
    mse_avg /= 10
    acc_avg /= 10
    return tree, train, valid, mse_avg, acc_avg
def randomize_select_best_height_tree(train, X_test):
    """Sweep tree heights 1..49 and return the best (lowest test MSE) tree.

    Returns (best_tree, its_train_split, its_valid_split, best_height)
    and plots test-MSE against height via matplotlib.

    NOTE(review): the body reads the module-level globals ``test`` and
    ``valid`` (set in the __main__ block) instead of the ``X_test``
    parameter in several places, and ``decision_tree``/``X_train``/
    ``X_valid`` stay unbound if no height > 4 ever improves the MSE --
    confirm this is intended before relying on it outside __main__.
    """
    mse, height, cur_mse = [], [], 10**18
    decision_tree, ht = None, -1
    for h in range(1, 50):
        print("[---- Height {} -----] ".format(h), end = '')
        decision_tree_sample, temp_train, temp_valid, _, _ = randomize_select_best_tree(train, h, test)
        mse_test = predict(decision_tree_sample, test)[0]
        # Heights <= 4 are never accepted as candidates.
        if mse_test < cur_mse and h > 4:
            decision_tree = decision_tree_sample
            cur_mse = mse_test
            X_train = temp_train
            X_valid = temp_valid
            ht = h
        data_print(decision_tree_sample, train, X_test, valid)
        mse.append(mse_test)
        height.append(h)
    plt.title("test-mse vs height")
    plt.ylabel("test-mse")
    plt.xlabel("height")
    plt.plot(height, mse)
    return decision_tree, X_train, X_valid, ht
def data_print(tree, train, test, valid):
    """Print train/test accuracy (%) and MSE of *tree* on one line.

    *valid* is accepted for signature compatibility but not reported.
    """
    train_acc = round(get_accuracy(tree, train) * 100, 2)
    train_mse = round(predict(tree, train)[0], 2)
    print("train acc: {}, train mse: {}".format(train_acc, train_mse), end=', ')
    test_acc = round(get_accuracy(tree, test) * 100, 2)
    test_mse = round(predict(tree, test)[0], 2)
    print("test acc: {}, test mse: {}".format(test_acc, test_mse))
if __name__ == '__main__':
    # Driver for assignment questions Q1-Q4: read the COVID dataset, train
    # trees, sweep heights, prune on the validation split, and save a plot.
    # Feel free to change the seed below or comment out the following line
    random.seed(100000)
    start = time.time()
    parser = argparse.ArgumentParser()
    parser.add_argument("--height", help="maximum height of decision tree for Q1", type=int)
    args = parser.parse_args()
    ht = -1
    if args.height:
        ht = args.height
    print("\n ============= READING DATA ============ \n")
    DATA_FILE = 'PercentageIncreaseCOVIDWorldwide.csv'
    df = read_data(DATA_FILE)
    print("Time elapsed = {} ms".format(time.time()-start))
    print("\n ============= DATA READ ============ \n\n")
    train, test = train_test_split(df)
    print("============= TRAIN TEST SPLIT COMPLETE ============\n")
    print("train data size: {}, test data size = {} \n\n".format(
        len(train), len(test)))
    print("============== SOLVING Q1 ==============\n")
    # print("select a height ({greater then 0} or {-1}): ")
    # ht = int(input())
    print("height selected: {}".format(ht if ht != -1 else "Full Tree"))
    print("\n========= TRAINING STARTED =========\n")
    # Keep the full training set around: randomize_select_best_tree returns
    # the best run's sub-split, and Q2 needs the original again.
    X_train = train
    start = time.time()
    tree, train, valid, mse_avg, acc_avg = randomize_select_best_tree(train, ht, test)
    print("Time elapsed = {} ms".format(time.time()-start))
    print("\n ============= TRAINING FINISHED ============ \n")
    print("Average Test Accuracy: {}\n".format(acc_avg * 100))
    print("Average Test MSE: {}\n".format(mse_avg))
    data_print(tree, train, test, valid)
    train = X_train
    print("\n============== SOLVED Q1 ==============\n")
    print("\n============== SOLVING Q2 ==============\n")
    print("\n========= TRAINING STARTED =========\n")
    start = time.time()
    tree, train, valid, ht = randomize_select_best_height_tree(train, test)
    print("Time elapsed = {} ms".format(time.time()-start))
    print("\n ============= TRAINING FINISHED ============ \n")
    print("BEST TREE: height = {}".format(ht))
    data_print(tree, train, test, valid)
    print("\n============== SOLVED Q2 ==============\n")
    print("\n============== SOLVING Q3 ==============\n")
    # Q3: prune the best tree against the validation split.
    print("[==== BEFORE PRUNING ====] Valid acc: {}, Valid mse: {}, number of nodes = {}".format(get_accuracy(tree, valid)*100, predict(tree, valid)[0], tree.count_node()))
    tree.prune(tree, predict(tree, valid)[0], valid)
    print("[==== AFTER PRUNING ====] Valid acc: {}, Valid mse: {}, number of nodes = {}\n".format(get_accuracy(tree, valid)*100, predict(tree, valid)[0], tree.count_node()))
    data_print(tree, train, test, valid)
    print("\n============== SOLVED Q3 ==============\n")
    print("\n============== SOLVING Q4 ==============\n")
    print('\n SAVING =====> \n')
    print_decision_tree( tree )
    print('The image of the graph is saved as [ decision_tree.gv.pdf ]')
    print("\n============== SOLVED Q4 ==============\n")
    plt.show()
| [
"sibasmarak.p@gmail.com"
] | sibasmarak.p@gmail.com |
c25f18424994d72913eddbc636c4c70ef49a0fc9 | 3c3edd4b1b3e286ad5532cc14129422d221d172b | /api/migrations/0022_auto_20190429_0502.py | 599f91c4cec0642d5a26be8c59f412187e779d06 | [] | no_license | xavierlu/mse_meng | b7caa62877d4b3e99f6e60a3923456c963499069 | db93fd3adad829fe85be4a35b6b7d162f1c75802 | refs/heads/master | 2023-01-07T07:33:53.347813 | 2019-09-15T15:31:47 | 2019-09-15T15:31:47 | 163,418,452 | 2 | 5 | null | 2023-01-04T23:28:37 | 2018-12-28T14:22:49 | JavaScript | UTF-8 | Python | false | false | 396 | py | # Generated by Django 2.1.5 on 2019-04-29 05:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Post.files to an optional CharField defaulting to ''."""

    dependencies = [
        ('api', '0021_post_files'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='files',
            field=models.CharField(blank=True, default='', max_length=255),
        ),
    ]
| [
"xll2@cornell.edu"
] | xll2@cornell.edu |
6d73d4509904137b281d8d1e94290549eded70ac | dab869acd10a3dc76e2a924e24b6a4dffe0a875f | /Laban/LabanLib/dumpLabanRecognizer.py | 23f896b8fb16ae97cc5714311bc2eb2e59973fba | [] | no_license | ranBernstein/Laban | d82aff9b0483dd007e03a06e51f7d635f62ed05d | 54c88afa9493deacbdd182904cc5d180ecb208b4 | refs/heads/master | 2021-01-23T13:17:51.777880 | 2017-02-14T09:02:54 | 2017-02-14T09:02:54 | 25,508,010 | 3 | 1 | null | 2017-02-14T09:02:55 | 2014-10-21T07:16:01 | Tcl | UTF-8 | Python | false | false | 382 | py | import pickle
import LabanUtils.util as labanUtil
# SECURITY NOTE: pickle deserialization can execute arbitrary code; only
# load these files from trusted sources.
# Fix: pickle requires binary-mode file objects; the old text-mode
# open('...', 'r'/'w') plus the pre-dump flush() was broken, and the
# handles were never closed on error.
with open("X", "rb") as f:
    X = pickle.load(f)
with open("Y_Laban", "rb") as f:
    Y_laban = pickle.load(f)

# Fit the multi-task Laban classifier and record the selected features.
labanClf, selectedIndices = labanUtil.getMultiTaskclassifier(X, Y_laban)

# Persist the trained model and the chosen feature indices.
with open('labanClf', 'wb') as f:
    pickle.dump(labanClf, f)
with open('selectedIndices', 'wb') as f:
    pickle.dump(selectedIndices, f)
| [
"bernstein.ran@gmail.com"
] | bernstein.ran@gmail.com |
3ef2c274a75233ac30c05e82a045f18bd0f7de5c | 8641ad0912224b19bb3cadf942e1c186af8f2732 | /quick_sort.py | 2efd2303b926f9b30b8158d251a07b3dc3c9bd2a | [] | no_license | mohammed-strive/tarams-exercises | 741dd353b66fe3c616322e3b18ab23fae951eaf2 | 33a9e33e2e30244cdedb3561c3c19ddef7e69e30 | refs/heads/master | 2022-11-15T18:36:35.492115 | 2020-06-22T05:20:05 | 2020-06-22T05:20:05 | 266,085,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | import random
def quick_sort(items):
    """Return a new list with the elements of *items* in ascending order.

    Randomized quicksort: a random pivot partitions the input into
    less-than / equal / greater-than lists. Separating the 'equal'
    partition fixes the original infinite recursion on inputs whose
    elements all compare equal (the pivot was never removed, so the
    'left' partition could equal the whole input).
    """
    if len(items) < 2:
        return items
    seed = random.choice(items)
    less = [item for item in items if item < seed]
    equal = [item for item in items if item == seed]
    greater = [item for item in items if item > seed]
    return quick_sort(less) + equal + quick_sort(greater)
if __name__ == '__main__':
items = [5, 0, 1, 10, 100, 90, 78, 2, -1]
print(quick_sort(items))
| [
"noreply@github.com"
] | mohammed-strive.noreply@github.com |
d30b1d08d9b1bb173cfa1a8ca834f895e0003bbd | 36ae6d500728d7704c89dbcbfc363a548b5932d8 | /winapi/__init__.py | 301dcbdae7768cd160ce8abe1ca0ccb0be0a4999 | [] | no_license | kanishk619/python-pnputil | e9408e4362939e82a531fb285c516cfbe2cc65d2 | d9a032892f613abc0f28550c37f89fc5b9bce76e | refs/heads/main | 2023-03-02T01:54:05.585987 | 2021-02-07T07:15:15 | 2021-02-07T07:15:15 | 336,723,084 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | """
The purpose of this module is to aid Windows driver enumeration
"""
__author__ = 'Kanishk Gandharv'
__version__ = '0.0.1a'
__maintainer__ = __author__
__status__ = 'alpha'
import logging
def setup_logger(name, fh=False):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
# formatter = logging.Formatter('[%(asctime)s,%(msecs)03d][%(levelname)-5s][%(name)-12s] - %(message)s',
# datefmt="%d-%m-%Y %I:%M:%S")
formatter = logging.Formatter('[%(asctime)s,%(lineno)03d][%(levelname)-5s][%(name)-8s] - %(message)s',
datefmt="%d-%m-%Y %I:%M:%S")
sh.setFormatter(formatter)
logger.addHandler(sh)
if fh:
from logging.handlers import RotatingFileHandler
fh = RotatingFileHandler('{}.log'.format(name), maxBytes=5242880, backupCount=10)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
setup_logger('winapi') # register a root logger
| [
"noreply@github.com"
] | kanishk619.noreply@github.com |
03a114ba753d734e7f87649a0798fd89d8b5d225 | 54c3daec472b89d68c8982647d03f70b146c130e | /mcap.py | 1827703fc4d59942e4faf10db4d1e852d6cf8e34 | [] | no_license | hamal03/btc-s2f | 591a639809b88ae057fc33d8378cd289c9c7349a | 0b4090f566f9b502a9598b5d40a44f27df90359b | refs/heads/master | 2022-02-28T07:53:57.413327 | 2022-02-09T12:33:39 | 2022-02-09T12:33:39 | 210,805,031 | 49 | 13 | null | null | null | null | UTF-8 | Python | false | false | 5,997 | py | # This python script will calculate the statistical
# correlation between Bitcoin's "stock to flow" model
# by the pseudonymous user "PlanB". De calculation is based on
# daily price averages from blockstream.info.
# The output serves as input data for a gnuplot script.
#
# Call the script with "--regen" to generate new data even
# if no new data is available at blockstream
# imports
import sys
import numpy as np
import sqlite3
import requests
import json
from datetime import datetime
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
# gnuplot price range max
ymax = 5000000
# Needed for position of te text box on the detail chart
boxfact=.942
textfact=.963
# Date to which we exend the blocks based on 144 blocks per day
extendto = 1798758000 # 2027-01-01
conn = sqlite3.connect('bcinfo.sqlite')
cur = conn.cursor()
cur.execute('select * from btc order by date')
bstr = cur.fetchall()
maxdt = bstr[-1][0]
#if "--regen" not in sys.argv:
# burl = "https://community-api.coinmetrics.io/v2/assets/btc/metricdata"
# bapistr = '?metrics=PriceUSD%2CSplyCur&start='
# tdago = datetime.fromtimestamp(bstr[-3][0]*86400).strftime('%F')
# newdata = requests.get(burl+bapistr+tdago)
# if newdata.status_code != 200:
# print("Getting data from coinmetrics failed")
# sys.exit(1)
# jdata = json.loads(newdata.text)
# for bd in jdata['metricData']['series']:
# if bd['values'][0] is None or bd['values'][1] is None: break
# epdate = int(int(datetime.strptime(bd['time'], '%Y-%m-%dT%H:%M:%S.000Z').\
# strftime('%s'))/86400+.5)
# if epdate <= maxdt: continue
# newentry = (epdate, float(bd['values'][0]), float(bd['values'][1]))
# cur.execute('insert into btc values (?,?,?)', newentry)
# bstr.append(newentry)
# if maxdt == bstr[-1][0]: sys.exit()
# conn.commit()
# maxdt = bstr[-1][0]
dt = list()
coins = list()
height = list()
price = list()
mcap = list()
sf = list()
lnsf = list()
lnprice = list()
lnmcap = list()
p = 0 # halving period
ncoins = 0 #number of coins in beginning of this period
# Read available data and calculate stock to flow (current coins
# divided by last year's additions.
j = 0 # use second index to take skipped records into account
for i in range(len(bstr)):
if bstr[i][1] == 0: continue
dt.append(bstr[i][0]*86400)
price.append(bstr[i][1])
coins.append(bstr[i][2])
mcap.append(bstr[i][1]*bstr[i][2])
if coins[j] >= ncoins + 210000*50/2**p:
ncoins += 210000*50/2**p
p += 1
height.append(210000*p+(coins[j]-ncoins)*2**p/50)
sf.append(coins[j]/(coins[j]-bstr[i-365][2]))
# Calculate ln(S2F) and ln(price)
# ln() values should be in 2D list for sklearn
lnsf.append([np.log(sf[j])])
lnprice.append([np.log(price[j])])
lnmcap.append([np.log(mcap[j])])
j += 1
# Remember the current length of sf[]
lstsf=len(sf)
## extend the lists of coins, height and date into the future
## based on 144 blocks per day
#while dt[-1] < extendto:
# dt.append(dt[-1]+86400)
# height.append(height[-1]+144)
# # Did we cross a halving point?
# if int(height[-1]/210000) > p:
# ncoins += 210000*50/2**p
# p += 1
# coins.append(ncoins+(height[-1]%210000)*50/2**p)
# sf.append(coins[-1]/(coins[-1]-coins[-361]))
# scikit-learn regression
# Model initialization on price
prc_reg_model = LinearRegression()
# Fit the data(train the model)
prc_reg_model.fit(lnsf, lnprice)
# Predict
lnprc_pred = prc_reg_model.predict(lnsf)
# model evaluation
prc_rmse = mean_squared_error(lnprice, lnprc_pred)
prc_r2 = r2_score(lnprice, lnprc_pred)
prc_slope = prc_reg_model.coef_[0][0]
prc_intercept = prc_reg_model.intercept_[0]
prc_e2rmse = np.exp(prc_rmse)
prc_e2intc = np.exp(prc_intercept)
# scikit-learn regression
# Model initialization on price
cap_reg_model = LinearRegression()
# Fit the data(train the model)
cap_reg_model.fit(lnsf, lnmcap)
# Predict
lncap_pred = cap_reg_model.predict(lnsf)
# model evaluation
cap_rmse = mean_squared_error(lnmcap, lncap_pred)
cap_r2 = r2_score(lnmcap, lncap_pred)
cap_slope = cap_reg_model.coef_[0][0]
cap_intercept = cap_reg_model.intercept_[0]
cap_e2rmse = np.exp(cap_rmse)
cap_e2intc = np.exp(cap_intercept)
# Gnuplot variable values
gpvars = open('gpvars.txt', 'w')
gpvars.write(str(round(prc_slope, 2))+"\n")
gpvars.write(str(round(prc_e2intc, 2))+"\n")
gpvars.write(str(round(prc_rmse, 4))+"\n")
gpvars.write(str(round(prc_r2, 4))+"\n")
gpvars.write(str(round(prc_e2rmse, 2))+"\n")
gpvars.write(str(int(maxdt*86400))+"\n")
gpvars.write(str((0.01/prc_e2intc)**(1/prc_slope))+"\n") # Low S2F val for y2 axis
gpvars.write(str((ymax/prc_e2intc)**(1/prc_slope))+"\n") # High S2F val for y2 axis
gpvars.write(str(ymax)+"\n")
gpvars.write(str(round(prc_intercept, 2))+"\n")
gpvars.write(str(round(cap_slope, 2))+"\n")
gpvars.write(str(round(cap_e2intc, 2))+"\n")
gpvars.write(str(round(cap_rmse, 4))+"\n")
gpvars.write(str(round(cap_r2, 4))+"\n")
gpvars.write(str(round(cap_e2rmse, 2))+"\n")
gpvars.write(str(round(cap_intercept, 2))+"\n")
gpvars.close()
for i in range(len(price), len(dt)):
price.append("")
# Gnuplot data for timeline chart
gpdata = open('sftime.csv', 'w')
for i in range(len(dt)):
prc_sfval = sf[i]**prc_slope*prc_e2intc
prc_sd1p = prc_sfval*prc_e2rmse
prc_sd2p = prc_sd1p*2
prc_sd1m = prc_sfval/prc_e2rmse
prc_sd2m = prc_sd1m/2
cap_sfval = sf[i]**cap_slope*cap_e2intc/coins[i]
cap_v_prc = cap_sfval / prc_sfval
gpdata.write(",".join(str(x) for x in [dt[i], prc_sfval, prc_sd1p, prc_sd2p, \
prc_sd1m, prc_sd2m, price[i], cap_sfval, cap_v_prc])+"\n")
gpdata.close()
# Gnuplot regression line values
sfdata = open("sfdata.csv", "w")
for i in range(len(lnsf)):
sfdata.write(str(lnsf[i][0]) + "," + str(lnprice[i][0]) \
+ "," + str(lnmcap[i][0]) +"\n")
sfdata.close()
if "--quiet" not in sys.argv: print("Data files created")
| [
"propdf@hamal.nl"
] | propdf@hamal.nl |
ef5edc350d8043356fd92ecdceac79aef1b90abd | ffb520d5baafeb1998facf2a725f7d79fe2c270e | /InteriorRoadNetworkGenerate/SegByMotion.py | 9966a3478aa3ae5dd83f4aae0eb2a1f41f0290ad | [] | no_license | codefavor2018/RoadNetMapper-01374341 | c1cb64f4b58eba82f008f44be21207ee2d30d02f | 26165c545e0bd1fbbbcf8de2b50d8f4650bdaa3a | refs/heads/master | 2020-07-12T00:45:34.261956 | 2018-05-16T13:33:29 | 2018-05-16T13:33:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,930 | py | # -*- coding:utf-8 -*-
from basic import Segment,PointMotionType,Point
'''
stop detection
ref:Zhang F, Wilkie D, Zheng Y, et al. Sensing the pulse of urban refueling behavior[C]//
ACM International Joint Conference on Pervasive and Ubiquitous Computing. ACM, 2013:13-22.
——5.1 Fig(6)
'''
class MotionSegmentMediator(object):
globalID=0
def __init__(self,disThreshold):
self.disThreshold=disThreshold
def labelStayPoints(self,trace):
'''input:
space-temporal(ordered by time) trace data of certain person
trace points like: (x0,y0,tm0),(x1,y1,tm1),...,(xn,yn,tmn)
given:
disTreshold t to detect stop clusters, others as move points
detection method
ref:Zhang F, Wilkie D, Zheng Y, et al. Sensing the pulse of urban refueling behavior[C]//
ACM International Joint Conference on Pervasive and Ubiquitous Computing. ACM, 2013:13-22.
——5.1 Fig(6)
label stay points in the given trace
return
None
'''
staysegsCnt = 0
if trace is None:
raise ValueError
if len(trace) == 1:
return trace
for i in range(len(trace)):
stayseg = Segment(point=trace[i])
for j in range(i + 1, len(trace)):
disToStart=stayseg.startPoint.distanceToPoint(trace[j])
ditToLast=stayseg.lastPoint.distanceToPoint(trace[j])
if (disToStart<= self.disThreshold) and (ditToLast<= self.disThreshold) and trace[j].speed<1:
stayseg.insertPoint(trace[j])
if (disToStart > self.disThreshold) or (ditToLast > self.disThreshold) or (j == len(trace) - 1) \
or trace[j].speed>=1:
if stayseg.length >= 2:
# label stay points
staysegsCnt += 1
for po in stayseg.pointList:
po.setType(PointMotionType.Stay)
i = j
break
def segTrace(self,trace):
'''input:
space-temporal(ordered by time) trace data of certain person
trace points like: (x0,y0,tm0),(x1,y1,tm1),...,(xn,yn,tmn)
given:
disTreshold t to detect stop clusters, others as move points
detection method
ref:Zhang F, Wilkie D, Zheng Y, et al. Sensing the pulse of urban refueling behavior[C]//
ACM International Joint Conference on Pervasive and Ubiquitous Computing. ACM, 2013:13-22.
——5.1 Fig(6)
return
trace segments in motion divided by stops
[seg1,seg2,...segk], seg=[p0,p1]
'''
staysegsCnt=0
staysegLst=[]
if trace is None:
raise ValueError
if len(trace)==1:
return trace
for i in range(len(trace)):
stayseg = Segment(point=trace[i])
for j in range(i+1,len(trace)):
if (stayseg.startPoint.distanceToPoint(trace[j])<=self.disThreshold and
stayseg.lastPoint.distanceToPoint(trace[j])<=self.disThreshold):
stayseg.insertPoint(trace[j])
if (stayseg.startPoint.distanceToPoint(trace[j]) > self.disThreshold or
stayseg.lastPoint.distanceToPoint(trace[j]) > self.disThreshold or
j==len(trace)-1):
if stayseg.length>=2:
staysegLst.append(stayseg)
#label stay points
staysegsCnt+=1
for po in stayseg.pointList:
po.setType(PointMotionType.Stay)
i=j
break
#conbine segs if the segs touch with each other
if staysegsCnt==0:
return trace
resSegs=[]
for i in range(len(trace)):
if trace[i].motionType!=PointMotionType.Stay:
moveSeg= Segment(point=trace[i])
for j in range(i+1,len(trace)):
if trace[j].motionType!=PointMotionType.Stay:
moveSeg.insertPoint(trace[j])
if trace[j].motionType==PointMotionType.Stay or j==len(trace)-1:
if moveSeg.length>=3:
moveSeg.setAttri(ID=self.globalID)
resSegs.append(moveSeg)
self.globalID+=1
i=j
break
return resSegs
if __name__=="__main__":
from LoadData import LoadTraceDataFromMysqlDBFactory
from basic import Point
try:
loader = LoadTraceDataFromMysqlDBFactory(dbname='mydatabase', tblname="trajectory",
whereClause="IsOutlier=0 order by un,tm")
traceDF = loader.getTraceData()
seg=[]
for i in range(len(traceDF)):
po=Point(zx=traceDF["zx"][i],zy=traceDF["zy"][i],tm=traceDF["tm"][i],
ID=traceDF["UID"][i],TimeInterval=traceDF["TimeInterval"][i],
DistanceInterval=traceDF["DistanceInterval"][i])
seg.append(po)
segCls=MotionSegmentMediator(5)
segCls.labelStayPoints(seg)
for po in seg:
sql=""
if po.motionType==PointMotionType.Move:
sql="update trajectory set motion=1 where UID=%d" %po.ID
elif po.motionType==PointMotionType.Stay:
sql = "update trajectory set motion=0 where UID=%d" % po.ID
loader.database.UpdateTable(sql)
except Exception,e:
print e.message
| [
"506663632@qq.com"
] | 506663632@qq.com |
fb9d367c87544b47cb6841e68e636c1369f62f8e | 4ecc5f8895782a20828bbececec7306bd1e42518 | /sftp/__init__.py | 5a5738343eb69e3f2c0af24bd1d6c11cdb3c7e9a | [] | no_license | rcastill/freezing-bear-sftp | 2ee9e1419de89691bd12888e5c89b6039908bbb2 | 17454ead8d3dc870ec2304cee5fe00af5934ca00 | refs/heads/master | 2016-09-06T18:45:32.677948 | 2014-11-19T12:08:17 | 2014-11-19T12:08:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | __author__ = 'Rodolfo'
from handler import SFTPHandler | [
"rodolfocastillomateluna@gmail.com"
] | rodolfocastillomateluna@gmail.com |
eb03fe561672b829d8ba86e36d4ee415da5ad41c | 38258a7dd9acbfb7adf72983015de68a948a4826 | /B_15000~/B_15652.py | cc6422a80ae1ddbd9b06ca5b3cf7a4710db163d2 | [] | no_license | kangsm0903/Algorithm | 13a7fe5729039a1d0ce91a574c4755a8a92fb02b | 7d713d1c9e2e4dc30141d4f409ac1430a357065b | refs/heads/master | 2022-10-04T00:33:49.247977 | 2022-09-26T12:51:16 | 2022-09-26T12:51:16 | 219,265,010 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | # 중복 허용
# 비내림차순
N,M=list(map(int,input().split()))
result=[]
def is_promising():
base=int(result[0])
for i in range(1,len(result)):
if base>int(result[i]): # 비내림차순이 아닐 때
return False
else:
base=int(result[i]) # 비교값을 최신으로 갱신
continue
return True
def BruteForce():
global result
if len(result)==M:
print(' '.join(result))
return
for i in range(1,N+1):
result.append(str(i))
if is_promising():
BruteForce()
result.pop()
BruteForce() | [
"kangsm0903@naver.com"
] | kangsm0903@naver.com |
cb8f5da3d5c9c97ef7a7f298cff73f58aa40855b | 2d659bf4f88cc6581a7bf7d0c5a66e440b396cdd | /meiduo_mail/meiduo_mail/apps/goods/serializers.py | c193794c3d37d60cc24c3982adbd672411d16552 | [] | no_license | frankky-cyber/meiduo2 | 3a3ddd451a98f171a51a9683e4d6d92a35412494 | f93145b822b13b838f48c78b6b705db06cedf40d | refs/heads/master | 2023-03-26T21:30:43.463637 | 2021-03-26T09:05:38 | 2021-03-26T09:05:38 | 330,854,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from rest_framework import serializers
from goods.models import SKU
class SKUSerializer(serializers.ModelSerializer):
"""sku商品序列化器"""
class Meta:
model = SKU
fields = ['id', 'name', 'price', 'default_image_url', 'comments'] | [
"18288650698@163.com"
] | 18288650698@163.com |
340a1587cf3cd050c67be30dd485fa4e3dbf21cb | d060c02ffd05cca79f469a4cd8d26827b3f3c3e4 | /plugins/fileOptionReader.py | 7e7a1f592f845842968235b8cbd511c41fec7156 | [] | no_license | symek/job-cli | 4a2a190cf30348e5b2ca27c6c67c081599da495a | dfe2f629bd0a9956bddedd9b3d5544c3b91769d7 | refs/heads/master | 2021-01-13T04:11:46.571599 | 2020-11-17T16:11:12 | 2020-11-17T16:11:12 | 77,702,058 | 0 | 1 | null | 2017-03-03T12:59:48 | 2016-12-30T18:01:20 | Python | UTF-8 | Python | false | false | 2,142 | py | from job.plugin import PluginManager, PluginType
class FileOptionReader(PluginManager):
name = "FileOptionReader"
type = PluginType.OptionReader
def register_signals(self):
self.logger.debug("%s registering as %s", self.name, self.type)
return True
def load_from_file(self, path, extension, options={}):
""" TODO: Make use of Schematics to very our files
follow any known convension...
"""
def _from_json(json_object):
tmp = {}
if isinstance(json_object, dict):
for k in json_object:
tmp[k] = _from_json(json_object[k])
if isinstance(json_object, list):
return tuple(json_object)
return tmp
from glob import glob
from os.path import join, split, splitext
import json
files = []
for postfix in self.job.JOB_PATH_POSTFIX:
path = join(path, postfix)
location = join(path, "*.%s" % extension)
files += glob(location)
self.job.logger.debug("Options found: %s", files)
for file in files:
with open(file) as file_object:
obj = json.load(file_object)
for k, v in obj.items():
# This is for caching and safeness
if isinstance(v, list):
v = tuple(v)
options[k] = v
return options
def __call__(self, jobtemplate, extension=None):
self.job = jobtemplate
import job.cli # Just to find job/schema/* location
from os.path import join, split, realpath, dirname
if not extension:
extension = self.job.OPTION_FILE_EXTENSION
options_paths = [dirname(realpath(job.cli.__file__))]
options_paths += self.job.get_local_schema_path()
options = {}
for path in options_paths:
opt = self.load_from_file(path, extension=extension)
for k, v in opt.items():
options[k] = v
return options | [
"szymon.kapeniak@gmail.com"
] | szymon.kapeniak@gmail.com |
fde932c03103ac0578cbefc8e6596bbc701e8e44 | a72f9af633a9a9aec41b0ccc4eecc2ea37c271ff | /test_1.py | a99ea5440d8dce40e505dc7609226abaa32d60c0 | [] | no_license | alan0415/AI_ML-project | 552d937bea0dff5179d2d72f8a858d941002b094 | 4c49968febc866090db980454bf0094522889087 | refs/heads/master | 2022-11-06T02:52:01.072188 | 2018-09-02T11:23:50 | 2018-09-02T11:23:50 | 147,077,687 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,983 | py | import random
from copy import deepcopy
class Matrix:
def __init__(self, nrows, ncols):
"""Construct a (nrows X ncols) matrix"""
self.wrong_count = 0 #問題: 為何不能在其他方法直接寫 self.wromg_count += 1,而一定要先寫宣告
self.rows = nrows
self.cols = ncols
self.matrix = list()
for i in range(0, self.rows):
swap = list()
for j in range(0, self.cols):
swap.append(random.randint(0,9))
j += 1
self.matrix.append(swap)
i += 1
def add(self, m):
"""return a new Matrix object after summation"""
# m 為傳入預比較的matrix
#用 deepcopy 先備份 A matrix,再將運算結果存成self.matrix,最後再將備份存入A matrix
self.trans_matrix = deepcopy(self.matrix) #將self.matrix 備份成 self.trans_matrix
added_matrix = list()
added_matrix = m
for i in range(0, self.rows):
if len(self.matrix) != len(added_matrix):
print('Matrix size should in the same size!! ')
self.wrong_count += 1
break
else:
for j in range(0, self.cols):
if len(self.matrix[i]) != len(added_matrix):
print('Matrix size should in the same size!! ')
self.wrong_count += 1
break# 確定break跳出的迴圈是否正確 !!!!!!
else:
self.matrix[i][j] = self.matrix[i][j] + added_matrix[i][j]
return self.matrix
def sub(self, m):
"""return a new Matrix object after substraction"""
added_matrix = list()
added_matrix = m
if (self.wrong_count == 1):
print('Matrix size should in the same size!! ')
else:
for i in range(0, self.rows):
for j in range(0, self.cols):
self.matrix[i][j] = self.trans_matrix[i][j] - added_matrix[i][j]
def mul(self, m):
"""return a new Matrix object after multiplication"""
pass
def transpose(self):
"""return a new Matrix object after transpose"""
pass
def display(self):
"""Display the content in the matrix"""
for i in range(0, self.rows):
for j in range(0, self.cols):
print(self.matrix[i][j],end = ' ')
j += 1
i += 1
print(' ')
A_row = int(input("Enter A matrix's rows: "))
A_cols = int(input("Enter A matrix's cols: "))
A_matrix = Matrix(A_row,A_cols)
A_matrix.display()
B_row = int(input("Enter B matrix's rows: "))
B_cols = int(input("Enter B matrix's cols: "))
B_matrix = Matrix(B_row,B_cols)
B_matrix.display()
print('======== A + B ========')
A_matrix.add(B_matrix.matrix)
A_matrix.display()
print('======== A - B ========')
A_matrix.sub(B_matrix.matrix)
A_matrix.display()
| [
"ast9501@gmail.com"
] | ast9501@gmail.com |
67271c8b56d34f2b68e47c25372ad19fbd33d0a3 | ec1046f0d1b7a0aada54964ae3599e734ad69461 | /tensorforce/tests/test_ppo_agent.py | 7abf73e91b1b613135f163a3ecc0ec5dd647639f | [
"Apache-2.0"
] | permissive | jesuscast/tensorforce-clone | d6676fc854d4fac038f04ed7e9cf8b2f069b5deb | 524976f9cdbeebb01eb88c77ae842dbe4c4a1f36 | refs/heads/master | 2021-04-12T09:58:34.673769 | 2018-03-25T03:47:27 | 2018-03-25T03:47:27 | 126,662,271 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | # Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import unittest
from tensorforce.agents import PPOAgent
from tensorforce.tests.base_agent_test import BaseAgentTest
class TestPPOAgent(BaseAgentTest, unittest.TestCase):
agent = PPOAgent
deterministic = False
kwargs = dict(
batch_size=8
)
| [
"mi.schaarschmidt@gmail.com"
] | mi.schaarschmidt@gmail.com |
8dd8b965cc2386abd2f1ee9b346f0008c48789a1 | 9d48e2984bc312bb684924697e2994c0e42c903f | /eventex/subscriptions/tests/test_mail_subscribe.py | ea4a57ed5f84691725c85f533af352381ffb4f03 | [
"MIT"
] | permissive | vinigracindo/wttd | 96a4c30b394b8d4a033b3ab9aef26be8b1c70b82 | 1f0ca5917d8b56976c72786f1436d7e3610e533b | refs/heads/master | 2022-04-23T08:39:26.148268 | 2019-12-06T11:12:22 | 2019-12-06T11:12:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | from django.core import mail
from django.test import TestCase
from django.shortcuts import resolve_url as r
class SubscribePostValid(TestCase):
def setUp(self):
data = dict(name="Vicente Marçal", cpf="11144477735",
email="vicente.marcal@gmail.com", phone="69-98114-6191")
self.client.post(r('subscriptions:new'), data)
self.email = mail.outbox[0]
def test_subscription_email_subject(self):
expect = 'Confirmação de Inscrição'
self.assertEqual(expect, self.email.subject)
def test_subscription_email_from(self):
expect = 'contato@eventex.com.br'
self.assertEqual(expect, self.email.from_email)
def test_subscription_email_to(self):
expect = ['contato@eventex.com.br', 'vicente.marcal@gmail.com']
self.assertEqual(expect, self.email.to)
def test_subscription_email_body(self):
contents = ['Vicente Marçal',
'11144477735',
'vicente.marcal@gmail.com',
'69-98114-6191']
for content in contents:
with self.subTest():
self.assertIn(content, self.email.body)
| [
"vicente.marcal@gmail.com"
] | vicente.marcal@gmail.com |
0307636f3350b41783f6bc369c9b7562faa04092 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/1154.py | d57c56b69272d0a44af0fad344cc5e916a3e8b59 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | infile = open('D:\study\codejam\codejam2014\B-large.in','r')
outfile = open('D:\study\codejam\codejam2014\B-large.out','w')
def main():
T = int(infile.readline())
for case in range(1,T+1):
doCase(case)
infile.close()
outfile.close()
def doCase(case):
c,f,x = [float(x) for x in infile.readline().split()]
outfile.write('Case #'+str(case)+': '+str(check(c,f,x))+'\n')
#print('case #'+str(case)+' '+str(check(c,f,x)))
def check(c,f,x):
rate = 2
time1 = 0
while x/(rate+f)+c/rate < x/rate:
time1 += c/rate
rate += f
time = time1+x/rate
return round(time,7)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
e0797fa8743e514c3f585d9efddac6a1dfd96d2d | ed8d6b9c7795c0b05e1fd320fe22112cc8feae77 | /tests/utils.py | 38bb8b8cc2215eacb8a265a728c9141d228da9c9 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | jacopsd/mollie-api-python | d70eb8826cc64fd06cfa384de20aec204db2ebd9 | 1361cb6bb65d1cf016da62b184a9da72db714103 | refs/heads/master | 2023-01-22T01:42:34.601892 | 2020-12-05T16:01:58 | 2020-12-05T16:01:58 | 318,570,904 | 0 | 0 | BSD-2-Clause | 2020-12-05T16:01:59 | 2020-12-04T16:19:48 | null | UTF-8 | Python | false | false | 688 | py | from mollie.api.objects.list import List
def assert_list_object(obj, object_type):
"""Assert that a List object is correctly working, and has sane contents."""
assert isinstance(obj, List), 'Object {obj} is not a List instance.'.format(obj=obj)
assert isinstance(obj.count, int), 'List count is not an integer.'
assert obj.count > 0
# verify items in list
items = []
for item in obj:
assert isinstance(item, object_type)
assert item.id is not None
items.append(item.id)
assert len(items) == obj.count, "Items in list don't match list count."
assert len(set(items)) == obj.count, 'Not all object ids in the list are unique.'
| [
"tom@fourdigits.nl"
] | tom@fourdigits.nl |
e7979913f0777d5429839bafb0454e9227cf49e4 | 0cb799735ca63580a8e7e53db011fe807df7fa2a | /2014-2019_SFB_Imagery_RemoteSense_GDAL-OGR/OGR_FeatureWalkGetCoordinate.py | e82f07e4f4655b1f3b046b26f4aaf9028da6cf77 | [
"MIT"
] | permissive | leandromet/Geoprocessamento---Geoprocessing | 6db2d34f309dc06c03ee5205e5e1a887513aee06 | 5dabaefeac15f8dda4b2c2f3c6642c22b6931ec4 | refs/heads/master | 2022-06-22T08:11:21.668468 | 2022-06-15T00:28:52 | 2022-06-15T00:28:52 | 12,163,639 | 2 | 1 | null | 2016-10-20T16:58:42 | 2013-08-16T17:02:32 | Python | UTF-8 | Python | false | false | 353 | py | from osgeo import ogr
import os
shapefile = "//mnt//hgfs//Biondo//GINF//Florestas_Parana//pr_300_f22.shp"
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource = driver.Open(shapefile, 0)
layer = dataSource.GetLayer()
for feature in layer:
geom = feature.GetGeometryRef()
pt = geom.Centroid()
pto = (pt.GetX(),pt.GetY())
print pto
| [
"leandro.biondo@florestal.gov.br"
] | leandro.biondo@florestal.gov.br |
da7150cfe46c579d88a15b291fd872dcc743dd23 | 5617ab91e2d2c2330b16bfe49eec0f4073dce439 | /migrations/versions/5c6a4d41a764_.py | e12df004329b84cf62defcbd0f74b6da96ccc14d | [] | no_license | NDesprez001/JWT_Login | 245379ccd0ebf35d4470d7ddee38ffad1e25b23b | 9f5f9479d78efa582e517aff17535af2a1d022d4 | refs/heads/master | 2022-06-22T09:52:37.888342 | 2020-02-21T17:50:45 | 2020-02-21T17:50:45 | 242,171,042 | 0 | 0 | null | 2022-05-25T02:46:55 | 2020-02-21T15:30:51 | Python | UTF-8 | Python | false | false | 786 | py | """empty message
Revision ID: 5c6a4d41a764
Revises:
Create Date: 2020-02-21 15:51:37.460355
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5c6a4d41a764'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=80), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('users')
# ### end Alembic commands ###
| [
"noshdesprez@gmail.com"
] | noshdesprez@gmail.com |
a7e930de693bcfd552d17184297b515ac319b430 | 20f9d10709eac148b462cf37b33d7b150ec580a6 | /maximisationApproach.py | 67829c09785ba27ed8c33f9a2c48580741b3b891 | [] | no_license | alan-turing-institute/directedCorePeripheryPaper | ace69d6b722d04704ac5bbeca9bdcdf67b8ad2b7 | dc507e6d109b86f5ecdab3bf3db8ad47b6073b65 | refs/heads/master | 2020-09-06T15:35:27.928105 | 2019-11-08T16:01:23 | 2019-11-08T16:01:23 | 220,466,962 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,847 | py | import networkx as nx
import numpy as np
import random as rd
from collections import Counter
from math import log
import time
# tested
def __considerMovement__(G, currentSolution, node, savedVars):
    """Score the four candidate group assignments for *node*.

    Returns a list of 4 values, one per group (0-3): the model log-likelihood
    obtained by moving *node* into that group while every other node keeps its
    current assignment.  Per the original author's note, these are not
    necessarily the actual likelihoods, but the differences between entries
    are preserved, so they can be compared to pick the best move.

    savedVars is the incremental-state dict (keys 'cs', 'numCats', 'ein',
    'numEdges', 'forwardBackward'); it is mutated temporarily during the
    evaluation but fully restored before returning.
    """
    n = len(G)
    cs = savedVars['cs']            # idealised 4x4 block pattern (0/1 entries)
    numCats = savedVars['numCats']  # group sizes; mutated below, then restored
    curGroup = currentSolution[node]
    # Temporarily remove the node from its current group.
    numCats[curGroup] -= 1
    selfLoop = node in G[node]
    result = []
    # Per-group counts of node's out-neighbours (forward) / in-neighbours (backward).
    forward, backward = savedVars['forwardBackward'][node]
    ein = savedVars['ein']
    # Subtract the pattern-consistent edges contributed by node's current group.
    for j in range(4):
        ein -= cs[curGroup][j]*forward[j]
        ein -= cs[j][curGroup]*backward[j]
    if selfLoop:
        if cs[curGroup][curGroup]:
            ein -= 1
    einOld = ein  # "ein" with node's contribution removed entirely
    for i in range(4):
        # Trial-insert the node into candidate group i and recompute counts.
        ein = einOld
        for j in range(4):
            ein += cs[i][j]*forward[j]
            ein += cs[j][i]*backward[j]
        if selfLoop:
            ein += cs[i][i]
        numCats[i] += 1
        eout = savedVars['numEdges']-ein
        # Tin: ordered node pairs the pattern marks as internal; Tout: the rest.
        Tin = (numCats[0]+numCats[1]+numCats[2])*numCats[1]
        Tin += numCats[2]*(numCats[2]+numCats[3])
        Tout = n*n-Tin
        l1 = 0
        # Two-density log-likelihood; each term guarded against log(0).
        if ein != 0:
            l1 += ein*log(ein/Tin)
        if ein != Tin:
            l1 += (Tin-ein)*log(1-ein/Tin)
        if eout != 0:
            l1 += eout*log(eout/Tout)
        if eout != Tout:
            l1 += (Tout-eout)*log(1-eout/Tout)
        result.append(l1)
        numCats[i] -= 1  # undo the trial insertion
    numCats[curGroup] += 1  # restore the node's real group count
    return result
# tested
def getEin(G, coms):
    """Count directed edges consistent with the idealised block pattern,
    given the partition *coms* (one group label 0-3 per node)."""
    pattern = __getCorrectStructure__()
    total = 0
    for src in G:
        if coms[src] == 3:
            # Row 3 of the pattern is all zeros: edges out of group 3 never count.
            continue
        row = pattern[coms[src]]
        total += sum(1 for dst in G[src] if row[coms[dst]])
    return total
# tested
def __updateVarsOnComChange__(savedVars, G, currentSolution, node, newCom):
    """Move *node* into group *newCom* and incrementally update the caches.

    Mutates in place: currentSolution (the partition) and savedVars's
    'numCats', 'forwardBackward' and 'ein' entries, keeping them consistent
    with the new assignment without a full recomputation.
    """
    oldGroup = currentSolution[node]
    currentSolution[node] = newCom
    # Group sizes.
    savedVars['numCats'][oldGroup] -= 1
    savedVars['numCats'][newCom] += 1
    cs = savedVars['cs']
    # Each out-neighbour of node sees its backward (in-neighbour) tally change.
    for x in G[node]:
        if x != node:
            savedVars['forwardBackward'][x][1][oldGroup] -= 1
            savedVars['forwardBackward'][x][1][newCom] += 1
    # Each in-neighbour of node sees its forward (out-neighbour) tally change.
    for x1 in G.in_edges(node):
        x = x1[0]
        if x != node:
            savedVars['forwardBackward'][x][0][oldGroup] -= 1
            savedVars['forwardBackward'][x][0][newCom] += 1
    coms = currentSolution
    # Re-count pattern-consistent edges leaving node...
    for y in G[node]:
        if node == y:
            continue
        savedVars['ein'] -= cs[oldGroup][coms[y]]
        savedVars['ein'] += cs[newCom][coms[y]]
    # ...and entering node (self-loops excluded here, handled once below).
    for y1 in G.in_edges(node):
        y = y1[0]
        if node == y:
            continue
        savedVars['ein'] -= cs[coms[y]][oldGroup]
        savedVars['ein'] += cs[coms[y]][newCom]
    # A self-loop contributes via the diagonal of the pattern, exactly once.
    if node in G[node]:
        savedVars['ein'] -= cs[oldGroup][oldGroup]
        savedVars['ein'] += cs[newCom][newCom]
# tested
def getForwardBackward(G, node, coms):
    """Tally, per group (0-3), the out-neighbours and in-neighbours of *node*.

    Self-loops are excluded from both tallies.  Returns (forward, backward)
    where forward[g] counts successors in group g and backward[g] counts
    predecessors in group g.
    """
    out_counts = [0] * 4
    in_counts = [0] * 4
    for succ in G[node]:
        if succ == node:
            continue
        out_counts[coms[succ]] += 1
    for pred, _ in G.in_edges(node):
        if pred != node:
            in_counts[coms[pred]] += 1
    return out_counts, in_counts
# tested
def getForwardBackwardAll(G, coms):
    """Per-node [forward, backward] group tallies for every node at once.

    Equivalent to calling getForwardBackward for each node: tallies[v][0][g]
    counts v's successors in group g, tallies[v][1][g] its predecessors.
    Self-loops are skipped.  Nodes are assumed to be labelled 0..n-1.
    """
    tallies = [[[0] * 4, [0] * 4] for _ in range(len(G))]
    for src in G:
        src_group = coms[src]
        for dst in G[src]:
            if dst == src:
                continue
            tallies[src][0][coms[dst]] += 1
            tallies[dst][1][src_group] += 1
    return tallies
def __likelihoodMaximisation__(G, initialComs):
    """Greedy likelihood ascent from *initialComs* (Newman-style sweeps).

    Repeats full sweeps over all nodes (via __likelihoodMaximisationHelper__)
    until a sweep no longer improves the likelihood.  Returns the best
    partition found and its likelihood.
    """
    best_coms = initialComs[:]
    best_like = __likelihood__(G, initialComs)
    improved = True
    while improved:
        candidates = set(range(len(G)))
        best_coms, best_like, improved = __likelihoodMaximisationHelper__(
            G, best_coms, best_like, candidates)
    return best_coms, best_like
# Basic tests
def __likelihoodMaximisationHelper__(G, maxSolution, currentMax, toConsider):
    """One full sweep of the greedy optimisation.

    Every node in *toConsider* is moved exactly once -- each pass commits the
    single best pending move, even if it worsens the likelihood -- and the
    best partition seen anywhere along the move sequence is kept.

    Returns (best partition, its likelihood, whether currentMax was improved).
    """
    change = False
    currentSol = maxSolution[:]
    currentLike = currentMax
    # Rebuild the incremental caches for the partition we are starting from.
    savedVars = __constructSavedVars__(G, currentSol)
    # Move each node once
    while len(toConsider) > 0:
        bestMove = -np.inf
        bestNode = None
        bestCom = None
        for node in toConsider:
            # Relative scores of the four groups for this node.
            solution = __considerMovement__(G, currentSol, node, savedVars)
            # Best group that differs from the node's current one.
            sortedSol = sorted(list(zip(solution, range(4))))
            if sortedSol[-1][1] != currentSol[node]:
                newCom = sortedSol[-1][1]
            else:
                newCom = sortedSol[-2][1]
            diff = solution[newCom]-solution[currentSol[node]]
            if diff > bestMove:
                bestMove = diff
                bestNode = node
                bestCom = newCom
        # Commit the best move; the node is retired from further moves.
        toConsider.remove(bestNode)
        # Keep the cached state consistent with the committed move.
        __updateVarsOnComChange__(savedVars, G, currentSol, bestNode, bestCom)
        # Track the best partition seen along the sequence of moves.
        currentLike += bestMove
        if currentLike > currentMax:
            currentMax = currentLike
            maxSolution = currentSol[:]
            change = True
    return maxSolution, currentMax, change
# Tested
def __likelihood__(G, coms):
    """Log-likelihood of the partition *coms* under the two-density model.

    Edges matching the idealised block pattern are "internal" (ein of Tin
    possible ordered pairs); all remaining pairs share a second density.
    """
    n = len(G)
    counts = Counter(coms)
    ein = getEin(G, coms)
    eout = G.number_of_edges() - ein
    # Ordered node pairs that the idealised pattern marks as internal.
    Tin = ((counts[0] + counts[1] + counts[2]) * counts[1]
           + counts[2] * (counts[2] + counts[3]))
    Tout = n * n - Tin
    l1 = 0
    # Sum the log-likelihood contributions, guarding every log(0) case.
    for hits, pairs in ((ein, Tin), (eout, Tout)):
        if hits != 0:
            l1 += hits * log(hits / pairs)
        if hits != pairs:
            l1 += (pairs - hits) * log(1 - hits / pairs)
    return l1
# Not tested (simple wrapper around networkx function)
def __convertToIntegers__(G):
nodes = sorted(G.nodes())
G1 = nx.convert_node_labels_to_integers(G, ordering='sorted')
return G1, nodes
# Not tested (simple routine)
def likelihoodMaximisation(G1, numberOfAttempts=10):
    """Run the greedy likelihood maximisation from several random starts.

    Each attempt starts from a uniform random 4-way assignment; the
    returned likelihood is cross-checked against an independent
    recomputation as a sanity assertion.

    :param G1: graph (arbitrary sortable node labels; relabelled internally)
    :param numberOfAttempts: number of random restarts
    :return: best assignment found, indexed by integer node id (positions
        follow the sorted order of G1's original labels)
    """
    G, mapping = __convertToIntegers__(G1)
    currentMax = -np.inf
    currentSolution = None
    for x in range(numberOfAttempts):
        # NOTE: the comprehension variable shadows the loop variable `x`;
        # harmless here, but easy to trip over when editing
        initialComs = [rd.randint(0, 3) for x in range(len(G1))]
        coms, likelihood = __likelihoodMaximisation__(G, initialComs)
        # Compute likelihood independently as a test
        likelihoodTest = __likelihood__(G, coms)
        assert(abs(likelihood-likelihoodTest) < 10**(-8))
        if likelihoodTest > currentMax:
            currentMax = likelihoodTest
            currentSolution = coms
    return currentSolution
# tested
def __estimatePandQ__(G, cats, savedVars):
    """Estimate the edge densities of the block model.

    p is the density of edges inside the allowed blocks, q the density of
    edges everywhere else; both come from counts cached in *savedVars*.
    (*cats* is unused but kept for interface compatibility.)

    :param G: graph; only its length (node count) is used
    :param cats: unused
    :param savedVars: dict with 'numCats' (per-community sizes, indexable
        by 0-3), 'ein' (edges inside allowed blocks) and 'numEdges'
    :return: the pair (p, q)
    """
    counts = savedVars['numCats']
    n = len(G)
    inside_edges = savedVars['ein']
    outside_edges = savedVars['numEdges'] - inside_edges
    # ordered node pairs inside / outside the allowed blocks
    inside_pairs = (counts[0] + counts[1] + counts[2]) * counts[1]
    inside_pairs += counts[2] * (counts[2] + counts[3])
    outside_pairs = n * n - inside_pairs
    return inside_edges / inside_pairs, outside_edges / outside_pairs
# does not need a test
def __getCorrectStructure__():
    """Return the fixed 4x4 0/1 block matrix of the assumed structure.

    Entry [i][j] marks which community pairs are connected under the model.
    """
    return [
        [0, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 1, 1, 1],
        [0, 0, 0, 0],
    ]
# No need to test
def hillClimbApproach(G1, reps=10):
    """Best-of-*reps* hill-climb community detection.

    :param G1: graph with arbitrary (sortable) node labels
    :param reps: number of random-restart replicates
    :return: the highest-likelihood assignment found, indexed by the
        integer node ids of the relabelled graph (None when reps < 1)
    """
    # convert graph to deal with non integer graphs
    G, mapping = __convertToIntegers__(G1)
    curBest = -np.inf
    curPart = None  # avoids a NameError when reps < 1
    for x in range(reps):
        result = __hillClimbApproachHelper__(G)
        l1 = __likelihood__(G, result)
        if l1 > curBest:
            curBest = l1
            curPart = result
    # BUG FIX: an unreachable second `return list(zip(mapping, curPart))`
    # used to follow this return and has been removed. It suggests the
    # author may once have intended to map labels back to the originals --
    # confirm before reinstating, since callers currently receive the
    # integer-indexed list.
    return curPart
# No test (wrapper around other routines)
def __constructSavedVars__(G, coms):
    """Build the cache of incremental statistics used by the movement
    routines.

    :param G: graph with integer nodes
    :param coms: current community assignment (0-3 per node)
    :return: dict with the edge count ('numEdges'), per-community sizes
        ('numCats'), edges inside the allowed blocks ('ein'), the fixed
        block matrix ('cs') and the forward/backward arrays
        ('forwardBackward')
    """
    savedVars = {}
    savedVars['numEdges'] = G.number_of_edges()
    savedVars['numCats'] = Counter(coms)
    savedVars['ein'] = getEin(G, coms)
    savedVars['cs'] = __getCorrectStructure__()
    # Make the initial forward and backward arrays
    savedVars['forwardBackward']=getForwardBackwardAll(G,coms)
    return savedVars
# No test just a wrapper
def __hillClimbApproachHelper__(G):
    """Single hill-climb replicate from a uniform random 4-way assignment.

    Repeatedly re-estimates (p, q) and moves every node (in random order)
    to its best-scoring community until a full pass makes no change, or
    the iteration cap is hit.

    :param G: graph with integer nodes 0..n-1
    :return: the community assignment (0-3 per node) at convergence
    """
    n = len(G)
    # initial guess on the groups
    coms = [rd.randint(0, 3) for i in range(n)]
    # save some basic data
    savedVars = __constructSavedVars__(G, coms)
    order = list(range(n))
    # hard cap so a non-converging climb cannot loop forever
    for runIn in range(5000):
        # update p and q
        p, q = __estimatePandQ__(G, coms, savedVars)
        # keep p >= q so the algorithm doesn't get stuck somewhere silly
        if p < q:
            p, q = q, p
        # NOTE(review): p and q are not passed to __considerMovement__
        # below -- presumably it reads equivalent state via savedVars or
        # recomputes; verify before relying on the swap above.
        numChanges = 0
        rd.shuffle(order)
        for node in order:
            scores = __considerMovement__(G, coms, node, savedVars)
            newCom = max(list(zip(scores, range(4))))[1]
            if coms[node] != newCom:
                numChanges += 1
                # NOTE(review): this call appears to mutate `coms` in place
                # (the assert below depends on it) -- confirm in
                # __updateVarsOnComChange__
                __updateVarsOnComChange__(savedVars, G, coms, node, newCom)
                assert(coms[node] == newCom)
        if numChanges == 0:
            break
    if numChanges>0:
        print('Warning this hill climb replicate did not converge')
    return coms
| [
"ande.elliott@gmail.com"
] | ande.elliott@gmail.com |
97f32b326bf8d8108e2788dbb459ad7627bdc16d | 6a79496b3e8cd02b7cde580629e1e6dafdc7afb9 | /eight_ball/eight_ball_gui.py | ff766b7ade560ea5ec81a54ae5a5c62c1e7c43e2 | [] | no_license | c-pari22/beginner-projects | 264daa319d8f86995b8c731a167760e1939b4031 | 70983d08e38139804e09802311af2d838f54e81a | refs/heads/master | 2020-12-24T20:42:21.820671 | 2016-05-20T00:04:02 | 2016-05-20T00:04:02 | 59,250,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | import tkinter
import eight_ball
import random
import time
from tkinter import messagebox
root = tkinter.Tk()
def displayAnswer():
    """Show a "searching" message, then pop up a random 8-ball response.

    Reads the module-level `question` entry; only answers (and clears the
    entry) when the user actually typed something.
    """
    # NOTE(review): assumes eight_ball.responses has at least 21 entries
    # (indices 0-20) -- confirm against eight_ball.py.
    rand_choice = random.randint(0, 20)
    text = tkinter.Text(root)
    text.insert(tkinter.INSERT, "Searching the Depths of the Universe for the Answer......")
    text.pack()
    time.sleep(0.5)
    # BUG FIX: was `is not ""` -- an identity check that is unreliable for
    # strings (and a SyntaxWarning on modern Python); use equality instead.
    if question.get() != "":
        text.pack_forget()
        tkinter.messagebox.showinfo("The Answer You Seek", eight_ball.responses[rand_choice])
        question.delete(0, tkinter.END)
# --- window layout: button, question entry, prompt label -----------------
root.minsize(width = 1300, height = 700)
# big button that triggers displayAnswer
play_button = tkinter.Button(root, width = 13, height = 3, fg = "red", text = "Find the Answer", font = 108, bg = '#00ffff', command = displayAnswer)
play_button.pack()
# entry the user types the question into (read by displayAnswer)
question = tkinter.Entry(root, width = 40, textvariable = "", bd = 5)
question.pack()
label = tkinter.Label(text = "Ask Your Question Here!")
label.pack()
# blocks until the window is closed
root.mainloop()
| [
"cibi.pari@berkeley.edu"
] | cibi.pari@berkeley.edu |
5ed81142cd358de803a556ca744bc771369920b6 | 3db7d6e2aea7c47b68776443ba27f9fa68546e35 | /py/h2o_perf.py | 4853b21b06ac9dda9248be8276096289f75a268d | [
"Apache-2.0"
] | permissive | alixaxel/h2o | edb349168d1856ec0f6d2c6c33a4117e2229db24 | 0868c9df624edb3fd7d946dcd7d5092499bf96cc | refs/heads/master | 2021-01-16T22:44:49.012401 | 2013-04-20T22:55:36 | 2013-04-20T22:55:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,673 | py | import logging, psutil
import h2o
import time, os
class PerfH2O(object):
# so a test can create multiple logs
def change_logfile(self, subtest_name):
# change to another logfile after we've already been going
# just want the base name if we pointed to it from somewhere else
short_subtest_name = os.path.basename(subtest_name)
blog = 'benchmark_' + short_subtest_name + '.log'
print "\nSwitch. Now appending to %s." % blog, "Between tests, you may want to delete it if it gets too big"
# http://stackoverflow.com/questions/5296130/restart-logging-to-a-new-file-python
# manually reassign the handler
logger = logging.getLogger()
logger.handlers[0].stream.close()
logger.removeHandler(logger.handlers[0])
file_handler = logging.FileHandler(blog)
file_handler.setLevel(logging.CRITICAL) # like the init
formatter = logging.Formatter("%(asctime)s %(message)s") # date/time stamp
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def init_logfile(self, subtest_name):
# default should just append thru multiple cloud builds.
# I guess sandbox is cleared on each cloud build. so don't build there.
# just use local directory? (python_test_name global set below before this)
short_subtest_name = os.path.basename(subtest_name)
blog = 'benchmark_' + short_subtest_name + '.log'
self.subtest_name = short_subtest_name
print "\nAppending to %s." % blog, "Between tests, you may want to delete it if it gets too big"
logging.basicConfig(filename=blog,
# we use CRITICAL for the benchmark logging to avoid info/warn stuff
# from other python packages
level=logging.CRITICAL,
format='%(asctime)s %(message)s') # date/time stamp
def __init__(self, python_test_name):
short_python_test_name = os.path.basename(python_test_name)
self.python_test_name = short_python_test_name
self.init_logfile(short_python_test_name)
self.MINCACHETOPRINT = 7
self.JSTACKINTERVAL = 20
self.IOSTATSINTERVAL = 10
# initialize state used for spot rate measurements during polling
statsList = ['read_bytes','write_bytes','read_time','write_time',
'bytes_sent','bytes_recv','dropin','dropout','errin','errout']
self.pollStats = {}
for s in statsList:
self.pollStats[s] = 0
self.pollStats['count'] = 0
self.snapshotTime = time.time()
self.pollStats['lastJstackTime'] = self.snapshotTime
self.pollStats['lastIOstatsTime'] = self.snapshotTime
self.pollStats['time'] = self.snapshotTime
self.elapsedTime = 0
def save(self, cpu_percent=None, dioc=None, nioc=None, jstack=None, iostats=None, snapshotTime=None):
# allow incremental update, or all at once
if cpu_percent:
self.pollStats['cpu_percent'] = cpu_percent
if dioc:
self.pollStats['read_bytes'] = dioc.read_bytes
self.pollStats['write_bytes'] = dioc.write_bytes
self.pollStats['read_time'] = dioc.read_time
self.pollStats['write_time'] = dioc.write_time
if nioc:
self.pollStats['bytes_sent'] = nioc.bytes_sent
self.pollStats['bytes_recv'] = nioc.bytes_recv
# self.pollStats['dropin'] = nioc.dropin
# self.pollStats['dropout'] = nioc.dropout
# self.pollStats['errin'] = nioc.errin
# self.pollStats['errout'] = nioc.errout
if jstack:
self.pollStats['lastJstackTime'] = self.snapshotTime
if iostats:
self.pollStats['lastIOstatsTime'] = self.snapshotTime
# this guy is the 'final'
if snapshotTime:
self.pollStats['time'] = self.snapshotTime
self.pollStats['count'] += 1
# just log a message..useful for splitting tests of files
def message(self, l):
logging.critical(l)
def log_jstack(self, initOnly=False):
# only do jstack if >= JSTACKINTERVAL seconds since lastLine one
if ((self.snapshotTime - self.pollStats['lastJstackTime']) < self.JSTACKINTERVAL):
return
# complicated because it's all one big string
# and lots of info we don't want.
jstackResult = h2o.nodes[0].jstack()
node0 = jstackResult['nodes'][0]
stack_traces = node0["stack_traces"]
# all one string
stackLines = stack_traces.split('\n')
# create cache
def init_cache(self):
self.cache = []
self.cacheHasJstack = False
self.cacheHasTCP = False
def log_and_init_cache(self):
if self.cacheHasTCP or (not self.cacheHasJstack and len(self.cache) >= self.MINCACHETOPRINT):
for c in self.cache:
logging.critical(c)
init_cache(self)
init_cache(self)
# pretend to start at stack trace break
lastLine = ""
for s in stackLines:
# look for gaps, if 7 lines in your cache, print them
if (lastLine==""):
log_and_init_cache(self)
else:
# put a nice "#" char for grepping out jstack stuff
self.cache.append("#" + s)
# always throw it away later if JStack cache
if 'JStack' in s:
self.cacheHasJstack = True
# always print it if it mentions TCP
if 'TCP' in s:
self.cacheHasTCP = True
lastLine = s
# check last one
log_and_init_cache(self)
self.pollStats['lastJstackTime'] = self.snapshotTime
self.save(jstack=True)
def log_cpu(self, snapShotTime, initOnly=False):
cpu_percent = psutil.cpu_percent(percpu=True)
l = "%s %s" % ("cpu_percent:", cpu_percent)
if not initOnly:
logging.critical(l)
self.save(cpu_percent=cpu_percent)
def log_disk(self, initOnly=False):
dioc = psutil.disk_io_counters()
diocSpotRdMBSec = (dioc.read_bytes - self.pollStats['read_bytes']) / (1e6 * self.elapsedTime)
diocSpotWrMBSec = (dioc.write_bytes - self.pollStats['write_bytes']) / (1e6 * self.elapsedTime)
diocSpotRdTime = (dioc.read_time - self.pollStats['read_time']) / 1e3
diocSpotWrTime = (dioc.write_time - self.pollStats['write_time']) / 1e3
l = "Disk. Spot RdMB/s: {:>6.2f} Spot WrMB/s: {:>6.2f} {!s} {!s} elapsed: {:<6.2f}".format(
diocSpotRdMBSec, diocSpotWrMBSec, diocSpotRdTime, diocSpotWrTime, self.elapsedTime)
if not initOnly:
logging.critical(l)
self.save(dioc=dioc)
def log_network(self, initOnly=False):
nioc = psutil.network_io_counters()
niocSpotSentMBSec = (nioc.bytes_sent - self.pollStats['bytes_sent'])/(1e6 * self.elapsedTime)
niocSpotRecvMBSec = (nioc.bytes_recv - self.pollStats['bytes_recv'])/(1e6 * self.elapsedTime)
# niocSpotDropIn = nioc.dropin - self.pollStats['dropin']
# niocSpotDropOut = nioc.dropout - self.pollStats['dropout']
# niocSpotErrIn = nioc.errin - self.pollStats['errin']
# niocSpotErrOut = nioc.errout - self.pollStats['errout']
# stuff doesn't exist on ec2?
niocSpotDropIn = 0
niocSpotDropOut = 0
niocSpotErrIn = 0
niocSpotErrOut = 0
l = "Network. Spot RecvMB/s: {:>6.2f} Spot SentMB/s: {:>6.2f} {!s} {!s} {!s} {!s}".format(
niocSpotRecvMBSec, niocSpotSentMBSec,\
niocSpotDropIn, niocSpotDropOut, niocSpotErrIn, niocSpotErrOut)
if not initOnly:
logging.critical(l)
self.save(nioc=nioc)
def log_iostats(self, initOnly=False):
if ((self.snapshotTime - self.pollStats['lastJstackTime']) < self.IOSTATSINTERVAL):
return
DO_IO_RW = True
DO_IOP = False
node = h2o.nodes[0]
stats = node.iostatus()
### h2o.verboseprint("log_iostats:", h2o.dump_json(stats))
histogram = stats['histogram']
def log_window(w):
if k['window'] == w:
i_o = k['i_o']
node = k['cloud_node_idx']
if k['r_w'] == 'read':
r_w = 'rd'
elif k['r_w'] == 'write':
r_w = 'wr'
else:
r_w = k['r_w']
for l,v in k.iteritems():
fmt = "iostats: window{:<2d} node {:d} {:s} {:s} {:s} MB/sec: {:6.2f}"
if 'peak' in l:
## logging.critical(fmt.format(w, node, i_o, r_w, "peak", (v/1e6)))
pass
if 'effective' in l:
logging.critical(fmt.format(w, node, i_o, r_w, "eff.", (v/1e6)))
if DO_IO_RW:
print "\nlog_iotstats probing node:", str(node.addr) + ":" + str(node.port)
for k in histogram:
### print k
log_window(10)
### log_window(30)
# we want to sort the results before we print them, so grouped by node
if DO_IOP:
iopList = []
raw_iops = stats['raw_iops']
### print
for k in raw_iops:
### print k
node = k['node']
i_o = k['i_o']
r_w = k['r_w']
size = k['size_bytes']
blocked = k['blocked_ns']
duration = k['duration_ms'] * 1e6 # convert to ns
if duration != 0:
blockedPct = "%.2f" % (100 * blocked/duration) + "%"
else:
blockedPct = "no duration"
iopMsg = "node: %s %s %s %d bytes. blocked: %s" % (node, i_o, r_w, size, blockedPct)
iopList.append([node, iopMsg])
iopList.sort(key=lambda iop: iop[0]) # sort by node
totalSockets = len(iopList)
# something wrong if 0?
if totalSockets == 0:
print "WARNING: is something wrong with this io stats response?"
print h2o.dump_json(stats)
logging.critical("iostats: " + "Total sockets: " + str(totalSockets))
for i in iopList:
logging.critical("iostats:" + i[1])
# don't save anything
self.save(iostats=True)
# call with init?
def get_log_save(self, benchmarkLogging=None, initOnly=False):
if not benchmarkLogging:
return
self.snapshotTime = time.time()
self.elapsedTime = self.snapshotTime - self.pollStats['time']
logEnable = {
'cpu': False,
'disk': False,
'network': False,
'jstack': False,
'iostats': False,
}
for e in benchmarkLogging:
logEnable[e] = True
if logEnable['jstack']:
self.log_jstack(initOnly=initOnly)
if logEnable['cpu']:
self.log_cpu(initOnly)
if logEnable['iostats']:
self.log_iostats(initOnly=initOnly)
# these do delta stats. force init if no delta possible
forceInit = self.pollStats['count'] == 0
if logEnable['disk']:
self.log_disk(initOnly=initOnly or forceInit)
if logEnable['network']:
self.log_network(initOnly=initOnly or forceInit)
# done!
self.save(snapshotTime=True)
| [
"kevin@0xdata.com"
] | kevin@0xdata.com |
ab39ec8dc7ed3dc0a971ff1d720fcf1da8835483 | 5a01497e7c29e2488b6a4cb0478405239375eb66 | /apetools/commons/broadcaster.py | c2cb2070a7ee15ecdd67b7b8e8a1da9bc821e7bf | [
"Apache-2.0"
] | permissive | russell-n/oldape | 8b4d9e996181dc1c7175f72d75c6193443da591b | b4d1c77e1d611fe2b30768b42bdc7493afb0ea95 | refs/heads/master | 2021-05-30T20:02:18.895922 | 2016-03-27T04:38:18 | 2016-03-27T04:38:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,651 | py |
from apetools.baseclass import BaseClass
class Broadcaster(BaseClass):
    """
    A broadcaster sends a single datum to multiple targets (receivers).

    Receivers are callables. Permanent receivers persist across `set_up`
    calls, while `temp_receivers` are replaced on each `set_up`.
    """
    def __init__(self, receivers):
        """
        :param:

         - `receivers`: an iterable of callable receivers (or a single one)
        """
        super(Broadcaster, self).__init__()
        self._receivers = None
        self.receivers = receivers
        self._temp_receivers = None
        return
    @property
    def receivers(self):
        """
        :return: list of receivers of broadcast (None after `reset`)
        """
        return self._receivers
    @receivers.setter
    def receivers(self, new_receivers):
        """
        :param:

         - `new_receivers`: iterable of callable receivers (or single receiver)
        """
        try:
            self._receivers = [receiver for receiver in new_receivers]
        except TypeError as error:
            # not iterable: treat it as a single receiver
            self._receivers = [new_receivers]
            self.logger.debug(error)
        return
    @property
    def temp_receivers(self):
        """
        :return: iterable of receivers to remove at next set-up (lazily [])
        """
        if self._temp_receivers is None:
            self._temp_receivers = []
        return self._temp_receivers
    @temp_receivers.setter
    def temp_receivers(self, new_receivers):
        """
        :param:

         - `new_receivers`: iterable of callable receivers (or single receiver)
        """
        try:
            self._temp_receivers = [receiver for receiver in new_receivers]
        except TypeError as error:
            # not iterable: treat it as a single receiver
            self._temp_receivers = [new_receivers]
            self.logger.debug(error)
        return
    def subscribe(self, receiver):
        """
        Adds a new receiver to the receivers (if it isn't already there)
        """
        if receiver not in self.receivers:
            self.logger.debug("subscribing {0}".format(receiver))
            self.receivers.append(receiver)
        return
    def unsubscribe(self, receiver):
        """
        Removes the receiver (identity comparison) from the receivers.

        :param:

         - `receiver`: a receiver object to remove
        """
        self._receivers = [r for r in self._receivers if r is not receiver]
        return
    def set_up(self, targets=None):
        """
        The targets are removed the next time this is called.

        :param:

         - `targets`: a set of temporary targets

        :postcondition: reset method for each permanent receiver called
        """
        self._temp_receivers = None
        if targets is not None:
            self.temp_receivers = targets
        for receiver in self.receivers:
            try:
                receiver.reset()
            except AttributeError as error:
                # receivers without a reset() are tolerated (best-effort)
                self.logger.debug(error)
                self.logger.debug("Unable to reset {0}".format(receiver))
        return
    def reset(self):
        """
        :postcondition: self.receivers is None

        NOTE: the receivers property has no lazy re-init, so calling or
        iterating the broadcaster after reset() raises TypeError until
        receivers is reassigned.
        """
        self._receivers = None
        return
    def __contains__(self, receiver):
        """
        :param:

         - `receiver`: an object

        :rtype: Boolean
        :return: True if item in receivers
        """
        return receiver in self.receivers
    def __iter__(self):
        """
        :return: iterator over self.receivers
        """
        return iter(self.receivers)
    def __call__(self, datum):
        """
        Calls each receiver with the `datum`

        :param:

         - `datum`: A single data item
        """
        for receiver in self.receivers:
            receiver(datum)
        return
    # end class Broadcaster
| [
"necromuralist@google.com"
] | necromuralist@google.com |
30119e16f12f09d9fa55d967a0bb62f049303183 | 2f5ab43956b947b836e8377370d786e5ee16e4b0 | /sklearn2code/sym/test/test_function.py | c0ae2740f50a0419fbe09bbe835b40e8516be96a | [
"MIT"
] | permissive | modusdatascience/sklearn2code | b175fb268fa2871c95f0e319f3cd35dd54561de9 | 3ab82d82aa89b18b18ff77a49d0a524f069d24b9 | refs/heads/master | 2022-09-11T06:16:37.604407 | 2022-08-24T04:43:59 | 2022-08-24T04:43:59 | 115,747,326 | 4 | 2 | MIT | 2018-05-01T00:11:51 | 2017-12-29T19:05:03 | Python | UTF-8 | Python | false | false | 3,495 | py | from sklearn2code.sym.function import Function
from nose.tools import assert_list_equal, assert_equal
from operator import __add__, __mul__, __sub__
from six import PY3
from sklearn2code.sym.expression import RealVariable, RealNumber
def test_map_symbols():
    # map_symbols should rename 'x' to 'q' in inputs, call arguments and
    # outputs alike, leaving 'y' and the call output 'z' untouched
    fun0 = Function(('x', 'y'), tuple(), (RealVariable('x') + RealVariable('y'),))
    fun = Function(('x', 'y'), (((('z',), (fun0, ('x','y')))),), (RealVariable('x') / RealVariable('z'),))
    mapped_fun = fun.map_symbols({'x': 'q'})
    assert_list_equal(list(mapped_fun.inputs), list(map(RealVariable, ('q', 'y'))))
    assert_equal(set(mapped_fun.calls[0][1][1]), set(map(RealVariable, ('q', 'y'))))
    assert_equal(mapped_fun.outputs[0], RealVariable('q') / RealVariable('z'))
def test_compose():
    # composing fun with fun0 should record fun0 as the generated call,
    # take its inputs, and keep fun's outputs reachable via the call's
    # output symbols
    fun0 = Function('x', tuple(), (RealVariable('x'), RealNumber(1) - RealVariable('x')))
    fun = Function(('x', 'y'), tuple(), (RealVariable('x') / RealVariable('y'),))
    composed_fun = fun.compose(fun0)
    assert_equal(composed_fun.calls[0][1][0], fun0)
    assert_equal(composed_fun.inputs, fun0.inputs)
    assert_equal(fun.outputs, composed_fun.map_output_symbols(dict(zip(composed_fun.calls[0][0], fun.inputs))))
def test_from_expressions():
    # inputs should be inferred from the free variables of the expressions
    fun = Function.from_expressions((RealVariable('x'), RealVariable('x') + RealVariable('y')))
    assert_equal(fun, Function(('x', 'y'), tuple(), (RealVariable('x'), RealVariable('x') + RealVariable('y'))))
def test_trim():
    # trim should prune everything the outputs don't need: the unused call
    # output 'z' and the nested addition feeding it disappear, keeping only
    # the path producing 'w'
    fun0 = Function('x', ((('u',), (Function.from_expression(RealVariable('x0') + RealVariable('x1')), ('x', 'x'))),),
                    (RealVariable('u'), RealNumber(1) - RealVariable('x')))
    fun = Function(('x', 'y'), ((('z','w'), (fun0, ('y',))),), (RealVariable('x') / RealVariable('w'),)).trim()
    assert_equal(fun.inputs, (RealVariable('x'), RealVariable('y')))
    assert_equal(fun.outputs, (RealVariable('x') / RealVariable('w'),))
    assert_equal(fun.calls, (((RealVariable('w'),), (Function(('x', ), tuple(), (RealNumber(1)-RealVariable('x'),)), (RealVariable('y'),))),))
class TestOps(object):
    # container that add_op (below) populates with one generated
    # test_<op> method per arithmetic operator
    pass
def add_op(op):
    """Attach a generated test to TestOps checking that Function supports
    *op* against constants and other Functions: the operator applies to
    outputs only, while inputs and calls pass through unchanged."""
    def test_op(self):
        fun0 = Function(('x', 'y'), tuple(), (RealVariable('x') + RealVariable('y'),))
        fun = Function(('x', 'y'), (((('z',), (fun0, ('x','y')))),), (RealVariable('x') / RealVariable('z'),))
        fun_op_two = op(fun, RealNumber(2))
        assert_equal(fun_op_two.outputs[0], op(RealVariable('x') / RealVariable('z'), RealNumber(2)))
        two_op_fun = op(RealNumber(2), fun)
        assert_equal(two_op_fun.outputs[0], op(RealNumber(2), RealVariable('x') / RealVariable('z')))
        fun_op_fun = op(fun, fun)
        assert_equal(fun_op_fun.outputs[0], op(RealVariable('x') / RealVariable('z'), RealVariable('x') / RealVariable('z')))
        assert_equal(fun_op_fun.inputs, fun.inputs)
        assert_equal(fun_op_fun.calls, fun.calls)
    # name the generated test after the operator and attach it to TestOps
    test_name = 'test_%s' % op.__name__.strip('__')
    test_op.__name__ = test_name
    setattr(TestOps, test_name, test_op)
# register the operator tests; the division operator differs by version
add_op(__add__)
add_op(__mul__)
add_op(__sub__)
if PY3:
    from operator import __truediv__  # @UnresolvedImport
    add_op(__truediv__)
else:
    from operator import __div__  # @UnresolvedImport
    add_op(__div__)
if __name__ == '__main__':
    # Run the tests in this file directly via nose.
    import sys
    import nose
    module_name = sys.modules[__name__].__file__
    result = nose.run(argv=[sys.argv[0],
                            module_name,
                            '-s', '-v'])
| [
"jcrudy@gmail.com"
] | jcrudy@gmail.com |
954bec00dfac58eba355559facce8658d1886087 | fadb149e0505d54859635a22d96fc709c4303184 | /wishlist/manage.py | 42afca24aaa85e9196b4c2733170035e44f185ac | [] | no_license | mekdeshub123/Travel_wishlist_part2 | 42cca06b40f983f6c0837712d9f5212ba5a66bd9 | f956ea8fa260b971561a523eff8a6732f6bba60d | refs/heads/master | 2021-09-27T05:20:38.071308 | 2020-04-10T04:29:04 | 2020-04-10T04:29:04 | 250,694,918 | 0 | 0 | null | 2021-09-22T18:48:33 | 2020-03-28T02:13:21 | Python | UTF-8 | Python | false | false | 798 | py | #This file also a convenience script that allows to run administrative
# tasks, in the same way django-admin does.
# manage.py is also easy to use when working on a single Django project.
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility for the wishlist project."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wishlist.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as import_error:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from import_error
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"magie"
] | magie |
7da008d241eb7b2ff35481c57b1b33af6d9d272b | 09f5522ddcd99b32a2689331cdee9a81674e5be2 | /카드놀이_2511.py | a0901869906915dc9452511858ba6c39f91ae155 | [] | no_license | Lyuhitto/solved_BOJ_py | cb492663fc903a968a06019d59e12d1cd863b4ce | 7607cae2f4bfdc3fe91775450f76f6cfb551feb6 | refs/heads/master | 2023-03-14T00:35:10.018298 | 2021-03-12T01:42:11 | 2021-03-12T01:42:11 | 263,941,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | """
TODO: write this up in Notion (노션에 적기)
"""
import sys
# BOJ 2511: read each player's cards (one space-separated line of ints per
# player); higher card wins the round for 3 points, a draw gives 1 each.
r = sys.stdin.readline
a_card = list(map(int, r().split()))
b_card = list(map(int, r().split()))
a_score = 0
b_score = 0
win = 'D'
card_round = []  # outcome per round: 'A' wins, 'B' wins, 'D' draw
# BUG FIX: the final tie-break loop used to reuse `r` as its index,
# shadowing the readline alias above; loop variables renamed.
for i in range(len(a_card)):
    if a_card[i] > b_card[i]:
        card_round.append('A')
        a_score += 3
    elif a_card[i] < b_card[i]:
        card_round.append('B')
        b_score += 3
    else:
        card_round.append('D')
        a_score += 1
        b_score += 1
if a_score > b_score:
    win = 'A'
elif a_score < b_score:
    win = 'B'
else:
    # Tie on points: whoever won the most recent non-draw round wins;
    # if every round was a draw the result stays 'D'.
    for outcome in reversed(card_round):
        if outcome != 'D':
            win = outcome
            break
print(f"{a_score} {b_score}")
print(win)
| [
"liany0616@gmail.com"
] | liany0616@gmail.com |
44bf8b05344a86a1ea5b4bdb9fc2120490c5829b | 0c7b4537db085bec8a30bd3e3845c50ba691e1dd | /First 4 weeks/condicoes.py | c418c0923e5b021d7e0529f2a8d5571125eaa35c | [] | no_license | andrademarcos/python-usp-coursera | 1b67a5dafa24816cebdea16983f14b139637ca0d | 12e63ac58b74102e965224898b0ba35568286b45 | refs/heads/master | 2021-05-21T03:27:35.413227 | 2020-04-02T17:33:29 | 2020-04-02T17:33:29 | 252,522,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | # -*- coding:utf-8 -*-
# Water boils above 100 degrees C (at sea level).
temperatura = 102
# BUG FIX: these names were only bound inside the if-branch, so the print
# below raised a NameError whenever temperatura <= 100; give defaults first.
aguaFerve = False
evaporação = None
if temperatura > 100:
    aguaFerve = True
    evaporação = "muito rápida"
print(aguaFerve)
| [
"marcos.andrade@protonmail.com"
] | marcos.andrade@protonmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.