content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
import os
from awacs.aws import Policy, Allow, Statement, Principal, Action
from cfn_encrypt import Encrypt, EncryptionContext, SecureParameter, GetSsmValue
from troposphere import (Template, iam, GetAtt, Join, Ref, logs, Output, Sub, Parameter, awslambda,
Base64, Export)
from sys import argv
# Enable the example resources when any CLI argument contains "-we".
do_example = any('-we' in arg for arg in argv)
t = Template()
# ARN of the KMS key the Lambdas will use for encrypt/decrypt operations.
kms_key_arn = t.add_parameter(Parameter(
    "KmsKeyArn",
    Type="String",
    Description="KMS alias ARN for lambda",
))
if do_example:
    # Only declared when example resources are generated (-we flag).
    plain_text = t.add_parameter(Parameter(
        "PlainText",
        Type="String",
        Description="Text that you want to encrypt ( Hello World )",
        Default="Hello World",
        NoEcho=True
    ))
# Create loggroup
# Pre-created log groups (14-day retention); names must match the
# FunctionName values of the Lambda functions declared below.
log_group_ssm = t.add_resource(logs.LogGroup(
    "LogGroupSsm",
    LogGroupName=Join("", ["/aws/lambda/", Join("-", [Ref("AWS::StackName"), "ssm"])]),
    RetentionInDays=14
))
log_group_get_ssm_value = t.add_resource(logs.LogGroup(
    "LogGroupGetSsmValue",
    LogGroupName=Join("", ["/aws/lambda/", Join("-", [Ref("AWS::StackName"), "get-ssm-value"])]),
    RetentionInDays=14
))
log_group_simple = t.add_resource(logs.LogGroup(
    "LogGroupSimple",
    LogGroupName=Join("", ["/aws/lambda/", Join("-", [Ref("AWS::StackName"), "simple"])]),
    RetentionInDays=14
))
def lambda_from_file(python_file):
    """
    Read a python source file and wrap its contents as an inline
    awslambda.Code object (ZipFile body, lines re-joined with newlines).
    :param python_file: path of the .py file to embed
    :return: awslambda.Code suitable for a troposphere Function
    """
    with open(python_file, 'r') as source:
        source_lines = source.read().splitlines()
    return awslambda.Code(ZipFile=Join('\n', source_lines))
# Inline policy granting kms:Encrypt on the supplied key only.
kms_policy = iam.Policy(
    PolicyName="encrypt",
    PolicyDocument=Policy(
        Version="2012-10-17",
        Statement=[
            Statement(
                Effect=Allow,
                Action=[
                    Action("kms", "Encrypt"),
                ],
                Resource=[Ref(kms_key_arn)]
            )
        ],
    )
)
# Inline policy allowing create/delete of SSM parameters in this
# account/region plus the (account-wide) DescribeParameters call.
ssm_policy = iam.Policy(
    PolicyName="ssm",
    PolicyDocument=Policy(
        Version="2012-10-17",
        Statement=[
            Statement(
                Effect=Allow,
                Action=[
                    Action("ssm", "PutParameter"),
                    Action("ssm", "DeleteParameter"),
                ],
                Resource=[Join("", ["arn:aws:ssm:", Ref("AWS::Region"), ":", Ref("AWS::AccountId"), ":parameter/*"])]
            ),
            Statement(
                Effect=Allow,
                Action=[
                    Action("ssm", "DescribeParameters")
                ],
                # DescribeParameters does not support resource-level scoping.
                Resource=["*"]
            )
        ],
    )
)
# Execution role for the simple encrypt Lambda: basic logging + kms:Encrypt.
encrypt_lambda_role = t.add_resource(iam.Role(
    "EncryptLambdaRole",
    AssumeRolePolicyDocument=Policy(
        Version="2012-10-17",
        Statement=[
            Statement(
                Effect=Allow,
                Principal=Principal("Service", "lambda.amazonaws.com"),
                Action=[Action("sts", "AssumeRole")]
            )
        ]),
    Path="/",
    ManagedPolicyArns=["arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"],
    Policies=[
        kms_policy
    ]
))
# Role for the SSM parameter Lambda: needs encrypt plus parameter CRUD.
ssm_lambda_role = t.add_resource(iam.Role(
    "SsmLambdaRole",
    AssumeRolePolicyDocument=Policy(
        Version="2012-10-17",
        Statement=[
            Statement(
                Effect=Allow,
                Principal=Principal("Service", "lambda.amazonaws.com"),
                Action=[Action("sts", "AssumeRole")]
            )
        ]),
    Path="/",
    ManagedPolicyArns=["arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"],
    Policies=[
        kms_policy,
        ssm_policy
    ]
))
# Role for the get-ssm-value Lambda: kms:Decrypt + read-only SSM access.
get_ssm_value_role = t.add_resource(iam.Role(
    "GetSsmValueRole",
    AssumeRolePolicyDocument=Policy(
        Version="2012-10-17",
        Statement=[
            Statement(
                Effect=Allow,
                Principal=Principal("Service", "lambda.amazonaws.com"),
                Action=[Action("sts", "AssumeRole")]
            )
        ]),
    Path="/",
    ManagedPolicyArns=["arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"],
    Policies=[
        iam.Policy(
            PolicyName="decrypt",
            PolicyDocument=Policy(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[
                            Action("kms", "Decrypt"),
                        ],
                        Resource=[Ref(kms_key_arn)]
                    )
                ],
            )
        ),
        iam.Policy(
            PolicyName="ssm",
            PolicyDocument=Policy(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[
                            Action("ssm", "GetParameterHistory"),
                        ],
                        Resource=[
                            Join("", ["arn:aws:ssm:", Ref("AWS::Region"), ":", Ref("AWS::AccountId"), ":parameter/*"])]
                    ),
                    Statement(
                        Effect=Allow,
                        Action=[
                            Action("ssm", "DescribeParameters")
                        ],
                        # DescribeParameters does not support resource-level scoping.
                        Resource=["*"]
                    )
                ],
            )
        )
    ]
))
# Paths of the Lambda handler sources that get inlined into the template.
simple_encrypt_filename = os.path.join(os.path.dirname(__file__), "cfn_encrypt/simple_encrypt.py")
ssm_parameter_filename = os.path.join(os.path.dirname(__file__), "cfn_encrypt/ssm_parameter.py")
get_ssm_value_filename = os.path.join(os.path.dirname(__file__), "cfn_encrypt/get_ssm_value.py")
# Custom-resource Lambda functions. DependsOn the matching log group so the
# group exists (with retention policy) before the function first writes logs.
encrypt_lambda = t.add_resource(awslambda.Function(
    "EncryptLambda",
    FunctionName=Join("-", [Ref("AWS::StackName"), "simple"]),
    DependsOn=[log_group_simple.title],
    Handler="index.handler",
    Role=GetAtt(encrypt_lambda_role, "Arn"),
    Runtime="python3.7",
    Timeout=300,
    MemorySize=1536,
    Code=lambda_from_file(simple_encrypt_filename),
))
ssm_parameter_lambda = t.add_resource(awslambda.Function(
    "SsmParameterLambda",
    FunctionName=Join("-", [Ref("AWS::StackName"), "ssm"]),
    DependsOn=[log_group_ssm.title],
    Handler="index.handler",
    Role=GetAtt(ssm_lambda_role, "Arn"),
    Runtime="python3.7",
    Timeout=300,
    MemorySize=1536,
    Code=lambda_from_file(ssm_parameter_filename),
))
get_ssm_value_lambda = t.add_resource(awslambda.Function(
    "GetSsmValueLambda",
    FunctionName=Join("-", [Ref("AWS::StackName"), "get-ssm-value"]),
    DependsOn=[log_group_get_ssm_value.title],
    Handler="index.handler",
    Role=GetAtt(get_ssm_value_role, "Arn"),
    Runtime="python3.7",
    Timeout=300,
    MemorySize=1536,
    Code=lambda_from_file(get_ssm_value_filename),
))
# Exported outputs so other stacks can import the custom-resource
# service-token ARNs and the key ARN.
t.add_output(Output(
    "EncryptLambdaArn",
    Description="Encrypt lambda arn",
    Value=GetAtt(encrypt_lambda, "Arn"),
    Export=Export(
        Sub(
            "${AWS::StackName}-EncryptLambdaArn"
        )
    )
))
t.add_output(Output(
    "KmsKeyArn",
    # NOTE(review): description looks copy-pasted from the secure-parameter
    # output; this value is the KMS key ARN -- confirm and fix upstream.
    Description="My secure parameter name",
    Value=Ref(kms_key_arn),
    Export=Export(
        Sub(
            "${AWS::StackName}-KmsKeyArn"
        )
    )
))
t.add_output(Output(
    "SsmParameterLambdaArn",
    Description="Ssm parameter lambda arn",
    Value=GetAtt(ssm_parameter_lambda, "Arn"),
    Export=Export(
        Sub(
            "${AWS::StackName}-SsmParameterLambdaArn"
        )
    )
))
t.add_output(Output(
    get_ssm_value_lambda.title + "Arn",
    Description="get ssm value lambda arn",
    Value=GetAtt(get_ssm_value_lambda, "Arn"),
    Export=Export(
        Sub(
            "${AWS::StackName}-" + get_ssm_value_lambda.title + "Arn",
        )
    )
))
if do_example:
    # Example custom resources exercising each Lambda (enabled via -we).
    my_encrypted_value = t.add_resource(Encrypt(
        "MyEncryptedValue",
        ServiceToken=GetAtt(encrypt_lambda, "Arn"),
        Base64Data=Base64(Ref(plain_text)),
        KmsKeyArn=Ref(kms_key_arn)
    ))
    # Same payload but encrypted with an additional encryption context.
    my_encrypted_value_with_context = t.add_resource(Encrypt(
        "MyEncryptedValueWithContext",
        ServiceToken=GetAtt(encrypt_lambda, "Arn"),
        Base64Data=Base64(Ref(plain_text)),
        KmsKeyArn=Ref(kms_key_arn),
        EncryptionContext=EncryptionContext(
            Name="Test",
            Value="Test"
        )
    ))
    my_secure_parameter = t.add_resource(SecureParameter(
        "MySecureParameter",
        ServiceToken=GetAtt(ssm_parameter_lambda, "Arn"),
        Name="MySecureParameter",
        Description="Testing secure parameter",
        Value=Ref(plain_text),
        KeyId=Ref(kms_key_arn)
    ))
    # Reads back (and decrypts) the parameter created above.
    my_decrypted_value = t.add_resource(GetSsmValue(
        "MyDecryptedValue",
        ServiceToken=GetAtt(get_ssm_value_lambda, "Arn"),
        Name=Ref(my_secure_parameter),
        KeyId=Ref(kms_key_arn),
        Version=GetAtt(my_secure_parameter,"Version")
    ))
    t.add_output(Output(
        "MySecureParameter",
        Description="My secure parameter name",
        Value=Ref(my_secure_parameter)
    ))
    t.add_output(Output(
        "EncryptedValue",
        Description="Encrypted value, base64 encoded",
        Value=GetAtt(my_encrypted_value, "CiphertextBase64"),
    ))
    t.add_output(Output(
        "EncryptedValueWithContext",
        Description="Encrypted value, base64 encoded",
        Value=GetAtt(my_encrypted_value_with_context, "CiphertextBase64"),
    ))
    t.add_output(Output(
        my_decrypted_value.title + "Value",
        Value=GetAtt(my_decrypted_value, "Value")
    ))
    t.add_output(Output(
        my_decrypted_value.title + "Version",
        Value=GetAtt(my_decrypted_value, "Version")
    ))
# Emit the rendered CloudFormation template to stdout.
print(t.to_json())
|
nilq/baby-python
|
python
|
# Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from vitrage.common.constants import UpdateMethod
# Canonical datasource name under which these options are registered.
NOVA_INSTANCE_DATASOURCE = 'nova.instance'
# oslo.config options for the nova.instance datasource: which transformer
# and driver classes to load, and how the datasource is refreshed.
OPTS = [
    cfg.StrOpt('transformer',
               default='vitrage.datasources.nova.instance.transformer.'
                       'InstanceTransformer',
               help='Nova instance transformer class path',
               required=True),
    cfg.StrOpt('driver',
               default='vitrage.datasources.nova.instance.driver.'
                       'InstanceDriver',
               help='Nova instance driver class path',
               required=True),
    cfg.StrOpt('update_method',
               # PUSH: consume notifications emitted by the datasource.
               default=UpdateMethod.PUSH,
               help='None: updates only via Vitrage periodic snapshots.'
                    'Pull: updates every [changes_interval] seconds.'
                    'Push: updates by getting notifications from the'
                    ' datasource itself.',
               required=True),
]
|
nilq/baby-python
|
python
|
import os
import pandas as pd
import glob
import random
import shutil
# Directory-name -> integer class label mapping for the MLC categories.
INDEX = {
    'Shift_png':0,
    'Random_png':1,
    'Original_png':2,
    'Expand_png':3,
    'Contract_png':4
}
def make_label_csv(input_path, csv_path, mid_dir=None):
    """Scan class sub-directories of *input_path*, pair every image path
    with its INDEX label, shuffle, and write the result as a two-column
    ('id', 'label') CSV at *csv_path*.

    :param mid_dir: optional intermediate directory inside each class dir
        to search instead of the class dir itself.
    """
    samples = []
    for class_dir in os.scandir(input_path):
        label = INDEX[class_dir.name]
        if mid_dir is None:
            search_root = class_dir.path
        else:
            search_root = os.path.join(class_dir.path, mid_dir)
        # "*.*g" matches the usual image suffixes (.png, .jpg, .jpeg)
        for img_path in glob.glob(os.path.join(search_root, "*.*g")):
            samples.append([img_path, label])
    random.shuffle(samples)
    pd.DataFrame(columns=['id', 'label'], data=samples).to_csv(csv_path, index=False)
def make_csv(input_path, csv_path):
    """Write a single-column ('id') CSV at *csv_path* listing every image
    file (suffix matching "*.*g") found directly in *input_path*."""
    paths = glob.glob(os.path.join(input_path, '*.*g'))
    print(len(paths))
    pd.DataFrame(data={'id': paths}).to_csv(csv_path, index=False)
if __name__ == "__main__":
    # Earlier runs kept for reference:
    # input_path = '/staff/shijun/torch_projects/MLC_CLS/dataset/MLC/train'
    # csv_path = './csv_file/MLC.csv'
    # input_path = '/staff/shijun/torch_projects/MLC_CLS/dataset/MLC/test'
    # csv_path = './csv_file/MLC_test.csv'
    # make_label_csv(input_path,csv_path)
    # Current run: label the raw training data under the gamma2mm sub-dir.
    input_path = '/staff/shijun/torch_projects/MLC_CLS/dataset/raw_data/train'
    csv_path = './csv_file/MLC_gamma2mm.csv'
    # input_path = '/staff/shijun/torch_projects/MLC_CLS/dataset/raw_data/test'
    # csv_path = './csv_file/MLC_gamma2mm_test.csv'
    make_label_csv(input_path,csv_path,mid_dir='gamma2mm')
|
nilq/baby-python
|
python
|
# coding=utf-8
import numpy as np
from time import time
from aux import *
'''
Obre un fitxer a partir del seu nom i guarda cada una de les seves línies en cada posició d'una llista
Paràmetres:
- fileName: Nom del fitxer que es vol obrir.
Return:
- llista amb les paraules que conformen la llista Swadesh d'un idioma en concret
'''
def openFile(fileName):
    """Read a Swadesh word-list file and return its lines as a list.

    Each element keeps its trailing newline, exactly as readlines()
    produces it (callers index the list by word position).

    :param fileName: path of the file to open
    :return: list of lines (one word per line)
    """
    # Fix: the original left the file handle open; use a context manager
    # so it is closed even if reading raises.
    with open(fileName, 'r') as f:
        return f.readlines()
'''
Determina si un fonema és una vocal o no.
Paràmetres:
- char: Fonema
Return:
- True si char és vocal
- False si char no és vocal (i per tant és consonant)
'''
def esVocal(char):
    """Return True if *char* is a vowel phoneme, i.e. it appears in the
    vocals_transcr table imported from aux; False otherwise.

    Idiom fix: return the membership test directly instead of the
    if/return True/return False pattern.
    """
    return char in vocals_transcr
'''
Calcula la distància entre dues paraules. Per fer-ho crea una matriu a partir d'un diccionari i es calcula el nombre
mínim de canvis que s'ha de dur a terme per tal d'arribar d'una paraula a una altra. Normalitza la distància en funció de
la mida de la paraula més llarga.
La distància total entre dues paraules es troba a la posició més inferior a la dreta de la matriu.
Paràmetres:
- str1: una de les paraules que es vol comparar
- str2: l'altra paraula que es vol comparar
Return:
- La distància normalitzada entre les dues paraules
'''
def distancia_paraula_lexic(str1, str2):
    """Normalized Levenshtein distance between two words.

    Builds the classic edit-distance DP matrix (as a dict of dicts) with
    unit costs for insertion, deletion and substitution, then divides the
    total by the length of the longer word so the result lies in [0, 1].

    :param str1: first word to compare
    :param str2: second word to compare
    :return: normalized edit distance (0.0 when both words are empty)
    """
    longest = max(len(str1), len(str2))
    if longest == 0:
        return 0.0  # fix: avoid ZeroDivisionError when both words are empty
    d = dict()  # dict used as the DP matrix
    # first row / first column: distance from the empty prefix
    for i in range(len(str1) + 1):
        d[i] = dict()
        d[i][0] = i
    for j in range(len(str2) + 1):
        d[0][j] = j
    for i in range(1, len(str1) + 1):
        for j in range(1, len(str2) + 1):
            # insertion, deletion, or substitution (free when chars match)
            d[i][j] = min(d[i][j - 1] + 1, d[i - 1][j] + 1,
                          d[i - 1][j - 1] + (not str1[i - 1] == str2[j - 1]))
    # the bottom-right cell holds the total distance
    return d[len(str1)][len(str2)] / longest
'''
Calcula la distància entre dues transcripcions fonètiques de dues paraules. Per fer-ho crea una matriu a partir d'un
diccionari i s'adapta la distància de Levenshtein de manera que el cost de la substitució és la distància que hi ha entre
dos fonemes (es calcula amb la funció distanciaFonemes(fonema1,fonema2))
La distància total entre les dues transcripcons es troba a la posició més inferior a la dreta de la matriu.
Paràmetres:
- str1: una de les transcripcions que es vol comparar
- str2: l'altra transcripció que es vol comparar
Return:
- La distància normalitzada entre les dues dues transcripcions
'''
def distancia_paraula_fonema(str1, str2):
    """Normalized phonetic edit distance between two transcriptions.

    Levenshtein distance where the substitution cost is the phoneme
    distance returned by distanciaFonemes(); the total (bottom-right DP
    cell) is divided by the length of the longer transcription, giving a
    value in [0, 1].

    :param str1: first phonetic transcription
    :param str2: second phonetic transcription
    :return: normalized distance (0.0 when both transcriptions are empty)
    """
    longest = max(len(str1), len(str2))
    if longest == 0:
        return 0.0  # fix: avoid ZeroDivisionError when both inputs are empty
    d = dict()  # dict used as the DP matrix
    for i in range(len(str1) + 1):
        d[i] = dict()
        d[i][0] = i
    for j in range(len(str2) + 1):
        d[0][j] = j
    for i in range(1, len(str1) + 1):
        for j in range(1, len(str2) + 1):
            # insertion, deletion, or substitution weighted by phoneme distance
            d[i][j] = min(d[i][j - 1] + 1, d[i - 1][j] + 1,
                          d[i - 1][j - 1] + distanciaFonemes(str1[i - 1], str2[j - 1]))
    return d[len(str1)][len(str2)] / longest
'''
Calcula la distància entre dos fonemes. Per fer-ho mira si els dos fonemes que es comparen són els dos vocàlics o
consonàntics. Si els dos són vocàlics es crida a la funció distanciaVocals(fonema1,fonema2), que calcula la distància
entre dos sons que es corresponen a vocals. Si els dos són consonàntics es crida a la funció
distanciaConsonants(fonema1,fonema2), que calcula la distància entre dos sons que es corresponen a consonants. Si un fonema
és una vocal i l'altre és una consonant, la distància que se li atorga a l'operació és 1.
Paràmetres:
- fonema1
- fonema2
Return:
- La distància entre els dos fonemes
'''
def distanciaFonemes(fonema1, fonema2):
    """Distance between two phonemes.

    Vowel vs vowel -> distanciaVocals; consonant vs consonant ->
    distanciaConsonants; a vowel compared against a consonant gets the
    maximal distance 1.
    """
    es_vocal_1 = esVocal(fonema1)
    es_vocal_2 = esVocal(fonema2)
    if es_vocal_1 and es_vocal_2:
        return distanciaVocals(fonema1, fonema2)
    if not es_vocal_1 and not es_vocal_2:
        return distanciaConsonants(fonema1, fonema2)
    # one vowel, one consonant: maximal distance
    return 1
'''
Calcula la distància, des d'un punt de vista lèxic, acumulada entre dos idiomes. Per fer-ho suma la distància de totes
les paraules dels dos idiomes i les normalitza dividint-les per 207, que és el nombre de paraules que hi ha a cada llista.
Paràmetres:
- idioma1: llista de paraules d'un idioma escrites des d'un punt de vista lèxic
- idioma2: llista de paraules d'un idioma escrites des d'un punt de vista lèxic
Return:
- La distància total que hi ha entre dos idiomes. És un valor entre 0 i 1.
'''
def distanciaIdioma_lexic(idioma1, idioma2):
    """Accumulated lexical distance between two languages: sum of the
    per-word distances over the Swadesh lists, divided by 207. Returns a
    value in [0, 1]."""
    distanciaAcumulada = 0
    # Index 0 presumably holds the language name, so comparison starts at 1.
    # NOTE(review): range(1, 207) visits indices 1..206 (206 words) while the
    # sum is divided by 207 -- possible off-by-one; confirm the list length.
    for x in range(1, 207):
        distanciaAcumulada += distancia_paraula_lexic(idioma1[x], idioma2[x])  # per-word distance
    distanciaAcumulada = distanciaAcumulada / 207  # normalize by word count
    #print ("La distància acumulada entre ", idioma1[0], " i ", idioma2[0], "és de ", distanciaAcumulada)
    return distanciaAcumulada
'''
Calcula la distància fonètica acumulada entre dos idiomes. Per fer-ho suma la distància de totes les paraules dels dos
idiomes i les normalitza dividint-les per 207, que és el nombre de paraules que hi ha a cada llista.
Paràmetres:
- idioma1: llista de transcripcions fonètiques de les paraules d'un idioma
- idioma2: llista de transcripcions fonètiques de les paraules d'un idioma
Return:
- La distància total que hi ha entre dos idiomes. És un valor entre 0 i 1.
'''
def distanciaIdioma_fonetic(idioma1, idioma2):
    """Accumulated phonetic distance between two languages: sum of the
    per-transcription distances over the Swadesh lists, divided by 207.
    Returns a value in [0, 1]."""
    distanciaAcumulada = 0
    # NOTE(review): same possible off-by-one as distanciaIdioma_lexic --
    # range(1, 207) covers 206 entries but the sum is divided by 207.
    for x in range(1, 207):
        distanciaAcumulada += distancia_paraula_fonema(idioma1[x],
                                                       idioma2[x])  # per-word phonetic distance
    distanciaAcumulada = distanciaAcumulada / 207  # normalize by word count
    #print ("La distància acumulada entre ", idioma1[0], " i ", idioma2[0], "és de ", distanciaAcumulada)
    return distanciaAcumulada
'''
Calcula la distància fonètica entre dos vocals. Per fer-ho mira quines posicions ocupa cada vocal dins de la
taula de fonemes vocàlics obtenint els seus índex i els compara per veure en quines característiques coincideixen
(és a dir, mira si els fonemes coincideixen en fila, columna i costat de la columna de la matriu que modeolitza els
sons vocàlics). Si comparteixen tres característiques, la distància és 0; si comparteixen 2 característiques la
distància és 1/3; si comparteixen 1, la distància és de 2/3 i si comparteixen cap característica la distància és 1.
En el cas del fonema w es fa un tractament especial perquè ocupa la mateixa posició que el fonema u. Això es deu a que
expressen el mateix so en situacions diferents. Per simplificar, s'ha considerat a la matriu el so u i, per tenir en compte
el so w, s'utilitzen uns if's per determinar quina posició hauria d'ocupar.
Paràmetres:
- v1: fonema a comparar
- v2: fonema a comparar
Return:
- La distància entre els dos fonemes comparats que pot ser 0, 1/3, 2/3 o 1
'''
def distanciaVocals(v1, v2):
    """Phonetic distance between two vowels.

    Each vowel is located in the 7x3x2 vowel table (vocals_transcr_mat,
    from aux); the distance is the fraction of the three coordinates
    (row, column, side) on which the vowels differ: 0, 1/3, 2/3 or 1.
    """
    denominador = 3
    numerador = 3
    v1_pos = (0, 0, 0)
    v2_pos = (0, 0, 0)
    # locate the first vowel in the table
    for i in range(0, 7):
        for j in range(0, 3):
            for k in range(0, 2):
                if v1 == vocals_transcr_mat[i][j][k]:
                    v1_pos = (i, j, k)
    #print(v1_pos)
    # locate the second vowel in the table
    for i in range(0, 7):
        for j in range(0, 3):
            for k in range(0, 2):
                if v2 == vocals_transcr_mat[i][j][k]:
                    v2_pos = (i, j, k)
    #print(v2_pos)
    # 'w' is not in the table (it represents the same sound as 'u' in a
    # different context), so its position is assigned directly here,
    # overriding the scans above.
    if v1 == 'w':
        v1_pos = (0, 2, 1)
    if v2 == 'w':
        v2_pos = (0, 2, 1)
    # each shared coordinate removes 1/3 from the distance
    for i in range(0, 3):
        if v1_pos[i] == v2_pos[i]:
            numerador -= 1
    distancia = numerador / denominador
    #print ("Distància entre les vocals ", v1, " i ", v2, " = ", distancia)
    return distancia
'''
Calcula la distància fonètica entre dos consonant. Per fer-ho mira quines posicions ocupa cada consonant dins de la
taula de fonemes consonàntics obtenint els seus índex i els compara per veure en quines característiques coincideixen
(és a dir, mira si els fonemes coincideixen en fila, columna i costat de la columna de la matriu que modeolitza els sons
vocàlics). Si comparteixen tres característiques, la distància és 0; si comparteixen 2 característiques la distància és
1/3; si comparteixen 1, la distància és de 2/3 i si comparteixen cap característica la distància és 1.
Paràmetres:
- c1: fonema a comparar
- c2: fonema a comparar
Return:
- La distància entre els dos fonemes comparats que pot ser 0, 1/3, 2/3 o 1
'''
def distanciaConsonants(c1, c2):
    """Phonetic distance between two consonants.

    Each consonant is located in the 8x9x2 consonant table
    (conson_transcr_mat, from aux); the distance is the fraction of the
    three coordinates (row, column, side) on which they differ:
    0, 1/3, 2/3 or 1.
    """
    def _posicio(consonant):
        # last matching cell wins, defaulting to (0, 0, 0) when absent
        pos = (0, 0, 0)
        for fila in range(0, 8):
            for columna in range(0, 9):
                for costat in range(0, 2):
                    if consonant == conson_transcr_mat[fila][columna][costat]:
                        pos = (fila, columna, costat)
        return pos

    pos1 = _posicio(c1)
    pos2 = _posicio(c2)
    coincidencies = sum(1 for i in range(0, 3) if pos1[i] == pos2[i])
    return (3 - coincidencies) / 3
'''
Crida a les altres funcions i mostra per pantalla els resultats obtinguts
'''
if __name__ == '__main__':
    start = time()
    # Lexical pass: open the word lists for every ordered pair of the
    # 8 languages and fill the lexical distance matrix.
    # NOTE(review): lists are re-read from disk for every pair -- works,
    # but 64 redundant file reads.
    for i in range(0, 8):
        for j in range(0, 8):
            idioma1 = openFile("Llistes/" + llista_idiomes[i])
            idioma2 = openFile("Llistes/" + llista_idiomes[j])
            matriu_resultant_lexic[i][j] = distanciaIdioma_lexic(idioma1, idioma2)
    #print ("\n")
    # convert to np.array to simplify printing
    matriu_resultant_array_lexic = np.array(matriu_resultant_lexic)
    # Phonetic pass: same, over the 9 phonetic transcription lists.
    for i in range(0, 9):
        for j in range(0, 9):
            idioma3 = openFile("Llistes/" + llista_idiomes_fonetic[i])
            idioma4 = openFile("Llistes/" + llista_idiomes_fonetic[j])
            matriu_resultant_fonetica[i][j] = distanciaIdioma_fonetic(idioma3, idioma4)
    #print ("\n")
    matriu_resultant_array_fonetic = np.array(
        matriu_resultant_fonetica)  # convert to np.array to simplify printing
    np.set_printoptions(precision=3)  # show 3 decimal places
    # print both distance matrices
    print("Matriu de distàncies lèxiques: \n")
    print(matriu_resultant_array_lexic)
    print ("\n")
    print("Matriu de distàncies fonètiques: \n")
    print (matriu_resultant_array_fonetic)
    print ("\n")
    end = time()
    time_elapsed = end - start  # wall-clock duration of the whole run
    print("Time elapsed: ")
    print(time_elapsed)
|
nilq/baby-python
|
python
|
def swap_case(s):
    """Return *s* with the case of every letter inverted (uppercase
    letters lowered, everything else uppercased); non-letters pass
    through unchanged."""
    return ''.join(c.lower() if c.isupper() else c.upper() for c in s)
if __name__ == '__main__':
    # Read one line from stdin, invert the case of each letter, print it.
    s = input()
    result = swap_case(s)
    print(result)
|
nilq/baby-python
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class PutCustomEventRuleRequest(RpcRequest):
    """RPC request for the CloudMonitor (Cms) PutCustomEventRule API
    (version 2019-01-01), sent as an HTTP POST.

    Follows the aliyun-sdk code-generation pattern: one getter/setter
    pair per API query parameter, each delegating to the query-param
    dict on the RpcRequest base class.
    """
    def __init__(self):
        RpcRequest.__init__(self, 'Cms', '2019-01-01', 'PutCustomEventRule','cms')
        self.set_method('POST')

    def get_Webhook(self):
        return self.get_query_params().get('Webhook')

    def set_Webhook(self,Webhook):
        self.add_query_param('Webhook',Webhook)

    def get_RuleName(self):
        return self.get_query_params().get('RuleName')

    def set_RuleName(self,RuleName):
        self.add_query_param('RuleName',RuleName)

    def get_Threshold(self):
        return self.get_query_params().get('Threshold')

    def set_Threshold(self,Threshold):
        self.add_query_param('Threshold',Threshold)

    def get_EffectiveInterval(self):
        return self.get_query_params().get('EffectiveInterval')

    def set_EffectiveInterval(self,EffectiveInterval):
        self.add_query_param('EffectiveInterval',EffectiveInterval)

    def get_EventName(self):
        return self.get_query_params().get('EventName')

    def set_EventName(self,EventName):
        self.add_query_param('EventName',EventName)

    def get_EmailSubject(self):
        return self.get_query_params().get('EmailSubject')

    def set_EmailSubject(self,EmailSubject):
        self.add_query_param('EmailSubject',EmailSubject)

    def get_Period(self):
        return self.get_query_params().get('Period')

    def set_Period(self,Period):
        self.add_query_param('Period',Period)

    def get_ContactGroups(self):
        return self.get_query_params().get('ContactGroups')

    def set_ContactGroups(self,ContactGroups):
        self.add_query_param('ContactGroups',ContactGroups)

    def get_Level(self):
        return self.get_query_params().get('Level')

    def set_Level(self,Level):
        self.add_query_param('Level',Level)

    def get_GroupId(self):
        return self.get_query_params().get('GroupId')

    def set_GroupId(self,GroupId):
        self.add_query_param('GroupId',GroupId)

    def get_RuleId(self):
        return self.get_query_params().get('RuleId')

    def set_RuleId(self,RuleId):
        self.add_query_param('RuleId',RuleId)
|
nilq/baby-python
|
python
|
from os.path import join
import matplotlib.pylab as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.metrics import roc_curve, auc
import src.utils
from src.methods.methods import good_methods as methods
from src.methods.methods import mutliple_time_series_combiner
from src.readers.json_dataset_reader import JsonDatasetReader
from src.utils import debug, PROJECT_DIR, s_timestamp, getFeedbackLinks, getForwardLinks, getSubplots, \
plotDiGraphViaGraphViz
def normalize_rowwise(x):
    """Return |x| with each row scaled by that row's maximum absolute
    value, so every row's largest entry becomes 1."""
    magnitudes = np.absolute(x)
    return magnitudes / np.max(magnitudes, axis=1, keepdims=True)
def evaluateMethodOnInstance(i, method, normalize=False):
    """Score a single dataset instance with *method*.

    Returns (y_true, y_pred) as flat vectors of length n_nodes**2:
    y_true marks links whose true weight magnitude exceeds 0.01, y_pred
    holds the absolute method scores.
    """
    cc = mutliple_time_series_combiner(method, i)
    for idx_node in range(i.n_nodes):
        if normalize:
            # scale each row by its own diagonal entry before zeroing it
            if cc[idx_node][idx_node] != 0:
                cc[idx_node] /= np.absolute(cc[idx_node][idx_node])
        # NOTE(review): diagonal zeroed on every pass (self-links excluded);
        # indentation reconstructed -- confirm it is not normalize-only.
        cc[idx_node][idx_node] = 0
    if normalize:
        cc = normalize_rowwise(cc)
    # ground truth: links with |weight| > 0.01 count as present
    y_true = np.reshape(1.0 * (np.absolute(i.y) > 0.01), (i.n_nodes * i.n_nodes, 1))
    cc = np.absolute(cc)
    cc_flat = cc.flatten()
    if debug:
        # side-by-side view of the true and predicted adjacency matrices
        plt.figure()
        plt.subplot(1, 2, 1)
        plt.imshow(np.reshape(y_true, (i.n_nodes, i.n_nodes)))
        plt.subplot(1, 2, 2)
        plt.imshow(np.reshape(cc_flat, (i.n_nodes, i.n_nodes)))
        plt.title(method.__name__)
        plt.show()
    return np.reshape(y_true, (i.n_nodes * i.n_nodes,)), np.reshape(cc_flat, (i.n_nodes * i.n_nodes,))
def evaluateMethod(dataset, method, normalize=False):
    """Run *method* over every instance of *dataset* and stack the
    per-instance (y_true, y_pred) vectors into two (n_instances,
    n_nodes**2) float64 arrays."""
    n = dataset.n_instances
    flat_len = dataset.n_nodes * dataset.n_nodes
    combined_y_true = np.empty((n, flat_len), dtype=np.float64)
    combined_y_pred = np.empty((n, flat_len), dtype=np.float64)
    per_instance = (evaluateMethodOnInstance(dataset.get(idx), method, normalize)
                    for idx in range(n))
    for row, (y_true, y_pred) in enumerate(per_instance):
        combined_y_true[row, :] = y_true
        combined_y_pred[row, :] = y_pred
    return combined_y_true, combined_y_pred
def evaluateCombinedTotalRocCurves(predictions, true, methods):
    """Plot, per method, three combined ROC curves (all links / forward
    links / feedback links), save the figure and a text summary of the
    AUCs, and return the best overall AUC found."""
    res = "{0: <20} {1: <19} {2: <19} {3: <19}\n".format(" ", "auc", "auc forward", "auc feedbacks")
    plt.figure(figsize=(35, 12))
    best_auc = 0
    subplot_i, subplot_k = getSubplots(3)
    for f in methods:
        y_true, y_pred = true[f], predictions[f]
        # split link vectors into feedback and forward subsets
        feedbacks_y_true = np.reshape([getFeedbackLinks(temp) for temp in y_true], (-1, 1))
        feedbacks_y_pred = np.reshape([getFeedbackLinks(temp) for temp in y_pred], (-1, 1))
        forward_y_true = np.reshape([getForwardLinks(temp) for temp in y_true], (-1, 1))
        forward_y_pred = np.reshape([getForwardLinks(temp) for temp in y_pred], (-1, 1))
        combined_y_pred = np.reshape(y_pred, (-1, 1))
        combined_y_true = np.reshape(y_true, (-1, 1))
        plt.subplot(subplot_i, subplot_k, 1)
        roc_auc = plotROC(combined_y_true, combined_y_pred, f)
        # NOTE(review): curves with AUC >= 0.99 are excluded from best_auc,
        # presumably to skip degenerate/trivial results -- confirm intent.
        if roc_auc > best_auc and roc_auc < 0.99:
            best_auc = roc_auc
        plt.subplot(subplot_i, subplot_k, 2)
        roc_auc_forward = plotROC(forward_y_true, forward_y_pred, f)
        plt.title('ROC for forward only')
        plt.subplot(subplot_i, subplot_k, 3)
        roc_auc_feedbacks = plotROC(feedbacks_y_true, feedbacks_y_pred, f)
        plt.title('ROC for feedbacks only')
        res += "{0: <20} {1:16.3f} {2:16.3f} {3:16.3f}\n".format(f, roc_auc, roc_auc_forward, roc_auc_feedbacks)
    plt.savefig(join(PROJECT_DIR, 'output', 'evaluation', s_timestamp() + '.pdf'))
    with open(join(PROJECT_DIR, 'output', 'evaluation', s_timestamp() + '.txt'), 'w') as pout:
        pout.write(res)
    return best_auc
def plotROC(y_true, y_pred, label):
    """Add one ROC curve (with AUC in the legend) to the current axes
    and return the AUC value."""
    fpr, tpr, _ = roc_curve(y_true, y_pred)
    roc_auc = auc(fpr, tpr)
    plt.plot(fpr, tpr, label=label + ' (auc = %0.2f)' % roc_auc)
    plt.legend(loc="lower right")
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    return roc_auc
def plotPredicted(y_pred, label, predict_n, cmap, n_nodes, pos, node_labels):
    """Draw the predicted network keeping only the predict_n links with
    the largest absolute scores.

    NOTE(review): mutates *y_pred* in place (everything outside the top
    predict_n entries is zeroed) -- callers needing the full vector must
    pass a copy.
    """
    y_pred[np.argsort(np.absolute(y_pred))[:-predict_n]] = 0
    # edge tuples are (k, i, weight) from the flattened n_nodes*i + k index;
    # presumably (source, target) -- confirm orientation against the plotter.
    ebunch = [(k, i, y_pred[n_nodes * i + k]) for i in range(n_nodes) for k in range(n_nodes) if
              y_pred[n_nodes * i + k] != 0]
    plotDiGraphViaGraphViz(n_nodes, ebunch, cmap, pos, node_labels=node_labels)
    plt.title(label)
def evaluateIndividualRocCurvesAndPredictions(d, predictions, true, predict_n, methods):
    """For every instance, render one PDF page with: the predicted
    network of each method, three per-instance ROC panels (all links /
    feedbacks / forward), and the instance's time series."""
    with PdfPages(join(PROJECT_DIR, 'output', 'visualization_graph_predictions', s_timestamp() + '.pdf')) as pdf:
        cmap = plt.cm.Accent
        for idx_instance in range(d.n_instances):
            instance = d.get(idx_instance)
            n_nodes = instance.n_nodes
            node_labels = instance.labels
            plt.figure(figsize=(40, 20))
            # one subplot per method, plus 3 ROC panels and 1 time-series panel
            subplot_i, subplot_k = getSubplots(len(predictions) + 4)
            for subplot_idx, f in enumerate(methods):
                y_true = true[f][idx_instance][:]
                y_pred = predictions[f][idx_instance][:]
                # plot the roc curve for the instance
                plt.subplot(subplot_i, subplot_k, len(predictions) + 1)
                plotROC(y_true, y_pred, f)
                # plot the roc curve for the feedbacks only
                plt.subplot(subplot_i, subplot_k, len(predictions) + 2)
                plotROC(getFeedbackLinks(y_true), getFeedbackLinks(y_pred), f)
                plt.title('ROC for feedbacks only')
                # plot the roc curve for the forward links only
                plt.subplot(subplot_i, subplot_k, len(predictions) + 3)
                plotROC(getForwardLinks(y_true), getForwardLinks(y_pred), f)
                plt.title('ROC for forward only')
                # plot the predicted networks
                plt.subplot(subplot_i, subplot_k, subplot_idx + 1)
                plotPredicted(y_pred, f, predict_n, cmap, n_nodes, instance.pos, node_labels)
            # last panel: the instance's time series
            plt.subplot(subplot_i, subplot_k, len(predictions) + 4)
            instance.plotTimeSeries_(cmap)
            pdf.savefig()  # saves the current figure into a pdf page
            plt.close()
def evaluateAll(d, normalize=False, predict_n=18, methods=methods):
    """Evaluate every method on dataset *d*: collect predictions, plot
    the combined ROC summary and the per-instance pages, and return the
    best combined AUC."""
    predictions = {}
    true = {}
    for method_name in methods:
        y_true, y_pred = evaluateMethod(d, methods[method_name], normalize=normalize)
        true[method_name] = y_true
        predictions[method_name] = y_pred
    best_auc = evaluateCombinedTotalRocCurves(predictions, true, methods)
    evaluateIndividualRocCurvesAndPredictions(d, predictions, true, predict_n, methods)
    return best_auc
def plotPredictions(dataset, method, predict_n):
    """For each instance, render a PDF page showing the true network
    followed by *method*'s predicted network for every time series
    (top predict_n links each)."""
    cmap = plt.cm.Accent
    with PdfPages(join(PROJECT_DIR, 'output', 'visualization_graph_predictions_really', s_timestamp() + '.pdf')) as pdf:
        for idx_instance in range(dataset.n_instances):
            instance = dataset.get(idx_instance)
            plt.figure(figsize=(20, 14))
            subplot_i, subplot_k = getSubplots(instance.n_time_series + 1)
            # NOTE(review): hard-coded for 5 stimulation protocols; breaks if
            # n_time_series > 5 -- confirm against the dataset definition.
            labels = ['Pulse', '1 Inhibition', '2 Inhibitions', 'Oscilatory', 'Oscilatory+1 Inhibition']
            # first panel: the ground-truth network
            plt.subplot(subplot_i, subplot_k, 1)
            instance.plotDiGraphViaGraphViz_(cmap)
            for idx_time_series in range(instance.n_time_series):
                plt.subplot(subplot_i, subplot_k, idx_time_series + 2)
                instance.setx(idx_time_series)
                y_pred = method(instance)
                # exclude self-links before ranking
                for idx_node in range(instance.n_nodes):
                    y_pred[idx_node][idx_node] = 0
                y_pred = y_pred.reshape(-1, )
                plotPredicted(y_pred, labels[idx_time_series], predict_n, cmap, instance.n_nodes, instance.pos,
                              instance.labels)
            pdf.savefig()  # saves the current figure into a pdf page
            plt.close()
def plotRocForDataset(d, methods=methods):
    """Plot one combined ROC curve per method over the whole dataset and
    save the figure under output/roc/."""
    plt.figure(figsize=(6, 5))
    for f in methods:
        y_true, y_pred = evaluateMethod(d, methods[f], normalize=False)
        combined_y_pred = np.reshape(y_pred, (-1, 1))
        combined_y_true = np.reshape(y_true, (-1, 1))
        # AUC is shown in the plot legend; the return value is unused here
        roc_auc = plotROC(combined_y_true, combined_y_pred, f)
    plt.grid()
    plt.savefig(join(PROJECT_DIR, 'output', 'roc', s_timestamp() + '.pdf'))
def main():
    """Load the 50-node dataset and render prediction plots for it."""
    # Prefix generated artifact filenames so outputs are grouped per dataset.
    src.utils.s_timestamp_prefix = '50_nodes'
    reader = JsonDatasetReader('50_nodes.json.zip')
    d = reader.getDataset(n_instances=1, n_nodes=14 * 3, n_time_series=1)
    plotPredictions(d, methods["partial_corr"], 70)
# Script entry point.
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Defines functionality for pipelined execution of interfaces
The `EngineBase` class implements the more general view of a task.
.. testsetup::
# Change directory to provide relative paths for doctests
import os
filepath = os.path.dirname(os.path.realpath( __file__ ))
datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
os.chdir(datadir)
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
from builtins import object
from future import standard_library
standard_library.install_aliases()
from copy import deepcopy
import re
import numpy as np
from ... import logging
from ...interfaces.base import DynamicTraitedSpec
from ...utils.filemanip import loadpkl, savepkl
logger = logging.getLogger('workflow')
class EngineBase(object):
    """Defines common attributes and functions for workflows and nodes."""

    def __init__(self, name=None, base_dir=None):
        """ Initialize base parameters of a workflow or node

        Parameters
        ----------
        name : string (mandatory)
            Name of this node. Name must be alphanumeric and not contain any
            special characters (e.g., '.', '@').
        base_dir : string
            base output directory (will be hashed before creations)
            default=None, which results in the use of mkdtemp
        """
        self.base_dir = base_dir
        self.config = None
        self._verify_name(name)
        self.name = name
        # for compatibility with node expansion using iterables
        self._id = self.name
        self._hierarchy = None

    @property
    def inputs(self):
        # Subclasses must expose their input spec.
        raise NotImplementedError

    @property
    def outputs(self):
        # Subclasses must expose their output spec.
        raise NotImplementedError

    @property
    def fullname(self):
        """Node name prefixed with its dotted hierarchy path, if any."""
        fullname = self.name
        if self._hierarchy:
            fullname = self._hierarchy + '.' + self.name
        return fullname

    @property
    def itername(self):
        """Iterable-expanded id prefixed with the hierarchy path, if any."""
        itername = self._id
        if self._hierarchy:
            itername = self._hierarchy + '.' + self._id
        return itername

    def clone(self, name):
        """Clone an EngineBase object

        Parameters
        ----------
        name : string (mandatory)
            A clone of node or workflow must have a new name
        """
        if (name is None) or (name == self.name):
            raise Exception('Cloning requires a new name')
        self._verify_name(name)
        clone = deepcopy(self)
        clone.name = name
        clone._id = name
        clone._hierarchy = None
        return clone

    def _check_outputs(self, parameter):
        return hasattr(self.outputs, parameter)

    def _check_inputs(self, parameter):
        # Dynamic specs accept any parameter by construction.
        if isinstance(self.inputs, DynamicTraitedSpec):
            return True
        return hasattr(self.inputs, parameter)

    def _verify_name(self, name):
        """Raise ValueError unless ``name`` is alphanumeric/underscore/dash."""
        # raw string: '\w' in a plain literal is an invalid escape sequence
        # (DeprecationWarning on modern Python)
        valid_name = bool(re.match(r'^[\w-]+$', name))
        if not valid_name:
            raise ValueError('[Workflow|Node] name \'%s\' contains'
                             ' special characters' % name)

    def __repr__(self):
        if self._hierarchy:
            return '.'.join((self._hierarchy, self._id))
        else:
            return '{}'.format(self._id)

    def save(self, filename=None):
        """Pickle this object to ``filename`` (default 'temp.pklz')."""
        if filename is None:
            filename = 'temp.pklz'
        savepkl(filename, self)

    def load(self, filename):
        """Load a pickled object; .npz files are loaded via numpy (deprecated)."""
        if '.npz' in filename:
            # BUG FIX: the original instantiated a DeprecationWarning without
            # emitting it; actually issue the warning.
            import warnings
            warnings.warn(('npz files will be deprecated in the next '
                           'release. you can use numpy to open them.'),
                          DeprecationWarning)
            return np.load(filename)
        return loadpkl(filename)
|
nilq/baby-python
|
python
|
from netCDF4 import Dataset
#-------------------------------------------------------------------------------
def set_difference_fields():
    """For each base/comparison output pair, append a ``<field>Diff``
    variable (comparison minus base) to the comparison netCDF file.

    Keys of ``files`` are baseline output paths; values are lists of files
    to diff against that baseline, modified in place.
    """
    files = {"./output_hex_wachspress_0082x0094_120/output.2000.nc":
             ["./output_hex_pwl_0082x0094_120/output.2000.nc",
              "./output_hex_weak_0082x0094_120/output.2000.nc"],
             "./output_quad_wachspress_0080x0080_120/output.2000.nc":
             ["./output_quad_pwl_0080x0080_120/output.2000.nc",
              "./output_quad_weak_0080x0080_120/output.2000.nc"]}
    fieldnames = ["uVelocity", "vVelocity", "stressDivergenceU", "stressDivergenceV"]
    for filenameBase in files:
        for filenameDiff in files[filenameBase]:
            for fieldname in fieldnames:
                print(fieldname)
                filein = Dataset(filenameBase, "r")
                field1 = filein.variables[fieldname][:]
                dimensionsBase = filein.variables[fieldname].dimensions
                filein.close()
                fileDiff = Dataset(filenameDiff, "a")
                field2 = fileDiff.variables[fieldname][:]
                try:
                    fieldDiff = fileDiff.createVariable(
                        fieldname + "Diff", "d", dimensions=dimensionsBase)
                except RuntimeError:
                    # BUG FIX: was a bare `except:` that swallowed every
                    # error. netCDF4 raises RuntimeError when the variable
                    # already exists (e.g. on a re-run); reuse it then.
                    fieldDiff = fileDiff.variables[fieldname + "Diff"]
                fieldDiff[:] = field2[:] - field1[:]
                fileDiff.close()
#-------------------------------------------------------------------------------
# Script entry point.
if __name__ == "__main__":
    set_difference_fields()
|
nilq/baby-python
|
python
|
import imp
import librosa
import numpy as np
from keras.models import load_model
genres = {0: "metal", 1: "disco", 2: "classical", 3: "hiphop", 4: "jazz",
5: "country", 6: "pop", 7: "blues", 8: "reggae", 9: "rock"}
song_samples = 660000
def load_song(filepath):
    """Load an audio file and truncate it to ``song_samples`` samples.

    Returns the (signal, sample_rate) pair produced by librosa.
    """
    signal, sample_rate = librosa.load(filepath)
    return signal[:song_samples], sample_rate
def splitsongs(X, window=0.1, overlap=0.5):
    """Split a 1-D signal into overlapping chunks.

    Parameters
    ----------
    X : np.ndarray
        1-D signal to split.
    window : float
        Chunk length as a fraction of ``len(X)``.
    overlap : float
        Fraction of each chunk shared with its successor.

    Returns
    -------
    np.ndarray of shape (n_chunks, chunk_len).
    """
    n_samples = X.shape[0]
    chunk = int(n_samples * window)
    # step between consecutive chunk starts
    offset = int(chunk * (1. - overlap))
    # the original copied this list element-by-element into a second list
    # before converting; build the array from it directly
    chunks = [X[i:i + chunk] for i in range(0, n_samples - chunk + offset, offset)]
    return np.array(chunks)
def to_melspec(signals):
    """Compute a mel spectrogram for each signal, adding a trailing
    single-channel axis, and stack the results into one array."""
    return np.array([
        librosa.feature.melspectrogram(s, n_fft=1024, hop_length=512)[:, :, np.newaxis]
        for s in signals
    ])
def get_genre(path, debug=False):
    """Predict the genre of the song at ``path``.

    Splits the song into overlapping windows, converts each to a mel
    spectrogram, runs the VGG16-based model on all windows, and returns the
    majority-vote genre name.

    Parameters
    ----------
    path : str
        Audio file path.
    debug : bool
        If True, print the per-window prediction details.
    """
    model = load_model('./weights/genres_full_vgg16.h5')
    y = load_song(path)[0]
    signals = splitsongs(y)
    # removed dead `predictions = []` and the list-extend round trip; the
    # melspec batch is already an ndarray
    spectro = np.array(to_melspec(signals))
    # replicate the single channel 3x to match the model's 3-channel input
    spectro = np.squeeze(np.stack((spectro,) * 3, -1))
    pr = np.array(model.predict(spectro))
    predictions = np.argmax(pr, axis=1)
    if debug:
        print('Load audio:', path)
        print("\nFull Predictions:")
        for p in pr: print(list(p))
        print("\nPredictions:\n{}".format(predictions))
        print("Confidences:\n{}".format([round(x, 2) for x in np.amax(pr, axis=1)]))
        print("\nOutput Predictions:\n{}\nPredicted class:".format(np.mean(pr, axis=0)))
    # majority vote across windows
    return genres[np.bincount(predictions).argmax()]  # list(np.mean(pr, axis=0))
# Script entry point: classify the bundled sample with debug output.
if __name__ == '__main__':
    print(get_genre('./audios/classical_music.mp3', True))
|
nilq/baby-python
|
python
|
""" Tests for the StarCluster Job """
import sure
from mock import Mock
from .fixtures import *
from metapipe.models import sge_job
def test_qstat_queued():
    """A job whose qstat output shows the queued state reports is_queued."""
    j = sge_job.SGEJob('', None)
    # Patch the module-level `call` so no real qstat is executed.
    sge_job.call = Mock(return_value=sge_job_qstat_queued)
    j.is_queued().should.equal(True)
def test_qstat_running():
    """A job whose qstat output shows the running state reports is_running."""
    j = sge_job.SGEJob('', None)
    # Patch the module-level `call` so no real qstat is executed.
    sge_job.call = Mock(return_value=sge_job_qstat_running)
    j.is_running().should.equal(True)
def test_submit():
    """Submitting a job parses the qsub output and records the job id."""
    j = sge_job.SGEJob('', None)
    # Patch `call` (qsub) and `make` so submit() runs without SGE installed.
    sge_job.call = Mock(return_value=sge_job_qsub)
    j.make = Mock()
    j.submit()
    j.id.should.equal('1')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def initial_log_action(apps, schema_editor):
    """Seed (or refresh) the USER_CREATE and USER_EDIT LogAction templates."""
    # Use the historical model state so this migration runs on old schemas.
    LogAction = apps.get_model('logs', 'LogAction')
    action, created = LogAction.objects.get_or_create(
        name='USER_CREATE',
    )
    action.template = 'User `{{ log_item.object1.username }}` was created.'
    action.save()
    action, created = LogAction.objects.get_or_create(
        name='USER_EDIT',
    )
    # NOTE(review): the backslash continues the string literal, so the
    # leading whitespace of the next physical line is part of the stored
    # template — do not re-indent it.
    action.template = 'User `{{ log_item.object1.username }}` field `{{ log_item.data.field }}` \
was changed from {{ log_item.data.old }} to {{ log_item.data.new }}.'
    action.save()
def backwards(apps, schema_editor):
    # Intentionally a no-op: removing the seeded templates is unnecessary
    # when unapplying this migration.
    pass
class Migration(migrations.Migration):
    """Data migration: seed LogAction templates after the 0002 schema step."""

    dependencies = [
        ('logs', '0002_auto_20150128_0257'),
    ]
    operations = [
        migrations.RunPython(initial_log_action, backwards),
    ]
|
nilq/baby-python
|
python
|
__author__ = 'Neil Butcher'
from Institution import InstitutionSavingObject
|
nilq/baby-python
|
python
|
import sys
import os.path
def main():
    """Run the script named by the first CLI argument as if it were invoked
    directly: shift argv, put its directory on sys.path, and exec it under
    a __main__ namespace."""
    sys.argv[:] = sys.argv[1:]  # Must rewrite the command line arguments
    progname = sys.argv[0]
    # let the target script import modules that sit next to it
    sys.path.insert(0, os.path.dirname(progname))
    with open(progname, 'rb') as fp:
        code = compile(fp.read(), progname, 'exec')
    # minimal globals mimicking a real top-level script run
    globs = {
        '__file__' : progname,
        '__name__' : '__main__',
        '__package__' : None,
        '__cached__' : None
    }
    exec(code, globs)
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
import os
import shutil
import subprocess
import sys
import optparse
import datetime
import time
sys.path[:0] = ['.']
from yt_dlp.utils import check_executable
# Number of zopfli recompression iterations for advzip; falls back to 30
# when the env var is unset or not an integer.
try:
    iterations = str(int(os.environ['ZOPFLI_ITERATIONS']))
except BaseException:
    iterations = '30'
parser = optparse.OptionParser(usage='%prog PYTHON')
options, args = parser.parse_args()
if len(args) != 1:
    parser.error('Expected python executable name for shebang')
PYTHON = args[0]
# 200001010101
# Fixed timestamp applied to every file so the zip is byte-reproducible.
date = datetime.datetime(year=2000, month=1, day=1, hour=1, minute=1, second=1)
modTime = time.mktime(date.timetuple())
# Start from a clean staging directory.
try:
    shutil.rmtree('zip/')
except FileNotFoundError:
    pass
os.makedirs('zip/', exist_ok=True)
# NOTE(review): `dir` and `file` shadow builtins here.
files = [(dir, file) for (dir, _, c) in os.walk('yt_dlp') for file in c if file.endswith('.py')]
for (dir, file) in files:
    joined = os.path.join(dir, file)
    dest = os.path.join('zip', joined)
    os.makedirs(os.path.join('zip', dir), exist_ok=True)
    shutil.copy(joined, dest)
    os.utime(dest, (modTime, modTime))
# __main__.py must live at the archive root for `python youtube-dl.zip`.
os.rename('zip/yt_dlp/__main__.py', 'zip/__main__.py')
files.remove(('yt_dlp', '__main__.py'))
files[:0] = [('', '__main__.py')]
all_paths = [os.path.join(dir, file) for (dir, file) in files]
# Prefer 7z (better deflate), then the plain zip tool.
if check_executable('7z', []):
    ret = subprocess.Popen(
        ['7z', 'a', '-mm=Deflate', '-mfb=258', '-mpass=15', '-mtc-', '../youtube-dl.zip'] + all_paths,
        cwd='zip/').wait()
elif check_executable('zip', ['-h']):
    ret = subprocess.Popen(
        ['zip', '-9', '../youtube-dl.zip'] + all_paths,
        cwd='zip/').wait()
else:
    raise Exception('Cannot find ZIP archiver')
if ret != 0:
    raise Exception('ZIP archiver returned error: %d' % ret)
# Optional extra recompression pass with advzip (zopfli).
if check_executable('advzip', []):
    subprocess.Popen(
        ['advzip', '-z', '-4', '-i', iterations, 'youtube-dl.zip']).wait()
shutil.rmtree('zip/')
# Prepend a shebang to make the zip directly executable.
with open('youtube-dl', 'wb') as ytdl:
    ytdl.write(b'#!%s\n' % PYTHON.encode('utf8'))
    # NOTE(review): `zip` shadows the builtin inside this block.
    with open('youtube-dl.zip', 'rb') as zip:
        ytdl.write(zip.read())
os.remove('youtube-dl.zip')
os.chmod('youtube-dl', 0o755)
|
nilq/baby-python
|
python
|
import json
from django.http.response import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.urls.base import reverse
from lykops.views import Base
class Report(Base):
    """Views listing and displaying ansible task execution reports."""

    def summary(self,request):
        """Render the report list page for the logged-in user."""
        # Require a logged-in user; otherwise bounce to the login page.
        result = self._is_login(request)
        if result[0] :
            username = result[1]
        else :
            return HttpResponseRedirect(reverse('login'))
        http_referer = self.uri_api.get_httpreferer(username, no=-1)
        force = request.GET.get('force', False)
        result = self.ansible_report_api.get_date_list(username, force=force)
        if not result[0] :
            # NOTE(review): 'nav_html' and 'lately_whereabouts' appear twice
            # in these context dicts (last value wins) — copy-paste leftover.
            return render_to_response('report_list.html', {'login_user':username, 'error_message':result[1], 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
        else :
            date_list = result[1]
        create_date = request.GET.get('create_date', None)
        # the query string sends the literal text 'None' for "no date filter"
        if create_date == 'None' :
            create_date = None
        mode = request.GET.get('mode', 'all')
        result = self.ansible_report_api.summary(username, dt=create_date, mode=mode)
        if not result[0] :
            error_message = self.username + ' 查看用户' + username + '的ansible任务执行报告列表失败,提交保存时发生错误,原因:' + result[1]
            self.logger.error(error_message)
            return render_to_response('report_list.html', {'login_user':username, 'error_message':error_message, 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
        else :
            work_list = result[1]
            self.logger.info(self.username + ' 查看用户' + username + '的ansible任务执行报告列表成功')
            return render_to_response('report_list.html', {'login_user':username, 'error_message':{}, 'http_referer':http_referer, 'date_list':date_list, 'work_list':work_list, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})

    def detail(self, request):
        """Render a single report, either formatted or as raw JSON."""
        result = self._is_login(request)
        if result[0] :
            username = result[1]
        else :
            return HttpResponseRedirect(reverse('login'))
        http_referer = self.uri_api.get_httpreferer(username, no=-1)
        # NOTE(review): bool() on a query-string value is True for any
        # non-empty string (including 'False'/'0') — confirm intended.
        force = request.GET.get('force', False)
        force = bool(force)
        uuid_str = request.GET.get('uuid', False)
        exec_mode = request.GET.get('mode', False)
        orig_content = request.GET.get('orig_content', False)
        orig_content = bool(orig_content)
        result = self.ansible_report_api.detail(username, uuid_str, force=force, orig_content=orig_content)
        if result[0] :
            report_data = result[1]
            if orig_content :
                self.logger.info(self.username + ' 查看用户' + username + '的uuid为' + uuid_str + '的ansible任务执行报告成功(原始数据)')
                return HttpResponse(json.dumps(report_data))
                # NOTE(review): the next line follows a return and appears
                # unreachable — confirm original indentation before cleanup.
                return render_to_response('result.html', {'login_user':username, 'content':report_data, 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
            self.logger.info(self.username + ' 查看用户' + username + '的uuid为' + uuid_str + '的ansible任务执行报告成功(格式化数据)')
            # pick the template matching how the task was executed
            if exec_mode == 'adhoc' :
                return render_to_response('report_adhoc.html', {'login_user':username, 'report_data':report_data, 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
            else :
                return render_to_response('report_playbook.html', {'login_user':username, 'report_data':report_data, 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
        else :
            error_message = self.username + ' 查看用户' + username + '的uuid为' + uuid_str + '的ansible任务执行报告失败,查询时发生错误,原因:' + result[1]
            self.logger.error(error_message)
            return render_to_response('result.html', {'login_user':username, 'content':error_message, 'http_referer':http_referer, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html, 'nav_html':self.nav_html, 'lately_whereabouts':self.latelywhere_html})
|
nilq/baby-python
|
python
|
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import pandas as pd
from pycylon.index import Index, RangeIndex, NumericIndex, CategoricalIndex, ColumnIndex, \
range_calculator
from pycylon import Table
from pycylon import CylonContext
import pyarrow as pa
import numpy as np
from pycylon.io import CSVReadOptions
from pycylon.io import read_csv
def test_with_pandas():
pdf = pd.DataFrame([[1, 2, 3, 4, 5, 'a'], [6, 7, 8, 9, 10, 'b'], [11, 12, 13, 14, 15, 'c'],
[16, 17, 18, 19, 20, 'a'], [16, 17, 18, 19, 20, 'd'],
[111, 112, 113, 114, 5,
'a']])
# print(pdf)
pdf1 = pdf.set_index([1, 2])
# print(pdf1)
print(pdf1.index)
def test_numeric_index():
rg = range(0, 10, 1)
rg1 = range(0, 10, 2)
r = NumericIndex(data=rg)
assert r.index_values == rg
assert r.index_values != rg1
def test_range_index():
rg = range(0, 10, 1)
rg1 = range(0, 10, 2)
r = RangeIndex(start=rg.start, stop=rg.stop, step=rg.step)
assert r.index_values == rg
assert r.index_values != rg1
r1 = RangeIndex(rg)
r2 = RangeIndex(rg)
assert r1.index_values == rg
assert r2.index_values != rg1
def calculate_range_size_manual(rg: range):
sum = 0
for i in rg:
sum += 1
return sum
def test_range_count():
rg_1 = range(0, 10)
rg_2 = range(0, 10, 2)
rg_3 = range(0, 10, 3)
rg_4 = range(0, 11, 2)
rg_5 = range(0, 14, 3)
rgs = [rg_1, rg_2, rg_3, rg_4, rg_5]
for rg in rgs:
assert range_calculator(rg) == calculate_range_size_manual(rg)
def test_cylon_set_index_from_column():
from pycylon.indexing.cyindex import IndexingType
from pycylon.indexing.index_utils import IndexUtil
pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.int64()),
'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int')})
pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb: Table = Table.from_pandas(ctx, pdf_float)
indexing_type = IndexingType.LINEAR
drop_index = True
print("Before Indexing")
print(cn_tb)
# cn_tb.set_index('a', indexing_schema, drop_index)
cn_tb.set_index('a', indexing_type, drop_index)
print("After Indexing")
assert cn_tb.column_names == ['b']
assert cn_tb.get_index().get_type() == IndexingType.LINEAR
def test_reset_index():
from pycylon.indexing.cyindex import IndexingType
from pycylon.indexing.index_utils import IndexUtil
pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.int64()),
'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int')})
pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb: Table = Table.from_pandas(ctx, pdf_float)
indexing_type = IndexingType.LINEAR
drop_index = True
# cn_tb.set_index('a', indexing_schema, drop_index)
cn_tb.set_index('a', indexing_type, drop_index)
# assert cn_tb.get_index().get_type() == IndexingSchema.LINEAR
assert cn_tb.get_index().get_type() == IndexingType.LINEAR
rest_drop_index = False
# cn_tb.reset_index(rest_drop_index)
cn_tb.reset_index(rest_drop_index)
assert cn_tb.column_names == ['index', 'b']
# assert cn_tb.get_index().get_schema() == IndexingSchema.RANGE
assert cn_tb.get_index().get_type() == IndexingType.RANGE
def test_cylon_cpp_single_column_indexing():
# TODO: REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.int64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int')})
# pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf_float)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = 1
# end_index = 7
# column_index = 0
#
# loc_out = loc_ix.loc_with_single_column(slice(start_index, end_index), column_index, output)
# #
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = [4, 7, 23, 20]
#
# loc_out2 = loc_ix.loc_with_single_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = 10
# loc_out3 = loc_ix.loc_with_single_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_cylon_cpp_multi_column_indexing():
# TODO REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.float64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
# 'c': pd.Series([11, 12, 14, 15, 16, 17, 18], dtype='int')
# })
# pdf = pd.DataFrame([[1, 2, 11], [4, 5, 12], [7, 8, 14], [10, 11, 15], [20, 22, 16], [23, 25,
# 17],
# [10, 12, 18]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = 1
# end_index = 7
# column_index = [0, 1]
#
# loc_out = loc_ix.loc_with_multi_column(slice(start_index, end_index), column_index, output)
# #
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = [4, 7, 23, 20]
#
# loc_out2 = loc_ix.loc_with_multi_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = 10
# loc_out3 = loc_ix.loc_with_multi_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_cylon_cpp_str_single_column_indexing():
# TODO REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_str = pd.DataFrame([["1", 2], ["4", 5], ["7", 8], ["10", 11], ["20", 22], ["23", 25], ["10",
# 12]])
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.float64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int')})
# pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf_str)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = "1"
# end_index = "7"
# column_index = 0
#
# loc_out = loc_ix.loc_with_single_column(slice(start_index, end_index), column_index, output)
#
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = ["100", "4", "7", "23", "20"]
#
# indices = ['4']
#
# # indices = [4, 7]
#
# loc_out2 = loc_ix.loc_with_single_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = '10'
# loc_out3 = loc_ix.loc_with_single_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_cylon_cpp_str_multi_column_indexing():
# TODO REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_str = pd.DataFrame([["1", 2, 3], ["4", 5, 4], ["7", 8, 10], ["10", 11, 12], ["20", 22, 20],
# ["23", 25, 20], ["10", 12, 35]])
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.float64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
# 'c': pd.Series([3, 4, 10, 12, 20, 20, 35], dtype='int')})
# pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf_str)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = "1"
# end_index = "7"
# column_index = [0, 1]
#
# loc_out = loc_ix.loc_with_multi_column(slice(start_index, end_index), column_index, output)
#
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = ["100", "4", "7", "23", "20"]
#
# indices = ['4']
#
# # indices = [4, 7]
#
# loc_out2 = loc_ix.loc_with_multi_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = '10'
# loc_out3 = loc_ix.loc_with_multi_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_cylon_cpp_range_column_indexing():
# TODO REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.float64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
# 'c': pd.Series([11, 12, 14, 15, 16, 17, 18], dtype='int')
# })
# pdf = pd.DataFrame([[1, 2, 11], [4, 5, 12], [7, 8, 14], [10, 11, 15], [20, 22, 16], [23, 25,
# 17],
# [10, 12, 18]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = 1
# end_index = 7
# column_index = slice(0, 1)
#
# loc_out = loc_ix.loc_with_range_column(slice(start_index, end_index), column_index, output)
# #
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = [4, 7, 23, 20]
#
# loc_out2 = loc_ix.loc_with_range_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = 10
# loc_out3 = loc_ix.loc_with_range_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_cylon_cpp_str_range_column_indexing():
# TODO REMOVE
pass
# from pycylon.indexing.cyindex import IndexingSchema
# from pycylon.indexing.index_utils import IndexUtil
#
#
# pdf_str = pd.DataFrame([["1", 2, 3], ["4", 5, 4], ["7", 8, 10], ["10", 11, 12], ["20", 22, 20],
# ["23", 25, 20], ["10", 12, 35]])
# pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 10], dtype=np.float64()),
# 'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
# 'c': pd.Series([3, 4, 10, 12, 20, 20, 35], dtype='int')})
# pdf = pd.DataFrame([[1, 2], [4, 5], [7, 8], [10, 11], [20, 22], [23, 25], [10, 12]])
# ctx: CylonContext = CylonContext(config=None, distributed=False)
# cn_tb: Table = Table.from_pandas(ctx, pdf_str)
# indexing_schema = IndexingSchema.LINEAR
#
# print("Input Table")
# print(cn_tb)
# print(cn_tb.to_arrow())
#
# output = IndexUtil.build_index(indexing_schema, cn_tb, 0, True)
# print("Output Indexed Table")
# print(output)
#
# loc_ix = LocIndexer(indexing_schema)
# start_index = "1"
# end_index = "7"
# column_index = slice(0, 1)
#
# loc_out = loc_ix.loc_with_range_column(slice(start_index, end_index), column_index, output)
#
# print(loc_out)
#
# print(loc_out.to_arrow())
#
# index = loc_out.get_index()
#
# print(index)
#
# print(index.get_index_array())
#
# indices = ["100", "4", "7", "23", "20"]
#
# indices = ['4']
#
# # indices = [4, 7]
#
# loc_out2 = loc_ix.loc_with_range_column(indices, column_index, output)
#
# print(loc_out2)
#
# loc_index = '10'
# loc_out3 = loc_ix.loc_with_range_column(loc_index, column_index, output)
#
# print(loc_out3)
def test_loc_op_mode_1():
from pycylon.indexing.cyindex import IndexingType
pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 11], dtype=np.int64()),
'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
'c': pd.Series([12, 15, 18, 111, 122, 125, 112], dtype='int'),
'd': pd.Series([212, 215, 218, 211, 222, 225, 312], dtype='int'),
'e': pd.Series([1121, 12151, 12181, 12111, 12221, 12251, 13121],
dtype='int')})
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb: Table = Table.from_pandas(ctx, pdf_float)
indexing_type = IndexingType.LINEAR
drop_index = True
print("Before Indexing")
print(cn_tb)
cn_tb.set_index('a', indexing_type, drop_index)
pdf_float = pdf_float.set_index('a')
print("After Indexing")
assert cn_tb.column_names == ['b', 'c', 'd', 'e']
# assert cn_tb.get_index().get_schema() == IndexingSchema.LINEAR
assert cn_tb.get_index().get_type() == IndexingType.LINEAR
loc_cn_1 = cn_tb.loc[7:20, 'c':'e']
loc_pd_1 = pdf_float.loc[7:20, 'c':'e']
print(loc_cn_1.get_index().values)
print(loc_pd_1.index.values)
assert loc_pd_1.values.tolist() == loc_cn_1.to_pandas().values.tolist()
assert loc_cn_1.get_index().get_index_array() == pa.array(loc_pd_1.index)
# assert loc_cn_1.get_arrow_index().get_index_array() == pa.array(loc_pd_1.index)
loc_cn_2 = cn_tb.loc[7:20, 'd':]
loc_pd_2 = pdf_float.loc[7:20, 'd':]
assert loc_pd_2.values.tolist() == loc_cn_2.to_pandas().values.tolist()
assert loc_cn_2.get_index().get_index_array() == pa.array(loc_pd_2.index)
# assert loc_cn_2.get_arrow_index().get_index_array() == pa.array(loc_pd_2.index)
loc_cn_3 = cn_tb.loc[7:, 'd':]
loc_pd_3 = pdf_float.loc[7:, 'd':]
assert loc_pd_3.values.tolist() == loc_cn_3.to_pandas().values.tolist()
assert loc_cn_3.get_index().get_index_array() == pa.array(loc_pd_3.index)
# assert loc_cn_3.get_arrow_index().get_index_array() == pa.array(loc_pd_3.index)
loc_cn_4 = cn_tb.loc[:7, 'd':]
loc_pd_4 = pdf_float.loc[:7, 'd':]
assert loc_pd_4.values.tolist() == loc_cn_4.to_pandas().values.tolist()
assert loc_cn_4.get_index().get_index_array() == pa.array(loc_pd_4.index)
# assert loc_cn_4.get_arrow_index().get_index_array() == pa.array(loc_pd_4.index)
loc_cn_5 = cn_tb.loc[:, 'd':]
loc_pd_5 = pdf_float.loc[:, 'd':]
assert loc_pd_5.values.tolist() == loc_cn_5.to_pandas().values.tolist()
assert loc_cn_5.get_index().get_index_array() == pa.array(loc_pd_5.index)
# assert loc_cn_5.get_arrow_index().get_index_array() == pa.array(loc_pd_5.index)
loc_cn_6 = cn_tb.loc[[7, 20], 'd':]
loc_pd_6 = pdf_float.loc[[7, 20], 'd':]
assert loc_pd_6.values.tolist() == loc_cn_6.to_pandas().values.tolist()
assert loc_cn_6.get_index().get_index_array() == pa.array(loc_pd_6.index)
def test_loc_op_mode_2():
from pycylon.indexing.cyindex import IndexingType
pdf_float = pd.DataFrame({'a': pd.Series(["1", "4", "7", "10", "20", "23", "11"]),
'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
'c': pd.Series([12, 15, 18, 111, 122, 125, 112], dtype='int'),
'd': pd.Series([212, 215, 218, 211, 222, 225, 312], dtype='int'),
'e': pd.Series([1121, 12151, 12181, 12111, 12221, 12251, 13121],
dtype='int')})
ctx: CylonContext = CylonContext(config=None, distributed=False)
cn_tb: Table = Table.from_pandas(ctx, pdf_float)
indexing_type = IndexingType.LINEAR
drop_index = True
print("Before Indexing")
print(cn_tb)
cn_tb.set_index('a', indexing_type, drop_index)
pdf_float = pdf_float.set_index('a')
print("After Indexing")
assert cn_tb.column_names == ['b', 'c', 'd', 'e']
assert cn_tb.get_index().get_type() == IndexingType.LINEAR
loc_cn_1 = cn_tb.loc["7":"20", 'c':'e']
loc_pd_1 = pdf_float.loc["7":"20", 'c':'e']
assert loc_pd_1.values.tolist() == loc_cn_1.to_pandas().values.tolist()
assert loc_cn_1.get_index().get_index_array() == pa.array(loc_pd_1.index)
# assert loc_cn_1.get_arrow_index().get_index_array() == pa.array(loc_pd_1.index)
loc_cn_2 = cn_tb.loc["7":"20", 'd':]
loc_pd_2 = pdf_float.loc["7":"20", 'd':]
assert loc_pd_2.values.tolist() == loc_cn_2.to_pandas().values.tolist()
assert loc_cn_2.get_index().get_index_array() == pa.array(loc_pd_2.index)
# assert loc_cn_2.get_arrow_index().get_index_array() == pa.array(loc_pd_2.index)
loc_cn_3 = cn_tb.loc["7":, 'd':]
loc_pd_3 = pdf_float.loc["7":, 'd':]
assert loc_pd_3.values.tolist() == loc_cn_3.to_pandas().values.tolist()
assert loc_cn_3.get_index().get_index_array() == pa.array(loc_pd_3.index)
# assert loc_cn_3.get_arrow_index().get_index_array() == pa.array(loc_pd_3.index)
loc_cn_4 = cn_tb.loc[:"7", 'd':]
loc_pd_4 = pdf_float.loc[:"7", 'd':]
assert loc_pd_4.values.tolist() == loc_cn_4.to_pandas().values.tolist()
assert loc_cn_4.get_index().get_index_array() == pa.array(loc_pd_4.index)
# assert loc_cn_4.get_arrow_index().get_index_array() == pa.array(loc_pd_4.index)
loc_cn_5 = cn_tb.loc[:, 'd':]
loc_pd_5 = pdf_float.loc[:, 'd':]
assert loc_pd_5.values.tolist() == loc_cn_5.to_pandas().values.tolist()
assert loc_cn_5.get_index().get_index_array() == pa.array(loc_pd_5.index)
# assert loc_cn_5.get_arrow_index().get_index_array() == pa.array(loc_pd_5.index)
loc_cn_6 = cn_tb.loc[["7", "20"], 'd':]
loc_pd_6 = pdf_float.loc[["7", "20"], 'd':]
assert loc_pd_6.values.tolist() == loc_cn_6.to_pandas().values.tolist()
assert loc_cn_6.get_index().get_index_array() == pa.array(loc_pd_6.index)
def test_loc_op_mode_3():
    """loc with row selection only (no column spec) on a string LINEAR index.

    Builds the same table in pandas and PyCylon, indexes both on column
    'a', then checks that each loc row-selection mode (closed range,
    open-ended ranges, full slice, explicit index list) yields identical
    values and index arrays.
    """
    from pycylon.indexing.cyindex import IndexingType
    from pycylon.indexing.index_utils import IndexUtil
    # String-valued index column 'a' plus four int payload columns.
    pdf_float = pd.DataFrame({'a': pd.Series(["1", "4", "7", "10", "20", "23", "11"]),
                              'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
                              'c': pd.Series([12, 15, 18, 111, 122, 125, 112], dtype='int'),
                              'd': pd.Series([212, 215, 218, 211, 222, 225, 312], dtype='int'),
                              'e': pd.Series([1121, 12151, 12181, 12111, 12221, 12251, 13121],
                                             dtype='int')})
    ctx: CylonContext = CylonContext(config=None, distributed=False)
    cn_tb: Table = Table.from_pandas(ctx, pdf_float)
    indexing_type = IndexingType.LINEAR
    drop_index = True
    print("Before Indexing")
    print(cn_tb)
    # Index both frames on 'a' (dropped from the payload columns).
    cn_tb.set_index('a', indexing_type, drop_index)
    pdf_float = pdf_float.set_index('a')
    print("After Indexing")
    assert cn_tb.column_names == ['b', 'c', 'd', 'e']
    assert cn_tb.get_index().get_type() == IndexingType.LINEAR
    # Closed range "7".."20".
    loc_cn_1 = cn_tb.loc["7":"20"]
    loc_pd_1 = pdf_float.loc["7":"20"]
    print(loc_cn_1.get_index().get_index_array())
    print(loc_pd_1.index.values)
    assert loc_pd_1.values.tolist() == loc_cn_1.to_pandas().values.tolist()
    assert loc_cn_1.get_index().get_index_array() == pa.array(loc_pd_1.index)
    # Open-ended range from "7".
    loc_cn_2 = cn_tb.loc["7":]
    loc_pd_2 = pdf_float.loc["7":]
    assert loc_pd_2.values.tolist() == loc_cn_2.to_pandas().values.tolist()
    assert loc_cn_2.get_index().get_index_array() == pa.array(loc_pd_2.index)
    # Range bounded from above by "7".
    loc_cn_3 = cn_tb.loc[:"7"]
    loc_pd_3 = pdf_float.loc[:"7"]
    assert loc_pd_3.values.tolist() == loc_cn_3.to_pandas().values.tolist()
    assert loc_cn_3.get_index().get_index_array() == pa.array(loc_pd_3.index)
    # Full slice.
    loc_cn_4 = cn_tb.loc[:]
    loc_pd_4 = pdf_float.loc[:]
    assert loc_pd_4.values.tolist() == loc_cn_4.to_pandas().values.tolist()
    assert loc_cn_4.get_index().get_index_array() == pa.array(loc_pd_4.index)
    # Explicit list of index values.
    loc_cn_5 = cn_tb.loc[["7", "20"], :]
    loc_pd_5 = pdf_float.loc[["7", "20"], :]
    assert loc_pd_5.values.tolist() == loc_cn_5.to_pandas().values.tolist()
    assert loc_cn_5.get_index().get_index_array() == pa.array(loc_pd_5.index)
def test_iloc_op_mode_1():
    """iloc with positional row/column slices and lists on a LINEAR index.

    Mirrors each iloc selection on a pandas frame and asserts the PyCylon
    result holds identical values.
    """
    from pycylon.indexing.cyindex import IndexingType
    from pycylon.indexing.index_utils import IndexUtil
    pdf_float = pd.DataFrame({'a': pd.Series(["1", "4", "7", "10", "20", "23", "11"]),
                              'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
                              'c': pd.Series([12, 15, 18, 111, 122, 125, 112], dtype='int'),
                              'd': pd.Series([212, 215, 218, 211, 222, 225, 312], dtype='int'),
                              'e': pd.Series([1121, 12151, 12181, 12111, 12221, 12251, 13121],
                                             dtype='int')})
    ctx: CylonContext = CylonContext(config=None, distributed=False)
    cn_tb: Table = Table.from_pandas(ctx, pdf_float)
    indexing_type = IndexingType.LINEAR
    drop_index = True
    print("Before Indexing")
    print(cn_tb)
    # Index both frames on column 'a'.
    cn_tb.set_index('a', indexing_type, drop_index)
    pdf_float = pdf_float.set_index('a')
    print("After Indexing")
    assert cn_tb.column_names == ['b', 'c', 'd', 'e']
    assert cn_tb.get_index().get_type() == IndexingType.LINEAR
    # Bounded row and column slices.
    iloc_cn_1 = cn_tb.iloc[3:5, 1:3]
    iloc_pd_1 = pdf_float.iloc[3:5, 1:3]
    print(iloc_cn_1)
    print(iloc_pd_1)
    assert iloc_pd_1.values.tolist() == iloc_cn_1.to_pandas().values.tolist()
    # Open-ended column slice.
    iloc_cn_2 = cn_tb.iloc[3:5, 1:]
    iloc_pd_2 = pdf_float.iloc[3:5, 1:]
    print(iloc_cn_2)
    print(iloc_pd_2)
    assert iloc_pd_2.values.tolist() == iloc_cn_2.to_pandas().values.tolist()
    # Open-ended row slice.
    iloc_cn_3 = cn_tb.iloc[3:, 1:]
    iloc_pd_3 = pdf_float.iloc[3:, 1:]
    assert iloc_pd_3.values.tolist() == iloc_cn_3.to_pandas().values.tolist()
    # Row slice bounded from above only.
    iloc_cn_4 = cn_tb.iloc[:3, 1:]
    iloc_pd_4 = pdf_float.iloc[:3, 1:]
    print(iloc_cn_4)
    print(iloc_pd_4)
    assert iloc_pd_4.values.tolist() == iloc_cn_4.to_pandas().values.tolist()
    # Full-table selection.
    iloc_cn_5 = cn_tb.iloc[:, :]
    iloc_pd_5 = pdf_float.iloc[:, :]
    assert iloc_pd_5.values.tolist() == iloc_cn_5.to_pandas().values.tolist()
    # Explicit positional row list.
    iloc_cn_6 = cn_tb.iloc[[0, 2, 3], :]
    iloc_pd_6 = pdf_float.iloc[[0, 2, 3], :]
    assert iloc_pd_6.values.tolist() == iloc_cn_6.to_pandas().values.tolist()
def test_isin():
    """Index.isin on a CSV-backed table must agree element-wise with pandas."""
    ctx = CylonContext(config=None, distributed=False)
    csv_read_options = CSVReadOptions().use_threads(True).block_size(1 << 30)
    table_path = 'data/input/duplicate_data_0.csv'
    tb: Table = read_csv(ctx, table_path, csv_read_options)
    pdf: pd.DataFrame = tb.to_pandas()
    # Index both on their first column so the comparisons line up.
    tb.set_index(tb.column_names[0], drop=True)
    pdf.set_index(pdf.columns[0], drop=True, inplace=True)
    assert tb.index.values.tolist() == pdf.index.values.tolist()
    # Mix of values that are and are not expected in the index.
    compare_values = [4, 1, 10, 100, 150]
    tb_res_isin = tb.index.isin(compare_values)
    pdf_res_isin = pdf.index.isin(compare_values)
    assert tb_res_isin.tolist() == pdf_res_isin.tolist()
def test_isin_with_getitem():
    """index.isin result used as a boolean row filter.

    Filters the pandas frame with the boolean mask directly, filters the
    PyCylon table via a single-column boolean Table, and asserts both the
    row values and the resulting index agree.
    """
    ctx = CylonContext(config=None, distributed=False)
    csv_read_options = CSVReadOptions().use_threads(True).block_size(1 << 30)
    table_path = 'data/input/duplicate_data_0.csv'
    tb: Table = read_csv(ctx, table_path, csv_read_options)
    pdf: pd.DataFrame = tb.to_pandas()
    tb.set_index(tb.column_names[0], drop=True)
    pdf.set_index(pdf.columns[0], drop=True, inplace=True)
    assert tb.index.values.tolist() == pdf.index.values.tolist()
    compare_values = [4, 1, 10, 100, 150]
    tb_res_isin = tb.index.isin(compare_values)
    pdf_res_isin = pdf.index.isin(compare_values)
    assert tb_res_isin.tolist() == pdf_res_isin.tolist()
    print(tb_res_isin)
    print(pdf_res_isin)
    # pandas: boolean mask filters rows and carries the index along.
    pdf1 = pdf[pdf_res_isin]
    print("Pandas Output")
    print(pdf1)
    print(pdf1.index.values)
    # PyCylon: wrap the mask in a one-column Table and filter via __getitem__.
    tb_filter = Table.from_list(ctx, ['filter'], [tb_res_isin.tolist()])
    tb1 = tb[tb_filter]
    # Re-attach the expected index by selecting the kept index values
    # (filtering apparently does not carry the index — hence the manual set).
    resultant_index = tb.index.values[tb_res_isin].tolist()
    print(resultant_index)
    tb1.set_index(resultant_index)
    print("PyCylon Output")
    print(tb1)
    print(tb1.index.values)
    assert pdf1.values.tolist() == tb1.to_pandas().values.tolist()
    print(tb1.index.values)
    print(pdf1.index.values)
    assert tb1.index.values.tolist() == pdf1.index.values.tolist()
def test_arrow_index():
    """Smoke-test ArrowLocIndexer range/indices lookups on a LINEAR index.

    NOTE(review): beyond the two index-setup asserts, this function only
    prints the lookup results — nothing about output1..output6 is asserted.
    """
    from pycylon.indexing.cyindex import IndexingType
    from pycylon.indexing.cyindex import ArrowLocIndexer
    pdf_float = pd.DataFrame({'a': pd.Series([1, 4, 7, 10, 20, 23, 11]),
                              'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
                              'c': pd.Series([12, 15, 18, 111, 122, 125, 112], dtype='int'),
                              'd': pd.Series([212, 215, 218, 211, 222, 225, 312], dtype='int'),
                              'e': pd.Series([1121, 12151, 12181, 12111, 12221, 12251, 13121],
                                             dtype='int')})
    ctx: CylonContext = CylonContext(config=None, distributed=False)
    cn_tb: Table = Table.from_pandas(ctx, pdf_float)
    indexing_type = IndexingType.LINEAR
    drop_index = True
    print("Before Indexing")
    print(cn_tb)
    cn_tb.set_index('a', indexing_type, drop_index)
    pdf_float = pdf_float.set_index('a')
    print("After Indexing")
    assert cn_tb.column_names == ['b', 'c', 'd', 'e']
    assert cn_tb.get_index().get_type() == IndexingType.LINEAR
    print(cn_tb.get_index().values)
    index_array = cn_tb.get_index().get_index_array()
    print(index_array)
    print(index_array.type)
    # Build an Arrow scalar of the same type as the index entries.
    scalar_value = pa.scalar(10, index_array.type)
    print(scalar_value)
    arrow_loc_indexer = ArrowLocIndexer(IndexingType.LINEAR)
    # Range lookup (4..20) with: single column, column slice, column list.
    output1 = arrow_loc_indexer.loc_with_index_range(4, 20, 0, cn_tb)
    print(output1)
    print(output1.get_index().values)
    output2 = arrow_loc_indexer.loc_with_index_range(4, 20, slice(0, 1), cn_tb)
    print(output2)
    print(output2.get_index().values)
    output3 = arrow_loc_indexer.loc_with_index_range(4, 20, [0, 1, 2], cn_tb)
    print(output3)
    print(output3.get_index().values)
    # Explicit-indices lookup with the same three column specs.
    output4 = arrow_loc_indexer.loc_with_indices([4], 0, cn_tb)
    print(output4)
    print(output4.get_index().values)
    output5 = arrow_loc_indexer.loc_with_indices([4, 20], slice(0, 1), cn_tb)
    print(output5)
    print(output5.get_index().values)
    output6 = arrow_loc_indexer.loc_with_indices([4, 20], [0, 1, 2], cn_tb)
    print(output6)
    print(output6.get_index().values)
def test_index_set_index():
    """Manual exploration of HASH indexing plus Table.isin.

    NOTE(review): the final isin/loc comparison is only printed, never
    asserted — this reads as a scratch/debug function rather than a test.
    """
    from pycylon.indexing.cyindex import IndexingType
    from pycylon.indexing.index_utils import IndexUtil
    pdf_float = pd.DataFrame({'a': pd.Series(["1", "4", "7", "10", "20", "23", "11"]),
                              'b': pd.Series([2, 5, 8, 11, 22, 25, 12], dtype='int'),
                              'c': pd.Series([12, 15, 18, 111, 122, 125, 112], dtype='int'),
                              'd': pd.Series([212, 215, 218, 211, 222, 225, 312], dtype='int'),
                              'e': pd.Series([1121, 12151, 12181, 12111, 12221, 12251, 13121],
                                             dtype='int')})
    ctx: CylonContext = CylonContext(config=None, distributed=False)
    # pdf_float = pdf_float.set_index('a')
    # pdf_float = pdf_float.reset_index()
    cn_tb: Table = Table.from_pandas(ctx, pdf_float)
    print("PyCylon Orignal Table")
    print(cn_tb)
    artb = cn_tb.to_arrow()
    print("Arrow Table")
    print(artb)
    # HASH index this time (the other tests above use LINEAR).
    indexing_type = IndexingType.HASH
    drop_index = True
    print("Before Indexing : ", cn_tb.column_names)
    print("index values", cn_tb.index.values)
    print(cn_tb)
    cn_tb.set_index(key='a', indexing_type=indexing_type, drop=drop_index)
    print("After Indexing : ", cn_tb.column_names)
    print(cn_tb)
    print(cn_tb.index.values)
    print(pdf_float.index.values)
    # Hand-built boolean loc filter on pandas vs Table.isin on PyCylon.
    filter = [False, True, False, True, False, False, False]
    pdf_loc = pdf_float.loc[filter]
    res = cn_tb.isin([10, 20, 30])
    print(res)
    print(pdf_loc)
# test_isin_with_getitem()
# test_loc_op_mode_1()
# test_loc_op_mode_2()
# test_loc_op_mode_3()
#
# test_iloc_op_mode_1()

# Guarded so importing this module (e.g. for pytest collection) does not
# trigger the manual smoke run.
if __name__ == "__main__":
    test_index_set_index()
|
nilq/baby-python
|
python
|
""" Loader for Maya api sub-package """
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# this can be imported without having maya fully initialized
from .allapi import *
|
nilq/baby-python
|
python
|
#!/usr/bin/python
#
# Copyright (c) 2012 Mikkel Schubert <MSchubert@snm.ku.dk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import copy
import collections
from pypeline.node import Node
from pypeline.common.fileutils import move_file, reroot_path
from pypeline.common.formats.msa import MSA
from pypeline.common.formats.phylip import interleaved_phy, sequential_phy
from pypeline.common.utilities import \
safe_coerce_to_frozenset, \
safe_coerce_to_tuple
# Keys accepted in each per-name sub-dictionary passed via 'infiles'.
_VALID_KEYS = frozenset(["partitions", "filenames"])
class FastaToPartitionedInterleavedPhyNode(Node):
    """Merges per-partition FASTA MSAs into a single interleaved PHYLIP
    file plus a partition file of "DNA, name = start-end" lines.

    Python 2 codebase (uses dict.iteritems).
    """

    def __init__(self, infiles, out_prefix, exclude_groups=(), reduce=False,
                 dependencies=(), file_dependencies=()):
        """
        infiles = {names : {"partitions" : ..., "filenames" : [...]}}
        """
        # Validate the nested-dict shape of 'infiles' up front.
        if not (isinstance(infiles, dict)
                and all(isinstance(dd, dict) for dd in infiles.values())):
            raise TypeError("'infiles' must be a dictionary of dictionaries")

        input_filenames = []
        for (name, subdd) in infiles.iteritems():
            if set(subdd) - _VALID_KEYS:
                raise ValueError("Invalid keys found for %r: %s"
                                 % (name, ", ".join(set(subdd) - _VALID_KEYS)))
            elif not isinstance(subdd["filenames"], list):
                raise ValueError("filenames must be a list of strings")
            input_filenames.extend(subdd["filenames"])
        # Optional file dependencies; used to depend on the list of sequences
        input_filenames.extend(safe_coerce_to_tuple(file_dependencies))

        self._reduce = bool(reduce)
        self._infiles = copy.deepcopy(infiles)
        self._out_prefix = out_prefix
        self._excluded = safe_coerce_to_frozenset(exclude_groups)

        description = "<FastaToPartitionedPhy%s: %i file(s) -> '%s.*'>" % \
            (" (reducing)" if reduce else "", len(infiles), out_prefix)
        Node.__init__(self,
                      description=description,
                      input_files=input_filenames,
                      output_files=[out_prefix + ".phy",
                                    out_prefix + ".partitions"],
                      dependencies=dependencies)

    def _run(self, _config, temp):
        """Reads, splits, optionally reduces, and merges the MSAs, then
        writes the .phy and .partitions outputs into 'temp'."""
        merged_msas = []
        for (name, files_dd) in sorted(self._infiles.iteritems()):
            partitions = files_dd["partitions"]
            msas = dict((key, []) for key in partitions)
            for filename in files_dd["filenames"]:
                msa = MSA.from_file(filename)
                if self._excluded:
                    msa = msa.exclude(self._excluded)
                for (key, msa_part) in msa.split(partitions).iteritems():
                    msas[key].append(msa_part)
            # Drop the "X" partition key (positions not assigned a group).
            msas.pop("X", None)
            for (key, msa_parts) in sorted(msas.iteritems()):
                merged_msa = MSA.join(*msa_parts)
                if self._reduce:
                    merged_msa = merged_msa.reduce()
                if merged_msa is not None:
                    merged_msas.append(("%s_%s" % (name, key),
                                        merged_msa))

        out_fname_phy = reroot_path(temp, self._out_prefix + ".phy")
        with open(out_fname_phy, "w") as output_phy:
            final_msa = MSA.join(*(msa for (_, msa) in merged_msas))
            output_phy.write(interleaved_phy(final_msa))

        # Partition ranges are 1-based, contiguous, in merged order.
        partition_end = 0
        out_fname_parts = reroot_path(temp, self._out_prefix + ".partitions")
        with open(out_fname_parts, "w") as output_part:
            for (name, msa) in merged_msas:
                length = msa.seqlen()
                output_part.write("DNA, %s = %i-%i\n"
                                  % (name,
                                     partition_end + 1,
                                     partition_end + length))
                partition_end += length

    def _teardown(self, _config, temp):
        # Outputs are written to 'temp' first, then moved into place.
        move_file(reroot_path(temp, self._out_prefix + ".phy"),
                  self._out_prefix + ".phy")
        move_file(reroot_path(temp, self._out_prefix + ".partitions"),
                  self._out_prefix + ".partitions")
class FastaToPartitionsNode(Node):
    """Writes a partition file ("DNA, name = ranges") describing how the
    concatenated input MSAs split into per-group (codon-position) ranges.

    Python 2 codebase (uses dict.itervalues / iteritems).
    """

    def __init__(self, infiles, out_partitions, partition_by = "123", dependencies = ()):
        if (len(partition_by) != 3):
            raise ValueError("Default 'partition_by' must be 3 entires long!")
        elif not isinstance(infiles, dict):
            raise TypeError("'infiles' must be a dictionary")
        elif any(len(dd.get("partition_by", "123")) != 3 for dd in infiles.itervalues()):
            raise ValueError("'partition_by' must be 3 entires long!")
        elif not all(isinstance(dd, dict) for dd in infiles.values()):
            raise TypeError("'infiles' must be a dictionary of dictionaries")
        # BUGFIX: was 'not any(...)', which only fired when NO input file had
        # a name; the error message states all input files require one.
        elif not all(("name" in dd) for dd in infiles.values()):
            raise ValueError("'name' must be specified for all input files")

        # BUGFIX: the original branch referenced the generator variable 'dd'
        # outside its comprehension when formatting the message, raising
        # NameError instead of the intended ValueError; collect the
        # offending keys explicitly instead.
        invalid_keys = set()
        for dd in infiles.values():
            invalid_keys.update(set(dd) - _VALID_KEYS)
        if invalid_keys:
            raise ValueError("Invalid keys found: %s" % ", ".join(invalid_keys))

        self._infiles = infiles
        self._out_part = out_partitions
        self._part_by = partition_by

        description = "<FastaToPartitions (default: %s): %i file(s) -> '%s'>" % \
            (partition_by, len(infiles), out_partitions)
        Node.__init__(self,
                      description = description,
                      input_files = infiles.keys(),
                      output_files = out_partitions,
                      dependencies = dependencies)

    def _run(self, _config, temp):
        # Walk the MSAs in order, tracking the running 1-based column offset.
        end = 0
        partitions = collections.defaultdict(list)
        for (filename, msa) in _read_sequences(self._infiles):
            length = msa.seqlen()
            start, end = end + 1, end + length

            for (group, offsets) in self._get_partition_by(filename):
                if len(offsets) != 3:
                    # Partial group: one stride-3 range per codon position.
                    parts = [("%i-%i\\3" % (start + offset, end)) for offset in offsets]
                else:
                    # All three positions in one group: single contiguous range.
                    parts = ["%i-%i" % (start, end)]

                name = "%s_%s" % (self._infiles[filename]["name"], group)
                partitions[name].extend(parts)

        with open(reroot_path(temp, self._out_part), "w") as part_file:
            for (name, parts) in sorted(partitions.items()):
                part_file.writelines("DNA, %s = %s\n" % (name, ", ".join(parts)))

    def _teardown(self, _config, temp):
        # Written to 'temp' first, then moved into place.
        move_file(reroot_path(temp, self._out_part), self._out_part)

    def _get_partition_by(self, filename):
        """Returns sorted (group, [offsets]) pairs mapping each group label
        to the codon-position offsets (0-2) assigned to it."""
        groups = self._infiles[filename].get("partition_by", self._part_by)

        partition_by = {}
        for (group, offset) in zip(groups, range(3)):
            partition_by.setdefault(group, []).append(offset)

        return list(sorted(partition_by.items()))
class FastaToInterleavedPhyNode(Node):
    """Joins a set of FASTA MSAs and writes them out as one interleaved
    PHYLIP file."""

    def __init__(self, infiles, out_phy, add_flag = False, dependencies = ()):
        self._add_flag = add_flag
        self._out_phy = out_phy

        flag_note = " (w/ flag)" if add_flag else ""
        description = "<FastaToInterleavedPhy: %i file(s) -> '%s'%s>" \
            % (len(infiles), out_phy, flag_note)
        Node.__init__(self,
                      description = description,
                      input_files = infiles,
                      output_files = [out_phy],
                      dependencies = dependencies)

    def _run(self, _config, temp):
        # Sort input paths for a deterministic join order.
        msas = (MSA.from_file(fname) for fname in sorted(self.input_files))
        joined = MSA.join(*msas)
        with open(reroot_path(temp, self._out_phy), "w") as output:
            output.write(interleaved_phy(joined, add_flag = self._add_flag))

    def _teardown(self, _config, temp):
        # Move the finished file from the temp dir to its final location.
        move_file(reroot_path(temp, self._out_phy), self._out_phy)
class FastaToSequentialPhyNode(Node):
    """Joins a set of FASTA MSAs and writes each as a sequential-PHYLIP
    block, blocks separated by blank lines."""

    def __init__(self, infiles, out_phy, add_flag = False, dependencies = ()):
        self._add_flag = add_flag
        self._out_phy = out_phy

        # BUGFIX: the description previously read "FastaToInterleavedPhy",
        # a copy-paste from the interleaved node above.
        description = "<FastaToSequentialPhy: %i file(s) -> '%s'%s>" % \
            (len(infiles), out_phy, (" (w/ flag)" if add_flag else ""))
        Node.__init__(self,
                      description = description,
                      input_files = infiles,
                      output_files = [out_phy],
                      dependencies = dependencies)

    def _run(self, _config, temp):
        # Read and check that MSAs share groups
        msas = [MSA.from_file(filename) for filename in sorted(self.input_files)]
        MSA.validate(*msas)

        blocks = []
        for msa in msas:
            blocks.append(sequential_phy(msa, add_flag = self._add_flag))

        with open(reroot_path(temp, self._out_phy), "w") as output:
            output.write("\n\n".join(blocks))

    def _teardown(self, _config, temp):
        # Written to 'temp' first, then moved into place.
        move_file(reroot_path(temp, self._out_phy), self._out_phy)
def _read_sequences(filenames):
    """Reads every file into an MSA, cross-validates the set, and returns
    an iterator of (filename, MSA) pairs."""
    msas = dict((fname, MSA.from_file(fname)) for fname in filenames)
    MSA.validate(*msas.values())
    return msas.iteritems()
|
nilq/baby-python
|
python
|
import sys
import srvdb

db = srvdb.SrvDb("./pdb-aggregator.db")

# First argument: file with one node IP per line.
# (Renamed from 'file', which shadowed the builtin.)
node_file = sys.argv[1]
print(node_file)
with open(node_file) as f:
    content = f.read().splitlines()
print(content)

# De-duplicate while preserving first-seen order; the set makes each
# membership test O(1) instead of the original O(n) list scan.
ips = []
seen = set()
for ip in content:
    if ip not in seen:
        print("Adding node {}".format(ip))
        db.add_node(ip, False, 0, "")
        seen.add(ip)
        ips.append(ip)

# Second argument: file with route data (processing currently disabled).
route_file = sys.argv[2]
print(route_file)
with open(route_file) as f:
    content = f.read().splitlines()
print(content)

# routes = []
# for ip in content:
#     if ip not in routes:
#         print("Adding routes {}".format(ip))
#         db.add_route(ip, route)
#         ips.append(routes)
#
# ips = db.get_node_ips()
# for ip in ips:
#     print("Found IP: {}".format(ip))
|
nilq/baby-python
|
python
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import random
import time
import math
from functools import partial
import numpy as np
import paddle
from paddle.io import DataLoader
import paddlenlp as ppnlp
from paddlenlp.transformers import LinearDecayWithWarmup
from paddlenlp.metrics import ChunkEvaluator
from datasets import load_dataset
from paddlenlp.transformers import BertForTokenClassification, BertTokenizer
from paddlenlp.transformers import ErnieForTokenClassification, ErnieTokenizer
from paddlenlp.transformers import ErnieCtmForTokenClassification, ErnieCtmTokenizer
from paddlenlp.data import DataCollatorForTokenClassification
from paddlenlp.utils.log import logger
# Maps the --model_type CLI value to its (model class, tokenizer class) pair.
MODEL_CLASSES = {
    "bert": (BertForTokenClassification, BertTokenizer),
    "ernie": (ErnieForTokenClassification, ErnieTokenizer),
    "ernie-ctm": (ErnieCtmForTokenClassification, ErnieCtmTokenizer)
}
# CLI definition for NER fine-tuning; the long single-line add_argument
# calls are deliberately excluded from auto-formatting via the yapf markers.
parser = argparse.ArgumentParser()
# yapf: disable
parser.add_argument("--model_type", default="bert", type=str, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), )
parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join( sum([ list(classes[-1].pretrained_init_configuration.keys()) for classes in MODEL_CLASSES.values() ], [])), )
parser.add_argument("--dataset", default="msra_ner", type=str, choices=["msra_ner", "peoples_daily_ner"] ,help="The named entity recognition datasets.")
parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3, type=int, help="Total number of training epochs to perform.", )
parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.",)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=1, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=100, help="Save checkpoint every X updates steps.")
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--device", default="gpu", type=str, choices=["cpu", "gpu", "xpu"] ,help="The device to select to train the model, is must be cpu/gpu/xpu.")
# yapf: enable
@paddle.no_grad()
def evaluate(model, loss_fct, metric, data_loader, label_num, mode="valid"):
    """Runs one evaluation pass and prints chunk precision/recall/F1.

    NOTE(review): 'avg_loss' is overwritten each batch, so the printed
    "eval loss" is the mean loss of the LAST batch only — confirm whether
    a running average was intended.  'label_num' is currently unused.
    """
    model.eval()
    metric.reset()
    avg_loss, precision, recall, f1_score = 0, 0, 0, 0
    for batch in data_loader:
        logits = model(batch['input_ids'], batch['token_type_ids'])
        loss = loss_fct(logits, batch['labels'])
        avg_loss = paddle.mean(loss)
        preds = logits.argmax(axis=2)
        num_infer_chunks, num_label_chunks, num_correct_chunks = metric.compute(
            batch['seq_len'], preds, batch['labels'])
        metric.update(num_infer_chunks.numpy(),
                      num_label_chunks.numpy(), num_correct_chunks.numpy())
        # accumulate() reflects everything seen so far; the final iteration's
        # values are the ones printed below.
        precision, recall, f1_score = metric.accumulate()
    print("%s: eval loss: %f, precision: %f, recall: %f, f1: %f" %
          (mode, avg_loss, precision, recall, f1_score))
    # Restore training mode for the caller.
    model.train()
def do_train(args):
    """Fine-tunes a token-classification model for NER.

    Loads the chosen dataset, tokenizes it with word/label alignment,
    trains with AdamW under a linear warmup/decay schedule, and on rank 0
    periodically evaluates (dev split only for peoples_daily_ner, always
    test) and saves checkpoints to args.output_dir.
    """
    paddle.set_device(args.device)
    if paddle.distributed.get_world_size() > 1:
        paddle.distributed.init_parallel_env()

    # Create dataset, tokenizer and dataloader.
    # NOTE: the original code branched on args.dataset here, but both
    # branches were identical, so the redundant conditional was removed.
    raw_datasets = load_dataset(args.dataset)

    AutoForTokenClassification, AutoTokenizer = MODEL_CLASSES[args.model_type]
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)

    train_ds = raw_datasets['train']
    label_list = train_ds.features['ner_tags'].feature.names
    label_num = len(label_list)
    no_entity_id = 0

    def tokenize_and_align_labels(examples):
        # Tokenizes pre-split words, then pads/truncates each example's
        # word-level labels to match the produced token sequence, with the
        # added special-token slots labelled as "no entity".
        tokenized_inputs = tokenizer(
            examples['tokens'],
            max_seq_len=args.max_seq_length,
            # We use this argument because the texts in our dataset are lists of words (with a label for each word).
            is_split_into_words=True,
            return_length=True)
        labels = []

        for i, label in enumerate(examples['ner_tags']):
            label_ids = label
            # Truncate labels that no longer fit beside the 2 special tokens.
            if len(tokenized_inputs['input_ids'][i]) - 2 < len(label_ids):
                label_ids = label_ids[:len(tokenized_inputs['input_ids'][i]) -
                                      2]
            label_ids = [no_entity_id] + label_ids + [no_entity_id]
            label_ids += [no_entity_id] * (
                len(tokenized_inputs['input_ids'][i]) - len(label_ids))

            labels.append(label_ids)
        tokenized_inputs["labels"] = labels
        return tokenized_inputs

    # Drop the final example of each split (original behavior, preserved).
    train_ds = train_ds.select(range(len(train_ds) - 1))
    column_names = train_ds.column_names
    train_ds = train_ds.map(tokenize_and_align_labels,
                            batched=True,
                            remove_columns=column_names)
    # Label id ignored by both the loss and the collator's padding.
    ignore_label = -100
    batchify_fn = DataCollatorForTokenClassification(
        tokenizer=tokenizer, label_pad_token_id=ignore_label)

    train_batch_sampler = paddle.io.DistributedBatchSampler(
        train_ds, batch_size=args.batch_size, shuffle=True, drop_last=True)

    train_data_loader = DataLoader(
        dataset=train_ds,
        collate_fn=batchify_fn,
        num_workers=0,
        batch_sampler=train_batch_sampler,
        return_list=True)

    test_ds = raw_datasets['test']
    test_ds = test_ds.select(range(len(test_ds) - 1))
    test_ds = test_ds.map(tokenize_and_align_labels,
                          batched=True,
                          remove_columns=column_names)

    test_data_loader = DataLoader(
        dataset=test_ds,
        collate_fn=batchify_fn,
        num_workers=0,
        batch_size=args.batch_size,
        return_list=True)

    # Only peoples_daily_ner provides a validation split.
    if args.dataset == "peoples_daily_ner":
        dev_ds = raw_datasets['validation']
        dev_ds = dev_ds.select(range(len(dev_ds) - 1))
        dev_ds = dev_ds.map(tokenize_and_align_labels,
                            batched=True,
                            remove_columns=column_names)

        dev_data_loader = DataLoader(
            dataset=dev_ds,
            collate_fn=batchify_fn,
            num_workers=0,
            batch_size=args.batch_size,
            return_list=True)

    # Define the model network and its loss
    model = AutoForTokenClassification.from_pretrained(
        args.model_name_or_path, num_classes=label_num)
    if paddle.distributed.get_world_size() > 1:
        model = paddle.DataParallel(model)

    num_training_steps = args.max_steps if args.max_steps > 0 else len(
        train_data_loader) * args.num_train_epochs

    lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,
                                         args.warmup_steps)

    # Generate parameter names needed to perform weight decay.
    # All bias and LayerNorm parameters are excluded.
    decay_params = [
        p.name for n, p in model.named_parameters()
        if not any(nd in n for nd in ["bias", "norm"])
    ]
    optimizer = paddle.optimizer.AdamW(
        learning_rate=lr_scheduler,
        epsilon=args.adam_epsilon,
        parameters=model.parameters(),
        weight_decay=args.weight_decay,
        apply_decay_param_fun=lambda x: x in decay_params)

    loss_fct = paddle.nn.loss.CrossEntropyLoss(ignore_index=ignore_label)

    metric = ChunkEvaluator(label_list=label_list)

    global_step = 0
    tic_train = time.time()
    for epoch in range(args.num_train_epochs):
        for step, batch in enumerate(train_data_loader):
            global_step += 1
            logits = model(batch['input_ids'], batch['token_type_ids'])
            loss = loss_fct(logits, batch['labels'])
            avg_loss = paddle.mean(loss)
            if global_step % args.logging_steps == 0:
                print(
                    "global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s"
                    % (global_step, epoch, step, avg_loss,
                       args.logging_steps / (time.time() - tic_train)))
                tic_train = time.time()
            avg_loss.backward()
            optimizer.step()
            lr_scheduler.step()
            optimizer.clear_grad()
            # Periodic (and final-step) evaluation + checkpoint on rank 0.
            if global_step % args.save_steps == 0 or global_step == num_training_steps:
                if paddle.distributed.get_rank() == 0:
                    if args.dataset == "peoples_daily_ner":
                        evaluate(model, loss_fct, metric, dev_data_loader,
                                 label_num, "valid")
                    evaluate(model, loss_fct, metric, test_data_loader,
                             label_num, "test")
                    paddle.save(model.state_dict(),
                                os.path.join(args.output_dir,
                                             "model_%d.pdparams" % global_step))
            if global_step >= num_training_steps:
                return
if __name__ == "__main__":
    args = parser.parse_args()
    # Echo the full configuration before training starts.
    for arg_name in vars(args):
        logger.info('{:20}:{}'.format(arg_name, getattr(args, arg_name)))
    do_train(args)
|
nilq/baby-python
|
python
|
from output.models.nist_data.list_pkg.qname.schema_instance.nistschema_sv_iv_list_qname_pattern_2_xsd.nistschema_sv_iv_list_qname_pattern_2 import NistschemaSvIvListQnamePattern2
__all__ = [
"NistschemaSvIvListQnamePattern2",
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
from datetime import datetime, timedelta
from math import floor
import sys
from time import perf_counter
from betterprint.betterprint import bp, bp_dict
from modules.notations import byte_notation
from modules.createfolder import folder_logic, folder_stat_reset
from betterprint.colortext import Ct
from modules.freespace import free_space
from modules.multifile import file_logic
import modules.options as options
import modules.treewalk
# Timing anchors taken at import time so the final summary can report
# total runtime (perf_counter) and a human-readable start stamp (datetime).
START_PROG_TIME = perf_counter()
start_time = datetime.now()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def main():
    """Top-level copy pipeline.

    Walks the source tree, verifies the target has enough free space,
    creates target folders, copies and hash-validates files, then prints a
    summary table of counts, sizes and per-phase timings.  Exits with
    status 1 on insufficient space or Ctrl+C.
    """
    try:
        # ~~~ # -init display-
        bp([f'\t{options.copyright}\n\t{options.license_info}\n{"━" * 40}',
            Ct.A], veb=2)
        bp([f'Program start: {start_time}\nSource: ', Ct.A, f'{args.source}',
            Ct.GREEN, '\nTarget: ', Ct.A, f'{args.target}\n', Ct.GREEN,
            'Excluded Folders: ', Ct.A, f'{args.exdir}\n', Ct.GREEN, 'Excluded'
            ' Files: ', Ct.A, f'{args.exfile}', Ct.GREEN])
        bp(['Args: ', Ct.A], inl=1)
        # Echo remaining args inline, highlighting the hash selection.
        for k, v in vars(args).items():
            if k != 'source' and k != 'target' and k != 'exdir' and k != \
                    'exfile' and k != 'available':
                if k == 'hash':
                    bp([f' {k}: ', Ct.A, f'{v}', Ct.RED, ' |', Ct.A], num=0,
                       inl=1, log=0)
                else:
                    bp([f' {k}: {v} |', Ct.A], inl=1, log=0)
        bp([f'\n\n{"━" * 40}\n', Ct.A], log=0)
        # ~~~ # -tree walk-
        tree_return = modules.treewalk.tree_walk()
        tw_tup = tree_return[2]
        folder_total = f'{tw_tup[2]["num_dirs"]:,}'
        file_total = f'{tw_tup[2]["num_files"]:,}'
        file_size_total = byte_notation(tw_tup[2]["file_size"], ntn=1)
        # ~~~ # -free space-
        target_space = free_space(args.target)
        target_space_bytenote = byte_notation(target_space['free_bytes'],
                                              ntn=1)
        # print out the tree walk data
        bp([f'Source - Size: {file_size_total[1]:>10} | Folders: '
            f'{folder_total} | Files: {file_total}\nTarget - Free: '
            f'{target_space_bytenote[1]:>10}', Ct.A])
        # Abort before any copying if the target cannot hold the source.
        if tw_tup[2]["file_size"] >= target_space['free_bytes']:
            bp(['not enough free space to copy all the data.', Ct.RED], err=2)
            sys.exit(1)
        bp([f'\n{"━" * 40}\n', Ct.A], log=0)
        # ~~~ # -folder creation-
        bp(['Create folders...', Ct.A])
        folder_return = folder_logic(tw_tup[0])
        f_time = folder_return[1]
        folder_time = f'{f_time:,.4f}'
        folder_success = folder_return[2]['success']
        folder_failure = folder_return[2]['failure']
        bp([f'Success: {folder_success}/{folder_total}\nFailure: '
            f'{folder_failure}/{folder_total}\nDuration: '
            f'{timedelta(seconds=floor(f_time))}', Ct.A])
        bp([f'\n{"━" * 40}\n', Ct.A], log=0)
        # ~~~ # -file creation-
        file_return = file_logic(tw_tup[1], tw_tup[2])
        file_size_success = byte_notation(file_return["val_size"], ntn=1)
        file_size_failure = byte_notation(tw_tup[2]["file_size"] -
                                          file_return["val_size"], ntn=1)
        # Hash time spans both the source and the validation pass.
        hex_tot = file_return["hash_time"] + file_return["val_hash_time"]
        file_tot = int(file_return['read_time'] + file_return["write_time"])
        bp([f'\n{"━" * 40}\n', Ct.A], log=0)
        # ~~~ # -folder stat reset-
        folder_reset = folder_stat_reset(folder_return[2]['success_dict'])
        f_time += folder_reset[1]
        # ~~~ # -final display-
        bp([f'\n{" " * 16}Source Target FAILED TIME', Ct.A])
        bp([f' Folders: {folder_total:>10}{folder_success:>10,}'
            f'{folder_failure:>10,}{folder_time:>12s}s', Ct.A])
        bp([f' Files: {file_total:>10}{file_return["success"]:>10,}'
            f'{file_return["failure"]:>10,}{file_tot:>12,.4f}s', Ct.A])
        bp([f' Bytes: {file_size_total[1]:>10}{file_size_success[1]:>10}'
            f'{file_size_failure[1]:>10}', Ct.A])
        bp([f'Validation: {file_total:>10}{file_return["val_success"]:>10,}'
            f'{file_return["val_failure"]:>10,}{hex_tot:>12,.4f}s (+'
            f'{file_return["val_read_time"]:,.4f}s)', Ct.A])
        bp([f'\n\n{"━" * 40}\n', Ct.A], log=0)
        end_time = perf_counter()
        total_time = end_time - START_PROG_TIME
        # Sum of all measured phases; the remainder is program overhead.
        tft = (tree_return[1] + f_time + file_return["read_time"] +
               file_return["hash_time"] + file_return["write_time"] +
               file_return["val_read_time"] + file_return["val_hash_time"])
        bp([f'\n{total_time:,.4f}s - Total Time\n{tree_return[1]:,.4f}s - Tree'
            f' Walk Time\n{folder_time:}s - FolderCreation Time\n'
            f'{file_return["read_time"]:,.4f}s - Source Read Time\n'
            f'{file_return["hash_time"]:,.4f}s - Source Hash Validation Time\n'
            f'{file_return["write_time"]:,.4f}s - Target Write Time\n'
            f'{file_return["val_read_time"]:,.4f}s - Target Read Time\n'
            f'{file_return["val_hash_time"]:,.4f}s - Target Hash Validation '
            f'Time\n{tft:,.4f}s - Total Function Time\n{"━" * 40}\n'
            f'{total_time - tft:,.4f}s - Program Overhead Time', Ct.A])
    except KeyboardInterrupt:
        bp(['Ctrl+C pressed...\n', Ct.RED], err=2)
        sys.exit(1)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
if __name__ == '__main__':
    # ~~~ # -title-
    bp([f'{options.ver} - {options.purpose}\n', Ct.BBLUE])
    # ~~~ # -args-
    args = options.args
    # ~~~ # -variables-
    # Propagate CLI options into betterprint's global configuration dict
    # before any further output is produced.
    bp_dict['verbose'] = args.verbose
    bp_dict['date_log'] = args.date_log
    bp_dict['log_file'] = args.log_file
    bp_dict['error_log_file'] = args.error_log_file
    bp_dict['color'] = 0 if args.no_color else 1
    bp_dict['quiet'] = args.quiet
    # ~~~ # -main-
    bp(['calling main().', Ct.BMAGENTA], veb=2, num=0)
    main()
|
nilq/baby-python
|
python
|
# https://www.hackerrank.com/challenges/s10-geometric-distribution-2/problem
# Enter your code here. Read input from STDIN. Print output to STDOUT


def geometric_cdf(p, n):
    """P(first success occurs within the first n trials) for a geometric
    distribution with per-trial success probability p.

    Computed as the sum of the PMF (1-p)**(k-1) * p over k = 1..n, which
    equals the closed form 1 - (1 - p)**n.
    """
    return sum((1 - p) ** (k - 1) * p for k in range(1, n + 1))


def main():
    # Line 1: numerator and denominator of p; line 2: trial count n.
    x, y = map(int, input().split())
    p = x / y
    n = int(input())
    print(round(geometric_cdf(p, n), 3))


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
# Generated by Django 3.0 on 2021-07-21 11:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Order Project listings by posting date and default all Review ratings to 10."""
    dependencies = [
        ('wwwrate', '0007_auto_20210720_1450'),
    ]
    operations = [
        # Projects are now returned oldest-first by default.
        migrations.AlterModelOptions(
            name='project',
            options={'ordering': ['posted_at']},
        ),
        # The three rating fields gain a default of 10 (top score).
        migrations.AlterField(
            model_name='review',
            name='content_rating',
            field=models.PositiveIntegerField(default=10),
        ),
        migrations.AlterField(
            model_name='review',
            name='design_rating',
            field=models.PositiveIntegerField(default=10),
        ),
        migrations.AlterField(
            model_name='review',
            name='usability_rating',
            field=models.PositiveIntegerField(default=10),
        ),
    ]
|
nilq/baby-python
|
python
|
import subprocess
from vendor.addressfetcher.addressfetcher import fetcher
# constant
ADDRESS_PATH = ''
def input_number(prompt):
    """Prompt repeatedly until the user enters a valid number; return it as float."""
    while True:
        try:
            return float(input(prompt))
        except ValueError:
            # Non-numeric entry: silently re-prompt.
            pass
def display_menu(options):
    """Print a numbered menu of *options* and return the user's validated choice.

    Returns the selected option number (a float from input_number) in the
    range 1..len(options).
    """
    for index, label in enumerate(options, start=1):
        print("{:d}. {:s}".format(index, label))
    valid_choices = list(range(1, len(options) + 1))
    selection = 0
    while selection not in valid_choices:
        selection = input_number("Select an option:\n> ")
    return selection
def fetch():
    """Ask for optional whitelist/blacklist paths and store the fetched address file path."""
    global ADDRESS_PATH
    whitelist = input(
        "\nEnter the path of address whitelist file (Leave blank if not exist):\n> ")
    blacklist = input(
        "\nEnter the path of address blacklist file (Leave blank if not exist):\n> ")
    # Delegate to the vendored address fetcher; it returns the address file path.
    ADDRESS_PATH = fetcher(whitelist, blacklist)
def distri():
    """Confirm with the user, then run the flatdistributor transfer.

    Builds the command as an argument list and runs it with shell=False so
    user-supplied values (the airdrop amount, the address file path) cannot
    be interpreted by the shell — the original concatenated them into a
    shell string, which was a command-injection vector.
    Exits the program if the user declines.
    """
    global ADDRESS_PATH
    confirm_distri = input("\nDo distribution now? [Y/N]\n> ")
    if confirm_distri == 'Y' or confirm_distri == 'y':
        modelist = [
            'Interactive mode (do confirm per-transaction)',
            "Non-Interactive mode (doesn't confirm per-transaction)"
        ]
        unfundlist = [
            'Skip',
            'Force send'
        ]
        amount = input("\nEnter airdrop amount for each address:\n> ")
        print("\nChoose distribution mode:\n")
        mode = display_menu(modelist)
        print("\nWhat to do if recipient don't have SOL in their account:\n")
        unfund = display_menu(unfundlist)
        # Base command; flags are appended according to the menu choices.
        cmd = ["python3", "vendor/flatdistributor/flatdistributor.py",
               "transfer", "-a", ADDRESS_PATH, "--drop", amount]
        if mode == 2:
            cmd.append("--non-interactive")
        if unfund == 2:
            cmd.append("--allow-unfunded-recipient")
        subprocess.run(cmd)
    else:
        exit()
def main():
    """Collect recipient addresses, then run the distribution workflow."""
    fetch()
    distri()
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import os
import shutil
import tarfile
import urllib
import urllib.parse
import urllib.request
import zipfile
from pathlib import Path
from typing import Union

from .constants import RU_VOWELS, DEFAULT_DATA_DIR
def count_syllables(word: str) -> int:
    """Count the syllables in a word.

    Each Russian vowel contributes exactly one syllable, so the syllable
    count equals the number of characters of *word* found in RU_VOWELS.

    Args:
        word (str): The word to inspect

    Returns:
        int: Number of syllables
    """
    return sum(1 for char in word if char in RU_VOWELS)
def to_path(path: Union[str, Path]) -> Path:
    """Convert a string path representation into a Path object.

    Fix: the original annotation declared ``path: str`` even though the
    function (and its callers, e.g. download_file) also pass Path objects.

    Args:
        path (str|Path): Path as a string or an existing Path object

    Returns:
        Path: The corresponding Path object (returned unchanged if *path*
        is already a Path)

    Raises:
        TypeError: If the value is neither a string nor a Path object
    """
    if isinstance(path, Path):
        return path
    if isinstance(path, str):
        return Path(path)
    raise TypeError("Некорректно указан путь")
def download_file(
    url: str,
    filename: str = None,
    dirpath: Union[str, Path] = DEFAULT_DATA_DIR,
    force: bool = False
) -> str:
    """Download a file from the network.

    Args:
        url (str): Address of the file to download
        filename (str): Name to store the file under (derived from the URL
            path if omitted)
        dirpath (str|Path): Directory for the downloaded file
        force (bool): Download even if the file already exists

    Returns:
        str: Path of the downloaded file, or ``None`` when the file already
        exists and *force* is False — callers must handle the None case.

    Raises:
        RuntimeError: If the file could not be downloaded
    """
    # exist_ok makes this race-free (replaces the exists()+makedirs pair).
    os.makedirs(dirpath, exist_ok=True)
    if not filename:
        # Fall back to the basename of the (unquoted) URL path.
        filename = os.path.basename(urllib.parse.urlparse(urllib.parse.unquote_plus(url)).path)
    filepath = to_path(dirpath).resolve() / filename
    if filepath.is_file() and force is False:
        print(f"Файл {filepath} уже загружен")
        return None
    try:
        print(f"Загрузка файла {url}...")
        req = urllib.request.Request(url)
        with urllib.request.urlopen(req) as response, open(filepath, 'wb') as out_file:
            shutil.copyfileobj(response, out_file)
    except Exception as err:
        # Narrowed from a bare ``except`` (which also swallowed
        # KeyboardInterrupt/SystemExit) and chains the original cause.
        raise RuntimeError("Не удалось загрузить файл") from err
    print(f"Файл успешно загружен: {filepath}")
    return str(filepath)
def extract_archive(
    archive_file: Union[str, Path],
    extract_dir: Union[str, Path] = None
) -> str:
    """
    Extract files from a ZIP or TAR archive.

    Args:
        archive_file (str|Path): Path to the archive file
        extract_dir (str|Path): Directory for the extracted files
            (defaults to the archive's parent directory)

    Returns:
        str: Path to the directory with the extracted files; when the
        archive contains a single common top-level directory, the path that
        directory was renamed to (named after the archive)
    """
    archive_file = to_path(archive_file).resolve()
    if not extract_dir:
        extract_dir = str(archive_file.parent)
    archive_file = str(archive_file)
    os.makedirs(extract_dir, exist_ok=True)
    is_zip = zipfile.is_zipfile(archive_file)
    is_tar = tarfile.is_tarfile(archive_file)
    if not is_zip and not is_tar:
        print(f"Файл {archive_file} не является архивом в формате ZIP или TAR")
        return extract_dir
    else:
        print(f"Извлечение файлов из архива {archive_file}...")
        shutil.unpack_archive(archive_file, extract_dir=extract_dir, format=None)
        # Collect member names to locate the archive's common top-level path.
        if is_zip:
            with zipfile.ZipFile(archive_file, mode='r') as f:
                members = f.namelist()
        else:
            with tarfile.open(archive_file, mode='r') as f:
                members = f.getnames()
        src_basename = os.path.commonpath(members)
        dest_basename = os.path.basename(archive_file)
        if src_basename:
            # Strip ALL extensions from the archive name (handles e.g. .tar.xz).
            while True:
                tmp, _ = os.path.splitext(dest_basename)
                if tmp == dest_basename:
                    break
                else:
                    dest_basename = tmp
            if src_basename != dest_basename:
                # Rename the extracted top-level directory after the archive.
                return shutil.move(
                    os.path.join(extract_dir, src_basename),
                    os.path.join(extract_dir, dest_basename),
                )
            else:
                return os.path.join(extract_dir, src_basename)
        else:
            # No common top-level directory: members sit directly in extract_dir.
            return extract_dir
if __name__ == "__main__":
    # Smoke test: syllable count of a sample word and a demo archive extraction.
    text = "самооборона"
    print(count_syllables(text))
    print(extract_archive('test.tar.xz'))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
'''
Copyright © 2014 by Virginia Polytechnic Institute and State University
All rights reserved
Virginia Polytechnic Institute and State University (Virginia Tech) owns the copyright for the BEMOSS software and its
associated documentation (“Software”) and retains rights to grant research rights under patents related to
the BEMOSS software to other academic institutions or non-profit research institutions.
You should carefully read the following terms and conditions before using this software.
Your use of this Software indicates your acceptance of this license agreement and all terms and conditions.
You are hereby licensed to use the Software for Non-Commercial Purpose only. Non-Commercial Purpose means the
use of the Software solely for research. Non-Commercial Purpose excludes, without limitation, any use of
the Software, as part of, or in any way in connection with a product or service which is sold, offered for sale,
licensed, leased, loaned, or rented. Permission to use, copy, modify, and distribute this compilation
for Non-Commercial Purpose to other academic institutions or non-profit research institutions is hereby granted
without fee, subject to the following terms of this license.
Commercial Use If you desire to use the software for profit-making or commercial purposes,
you agree to negotiate in good faith a license with Virginia Tech prior to such profit-making or commercial use.
Virginia Tech shall have no obligation to grant such license to you, and may grant exclusive or non-exclusive
licenses to others. You may contact the following by email to discuss commercial use: vtippatents@vtip.org
Limitation of Liability IN NO EVENT WILL VIRGINIA TECH, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO
LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE
OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF VIRGINIA TECH OR OTHER PARTY HAS BEEN ADVISED
OF THE POSSIBILITY OF SUCH DAMAGES.
For full terms and conditions, please visit https://bitbucket.org/bemoss/bemoss_os.
Address all correspondence regarding this license to Virginia Tech’s electronic mail address: vtippatents@vtip.org
__author__ = "Warodom Khamphanchai"
__credits__ = ""
__version__ = "1.2.1"
__maintainer__ = "Warodom Khamphanchai"
__email__ = "kwarodom@vt.edu"
__website__ = "kwarodom.wordpress.com"
__status__ = "Prototype"
__created__ = "2014-8-28 16:19:00"
__lastUpdated__ = "2015-02-11 17:12:03"
'''
import logging
import sys
import datetime
import json
import os
from volttron.lite.agent import BaseAgent, PublishMixin, periodic
from volttron.lite.agent import utils, matching
from volttron.lite.messaging import headers as headers_mod
import settings
import psycopg2 # PostgresQL database adapter
import re
utils.setup_logging()
_log = logging.getLogger(__name__)
app_name = "appLauncher"  # agent name used in published message headers
debug_agent = False  # set True for extra launch-file debug prints
clock_time = 1  # seconds between clockBehavior ticks
time_to_start_previous_apps = 30  # sec
#@params agent & DB interfaces
# PostgreSQL connection settings and table names, pulled from settings.py.
db_host = settings.DATABASES['default']['HOST']
db_port = settings.DATABASES['default']['PORT']
db_database = settings.DATABASES['default']['NAME']
db_user = settings.DATABASES['default']['USER']
db_password = settings.DATABASES['default']['PASSWORD']
db_table_application_registered = settings.DATABASES['default']['TABLE_application_registered']
db_table_application_running = settings.DATABASES['default']['TABLE_application_running']
class AppLauncherAgent(PublishMixin, BaseAgent):
    '''Listens to UI to launch new APP in the BEMOSS APP Store'''
    def __init__(self, config_path, **kwargs):
        # Load the agent config and open the PostgreSQL connection used by
        # every method below (self.con / self.cur).
        super(AppLauncherAgent, self).__init__(**kwargs)
        self.config = utils.load_config(config_path)
        # self.app_number = 0
        #connect to the database
        try:
            self.con = psycopg2.connect(host=db_host, port=db_port, database=db_database, user=db_user,
                                        password=db_password)
            self.cur = self.con.cursor()  # open a cursor to perform database operations
            print("AppLauncher Agent connects to the database name {} successfully".format(db_database))
        except:
            # NOTE(review): bare except — a failed connection is only printed;
            # later methods will fail with AttributeError on self.cur.
            print("ERROR: {} fails to connect to the database name {}".format(app_name, db_database))
        self.time_applauncher_start = datetime.datetime.now()
        self.already_started_previous_apps = False
    def setup(self):
        '''One-time agent setup: log the config message and register with the base class.'''
        # Demonstrate accessing a value from the config file
        _log.info(self.config['message'])
        self._agent_id = self.config['agentid']
        # Always call the base class setup()
        super(AppLauncherAgent, self).setup()
        # self.appLauncherInitiator()
        print "AppLauncher Agent is waiting for UI to activate/disable APPs"
    # clockBehavior (CyclicBehavior): runs every clock_time seconds; after
    # time_to_start_previous_apps seconds it restarts previously running APPs once.
    @periodic(clock_time)
    def clockBehavior(self):
        #1. check current time
        self.time_applauncher_now = datetime.datetime.now()
        if self.already_started_previous_apps:
            # print "AppLauncher Agent >> appLauncherInitiator has already run"
            pass
        else:
            # print "AppLauncher Agent >> appLauncherInitiator has not run yet"
            if (self.time_applauncher_now - self.time_applauncher_start).seconds > time_to_start_previous_apps:
                print "AppLauncher Agent is starting previously running Apps"
                self.appLauncherInitiator()
                self.already_started_previous_apps = True
            else:
                pass
    # Add Cyclic behavior to track current status of app then update DB
    def appLauncherInitiator(self):
        '''Relaunch every APP recorded as running in the DB (crash recovery).'''
        try:
            self.cur.execute("SELECT * FROM "+db_table_application_running)
            # self.cur.execute("SELECT status FROM applications_running WHERE app_name=%s", (ui_app_name,))
            print self.cur.rowcount
            if self.cur.rowcount != 0:
                all_row = self.cur.fetchall()
                for row in all_row:
                    if row[3] == 'running':  # rerun app for the agent
                        # To launch agent: 1.get app_name, 2.get agent_id, 3.get auth_token
                        print "This {} is {}".format(row[1], row[3])
                        # app_agent_id has the form <app>_<name>_<agent_id>.
                        _temp_app_agent_id = str(row[1]).split('_')
                        app_name = _temp_app_agent_id[0]+'_'+_temp_app_agent_id[1]
                        agent_id = _temp_app_agent_id[2]
                        self.cur.execute("SELECT auth_token FROM "+db_table_application_registered+" WHERE app_name=%s",
                                         (app_name,))
                        if self.cur.rowcount != 0:
                            auth_token = str(self.cur.fetchone()[0])
                            app_setting = row[4]
                            print "AppLauncher >> is trying the previous run App {} for agent {} with auth_token {} and " \
                                  "app_setting {}".format(app_name, agent_id, auth_token, app_setting)
                            self.app_has_already_launched = False
                            self.launch_app(app_name, agent_id, auth_token)
                    else:  # do nothing
                        print "This {} is {}".format(row[1], row[3])
            else:
                print "AppLauncher >> no App was running"
        except:
            # NOTE(review): bare except, and the string below is a no-op
            # expression (missing ``print``) — failures here are fully silent.
            "AppLauncher >> failed to launch the previous run Apps"
    # on_match (Cyclic Behavior) to filter message from the UI to launch new APP
    # Topic layout: /ui/appLauncher/<app_name>/<agent_id>/<launch|disable>
    @matching.match_start('/ui/appLauncher/')
    def on_match(self, topic, headers, message, match):
        print "AppLauncher Agent got Topic: {topic}".format(topic=topic)
        _sub_topic = str(topic).split('/')
        app_name = _sub_topic[3]
        agent_id = _sub_topic[4]
        # NOTE(review): the dumps() result is discarded — immediately
        # overwritten by loads() of the same payload on the next line.
        _data = json.dumps(message[0])
        _data = json.loads(message[0])
        auth_token = _data.get('auth_token')
        if _sub_topic[5] == 'launch':
            self.app_has_already_launched = False
            self.launch_app(app_name, agent_id, auth_token)
        elif _sub_topic[5] == 'disable':
            self.app_has_already_launched = False
            self.disable_app(app_name, agent_id, auth_token)
        else:
            # NOTE(review): no-op string expression — never printed.
            "AppLauncher Agent does not understand this message"
    def launch_app(self, ui_app_name, ui_agent_id, ui_auth_token):
        '''Launch an APP for an agent after validating its auth token against the DB.'''
        #1. query database whether the app_name is verified and registered
        #if app_name is in database with the valid authorization_token, then launch agent
        self.cur.execute("SELECT auth_token FROM "+db_table_application_registered+" WHERE app_name=%s", (ui_app_name,))
        if self.cur.rowcount != 0:
            app_auth_token = self.cur.fetchone()[0]
            if ui_auth_token == app_auth_token:
                # 1. launch app
                PROJECT_DIR = settings.PROJECT_DIR
                sys.path.append(PROJECT_DIR)
                # Snapshot the running agents into a text file and scan it.
                os.system("bin/volttron-ctrl list-agent > app_running_agent.txt")
                infile = open('app_running_agent.txt', 'r')
                for line in infile:
                    #print(line, end='') #write to a next file name outfile
                    match = re.search(ui_app_name+'_'+ui_agent_id+'.launch.json', line) \
                        and re.search('running', line)  # have results in match
                    if match:  # The app that ui requested has already launched
                        self.app_has_already_launched = True
                        print "AppLauncher failed to launch APP: {}, APP has actually been launched"\
                            .format(ui_app_name)
                        print "AppLauncher >> {}".format(line)
                if self.app_has_already_launched:
                    # APP process already running: reconcile the DB with reality.
                    _launch_file_to_check = str(ui_app_name) + "_" + str(ui_agent_id)
                    self.cur.execute("SELECT status FROM "+db_table_application_running+" WHERE app_agent_id=%s",
                                     (_launch_file_to_check,))
                    if self.cur.rowcount != 0:  # this APP used to be launched before
                        _app_status = str(self.cur.fetchone()[0])
                        if _app_status == "running":  # no need to launch new app
                            pass
                        else:
                            self.cur.execute("UPDATE application_running SET status=%s WHERE app_agent_id=%s",
                                             ("running", _launch_file_to_check,))
                            self.con.commit()
                    else:
                        # 2. log app that has been launched to the database
                        _launch_file_name = str(ui_app_name) + "_" + str(ui_agent_id)
                        _start_time = str(datetime.datetime.now())
                        _app_status = "running"
                        self.cur.execute("SELECT application_id FROM "+db_table_application_running)
                        if self.cur.rowcount != 0:
                            # print 'cur.fetchall()' + str(max(cur.fetchall())[0])
                            app_no = max(self.cur.fetchall())[0] + 1
                        else:  #default no_app
                            app_no = 1
                        self.cur.execute("INSERT INTO application_running(application_id, app_agent_id, start_time, status) "
                                         "VALUES(%s,%s,%s,%s)",
                                         (app_no, _launch_file_name, _start_time, _app_status))
                        self.con.commit()
                        print "AppLauncher >> the requested APP {} for {} is running but not in db, " \
                              "now it is added to db".format(ui_app_name, ui_agent_id)
                        print "AppLauncher >> NOTE Date and Time launch APP is the current time not actual time"
                        _topic_appLauncher_ui = '/appLauncher/ui/' + ui_app_name + '/' + ui_agent_id + '/' \
                                                + 'launch/response'
                        _headers = {
                            headers_mod.FROM: app_name,
                            headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
                        }
                        _message = "failure"
                        self.publish(_topic_appLauncher_ui, _headers, _message)
                else:  # APP has not launched yet
                    _launch_file_to_check = str(ui_app_name) + "_" + str(ui_agent_id)
                    self.cur.execute("SELECT status FROM "+db_table_application_running+" WHERE app_agent_id=%s",
                                     (_launch_file_to_check,))
                    if self.cur.rowcount != 0:  # delete existing row from the table before launching new app
                        # self.cur.execute("DELETE FROM "+db_table_application_running+" WHERE app_agent_id=%s",
                        #                  (_launch_file_to_check,))
                        # self.con.commit()
                        self.launch_existing_app(ui_app_name, ui_agent_id)
                    else:  #this APP has never been launched and not in db launch new app
                        self.launch_new_app(ui_app_name, ui_agent_id)
            else:
                print "UI failed to authorize with AppLauncher Agent before launching the requested APP"
        else:
            print "The APP that UI requested is neither REGISTERED nor AVAILABLE"
    def launch_existing_app(self, ui_app_name, ui_agent_id):
        '''Write a launch file and (re)start an APP whose DB row already exists.'''
        self.cur.execute("SELECT executable FROM "+db_table_application_registered+" WHERE app_name=%s", (ui_app_name,))
        # 1. launch app for an agent based on the exec file and agent_id
        if self.cur.rowcount != 0:
            _exec_name = str(self.cur.fetchone()[0])
            _exec = _exec_name+"-0.1-py2.7.egg --config \"%c\" --sub \"%s\" --pub \"%p\""
            data = {
                "agent": {
                    "exec": _exec
                },
                "agent_id": ui_agent_id
            }
            PROJECT_DIR = settings.PROJECT_DIR
            _launch_file = os.path.join(PROJECT_DIR, "bemoss/Applications/launch/"
                                        + str(ui_app_name) + "_" + str(ui_agent_id) +".launch.json")
            if debug_agent: print(_launch_file)
            with open(_launch_file, 'w') as outfile:
                json.dump(data, outfile, indent=4, sort_keys=True)
            if debug_agent: print(os.path.basename(_launch_file))
            # Load, start, then list agents via the VOLTTRON control script.
            os.system("bin/volttron-ctrl load-agent "+_launch_file)
            os.system("bin/volttron-ctrl start-agent "+os.path.basename(_launch_file))
            os.system("bin/volttron-ctrl list-agent")
            print "AppLauncher has successfully launched APP: {} for Agent: {}"\
                .format(ui_app_name, ui_agent_id)
            # send reply back to UI
            _topic_appLauncher_ui = '/appLauncher/ui/' + ui_app_name + '/' + ui_agent_id + '/' + 'launch/response'
            _headers = {
                headers_mod.FROM: app_name,
                headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
            }
            _message = "success"
            self.publish(_topic_appLauncher_ui, _headers, _message)
    def launch_new_app(self, ui_app_name, ui_agent_id):
        '''Write a launch file, start the APP, and insert a fresh DB row for it.'''
        self.cur.execute("SELECT executable FROM "+db_table_application_registered+" WHERE app_name=%s", (ui_app_name,))
        # 1. launch app for an agent based on the exec file and agent_id
        if self.cur.rowcount != 0:
            _exec_name = str(self.cur.fetchone()[0])
            _exec = _exec_name+"-0.1-py2.7.egg --config \"%c\" --sub \"%s\" --pub \"%p\""
            data = {
                "agent": {
                    "exec": _exec
                },
                "agent_id": ui_agent_id
            }
            PROJECT_DIR = settings.PROJECT_DIR
            _launch_file = os.path.join(PROJECT_DIR, "bemoss/Applications/launch/"
                                        + str(ui_app_name) + "_" + str(ui_agent_id) +".launch.json")
            if debug_agent: print(_launch_file)
            with open(_launch_file, 'w') as outfile:
                json.dump(data, outfile, indent=4, sort_keys=True)
            if debug_agent: print(os.path.basename(_launch_file))
            os.system("bin/volttron-ctrl load-agent "+_launch_file)
            os.system("bin/volttron-ctrl start-agent "+os.path.basename(_launch_file))
            os.system("bin/volttron-ctrl list-agent")
            print "AppLauncher has successfully launched APP: {} for Agent: {}"\
                .format(ui_app_name, ui_agent_id)
            # send reply back to UI
            _topic_appLauncher_ui = '/appLauncher/ui/' + ui_app_name + '/' + ui_agent_id + '/' + 'launch/response'
            _headers = {
                headers_mod.FROM: app_name,
                headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
            }
            _message = "success"
            self.publish(_topic_appLauncher_ui, _headers, _message)
            # self.app_number += 1
            self.cur.execute("SELECT description FROM "+db_table_application_registered+" WHERE app_name=%s", (ui_app_name,))
            if self.cur.rowcount != 0:
                _app_description = str(self.cur.fetchone()[0])
                print "The description of APP: {} is {}".format(ui_app_name, _app_description)
            else:
                print "AppLauncher failed to get APP: {} description".format(ui_app_name)
            # 2. log app that has been launched to the database
            _launch_file_name = str(ui_app_name) + "_" + str(ui_agent_id)
            _start_time = str(datetime.datetime.now())
            _app_status = "running"
            # NOTE(review): max(application_id)+1 computed client-side is racy;
            # a DB sequence/serial column would be safer.
            self.cur.execute("SELECT application_id FROM "+db_table_application_running)
            if self.cur.rowcount != 0:
                # print 'cur.fetchall()' + str(max(cur.fetchall())[0])
                app_no = max(self.cur.fetchall())[0] + 1
            else:  #default no_app
                app_no = 1
            self.cur.execute("INSERT INTO application_running(application_id, app_agent_id, start_time, status) "
                             "VALUES(%s,%s,%s,%s)",
                             (app_no, _launch_file_name, _start_time, _app_status))
            self.con.commit()
            print "AppLauncher finished update table applications_running of APP: {}".format(ui_app_name)
            print "with launch_file: {}, at timestamp {}".format(_launch_file, _start_time)
        else:
            print "AppLauncher failed to launch APP: {} for Agent: {}".format(ui_app_name, ui_agent_id)
    def disable_app(self, ui_app_name, ui_agent_id, ui_auth_token):
        '''Stop a running APP after validating its auth token, and mark it disabled in the DB.'''
        #1. query database whether the ui_app_name is verified and registered
        self.cur.execute("SELECT auth_token FROM "+db_table_application_registered+" WHERE app_name=%s", (ui_app_name,))
        if self.cur.rowcount != 0:
            app_auth_token = self.cur.fetchone()[0]
            if ui_auth_token == app_auth_token:
                #check whether the ui_app_name and ui_agent_id is actually running
                PROJECT_DIR = settings.PROJECT_DIR
                sys.path.append(PROJECT_DIR)
                os.system("bin/volttron-ctrl list-agent > app_running_agent.txt")
                infile = open('app_running_agent.txt', 'r')
                for line in infile:
                    #print(line, end='') #write to a next file name outfile
                    match = re.search(ui_app_name+'_'+ui_agent_id+'.launch.json', line) \
                        and re.search('running', line)  # have results in match
                    if match:  # The app that ui requested has already launched
                        self.app_has_already_launched = True
                    else:
                        pass
                if self.app_has_already_launched:
                    _launch_file_to_check = str(ui_app_name) + "_" + str(ui_agent_id)
                    self.cur.execute("SELECT status FROM "+db_table_application_running+" WHERE app_agent_id=%s",
                                     (_launch_file_to_check,))
                    if self.cur.rowcount != 0:
                        _app_status = str(self.cur.fetchone()[0])
                        #if it's running disable app
                        if _app_status == "running":
                            _lauch_file_to_disable = _launch_file_to_check+".launch.json"
                            os.system("bin/volttron-ctrl stop-agent "+_lauch_file_to_disable)
                            os.system("bin/volttron-ctrl list-agent")
                            print "AppLauncher has successfully disabled APP: {} ".format(ui_app_name)
                            self.cur.execute("UPDATE application_running SET status=%s WHERE app_agent_id=%s"
                                             , ('disabled', _launch_file_to_check))
                            self.con.commit()
                            # send reply back to UI
                            topic_appLauncher_ui = '/appLauncher/ui/' + ui_app_name + '/' + ui_agent_id + '/' \
                                                   + 'disable/response'
                            headers = {
                                headers_mod.FROM: app_name,
                                headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
                            }
                            message = "success"
                            self.publish(topic_appLauncher_ui, headers, message)
                        elif _app_status == "disabled":
                            print "AppLauncher: the requested APP: {} for Agent: {} has already disabled"\
                                .format(ui_app_name, ui_agent_id)
                        else:
                            print "AppLauncher: the requested APP: {} for Agent: {} has unknown status"\
                                .format(ui_app_name, ui_agent_id)
                    else:
                        print "AppLauncher: APP {} for Agent: {} is not running".format(ui_app_name, ui_agent_id)
                else:  # app is acutally not running no need to do action
                    # NOTE(review): no-op string expression — this message is
                    # never printed (missing ``print``).
                    "AppLauncher: discard request to disable APP: {} for Agent: {} since it's not running"\
                        .format(ui_app_name, ui_agent_id)
            else:
                print "UI failed to authorize with AppLauncher Agent before disabling the requested APP"
        else:
            print "The APP that UI requested is neither REGISTERED nor AVAILABLE"
def main(argv=sys.argv):
    '''Main method called by the eggsecutable.'''
    # NOTE(review): the default argv is captured once at import time; pass an
    # explicit argv to override.
    try:
        utils.default_main(AppLauncherAgent,
                           description='this is an AppLauncher agent',
                           argv=argv)
    except Exception as e:
        # Log with traceback; the agent framework owns process exit codes.
        _log.exception('unhandled exception')
if __name__ == '__main__':
    # Entry point for script
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        pass
|
nilq/baby-python
|
python
|
import webbrowser
import cv2
import numpy as np
import pyautogui
import PIL
import time
def init():
    """Open an example page that embeds a ReCaptcha and give it time to load."""
    # TODO: Add more example websites with recaptcha
    target_url = ('https://jsso.indiatimes.com/sso/identity/register'
                  '?channel=businessinsider&identifier=r@g.c')
    webbrowser.open(target_url)
    # Park the cursor out of the way while the browser window opens.
    pyautogui.moveTo(1200, 200)
    time.sleep(5)
def get_coords():
    """Locate the ReCaptcha checkbox on screen and return its (x, y) coordinates."""
    # Capture the screen and keep a copy on disk for inspection.
    screenshot = PIL.ImageGrab.grab()
    screenshot.save("hay.png")
    # PIL (RGB) -> OpenCV (BGR) haystack; the saved logo image is the needle.
    haystack = cv2.cvtColor(np.array(screenshot), cv2.COLOR_RGB2BGR)
    needle = cv2.imread('needle.png')
    # Template-match the logo, then take the best-scoring position.
    scores = cv2.matchTemplate(haystack, needle, cv2.TM_CCORR_NORMED)
    best_x, best_y = np.unravel_index(np.argmax(scores), scores.shape)
    # Fixed offset from the logo to the checkbox itself.
    return best_x - 230, best_y + 60
def click_captcha(x, y):
    """Move to the checkbox at (x, y) with human-like motion and click it."""
    # Move to the captcha, but overshoot and then fine-tune — mimics a
    # human mouse trajectory to avoid bot detection.
    pyautogui.moveTo(x - 28, y + 50, duration=0.5)
    pyautogui.moveTo(x + 3, y - 51, duration=0.20)
    pyautogui.moveTo(x, y, duration=0.2)
    # Pause momentarily before clicking
    time.sleep(0.2)
    pyautogui.click()
    # Once click has been registered, move away
    time.sleep(0.5)
    pyautogui.moveTo(x - 12, y + 42, duration=0.1)
def main():
    """Open the demo page, locate the captcha checkbox, and click it."""
    print("Starting...")
    init()
    print("Finding Captcha...")
    x, y = get_coords()
    print("Coords: (%d, %d)" % (x, y))
    click_captcha(x, y)
    print("Done!")
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
"""
A class/method/function should have only 1 reason to change!!!
It should have single responsibility
eg. book movie for a theatre
"""
class BookMovie(object):
    """
    Bad code (kept deliberately multi-responsibility to illustrate the
    Single Responsibility Principle — see the module note below).
    """
    def book_movie_seat(self, movie, seat):
        """Book *seat* for *movie*; return False when the seat is unavailable.

        Bug fix: the original tested ``if self.is_seat_available(seat)`` and
        returned False when the seat WAS available, so no seat could ever be
        booked — the guard needs a ``not``.
        """
        if not self.is_seat_available(seat):
            return False
        self.book_seat()
    def is_seat_available(self, seat):
        # Placeholder: the availability lookup would live here.
        pass
    def book_seat(self):
        # Placeholder: the actual booking would live here.
        pass
"""
In above class if we change how the seat availability is defined, it will change,
if booking seat process changes it will be changed so, move it to somthing like SeatValidator class
give it seat and ask if the seat is available
"""
|
nilq/baby-python
|
python
|
import unittest
from Credentials import Credentials
class TestCredentials(unittest.TestCase):
    """
    Test class that defines test cases for the Credentials class behaviours
    """
    def setUp(self):
        """
        Set up method to run before each test case
        """
        self.new_credentials = Credentials("Instagram", "123654")
    def test_credentials_instance(self):
        """
        Method that tests if the new_credentials have been instantiated correctly
        """
        self.assertEqual(self.new_credentials.account_name, "Instagram")
        self.assertEqual(self.new_credentials.account_password, "123654")
    def test_save_credentials(self):
        """
        Method that tests if the new credentials have been saved
        """
        self.new_credentials.save_credentials()
        self.assertEqual(len(Credentials.credentials_list), 1)
    def test_save_multiple_credentials(self):
        """
        Method that saves multiple credentials to credentials_list test
        """
        self.new_credentials.save_credentials()
        new_test_credential = Credentials("Twitter", "741258963")
        new_test_credential.save_credentials()
        self.assertEqual(len(Credentials.credentials_list), 2)
    def tearDown(self):
        """
        Method that clears the class-level credentials_list after every test,
        keeping the count-based tests above independent of each other
        """
        Credentials.credentials_list = []
    def test_find_credential_by_name(self):
        """
        Test to check if we can find credentials and display them
        """
        self.new_credentials.save_credentials()
        new_test_credential = Credentials("Twitter", "741258963")
        new_test_credential.save_credentials()
        found_credential = Credentials.find_by_name("Twitter")
        self.assertEqual(found_credential.account_name, new_test_credential.account_name)
    def test_display_all_credentials(self):
        """
        TestCase to test if all credentials are displayed
        """
        self.assertEqual(Credentials.display_credentials(), Credentials.credentials_list)
|
nilq/baby-python
|
python
|
"""Module for handling buses."""
from sinfactory.component import Component
from sinfactory.load import Load
from sinfactory.generator import Generator
class Bus(Component):
    """Node class"""
    def __init__(self, pf_object):
        """Constructor for the Bus class.
        Args:
            pf_object: The power factory object we will store.
        """
        super().__init__(pf_object)
        elms = pf_object.GetConnectedElements()
        self.loads = {}  # display name -> Load, for connected ElmLod elements
        self.gens = {}  # display name -> Generator, for connected ElmSym elements
        # If this makes initialisation too slow, only calculate this on
        # request.
        for elm in elms:
            # Classify connected elements by the PowerFactory class name
            # embedded in their full name.
            elm_name = elm.GetFullName()
            if "ElmLod" in elm_name:
                self.loads[elm.cDisplayName] = Load(elm)
            if "ElmSym" in elm_name:
                self.gens[elm.cDisplayName] = Generator(elm)
        self.cubs = []  # cubicles (connection points) attached to this bus
        for elm in pf_object.GetConnectedCubicles():
            self.cubs.append(elm)
    @property
    def u(self):
        """The voltage magnitude of the bus in p.u."""
        return self.get_attribute("m:u")
    @property
    def island_id(self):
        """The island id of the bus.
        In case the system has been split up into different islands
        the buses in the same island will have the same id."""
        return self.get_attribute("b:ipat")
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# coding:utf-8
# ServerSan - ss-agent.py
# 2018/3/14 15:03
#
__author__ = 'Benny <benny@bennythink.com>'
__version__ = '1.0.0'
import os
import platform
import socket
import sys
import time
import cpuinfo
import psutil
import requests
# API = 'http://127.0.0.1:5000/'
# Production endpoint; uncomment the line above for local testing.
API = 'https://api.serversan.date:5000/'
def get_uptime():
    # NOTE(review): despite the name, this returns the boot timestamp
    # (seconds since the epoch), not an uptime duration.
    return psutil.boot_time()
def get_os():
    """Return a human-readable OS description (name, version, architecture)."""
    if platform.system() == 'Windows':
        uname = platform.uname()
        return '%s %s %s' % (uname[0], uname[2], uname[4])
    else:
        # NOTE(review): platform.dist() was deprecated in 3.5 and removed in
        # Python 3.8 — this branch breaks on modern interpreters.
        uname = platform.dist()
        return '%s %s %s %s' % (uname[0], uname[1], uname[2], platform.machine())
def get_kernel():
    """Return the kernel release, or the OS version string on Windows."""
    if platform.system() == 'Windows':
        return platform.version()
    return platform.release()
def get_process_count():
    """Return the number of currently running processes."""
    return len(psutil.pids())
def get_sessions():
    """Summarise logged-in users: a total count plus one line per session."""
    users = psutil.users()
    parts = ['%d user(s) in Total' % len(users)]
    for user in users:
        # user tuple: (name, terminal, host, started-timestamp)
        login_time = time.strftime("%Y-%m-%d %H:%M", time.localtime(user[3]))
        parts.append('%s on %s from %s at %s' % (user[0], user[1], user[2], login_time))
    return '\n'.join(parts)
def get_cpu_model():
    """Return the CPU brand string (e.g. vendor + model name)."""
    return cpuinfo.get_cpu_info()['brand']
def get_cpu_count():
    """Return the number of logical CPUs."""
    return psutil.cpu_count()
def get_cpu_freq():
    """Return the advertised CPU frequency (GHz/MHz value), rounded to 2 places."""
    # psutil won't return current cpu freq in visualization.
    # return psutil.cpu_freq()[0]
    # hz_actual looks like "2.6000 GHz"; take the numeric part.
    return round(float(cpuinfo.get_cpu_info()['hz_actual'].split(' ')[0]), 2)
def get_host_ip():
    """Return the primary outbound IPv4 address of this host.

    Connecting a UDP socket sends no packets; it just asks the kernel which
    source address would be used to reach 8.8.8.8.

    Fix: the socket is now closed even when connect()/getsockname() raises
    (the original leaked the descriptor on failure).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 80))
        return s.getsockname()[0]
    finally:
        s.close()
def network_activity():
    """Sample network throughput over ~1 second.

    Returns [tx_KiB, rx_KiB, total_KiB] transferred during the window.
    """
    old_value = old_value2 = 0
    while True:
        new_value = psutil.net_io_counters().bytes_recv
        new_value2 = psutil.net_io_counters().bytes_sent
        if old_value:
            # Second pass: compute deltas against the previous sample.
            rx = round((new_value - old_value) / 1024.0, 2)
            tx = round((new_value2 - old_value2) / 1024.0, 2)
            rx_tx = round((new_value - old_value + new_value2 - old_value2) / 1024.0, 2)
            break
        # NOTE(review): truthiness check — if bytes_recv were ever 0 the loop
        # would keep sampling; a None sentinel would be strict.
        old_value = new_value
        old_value2 = new_value2
        time.sleep(1)
    return [tx, rx, rx_tx]
def current_network_flow():
    """Return cumulative [sent, received] traffic since boot, in GiB."""
    received = round(psutil.net_io_counters().bytes_recv / 1024.0 / 1024 / 1024, 2)
    sent = round(psutil.net_io_counters().bytes_sent / 1024.0 / 1024 / 1024, 2)
    return [sent, received]
def average_load():
    # NOTE(review): despite the name, this is the instantaneous CPU
    # utilisation percentage, not a load average.
    return psutil.cpu_percent()
def mem():
    """Return virtual-memory usage as [used_MiB, total_MiB, percent].

    Fix: takes a single psutil snapshot so used/total/percent are mutually
    consistent — the original queried psutil three separate times, so the
    three values could come from different instants.
    """
    vm = psutil.virtual_memory()
    used = round(vm.used / 1024.0 / 1024, 2)
    total = round(vm.total / 1024.0 / 1024, 2)
    return [used, total, vm.percent]
def swap():
    """Return swap usage as [used_MiB, total_MiB, percent].

    Fix: single psutil snapshot for mutually consistent values (the original
    issued three separate queries).
    """
    sm = psutil.swap_memory()
    used = round(sm.used / 1024.0 / 1024, 2)
    total = round(sm.total / 1024.0 / 1024, 2)
    return [used, total, sm.percent]
def disk():
    """Return root-filesystem usage as [used_GiB, total_GiB, percent].

    Fix: single psutil snapshot for mutually consistent values (the original
    issued three separate queries).
    """
    du = psutil.disk_usage('/')
    used = round(du.used / 1024.0 / 1024 / 1024, 2)
    total = round(du.total / 1024.0 / 1024 / 1024, 2)
    return [used, total, du.percent]
def top_process():
    """Return the top CPU/RSS processes via ps, or a stub message on Windows."""
    cmd = 'ps axc -o uname:12,pcpu,rss,cmd --sort=-pcpu,-rss --noheaders --width 120|head'
    with os.popen(cmd) as pipe:
        output = pipe.read()
    if output:
        return output
    # Empty output: ps is unavailable (e.g. Windows).
    return 'Windows is not supported.'
def get_hostname():
    """Return this machine's network name."""
    hostname = platform.node()
    return hostname
def build():
    """Collect every monitored metric into a single report dict."""
    return {
        'auth': get_auth_token().rstrip('\n'),
        'hostname': get_hostname(),
        'uptime': get_uptime(),
        'os': [get_os(), get_kernel()],
        'pro': get_process_count(),
        'session': get_sessions(),
        'cpu': [get_cpu_model(), get_cpu_count(), get_cpu_freq()],
        'ip': get_host_ip(),
        'network': network_activity(),
        'flow': current_network_flow(),
        'percent': average_load(),
        'mem': mem(),
        'swap': swap(),
        'disk': disk(),
        'top': top_process(),
    }
def send_request(dic):
    """POST the metrics dict as JSON to the ServerSan API and print the reply."""
    headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
    response = requests.post(API + 'v1/create', json=dic, headers=headers)
    print(response.text)
def get_auth_token():
    """Read the agent's auth token from the platform-specific token file.

    :return: file content, including any trailing newline.
    :raises KeyError: on Windows when HOMEPATH is not set.
    :raises OSError: if the token file does not exist / is unreadable.
    """
    if platform.system() == 'Windows':
        # Fix: os.environ.get() returned None when HOMEPATH was unset,
        # crashing with a TypeError on concatenation; indexing raises a
        # clear KeyError instead.
        path = os.environ['HOMEPATH'] + '/ss-auth.log'
    else:
        path = '/etc/serversan/ss-auth.log'
    with open(path) as f:
        return f.read()
# TODO: upgrade client agent: shell scripts or ...?
def upgrade():
    """Placeholder for agent self-upgrade logic (not implemented yet)."""
    pass
def main():
    """Build one metrics payload and ship it to the API."""
    payload = build()
    send_request(payload)
if __name__ == '__main__':
    # No arguments: run one collect-and-report cycle.
    if len(sys.argv) == 1:
        main()
    elif sys.argv[1] == 'version':
        # Fix: corrected user-facing typo 'verson' -> 'version'.
        print('The current ServerSan agent version is %s' % __version__)
    else:
        print('Wrong parameters.')
|
nilq/baby-python
|
python
|
import pyautogui
import time
# Give the user time to switch focus from the editor to the paint canvas.
time.sleep(10)
# Click once to focus the canvas; each dragRel below holds the button
# for the duration of its own drag.
pyautogui.click()
# Initial edge length of the rectangular spiral in pixels;
# can be varied according to convenience.
distance = 250
while distance > 0:
    # right
    pyautogui.dragRel(distance, 0, duration = 0.1)
    distance -= 5
    # down
    pyautogui.dragRel(0, distance, duration = 0.1)
    # left
    pyautogui.dragRel(-distance, 0, duration = 0.1)
    distance -= 5
    #up
    pyautogui.dragRel(0, -distance, duration = 0.1)
|
nilq/baby-python
|
python
|
from flask import (
Flask, render_template, send_from_directory, redirect, url_for, request)
from . import settings as st
from .persistency import PersistencyManager
import markdown
from pygments.formatters import HtmlFormatter
from flask_wtf import FlaskForm
from flask_pagedown.fields import PageDownField
from wtforms.fields import SubmitField
from flask_pagedown import PageDown
import os
# Flask application wired to the project's template/static locations.
app = Flask(
    __name__, template_folder=st.TEMPLATE_FOLDER,
    static_folder=st.STATIC_FOLDER)
# Register the PageDown markdown-editor extension on the app.
pagedown = PageDown(app)
class PageDownForm(FlaskForm):
    """Single-field form holding the markdown source being edited."""
    pagedown = PageDownField('Enter your markdown')
    submit = SubmitField('Submit')
def run_flask_server():
    """Run the flask server"""
    # Fresh random secret each start: sessions/CSRF tokens reset on restart.
    app.config['SECRET_KEY'] = os.urandom(32)
    # NOTE(review): debug mode is enabled unconditionally — confirm this
    # app is never exposed publicly.
    app.debug = True
    app.run()
@app.route('/')
def index():
    """Render the start page listing every zettel file, sorted by name."""
    manager = PersistencyManager(st.ZETTELKASTEN)
    filenames = sorted(manager.get_list_of_filenames())
    return render_template('startpage.html', zettelkasten=filenames)
@app.route('/<file>')
def show_md_file(file):
    """Convert one markdown zettel to HTML and render it with pygments CSS."""
    manager = PersistencyManager(st.ZETTELKASTEN)
    markdown_source = manager.get_string_from_file_content(file)
    html_body = markdown.markdown(
        markdown_source,
        output_format='html5',
        extensions=[
            "fenced_code",
            'codehilite',
            'attr_list',
            'pymdownx.arithmatex',
        ],
        extension_configs={'pymdownx.arithmatex': {'generic': True}},
    )
    # Inline the pygments stylesheet so code blocks are highlighted.
    formatter = HtmlFormatter(style="emacs", full=True, cssclass="codehilite")
    return render_template(
        "mainpage.html",
        codeCSSString="<style>" + formatter.get_style_defs() + "</style>",
        htmlString=html_body,
        filename=file)
@app.route('/edit/<filename>', methods=['GET', 'POST'])
def edit(filename):
    """Edit one zettel in a PageDown form; persist it on a valid POST."""
    persistencyManager = PersistencyManager(
        st.ZETTELKASTEN)
    # Pre-fill the editor with the current file content (GET case).
    input_file = persistencyManager.get_string_from_file_content(filename)
    markdown_string = input_file
    form = PageDownForm()
    form.pagedown.data = markdown_string
    if form.validate_on_submit():
        if request.method == 'POST':
            # Replace the pre-filled text with the submitted version and
            # write it back to disk before redirecting to the view page.
            new_markdown_string = request.form['pagedown']
            form.pagedown.data = new_markdown_string
            persistencyManager.overwrite_file_content(
                filename, new_markdown_string)
        return redirect(url_for('show_md_file', file=filename))
    return render_template('edit.html', form=form)
@app.route('/images/<path:filename>')
def send_image(filename):
    """Serve an image referenced from a zettel out of the images directory."""
    return send_from_directory(st.ABSOLUTE_PATH_IMAGES, filename)
|
nilq/baby-python
|
python
|
from pwn import * # NOQA
# Placeholder flag, re-encoded as the ASCII bit-string ('0b...') of its
# big-endian integer value; the guesser below reproduces this encoding.
flag = b"flag{AAAAAAAAAAA}"
flag = bytes(bin(int(binascii.hexlify(flag), 16)), 'utf8')
class GuessIterator:
    """Iterator producing successive bit-string guesses, 16 chars at a time.

    Each guess is the already-confirmed prefix plus a 16-character
    candidate block for an incrementing counter.  The very first block
    keeps Python's '0b' prefix and is space-padded; later blocks are
    plain zero-padded binary digits.
    """

    def __init__(self):
        self.known_part = b""
        self.first_block = True
        self.i = -1

    def know_guess(self):
        """Lock in the current guess as the confirmed prefix and restart."""
        self.known_part = self.current_guess()
        self.first_block = False
        self.i = -1

    def current_guess(self):
        """Return the confirmed prefix + candidate block for counter i."""
        if self.first_block:
            candidate = bytes(bin(self.i).rjust(16, ' '), 'utf8')
        else:
            candidate = bytes(bin(self.i)[2:].rjust(16, '0'), 'utf8')
        return self.known_part + candidate

    def __iter__(self):
        return self

    def __next__(self):
        self.i += 1
        return self.current_guess()
# Shared guessing state: the iterator and the longest confirmed prefix length.
guessing = GuessIterator()
best_index = 0
def take_guess():
    """Return the next candidate guess from the shared iterator."""
    return next(guessing)
def wrong_byte_feedback(index):
    """Advance the guesser based on where the first wrong byte was.

    :param index: offset of the first mismatching byte, or None when the
                  whole guess matched.
    """
    global best_index
    if index is None:  # No wrong byte
        guessing.know_guess()
        best_index += 16
    elif index % 16 == 0 and index > best_index:
        # A full 16-char block beyond the confirmed prefix matched.
        guessing.know_guess()
        best_index += 16
# GAME ########################
p = process(['python3', './remote.py'])
try:
    while True:
        p.sendline(take_guess())
        result = p.recvline()
        # Count the leading '0' characters of the oracle's response; it
        # leaks how far the guess matched.
        i = 0
        for c in result:
            if c == ord('0'):
                i += 1
            else:
                break
        i = i // 2
        if i % 16 == 0 and i > best_index:
            print(guessing.current_guess())
        wrong_byte_feedback(i)
except Exception as e:
    # NOTE(review): the broad except doubles as the exit path once the
    # remote closes the pipe.
    print(guessing.current_guess())
    # NOTE(review): hex(...)[:2] slices to the literal '0x' prefix — this
    # looks like it was meant to be [2:]; confirm before relying on it.
    print(binascii.unhexlify(hex(int(str(guessing.current_guess(), 'utf8').strip()[2:], 2))[:2]))
|
nilq/baby-python
|
python
|
from setuptools import setup
from os import path
from io import open
# Read the long description from the README that sits next to this file.
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Package metadata for SimpleHTTPSAuthServer (single-module distribution).
setup(
    name='SimpleHTTPSAuthServer',
    version='1.1.0',
    description='HTTPS server with Basic authentication and client certificate authentication.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/oza6ut0ne/SimpleHTTPSAuthServer',
    license='MIT',
    author='Ryota Okimatsu',
    author_email='oza6ut0ne@gmail.com',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    py_modules=['SimpleHTTPSAuthServer']
)
|
nilq/baby-python
|
python
|
""" A simple example which prints out parsed streaming responses.
Python version: 3.6+
Dependencies (use `pip install X` to install a dependency):
- websockets
Usage:
python deepgram_streaming.py -k 'YOUR_DEEPGRAM_API_KEY' /path/to/audio.wav
Limitations:
- Only parses signed, 16-bit little-endian encoded WAV files.
"""
import argparse
import asyncio
import base64
import json
import sys
import wave
import websockets
import subprocess
# Mimic sending a real-time stream by sending this many seconds of audio
# at a time (i.e. one 100 ms chunk per send).
REALTIME_RESOLUTION = 0.100
async def run(data, key, channels, sample_width, sample_rate, filepath):
    """Stream raw PCM audio to Deepgram's live endpoint and print finalized transcripts.

    :param data: signed 16-bit little-endian PCM bytes from the WAV file.
    :param key: Deepgram API key used in the Authorization header.
    :param channels: channel count from the WAV header.
    :param sample_width: bytes per sample (expected to be 2).
    :param sample_rate: sample rate in Hz.
    :param filepath: original audio path (not used beyond provenance here).
    """
    # How many bytes are contained in one second of audio.
    byte_rate = sample_width * sample_rate * channels
    print('This demonstration will print all finalized results, not interim results.')
    # Connect to the real-time streaming endpoint, attaching our credentials.
    async with websockets.connect(
        # Alter the protocol and base URL below.
        f'wss://api.deepgram.com/v1/listen?punctuate=true&channels={channels}&sample_rate={sample_rate}&encoding=linear16',
        extra_headers={
            'Authorization': 'Token {}'.format(key)
        }
    ) as ws:
        async def sender(ws):
            """ Sends the data, mimicking a real-time connection.
            """
            nonlocal data
            try:
                total = len(data)
                while len(data):
                    # How many bytes are in `REALTIME_RESOLUTION` seconds of audio?
                    i = int(byte_rate * REALTIME_RESOLUTION)
                    chunk, data = data[:i], data[i:]
                    # Send the data
                    await ws.send(chunk)
                    # Mimic real-time by waiting `REALTIME_RESOLUTION` seconds
                    # before the next packet.
                    await asyncio.sleep(REALTIME_RESOLUTION)
                # An empty binary message tells Deepgram that no more audio
                # will be sent. Deepgram will close the connection once all
                # audio has finished processing.
                await ws.send(b'')
            except Exception as e:
                print(f'Error while sending: {e}')
                raise
        async def receiver(ws):
            """ Print out the messages received from the server.
            """
            async for msg in ws:
                res = json.loads(msg)
                try:
                    # To see interim results in this demo, remove the conditional `if res['is_final']:`.
                    if res['is_final']:
                        transcript = res['channel']['alternatives'][0]['transcript']
                        start = res['start']  # NOTE(review): unused; kept for parity with the API payload
                        print(f'{transcript}')
                except KeyError:
                    print(msg)
        # Run upload and transcript printing concurrently until both finish.
        await asyncio.wait([
            asyncio.ensure_future(sender(ws)),
            asyncio.ensure_future(receiver(ws))
        ])
        print()
def parse_args():
    """Parse and return the command-line arguments for the example."""
    parser = argparse.ArgumentParser(
        description='Submits data to the real-time streaming endpoint.')
    parser.add_argument('-k', '--key', required=True,
                        help='YOUR_DEEPGRAM_API_KEY (authorization)')
    parser.add_argument('input', help='Input file.')
    return parser.parse_args()
def main():
    """ Entrypoint for the example.
    """
    # Parse the command-line arguments.
    args = parse_args()
    # Open the audio file.
    with wave.open(args.input, 'rb') as fh:
        # getparams() -> (nchannels, sampwidth, framerate, nframes, comptype, compname)
        (channels, sample_width, sample_rate, num_samples, _, _) = fh.getparams()
        assert sample_width == 2, 'WAV data must be 16-bit.'
        data = fh.readframes(num_samples)
        print(f'Channels = {channels}, Sample Rate = {sample_rate} Hz, Sample width = {sample_width} bytes, Size = {len(data)} bytes', file=sys.stderr)
        # Run the example.
        # NOTE(review): asyncio.get_event_loop() is deprecated for this use
        # since 3.10; asyncio.run(...) is the modern equivalent.
        asyncio.get_event_loop().run_until_complete(run(data, args.key, channels, sample_width, sample_rate, args.input))
# Script entry point; main() returns None, so the exit status is 0.
if __name__ == '__main__':
    sys.exit(main() or 0)
|
nilq/baby-python
|
python
|
# This file is part of pure-dispatch.
# https://github.com/SeedyROM/pure-dispatch
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2017, Zack Kollar <zackkollar@gmail.com>
'''Test our database module.
'''
from tests.base import TestCase
from pure_dispatch.database import DatabaseEngine, DatabaseSession
from preggy import expect
class DatabaseTestCase(TestCase):
    '''Create a generic test case for our suite.
    '''
    @classmethod
    def setUpClass(cls):
        # Shared engine/session singletons, built once per test class.
        cls.engine = DatabaseEngine()
        cls.session = DatabaseSession()
class TestDatabaseBase(DatabaseTestCase):
    '''Test the basic functionality of our module.
    '''
    def test_globals_initialize(self):
        '''Test our singleton's initialize at all.
        '''
        # Constructing the singletons must not raise.
        expect(DatabaseEngine).error_not_to_happen()
        expect(DatabaseSession).error_not_to_happen()
    def test_database_engine(self):
        '''Test if our singleton initializes the engine property.
        '''
        expect(self.engine).error_not_to_happen()
    def test_database_session(self):
        '''Test if our singleton initializes the session property.
        '''
        expect(self.session).error_not_to_happen()
|
nilq/baby-python
|
python
|
# By Justin Walgran
# Copyright (c) 2012 Azavea, Inc.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import unittest
from blend import Result
class TestResult(unittest.TestCase):
    """Asserts that the properties and methods of the Result class behave correctly."""
    def setUp(self):
        # Fresh Result per test so message/warning/error state never leaks.
        self.result = Result()
    def tearDown(self):
        pass
    def test_adding_none_to_messages_does_not_create_a_message(self):
        # Only the None message should be dropped; warning/error still land.
        self.result.add_message(None)
        self.result.add_warning("warning")
        self.result.add_error("error")
        self.assertIsNone(self.result.messages, "Expected adding a None message to not add an item to Result.messages")
    def test_adding_none_to_warnings_does_not_create_a_warning(self):
        self.result.add_message("message")
        self.result.add_warning(None)
        self.result.add_error("error")
        self.assertIsNone(self.result.warnings, "Expected adding a None warning to not add an item to Result.warnings")
    def test_adding_none_to_error_does_not_create_a_message(self):
        self.result.add_message("message")
        self.result.add_warning("warning")
        self.result.add_error(None)
        self.assertIsNone(self.result.errors, "Expected adding a None error to not add an item to Result.errors")
    def test_errors_warnings_and_messages_as_string_with_one_of_each(self):
        # Combined string lists errors first, then warnings, then messages.
        self.result.add_message("message")
        self.result.add_warning("warning")
        self.result.add_error("error")
        self.assertEqual("error\nwarning\nmessage", self.result.errors_warnings_and_messages_as_string)
    def test_errors_warnings_and_messages_as_string_with_message_and_warning(self):
        self.result.add_message("message")
        self.result.add_warning("warning")
        self.assertEqual("warning\nmessage", self.result.errors_warnings_and_messages_as_string)
|
nilq/baby-python
|
python
|
# Copyright (c) 2017 Midokura SARL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from neutron_lib import constants as lib_constants
from neutron_lib.services.qos import constants as qos_consts
from neutron_lib.utils import test
from tempest.common import utils
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions
import testscenarios
from testscenarios.scenarios import multiply_scenarios
from neutron_tempest_plugin.api import base as base_api
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin.common import utils as common_utils
from neutron_tempest_plugin import config
from neutron_tempest_plugin.scenario import base
from neutron_tempest_plugin.scenario import constants
from neutron_tempest_plugin.scenario import test_qos
CONF = config.CONF
# Enables testscenarios' expansion of each class's `scenarios` attribute.
load_tests = testscenarios.load_tests_apply_scenarios
class FloatingIpTestCasesMixin(object):
    """Shared fixtures and helpers for floating-IP connectivity scenarios.

    Concrete subclasses must set `same_network` (and, for east-west tests,
    `src_has_fip`/`dest_has_fip` via testscenarios).
    """
    credentials = ['primary', 'admin']

    @classmethod
    @utils.requires_ext(extension="router", service="network")
    def resource_setup(cls):
        # Network, subnet, router and a security group allowing SSH + ping.
        super(FloatingIpTestCasesMixin, cls).resource_setup()
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.router = cls.create_router_by_client()
        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
        cls.keypair = cls.create_keypair()
        cls.secgroup = cls.os_primary.network_client.create_security_group(
            name=data_utils.rand_name('secgroup'))['security_group']
        cls.security_groups.append(cls.secgroup)
        cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
        cls.create_pingable_secgroup_rule(secgroup_id=cls.secgroup['id'])
        # Destination network is either the same one or a second, routed one.
        if cls.same_network:
            cls._dest_network = cls.network
        else:
            cls._dest_network = cls._create_dest_network()

    @classmethod
    def _get_external_gateway(cls):
        # Returns the IPv4 gateway of the public network, or None.
        if CONF.network.public_network_id:
            subnets = cls.os_admin.network_client.list_subnets(
                network_id=CONF.network.public_network_id)
            for subnet in subnets['subnets']:
                if (subnet['gateway_ip'] and
                        subnet['ip_version'] == lib_constants.IP_VERSION_4):
                    return subnet['gateway_ip']

    @classmethod
    def _create_dest_network(cls):
        # Second network attached to the same router (separate-network case).
        network = cls.create_network()
        subnet = cls.create_subnet(network)
        cls.create_router_interface(cls.router['id'], subnet['id'])
        return network

    def _create_server(self, create_floating_ip=True, network=None):
        """Boot a server on `network`; return its port, fip (or None) and server."""
        if network is None:
            network = self.network
        port = self.create_port(network, security_groups=[self.secgroup['id']])
        if create_floating_ip:
            fip = self.create_floatingip(port=port)
        else:
            fip = None
        server = self.create_server(
            flavor_ref=CONF.compute.flavor_ref,
            image_ref=CONF.compute.image_ref,
            key_name=self.keypair['name'],
            networks=[{'port': port['id']}])['server']
        waiters.wait_for_server_status(self.os_primary.servers_client,
                                       server['id'],
                                       constants.SERVER_STATUS_ACTIVE)
        return {'port': port, 'fip': fip, 'server': server}

    def _test_east_west(self):
        """Check VM-to-VM connectivity for every FIP/no-FIP combination."""
        # The proxy VM is used to control the source VM when it doesn't
        # have a floating-ip.
        if self.src_has_fip:
            proxy = None
            proxy_client = None
        else:
            proxy = self._create_server()
            proxy_client = ssh.Client(proxy['fip']['floating_ip_address'],
                                      CONF.validation.image_ssh_user,
                                      pkey=self.keypair['private_key'])
        # Source VM
        if self.src_has_fip:
            src_server = self._create_server()
            src_server_ip = src_server['fip']['floating_ip_address']
        else:
            src_server = self._create_server(create_floating_ip=False)
            src_server_ip = src_server['port']['fixed_ips'][0]['ip_address']
        ssh_client = ssh.Client(src_server_ip,
                                CONF.validation.image_ssh_user,
                                pkey=self.keypair['private_key'],
                                proxy_client=proxy_client)
        # Destination VM
        if self.dest_has_fip:
            dest_server = self._create_server(network=self._dest_network)
        else:
            dest_server = self._create_server(create_floating_ip=False,
                                              network=self._dest_network)
        # Check connectivity over the fixed IP, then over the FIP if any.
        self.check_remote_connectivity(ssh_client,
            dest_server['port']['fixed_ips'][0]['ip_address'],
            servers=[src_server, dest_server])
        if self.dest_has_fip:
            self.check_remote_connectivity(ssh_client,
                dest_server['fip']['floating_ip_address'],
                servers=[src_server, dest_server])
class FloatingIpSameNetwork(FloatingIpTestCasesMixin,
                            base.BaseTempestTestCase):
    """East-west connectivity with both VMs on the same network."""
    # testscenarios expands this into 4 tests (FIP x no-FIP combinations).
    scenarios = multiply_scenarios([
        ('SRC with FIP', dict(src_has_fip=True)),
        ('SRC without FIP', dict(src_has_fip=False)),
    ], [
        ('DEST with FIP', dict(dest_has_fip=True)),
        ('DEST without FIP', dict(dest_has_fip=False)),
    ])
    same_network = True

    @test.unstable_test("bug 1717302")
    @decorators.idempotent_id('05c4e3b3-7319-4052-90ad-e8916436c23b')
    def test_east_west(self):
        self._test_east_west()
class FloatingIpSeparateNetwork(FloatingIpTestCasesMixin,
                                base.BaseTempestTestCase):
    """East-west connectivity with the VMs on two router-connected networks."""
    scenarios = multiply_scenarios([
        ('SRC with FIP', dict(src_has_fip=True)),
        ('SRC without FIP', dict(src_has_fip=False)),
    ], [
        ('DEST with FIP', dict(dest_has_fip=True)),
        ('DEST without FIP', dict(dest_has_fip=False)),
    ])
    same_network = False

    @test.unstable_test("bug 1717302")
    @decorators.idempotent_id('f18f0090-3289-4783-b956-a0f8ac511e8b')
    def test_east_west(self):
        self._test_east_west()
class DefaultSnatToExternal(FloatingIpTestCasesMixin,
                            base.BaseTempestTestCase):
    """SNAT connectivity from a FIP-less VM to the external gateway."""
    same_network = True

    @decorators.idempotent_id('3d73ea1a-27c6-45a9-b0f8-04a283d9d764')
    def test_snat_external_ip(self):
        """Check connectivity to an external IP"""
        gateway_external_ip = self._get_external_gateway()
        if not gateway_external_ip:
            raise self.skipTest("IPv4 gateway is not configured for public "
                                "network or public_network_id is not "
                                "configured")
        # The FIP-less source VM is reached through a proxy VM with a FIP.
        proxy = self._create_server()
        proxy_client = ssh.Client(proxy['fip']['floating_ip_address'],
                                  CONF.validation.image_ssh_user,
                                  pkey=self.keypair['private_key'])
        src_server = self._create_server(create_floating_ip=False)
        src_server_ip = src_server['port']['fixed_ips'][0]['ip_address']
        ssh_client = ssh.Client(src_server_ip,
                                CONF.validation.image_ssh_user,
                                pkey=self.keypair['private_key'],
                                proxy_client=proxy_client)
        self.check_remote_connectivity(ssh_client,
                                       gateway_external_ip,
                                       servers=[proxy, src_server])
class FloatingIPPortDetailsTest(FloatingIpTestCasesMixin,
                                base.BaseTempestTestCase):
    """Checks that a FIP's port_details track its attached port's state."""
    same_network = True

    @classmethod
    @utils.requires_ext(extension="router", service="network")
    @utils.requires_ext(extension="fip-port-details", service="network")
    def resource_setup(cls):
        super(FloatingIPPortDetailsTest, cls).resource_setup()

    @test.unstable_test("bug 1815585")
    @decorators.idempotent_id('a663aeee-dd81-492b-a207-354fd6284dbe')
    def test_floatingip_port_details(self):
        """Tests the following:
        1. Create a port with floating ip in Neutron.
        2. Create two servers in Nova.
        3. Attach the port to the server.
        4. Detach the port from the server.
        5. Attach the port to the second server.
        6. Detach the port from the second server.
        """
        port = self.create_port(self.network)
        fip = self.create_and_associate_floatingip(port['id'])
        server1 = self._create_server(create_floating_ip=False)
        server2 = self._create_server(create_floating_ip=False)
        for server in [server1, server2]:
            # attach the port to the server
            self.create_interface(
                server['server']['id'], port_id=port['id'])
            waiters.wait_for_interface_status(
                self.os_primary.interfaces_client, server['server']['id'],
                port['id'], lib_constants.PORT_STATUS_ACTIVE)
            fip = self.client.show_floatingip(fip['id'])['floatingip']
            self._check_port_details(
                fip, port, status=lib_constants.PORT_STATUS_ACTIVE,
                device_id=server['server']['id'], device_owner='compute:nova')
            # detach the port from the server; this is a cast in the compute
            # API so we have to poll the port until the device_id is unset.
            self.delete_interface(server['server']['id'], port['id'])
            port = self._wait_for_port_detach(port['id'])
            fip = self._wait_for_fip_port_down(fip['id'])
            self._check_port_details(
                fip, port, status=lib_constants.PORT_STATUS_DOWN,
                device_id='', device_owner='')

    def _check_port_details(self, fip, port, status, device_id, device_owner):
        # The FIP's port_details must mirror the port's current attributes.
        self.assertIn('port_details', fip)
        port_details = fip['port_details']
        self.assertEqual(port['name'], port_details['name'])
        self.assertEqual(port['network_id'], port_details['network_id'])
        self.assertEqual(port['mac_address'], port_details['mac_address'])
        self.assertEqual(port['admin_state_up'],
                         port_details['admin_state_up'])
        self.assertEqual(status, port_details['status'])
        self.assertEqual(device_id, port_details['device_id'])
        self.assertEqual(device_owner, port_details['device_owner'])

    def _wait_for_port_detach(self, port_id, timeout=120, interval=10):
        """Waits for the port's device_id to be unset.
        :param port_id: The id of the port being detached.
        :returns: The final port dict from the show_port response.
        """
        port = self.client.show_port(port_id)['port']
        device_id = port['device_id']
        start = int(time.time())
        # NOTE(mriedem): Nova updates the port's device_id to '' rather than
        # None, but it's not contractual so handle Falsey either way.
        while device_id:
            time.sleep(interval)
            port = self.client.show_port(port_id)['port']
            device_id = port['device_id']
            timed_out = int(time.time()) - start >= timeout
            if device_id and timed_out:
                message = ('Port %s failed to detach (device_id %s) within '
                           'the required time (%s s).' %
                           (port_id, device_id, timeout))
                raise exceptions.TimeoutException(message)
        return port

    def _wait_for_fip_port_down(self, fip_id, timeout=120, interval=10):
        """Waits for the fip's attached port status to be 'DOWN'.
        :param fip_id: The id of the floating IP.
        :returns: The final fip dict from the show_floatingip response.
        """
        fip = self.client.show_floatingip(fip_id)['floatingip']
        self.assertIn('port_details', fip)
        port_details = fip['port_details']
        status = port_details['status']
        start = int(time.time())
        while status != lib_constants.PORT_STATUS_DOWN:
            time.sleep(interval)
            fip = self.client.show_floatingip(fip_id)['floatingip']
            self.assertIn('port_details', fip)
            port_details = fip['port_details']
            status = port_details['status']
            timed_out = int(time.time()) - start >= timeout
            if status != lib_constants.PORT_STATUS_DOWN and timed_out:
                # Include admin port details in the failure message to
                # ease debugging.
                port_id = fip.get("port_id")
                port = self.os_admin.network_client.show_port(port_id)['port']
                message = ('Floating IP %s attached port status failed to '
                           'transition to DOWN (current status %s) within '
                           'the required time (%s s). Port details: %s' %
                           (fip_id, status, timeout, port))
                raise exceptions.TimeoutException(message)
        return fip
class FloatingIPQosTest(FloatingIpTestCasesMixin,
                        test_qos.QoSTestMixin,
                        base.BaseTempestTestCase):
    """Checks that a QoS policy bound to a FIP actually limits bandwidth."""
    same_network = True

    @classmethod
    @utils.requires_ext(extension="router", service="network")
    @utils.requires_ext(extension="qos", service="network")
    @utils.requires_ext(extension="qos-fip", service="network")
    @base_api.require_qos_rule_type(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
    def resource_setup(cls):
        super(FloatingIPQosTest, cls).resource_setup()

    @decorators.idempotent_id('5eb48aea-eaba-4c20-8a6f-7740070a0aa3')
    def test_qos(self):
        """Test floating IP is binding to a QoS policy with
        ingress and egress bandwidth limit rules. And it applied correctly
        by sending a file from the instance to the test node.
        Then calculating the bandwidth every ~1 sec by the number of bits
        received / elapsed time.
        """
        self._test_basic_resources()
        policy_id = self._create_qos_policy()
        ssh_client = self._create_ssh_client()
        # Limit both directions with the same rate/burst.
        self.os_admin.network_client.create_bandwidth_limit_rule(
            policy_id, max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
            max_burst_kbps=constants.LIMIT_KILO_BYTES,
            direction=lib_constants.INGRESS_DIRECTION)
        self.os_admin.network_client.create_bandwidth_limit_rule(
            policy_id, max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
            max_burst_kbps=constants.LIMIT_KILO_BYTES,
            direction=lib_constants.EGRESS_DIRECTION)
        rules = self.os_admin.network_client.list_bandwidth_limit_rules(
            policy_id)
        self.assertEqual(2, len(rules['bandwidth_limit_rules']))
        fip = self.os_admin.network_client.get_floatingip(
            self.fip['id'])['floatingip']
        self.assertEqual(self.port['id'], fip['port_id'])
        # Bind the policy to the FIP and verify the association took effect.
        self.os_admin.network_client.update_floatingip(
            self.fip['id'],
            qos_policy_id=policy_id)
        fip = self.os_admin.network_client.get_floatingip(
            self.fip['id'])['floatingip']
        self.assertEqual(policy_id, fip['qos_policy_id'])
        # Measure the transfer rate until it falls under the limit.
        self._create_file_for_bw_tests(ssh_client)
        common_utils.wait_until_true(lambda: self._check_bw(
            ssh_client,
            self.fip['floating_ip_address'],
            port=self.NC_PORT),
            timeout=120,
            sleep=1)
class TestFloatingIPUpdate(FloatingIpTestCasesMixin,
                           base.BaseTempestTestCase):
    """Checks re-associating a floating IP from one port to another."""
    same_network = None

    @decorators.idempotent_id('1bdd849b-03dd-4b8f-994f-457cf8a36f93')
    def test_floating_ip_update(self):
        """Test updating FIP with another port.
        The test creates two servers and attaches floating ip to first server.
        Then it checks server is accessible using the FIP. FIP is then
        associated with the second server and connectivity is checked again.
        """
        ports = [self.create_port(
            self.network, security_groups=[self.secgroup['id']])
            for i in range(2)]
        servers = []
        for port in ports:
            name = data_utils.rand_name("server-%s" % port['id'][:8])
            server = self.create_server(
                name=name,
                flavor_ref=CONF.compute.flavor_ref,
                key_name=self.keypair['name'],
                image_ref=CONF.compute.image_ref,
                networks=[{'port': port['id']}])['server']
            server['name'] = name
            servers.append(server)
        for server in servers:
            self.wait_for_server_active(server)
        self.fip = self.create_floatingip(port=ports[0])
        self.check_connectivity(self.fip['floating_ip_address'],
                                CONF.validation.image_ssh_user,
                                self.keypair['private_key'],
                                servers=servers)
        # Re-point the FIP at the second server's port.
        self.client.update_floatingip(self.fip['id'], port_id=ports[1]['id'])

        def _wait_for_fip_associated():
            try:
                self.check_servers_hostnames(servers[-1:], log_errors=False)
            except (AssertionError, exceptions.SSHTimeout):
                return False
            return True

        # The FIP is now associated with the port of the second server.
        try:
            common_utils.wait_until_true(_wait_for_fip_associated,
                                         timeout=15, sleep=3)
        except common_utils.WaitTimeout:
            self._log_console_output(servers[-1:])
            self.fail(
                "Server %s is not accessible via its floating ip %s" % (
                    servers[-1]['id'], self.fip['id']))
|
nilq/baby-python
|
python
|
from django.shortcuts import render_to_response, render
from django.template.response import TemplateResponse
from django.template import RequestContext
from django.contrib.sites.models import Site
from django.urls import reverse
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.decorators.cache import cache_page
from django.utils.safestring import mark_safe
from django.conf import settings
from embed9.utils import get_params, common_view, get_encoded_params
WIDGET_CACHE_TIME = getattr(settings, 'WIDGET_CACHE_TIME', 60*60)
@cache_page(WIDGET_CACHE_TIME)
@xframe_options_exempt
def widget(request, app, model, pk):
    """ Renders an iframe with the widget. """
    embed, obj = common_view(app, model, pk)
    params = get_params(embed.get_form_class(), request.GET)
    template = embed.get_widget_template()
    # The object is exposed to the template under its model name,
    # e.g. {{ video }} for a 'video' model.
    return TemplateResponse(request, template, {
        model: obj,
        'params': params,
    },)
@cache_page(WIDGET_CACHE_TIME)
@xframe_options_exempt
def loader(request, app, model, pk):
    """ Renders JavaScript loader of the widget. """
    embed, obj = common_view(app, model, pk)
    params = get_params(embed.get_form_class(), request.GET)
    template = embed.get_loader_template()
    return TemplateResponse(request, template, {
        model: obj,
        # Unique DOM handle for this widget instance in the loader script.
        'widget_name': 'widget_' + model + str(pk),
        'domain': Site.objects.get_current().domain,
        # mark_safe: the URL is reversed + encoded here, not user input.
        'iframe_url': mark_safe(reverse('embed9:widget', kwargs={'app': app, 'model': model, 'pk': pk}) + get_encoded_params(params)),
        'params': params,
    })
def preview(request, app, model, pk):
    """Handle previewing and adjusting the widget.

    On POST, valid form values become the preview's customisation params;
    an invalid form suppresses the preview. On GET an empty form is shown.
    """
    embed, obj = common_view(app, model, pk)
    template = embed.get_form_template()
    show_preview = True
    params = {}
    if request.method == 'POST':
        form = embed.get_form_class()(request.POST)
        if form.is_valid():
            # Idiom fix: copy cleaned_data directly instead of a manual
            # key-by-key loop (also removed leftover commented-out print).
            params = dict(form.cleaned_data)
        else:
            show_preview = False
    else:
        form = embed.get_form_class()()
    return TemplateResponse(request, template, {
        'obj': obj,
        'form': form,
        'params': params,
        'show_preview': show_preview,
    },)
|
nilq/baby-python
|
python
|
from rdflib.graph import Graph
from rdflib.namespace import Namespace, RDFS, RDF
from owmeta_core.rdf_query_modifiers import (ZeroOrMoreTQLayer,
rdfs_subclassof_subclassof_zom_creator as mod,
rdfs_subclassof_zom,
rdfs_subclassof_zom_creator)
ex = Namespace('http://example.org/')
def test_zom_triples_choices():
    """triples_choices() expands rdfs:subClassOf zero-or-more, scoped at ex.c."""
    g = Graph()
    chain = [ex.a, ex.b, ex.c, ex.d, ex.e, ex.f, ex.g]
    for sub, sup in zip(chain, chain[1:]):
        g.add((sub, RDFS.subClassOf, sup))
    g = ZeroOrMoreTQLayer(mod(ex.c), g)
    choices = set(g.triples_choices((None, RDFS.subClassOf, [ex.f, ex.c])))
    pairs = [
        (ex.a, ex.c), (ex.a, ex.b), (ex.a, ex.d), (ex.a, ex.e), (ex.a, ex.f),
        (ex.b, ex.c), (ex.b, ex.d), (ex.b, ex.e), (ex.b, ex.f),
        (ex.c, ex.d), (ex.c, ex.e), (ex.c, ex.f),
        (ex.d, ex.e), (ex.d, ex.f),
        (ex.e, ex.f),
    ]
    expected = {(s, RDFS.subClassOf, o) for s, o in pairs}
    assert choices == expected
def test_zom_triples_choices_1():
    """RDF.type queries expand through the subclass chain (unscoped zom)."""
    g = Graph()
    chain = [ex.a, ex.b, ex.c, ex.d, ex.e, ex.f, ex.g]
    for sub, sup in zip(chain, chain[1:]):
        g.add((sub, RDFS.subClassOf, sup))
    g.add((ex.obj, RDF.type, ex.c))
    g = ZeroOrMoreTQLayer(rdfs_subclassof_zom, g)
    choices = set(g.triples_choices(([ex.obj], RDF.type, ex.g)))
    expected = {(ex.obj, RDF.type, cls)
                for cls in (ex.c, ex.d, ex.e, ex.f, ex.g)}
    assert choices == expected
def test_zom_triples_choices_2():
    """Same as test_zom_triples_choices_1 but via rdfs_subclassof_zom_creator."""
    graph = Graph()
    chain = [ex.a, ex.b, ex.c, ex.d, ex.e, ex.f, ex.g]
    for child, parent in zip(chain, chain[1:]):
        graph.add((child, RDFS.subClassOf, parent))
    graph.add((ex.obj, RDF.type, ex.c))
    layered = ZeroOrMoreTQLayer(rdfs_subclassof_zom_creator(ex.g), graph)
    actual = set(layered.triples_choices(([ex.obj], RDF.type, ex.g)))
    # ex.obj is typed c, hence also every class from c up to g
    expected = {(ex.obj, RDF.type, cls) for cls in chain[2:]}
    assert actual == expected
def test_zom_triples():
    """Plain triples() through the zom layer also follows the subclass chain."""
    graph = Graph()
    chain = [ex.a, ex.b, ex.c, ex.d, ex.e, ex.f, ex.g]
    for child, parent in zip(chain, chain[1:]):
        graph.add((child, RDFS.subClassOf, parent))
    graph.add((ex.obj, RDF.type, ex.c))
    layered = ZeroOrMoreTQLayer(rdfs_subclassof_zom_creator(ex.g), graph)
    actual = set(layered.triples((None, RDF.type, ex.g)))
    # ex.obj is typed c, hence also every class from c up to g
    expected = {(ex.obj, RDF.type, cls) for cls in chain[2:]}
    assert actual == expected
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
'''This script will compile 1D spectra into cubes, and visualize them.'''
# create (and save!) a cube, and visualize it
from mosasaurus.Cube import Cube
import sys

try:
    # first CLI argument names the .obs file describing the observation
    c = Cube(sys.argv[1])
    # reuse existing spectra (no remake) and skip interactive plots
    c.populate(remake=False, visualize=False)
    # render every frame of the cube into a movie
    c.movieCube(stride=1)
except IndexError:
    # no observation file given on the command line; show usage.
    # print() with a single argument is valid in both Python 2 and 3,
    # unlike the former Python-2-only print statement.
    print('''
Example usage:
./show.py gj1132_0227.obs
''')
|
nilq/baby-python
|
python
|
# requires:
# - phantomjs or geckodriver installed
# - selenium from pip
import sys
import unittest
import json
import argparse
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
class SmokeTest(unittest.TestCase):
    """Browser-driven smoke test against a running Pebbles instance.

    Logs in, starts and shuts down every configured blueprint, then logs
    out.  All settings (driver, url, credentials, blueprint names,
    timeouts, screenshot path) come from a JSON configuration file.
    """

    def __init__(self, testName, config):
        """testName: name of the test method to run; config: file-like JSON."""
        super(SmokeTest, self).__init__(testName)
        self.config = json.loads(config.read())

    def setUp(self):
        # Fixed: comparison was case-sensitive ("PhantomJS"/"Firefox"), so
        # the documented default "phantomjs" could never match and always
        # aborted.  Compare case-insensitively; exact old values still work.
        driver = self.config.get("driver", "phantomjs").lower()
        if driver == "phantomjs":
            self.driver = webdriver.PhantomJS()
            self.driver.delete_all_cookies()
        elif driver == "firefox":
            self.driver = webdriver.Firefox()
        else:
            raise SystemExit("unknown driver")
        # directory where failure screenshots are written
        self.img_path = self.config.get("img_path", "/tmp/")
        self.driver.set_window_size(1120, 550)

    def _login(self):
        """
        Do login and wait for the user dashboard to be visible
        Could be made more generic by permitting admin dashboard visible
        :return:
        """
        self.driver.get(self.config["url"])
        self.driver.find_element_by_name(
            'click-show-login').click()
        self.driver.find_element_by_id("email").send_keys(self.config["email"])
        elem = self.driver.find_element_by_id("password")
        elem.send_keys(self.config["password"])
        elem.submit()
        WebDriverWait(self.driver, 3).until(
            expected_conditions.presence_of_element_located((By.ID,
                                                             "user-dashboard"))
        )

    def _logout(self):
        """Navigate back to the main page and click the logout control."""
        self.driver.get(self.config["url"])
        self.driver.find_element_by_id('logout').click()

    def _test_blueprint_start(self, elem, wait_for_open=False):
        """Launch a blueprint and wait until it reports as started.

        Dummy blueprints expose a "pb-..." link once up; non-dummies
        create an "Open in ..." link instead, so the wait target differs.
        """
        elem.find_element_by_css_selector(".panel-footer span").click()
        start_timeout = self.config.get("timeouts", {}).get("start", 60)
        if not wait_for_open:
            WebDriverWait(self.driver, start_timeout).until(
                expected_conditions.visibility_of_element_located(
                    (By.PARTIAL_LINK_TEXT, "pb-")
                )
            )
        else:
            WebDriverWait(self.driver, start_timeout).until(
                expected_conditions.visibility_of_element_located(
                    (By.PARTIAL_LINK_TEXT, "Open in")
                )
            )

    def _test_blueprint_shutdown(self, elem):
        """Shut a running blueprint down and wait until its button is gone."""
        shutdown_button = elem.find_element_by_css_selector(
            "table button.btn-danger")
        shutdown_button.click()
        self._dismiss_shutdown_modal()
        shutdown_timeout = self.config.get("timeouts", {}).get("shutdown", 60)
        WebDriverWait(self.driver, shutdown_timeout).until(
            expected_conditions.invisibility_of_element_located(
                (By.CLASS_NAME, "btn-danger"))
        )

    def _dismiss_shutdown_modal(self):
        """
        Attempts to dismiss a modal by clicking on the first btn-primary
        inside the modal
        :return:
        """
        WebDriverWait(self.driver, 10).until(
            expected_conditions.visibility_of_element_located(
                (By.CLASS_NAME, "modal"))
        )
        self.driver.find_element_by_css_selector(".modal .btn-primary").click()
        WebDriverWait(self.driver, 10).until(
            expected_conditions.invisibility_of_element_located(
                (By.CLASS_NAME, "modal"))
        )

    def smoke_test(self):
        """Run the start/shutdown cycle for every configured blueprint."""
        try:
            self._login()
            dashboard = self.driver.find_element_by_xpath(
                '//*[@id="user-dashboard"]/div')
            for child in dashboard.find_elements_by_css_selector("div.panel"):
                title = child.find_element_by_css_selector("h3.panel-title")
                blueprint_name = title.text
                for bp in self.config["blueprints"]:
                    if bp in blueprint_name:
                        # dummies report startup via a "pb-" link; real
                        # blueprints create a "Click to open" link instead
                        if "dummy" in blueprint_name.lower():
                            self._test_blueprint_start(child, wait_for_open=False)
                        else:
                            self._test_blueprint_start(child, wait_for_open=True)
                        self._test_blueprint_shutdown(child)
            self._logout()
        except Exception as e:  # fixed: Python-2-only "except Exception, e"
            # keep a screenshot for debugging, report, then clean up
            import datetime
            fname = datetime.datetime.now().isoformat() + "_screenshot.png"
            self.driver.save_screenshot(self.img_path + fname)
            sys.stderr.write("failed: " + str(e))
            self._logout()

    def tearDown(self):
        self.driver.quit()
def main(args=None):
    """Parse arguments, run the smoke-test suite, and report the result.

    :param args: optional argument list (defaults to ``sys.argv[1:]``).
        Previously this parameter was accepted but silently ignored.
    :return: 0 when the suite passed, 1 otherwise, so callers and the
        shell can rely on the exit status promised in the usage text.
    """
    parser = argparse.ArgumentParser(description="Pebbles smoke tester",
                                     usage=("Run with configs to smoke test "
                                            "a running Pebbles instance. "
                                            "Outputs a string (OK/FAIL) that "
                                            "can be "
                                            "redirected to a file. Also "
                                            "returns 0 or nonzero for Posix "
                                            "compliance"))
    parser.add_argument("-c", "--config", type=argparse.FileType("r"),
                        default=sys.stdin, help=("config file in JSON "
                                                 "format. see "
                                                 "example in "
                                                 "example.config.json for "
                                                 "defaults"
                                                 ))
    parser.add_argument("-o", "--output", default=sys.stdout,
                        type=argparse.FileType("w"),
                        help=("file to print test status string"))
    parser.add_argument("--success", default="OK",
                        help=("text to display if tests run ok"))
    parser.add_argument("--fail", default="FAIL", help=("text to display if "
                                                        "tests do not run ok"))
    # fixed: honor the `args` parameter instead of always reading sys.argv
    args = parser.parse_args(args)
    suite = unittest.TestSuite()
    suite.addTest(SmokeTest("smoke_test", args.config))
    res = unittest.TextTestRunner(verbosity=0).run(suite)
    if res.wasSuccessful():
        args.output.write(args.success)
        return 0
    args.output.write(args.fail)
    return 1

if __name__ == '__main__':
    # propagate the suite status as the process exit code
    sys.exit(main())
|
nilq/baby-python
|
python
|
#102
# Time: O(n)
# Space: O(n)
# Given a binary tree, return the level order traversal
# of its nodes' values. (ie, from left to right, level by level).
#
# For example:
# Given binary tree [3,9,20,null,null,15,7],
# 3
# / \
# 9 20
# / \
# 15 7
# return its level order traversal as:
# [
# [3],
# [9,20],
# [15,7]
#]
class TreeNode():
    """Binary-tree node holding a value and optional left/right children."""

    def __init__(self, val):
        self.val = val      # payload stored at this node
        self.left = None    # left child, filled in by the caller
        self.right = None   # right child, filled in by the caller
class BFSSol():
    """Breadth-first (level-order) traversal of a binary tree."""

    def levelOrderTraversalBTI(self, root):
        """Return a list of per-level value lists, or None for an empty tree."""
        if not root:
            return None
        levels = []
        frontier = [root]
        while frontier:
            # record this level's values left-to-right, then advance
            levels.append([node.val for node in frontier])
            next_frontier = []
            for node in frontier:
                if node.left:
                    next_frontier.append(node.left)
                if node.right:
                    next_frontier.append(node.right)
            frontier = next_frontier
        return levels
|
nilq/baby-python
|
python
|
"""Some useful data structures and functions"""
import datetime
# ESPA server base URLs, keyed by deployment environment name
espa_env = {
    "dev": "https://espa-dev.cr.usgs.gov",
    "tst": "https://espa-tst.cr.usgs.gov",
    "ops": "https://espa.cr.usgs.gov"
}
# Relative API endpoint paths, appended to one of the base URLs above
api_urls = {
    "status": "/api/v1/item-status/",
    "order": "/api/v1/order/"
}
def timestamp() -> str:
    """
    Get system timestamp for output text file name in the format YYYYMMDDhhmmss

    :return: 14-digit timestamp string, e.g. '20240102030405'
    """
    # strftime produces YYYYMMDDhhmmss directly; the previous
    # replace/float/int round-trip relied on float precision to drop the
    # microseconds and was much harder to follow.
    return datetime.datetime.now().strftime('%Y%m%d%H%M%S')
|
nilq/baby-python
|
python
|
import os
import glob
import shutil
# dictionary mapping each extension with its corresponding folder
# For example, 'jpg', 'png', 'ico', 'gif', 'svg' files will be moved to 'images' folder
# feel free to change based on your needs
# NOTE: keys are lowercase extensions without the leading dot; several
# extensions may share one destination folder.
extensions = {
    "jpg": "images",
    "png": "images",
    "ico": "images",
    "gif": "images",
    "svg": "images",
    "sql": "sql",
    "exe": "programs",
    "msi": "programs",
    "pdf": "pdf",
    "xlsx": "excel",
    "csv": "excel",
    "rar": "archive",
    "zip": "archive",
    "gz": "archive",
    "tar": "archive",
    "docx": "word",
    "torrent": "torrent",
    "txt": "text",
    "ipynb": "python",
    "py": "python",
    "pptx": "powerpoint",
    "ppt": "powerpoint",
    "mp3": "audio",
    "wav": "audio",
    "mp4": "video",
    "m3u8": "video",
    "webm": "video",
    "ts": "video",
    "json": "json",
    "css": "web",
    "js": "web",
    "html": "web",
    "apk": "apk",
    "sqlite3": "sqlite3",
}
if __name__ == "__main__":
    # root directory whose files get sorted into per-category folders
    path = r"E:\Downloads"
    # setting verbose to 1 (or True) will show all file moves
    # setting verbose to 0 (or False) will show basic necessary info
    verbose = 0
    for extension, folder_name in extensions.items():
        # all files with this extension directly under `path`
        matches = glob.glob(os.path.join(path, f"*.{extension}"))
        print(f"[*] Found {len(matches)} files with {extension} extension")
        target_dir = os.path.join(path, folder_name)
        # create the destination folder on first use only
        if matches and not os.path.isdir(target_dir):
            print(f"[+] Making {folder_name} folder")
            os.mkdir(target_dir)
        for src in matches:
            dst = os.path.join(target_dir, os.path.basename(src))
            if verbose:
                print(f"[*] Moving {src} to {dst}")
            shutil.move(src, dst)
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
from sklearn import svm
import numpy as np
import json
import random
import sys
import os
import argparse
def estimate(data, target, trainingsubset, testingsubset, gamma='auto', C=1):
    """Fit an SVC on the training rows and return accuracy on the test rows."""
    if len(set(target)) < 2:
        # a classifier cannot be fit with a single class present
        return 0
    classifier = svm.SVC(gamma=gamma, C=C)
    classifier.fit(data[trainingsubset], target[trainingsubset])
    predicted = classifier.predict(data[testingsubset])
    actual = target[testingsubset]
    hits = sum(1 for p, t in zip(predicted, actual) if p == t)
    return hits / len(actual)
def successrate_stats(successrates):
    """Return ``(min, max, mean)`` of a non-empty sequence of success rates.

    Uses the built-in aggregates instead of the previous manual
    accumulation loop; behavior is identical for non-empty input.
    """
    return (min(successrates),
            max(successrates),
            sum(successrates) / len(successrates))
def successrate_cdf(successrates):
    """Return a dict mapping percent (0..100) to the count of rates <= it.

    Each rate in [0, 1] is bucketed as ``int(rate * 100)``; the histogram
    is then turned into a cumulative distribution by a running sum.
    """
    # dict with keys 0..100, all starting at zero (was dict(enumerate(...)))
    cdf = {pct: 0 for pct in range(101)}
    for rate in successrates:
        cdf[int(rate * 100)] += 1
    for pct in range(1, 101):
        cdf[pct] += cdf[pct - 1]
    return cdf
def main():
    """Train/test per-device SVM classifiers for each sensor type.

    Reads normalizeddata.json and enhancedsensors.json from the target
    directory, repeatedly samples per-device training subsets, and prints
    (and optionally writes to CSV/CDF files) the resulting accuracy stats.
    """
    parser = argparse.ArgumentParser(prog='testData')
    parser.add_argument('--default-training-size', help='set the default size of the training set. True size will be min(default_training_size, len(data)-1).', action='store', default=5)
    parser.add_argument('--limittraining', help='limit where training data may come from, e.g. a value of 0.5 limits training data to the first half of the dataset.', action='store', default=1.0)
    parser.add_argument('--limitfeatures', help='the features used for training and testing', action='store', default=None)
    parser.add_argument('--csv', help='write data to csv file', action='store')
    parser.add_argument('--cdf', help='write cdf data to file', action='store')
    parser.add_argument('-s', '--silent', help='do not output anything except errors', action='store_true')
    parser.add_argument('target', help='target directory')
    args = parser.parse_args()
    silent = args.silent
    # --- validate command-line arguments ---
    if(not os.path.isdir(args.target)):
        print("Error: Target '{}' does not exist".format(args.target))
        sys.exit()
    limittraining = float(args.limittraining)
    if (limittraining < 0) or (limittraining > 1):
        print("Error: limittraining must be between 0 and 1")
        sys.exit()
    default_training_size = float(args.default_training_size)
    if default_training_size < 0:
        print("Error: default_training_size must be >= 0")
        sys.exit()
    limitfeatures = None
    if(args.limitfeatures):
        limitfeatures = int(args.limitfeatures)
        if(limitfeatures < 1):
            print("Error: limitfeatures must be > 0")
            sys.exit()
    # --- load pre-computed feature vectors and sensor metadata ---
    normalizeddata = {}
    sensortypes = {}
    with open(os.path.join(args.target, "normalizeddata.json"), "r") as f:
        normalizeddata = json.load(f)
    with open(os.path.join(args.target, "enhancedsensors.json"), "r") as f:
        sensortypes = json.load(f)
    f = None
    if(args.csv):
        # CSV file stays open for the whole run; one row per sensor type
        f = open(args.csv, "w")
        print("sensor type\t# sensors\t# devices\tmin\tmax\tavg", file=f)
    cdf_data = {}
    for selected_sensor_type in sorted(sensortypes):
        if not silent:
            print("Selected sensor type: {0}".format(selected_sensor_type))
            print("Contains {0} sensors: ".format(len(sensortypes[selected_sensor_type])), end="")
            for sensor in sensortypes[selected_sensor_type]:
                print("{0} ({1}), ".format(sensor, sensortypes[selected_sensor_type][sensor]), end="")
            print("")
        data = np.matrix(normalizeddata[selected_sensor_type]['data'])
        if(limitfeatures and data.shape[1] >= limitfeatures):
            if not silent:
                print("Cutting feature vector at {}".format(limitfeatures))
            # keep only the first `limitfeatures` feature columns
            data = data[:,[i for i in range(limitfeatures)]]
        target_sensor_name = np.array(normalizeddata[selected_sensor_type]['target_sensor_name'])
        target_device_id = np.array(normalizeddata[selected_sensor_type]['target_device_id'])
        if not silent:
            print("Contains {0} devices: {1}".format(len(set(target_device_id)), ", ".join(set(target_device_id))))
        if(len(set(target_device_id)) < 1):
            if not silent:
                print("\n")
            continue
        #print(data.shape)
        #print(target_sensor_name.shape)
        #print(target_device_id.shape)
        #successrates_sensor = []
        successrates_device = []
        #successrates_device2 = []
        # training samples may only come from the first `limittraining`
        # fraction of the dataset rows
        upper_training_limit = limittraining * data.shape[0]
        # NOTE(review): due to operator precedence, `data.shape[0]-1 / len(...)`
        # computes shape[0] - (1/num_devices), not (shape[0]-1)/num_devices.
        # Confirm which was intended before changing.
        training_size = int(min(default_training_size, upper_training_limit, (data.shape[0]-1 / len(set(target_device_id)))))
        if(default_training_size < 1):
            # a fractional default is interpreted as a fraction of the dataset
            training_size = int(min(data.shape[0] * default_training_size, upper_training_limit, data.shape[0]-1))
        for i in range(100):
            # resample a fresh per-device training/testing split each round
            completeset = set(range(data.shape[0]))
            trainingsubset = set()
            for deviceid in set(target_device_id):
                # rejection-sample `training_size` distinct rows of this device
                device_training_set = set()
                while(len(device_training_set) < training_size):
                    # NOTE(review): randrange with a float bound relies on
                    # deprecated behavior (Python >= 3.10 warns); confirm.
                    index = random.randrange(0, upper_training_limit)
                    if(target_device_id[index] == deviceid):
                        device_training_set.add(index)
                trainingsubset |= device_training_set
            testingsubset = completeset - trainingsubset
            trainingsubset = list(trainingsubset)
            testingsubset = list(testingsubset)
            #print(target_device_id[trainingsubset])
            #successrates_sensor.append(estimate(data, target_sensor_name, trainingsubset, testingsubset))
            successrates_device.append(estimate(data, target_device_id, trainingsubset, testingsubset))
            #successrates_device2.append(estimate(data, target_device_id, trainingsubset, testingsubset, gamma=0.001, C=100.))
        #sr_min, sr_max, sr_avg = successrate_stats(successrates_sensor)
        #print("Success rate (Sensor): min {0:.2f} / max {1:.2f} / avg {2:.2f}".format(sr_min, sr_max, sr_avg))
        if not silent:
            # NOTE(review): same precedence concern as above in the
            # "values per device" computation here.
            print("Training: {} values per device; Testing: {} values per device".format(training_size, (data.shape[0]-(training_size * len(set(target_device_id))) / len(set(target_device_id)))))
        sr_min, sr_max, sr_avg = successrate_stats(successrates_device)
        cdf = successrate_cdf(successrates_device)
        cdf_data[selected_sensor_type] = cdf
        if not silent:
            print("Success rate (Device): min {0:.2f} / max {1:.2f} / avg {2:.2f}".format(sr_min, sr_max, sr_avg))
            print("\n")
        if(args.csv):
            print("{}\t{}\t{}\t{}\t{}\t{}".format(
                selected_sensor_type,
                len(sensortypes[selected_sensor_type]),
                len(set(target_device_id)),
                sr_min,
                sr_max,
                sr_avg,
                ), file=f)
    if(args.cdf):
        # CDF file: one tab-separated column per sensor type,
        # one row per percentage 0..100
        with open(args.cdf, "w") as f:
            for index in sorted(cdf_data):
                print(index, end="\t", file=f)
            print("", file=f)
            for i in range(101):
                for index in sorted(cdf_data):
                    print(cdf_data[index][i], end="\t", file=f)
                print("", file=f)

if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
# Initializes the datastore with sample data.
#
# Possibilities to execute the code in this file:
#
# * GAE SDK 1.5 or compatible: Paste the code in the interactive console and
# execute it.
#
# * GAE (production):
#
# a) Enter the directory of the Reality Builder.
#
# b) Connect to the remote API shell.
#
# On Windows XP's "cmd.exe" (substitute %-placeholders):
#
# %PYTHON_PATH%\python.exe %GAE_PATH%\remote_api_shell.py -s ^
# %VERSION%.%APPLICATION%.appspot.com
#
# Note that, despite specifying a version above, the same datastore as for
# all other versions is used: There is only one.
#
# c) Paste the code and press enter. It will execute automatically.
# Copyright 2010-2012 Felix E. Klee <felix.klee@inka.de>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os.path
import sys
sys.path.append(os.path.realpath('.'))
from google.appengine.dist import use_library
use_library('django', '0.96')
from main import Construction
from main import Block
from main import BlockProperties
from main import NewBlock
from main import NewBlockEmail
from django.utils import simplejson
from google.appengine.api import namespace_manager
# Determine the application version, used to build the server name.
if 'CURRENT_VERSION_ID' in os.environ:
    # Works in the SDK's interactive console.
    app_version = os.environ['CURRENT_VERSION_ID'].split('.')[0]
else:
    # Takes the version from the command line:
    # fixed: optparse was used here without ever being imported, which
    # raised NameError on this path; import it where it is needed.
    import optparse
    parser = optparse.OptionParser()
    parser.add_option('-s', '--server', dest='server')
    (options, args) = parser.parse_args()
    app_version = options.server.split('.')[0]
# All sample data lives in the 'demo' namespace, separate from real data.
namespace_manager.set_namespace('demo')
# Deletes all construction entries:
queries = [Construction.all()]
for query in queries:
    for result in query:
        result.delete()
# Creates the construction configuration.
construction = Construction(key_name = 'main')
construction.update_interval_client = 2000
construction.validator_version = '0'
construction.validator_src = 'scene/validator.js'
construction.validator_function_name = 'validator' # attached to "window"!
construction.blocks_data_version = '0'
construction.camera_data_version = '0'
# camera position and per-axis angles, focal length, sensor resolution
construction.camera_pos = [189.57, -159.16, 140.11]
construction.camera_a_x = 2.1589
construction.camera_a_y = -0.46583
construction.camera_a_z = 0.29
construction.camera_fl = 40.
construction.camera_sensor_resolution = 19.9
construction.put()
# Deletes all block properties entries:
queries = [BlockProperties.all()]
for query in queries:
    for result in query:
        result.delete()
# Sets up the block properties (construction as parent is important so
# that the properties form one entity group with the construction,
# which is necessary when doing transactions):
blockProperties = BlockProperties(parent=construction)
blockProperties.data_version = '0'
blockProperties.has_2_fold_symmetry = False
blockProperties.pos_spacing_xy = 20.
blockProperties.pos_spacing_z = 10.
blockProperties.outline_bxy = '[[0, 0], [1, 0], [2, 1], [0, 1]]'
# one JSON-encoded offsets list per rotation step (4 rotations)
blockProperties.collision_offsets_list_bxy = \
    ['[[-1, 0], [0, 0], [1, 0]]',
     '[[0, 0], [1, 0], [0, -1], [1, -1]]',
     '[[0, 0], [1, 0]]',
     '[[0, 1], [1, 1], [0, 0], [1, 0]]']
blockProperties.attachment_offsets_list_b = \
    ['[[0, 0, -1], [0, 0, 1]]',
     '[[0, 0, -1], [0, 0, 1]]',
     '[[0, 0, -1], [0, 0, 1], [1, 0, -1], [1, 0, 1]]',
     '[[0, 0, -1], [0, 0, 1]]']
blockProperties.rot_center_bxy = [0.5, 0.5]
blockProperties.put()
# Deletes all new block entries:
queries = [NewBlock.all()]
for query in queries:
    for result in query:
        result.delete()
# Sets up the new block:
newBlock = NewBlock(parent=construction)
newBlock.data_version = '0'
newBlock.init_pos_b = [4, 0, 4]
newBlock.init_a = 0
newBlock.put()
# Deletes all block entries:
queries = [Block.all()]
for query in queries:
    for result in query:
        result.delete()
# Creates block entries:
# each entry is [x_b, y_b, z_b, rotation] in block coordinates
cs = [[1, 4, 3, 1], [1, 4, 2, 0], [1, 4, 1, 3], [1, 4, 0, 2],
      [5, 5, 1, 2], [5, 5, 0, 2], [0, 1, 0, 3], [3, 0, 0, 2],
      [4, 0, 0, 0], [1, 0, 0, 0], [4, 4, 0, 0]]
for c in cs:
    x_b = c[0]
    y_b = c[1]
    z_b = c[2]
    a = c[3]
    block = Block.insert_at(construction, [x_b, y_b, z_b], a)
    block.state = 2
    block.put()
# Deletes all new block email entries:
queries = [NewBlockEmail.all()]
for query in queries:
    for result in query:
        result.delete()
# Creates new block email entries:
newBlockEmail = NewBlockEmail(parent=construction)
newBlockEmail.sender_address = 'Admin <admin@example.com>'
newBlockEmail.recipient_address = 'Block Builders <block.builders@example.com>'
newBlockEmail.put()
# Python 2 print statement: this script targets the GAE Python 2 runtime.
print 'Done.'
|
nilq/baby-python
|
python
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
lvmmodel.io
============
I/O utility functions for files in lvmmodel.
"""
import os
from astropy.io import fits
import yaml
import numpy as np
import warnings
from lvmutil.log import get_logger
log = get_logger()
# per-channel cache so each throughput file is only loaded once
_thru = dict()

def load_throughput(channel):
    """Returns specter Throughput object for the given channel 'b', 'r', or 'z'.

    Parameters
    ----------
    channel : {'b', 'r', 'z'}
        Spectrograph channel.
    """
    import specter.throughput
    global _thru
    key = channel.lower()
    if key not in _thru:
        path = os.path.join(os.environ['LVMMODEL'], 'data', 'throughput',
                            'thru-{0}.fits'.format(key))
        _thru[key] = specter.throughput.load_throughput(path)
    return _thru[key]
# per-channel cache so each PSF file is only loaded once
_psf = dict()

def load_psf(channel):
    """Returns specter PSF object for the given channel 'b', 'r', or 'z'.

    Parameters
    ----------
    channel : {'b', 'r', 'z'}
        Spectrograph channel.
    """
    import specter.psf
    global _psf
    key = channel.lower()
    if key not in _psf:
        path = os.path.join(os.environ['LVMMODEL'], 'data', 'specpsf',
                            'psf-{0}.fits'.format(key))
        _psf[key] = specter.psf.load_psf(path)
    return _psf[key]
# module-level cache for the loaded parameter dictionary
_params = None

def load_lvmparams(config='lvm', telescope='1m'):
    """Returns LVM parameter dictionary loaded from lvmmodel/data/lvm.yaml.

    Parameters:
        config (str):
            Which config yaml to load
        telescope (str):
            Which telescope config to load.
    """
    # build param name
    if config == 'lvm':
        config_name = '{0}_{1}.yaml'.format(config, telescope)
    else:
        config_name = '{0}.yaml'.format(config)
    global _params
    # NOTE(review): the cache key only considers `telescope`; switching
    # `config` alone does not trigger a reload — confirm this is intended.
    sametele = _params is not None and 'telescope' in _params and telescope == _params['telescope']
    if _params is None or not sametele:
        lvmparamsfile = os.path.join(os.environ['LVMMODEL'], 'data', config_name)
        with open(lvmparamsfile) as par:
            # fixed: use safe_load — yaml.load without an explicit Loader is
            # deprecated and can construct arbitrary Python objects; config
            # data needs only plain YAML types.
            _params = yaml.safe_load(par)
        # - add config and telescope name
        _params['config_name'] = config_name
        _params['telescope'] = telescope
        # - for temporary backwards compability after 'exptime' -> 'exptime_dark'
        if ('exptime' not in _params) and ('exptime_dark' in _params):
            _params['exptime'] = _params['exptime_dark']
        # - Augment params with wavelength coverage from specpsf files
        # - wavemin/max = min/max wavelength covered by *any* fiber on the CCD
        # - wavemin/max_all = min/max wavelength covered by *all* fibers
        for channel in ['b', 'r', 'z']:
            hdr = fits.getheader(findfile('specpsf/psf-{}.fits'.format(channel)), 0)
            _params['ccd'][channel]['wavemin'] = hdr['WAVEMIN']
            _params['ccd'][channel]['wavemax'] = hdr['WAVEMAX']
            _params['ccd'][channel]['wavemin_all'] = hdr['WMIN_ALL']
            _params['ccd'][channel]['wavemax_all'] = hdr['WMAX_ALL']
    return _params
# Added and still needs to be committed and pushed to desihub
_gfa = None

def load_gfa():
    """Returns GFA table from lvmmodel/data/focalplane/gfa.ecsv"""
    global _gfa
    from astropy.table import Table
    # os is imported already in the lvmmodel io.py
    import os
    if _gfa is None:
        path = os.path.join(os.environ['LVMMODEL'], 'data', 'focalplane',
                            'gfa.ecsv')
        _gfa = Table.read(path, format='ascii.ecsv')
    return _gfa
# module-level cache for the fiber position table
_fiberpos = None

def load_fiberpos():
    """Returns fiberpos table from lvmmodel/data/focalplane/fiberpos.fits.
    """
    global _fiberpos
    from astropy.table import Table
    if _fiberpos is None:
        fiberposfile = os.path.join(os.environ['LVMMODEL'], 'data', 'focalplane', 'fiberpos.fits')
        _fiberpos = Table.read(fiberposfile)
        # - Convert to upper case if needed
        # - Make copy of colnames b/c they are updated during iteration
        for col in list(_fiberpos.colnames):
            if col.islower():
                _fiberpos.rename_column(col, col.upper())
        # - Temporary backwards compatibility for renamed columns:
        # keep both old and new column names populated either way
        if 'POSITIONER' in _fiberpos.colnames:
            import warnings
            warnings.warn('old fiberpos.fits with POSITIONER column instead of LOCATION; please update your $LVMMODEL checkout', DeprecationWarning)
            _fiberpos['LOCATION'] = _fiberpos['POSITIONER']
        else:
            _fiberpos['POSITIONER'] = _fiberpos['LOCATION']
        if 'SPECTROGRAPH' in _fiberpos.colnames:
            import warnings
            warnings.warn('old fiberpos.fits with SPECTROGRAPH column instead of SPECTRO; please update your $LVMMODEL checkout', DeprecationWarning)
            _fiberpos['SPECTRO'] = _fiberpos['SPECTROGRAPH']
        else:
            _fiberpos['SPECTROGRAPH'] = _fiberpos['SPECTRO']
    return _fiberpos
# cache of loaded tiles tables, keyed by absolute file path
_tiles = dict()

def load_tiles(onlydesi=True, extra=False, tilesfile=None, cache=True):
    """Return DESI tiles structure from lvmmodel/data/footprint/desi-tiles.fits.

    Parameters
    ----------
    onlydesi : :class:`bool` (default True)
        If ``True``, trim to just the tiles in the DESI footprint.
    extra : :class:`bool`, (default False)
        If ``True``, include extra layers with PROGRAM='EXTRA'.
    tilesfile : (str)
        Name of tiles file to load; or None for default.
        Without path, look in $LVMMODEL/data/footprint, otherwise load file.
    cache : :class:`bool`, (default True)
        Use cache of tiles data.
    """
    global _tiles
    if tilesfile is None:
        tilesfile = 'desi-tiles.fits'
    # - Check if tilesfile includes a path (absolute or relative)
    tilespath, filename = os.path.split(tilesfile)
    if tilespath == '':
        tilesfile = os.path.join(os.environ['LVMMODEL'], 'data', 'footprint', filename)
    # - standarize path location
    tilesfile = os.path.abspath(tilesfile)
    if cache and tilesfile in _tiles:
        tiledata = _tiles[tilesfile]
    else:
        with fits.open(tilesfile, memmap=False) as hdulist:
            tiledata = hdulist[1].data
        #
        # Temporary workaround for problem identified in
        # https://github.com/desihub/lvmmodel/issues/30
        #
        if any([c.bzero is not None for c in tiledata.columns]):
            # fixed: this previously indexed the module cache (_tiles[k])
            # with column names, a guaranteed KeyError; touching each
            # column of tiledata is what forces the scaled-column
            # conversion before the file handle goes away.
            foo = [tiledata[k].dtype for k in tiledata.dtype.names]
        # - Check for out-of-date tiles file
        # (np.uint16 instead of the deprecated 'u2' string alias)
        if np.issubdtype(tiledata['OBSCONDITIONS'].dtype, np.uint16):
            import warnings
            warnings.warn('old desi-tiles.fits with uint16 OBSCONDITIONS; please update your $LVMMODEL checkout', DeprecationWarning)
        # - load cache for next time
        if cache:
            _tiles[tilesfile] = tiledata
    # - Filter to only the DESI footprint if requested
    subset = np.ones(len(tiledata), dtype=bool)
    if onlydesi:
        subset &= tiledata['IN_DESI'] > 0
    # - Filter out PROGRAM=EXTRA tiles if requested
    if not extra:
        subset &= ~np.char.startswith(tiledata['PROGRAM'], 'EXTRA')
    if np.all(subset):
        return tiledata
    else:
        return tiledata[subset]
# module-level cache for the plate scale table
_platescale = None

def load_platescale():
    '''
    Loads platescale.txt, returning structured array with columns

    radius: radius from center of focal plane [mm]
    theta: radial angle that has a centroid at this radius [deg]
    radial_platescale: Meridional (radial) plate scale [um/arcsec]
    az_platescale: Sagittal (azimuthal) plate scale [um/arcsec]
    '''
    global _platescale
    if _platescale is None:
        dtype = [
            ('radius', 'f8'),
            ('theta', 'f8'),
            ('radial_platescale', 'f8'),
            ('az_platescale', 'f8'),
        ]
        # columns 0, 1, 6 and 7 of the text file hold the fields above
        _platescale = np.loadtxt(findfile('focalplane/platescale.txt'),
                                 usecols=[0, 1, 6, 7], dtype=dtype)
    return _platescale
def reset_cache():
    '''Reset I/O cache'''
    global _thru, _psf, _params, _gfa, _fiberpos, _tiles, _platescale
    # dict-backed caches go back to empty dicts ...
    _thru, _psf, _tiles = dict(), dict(), dict()
    # ... and single-object caches back to None
    _params = _gfa = _fiberpos = _platescale = None
def load_target_info():
    '''
    Loads data/targets/targets.yaml and returns the nested dictionary

    This is primarily syntactic sugar to avoid end users constructing
    paths and filenames by hand (which e.g. broke when targets.dat was
    renamed to targets.yaml)
    '''
    targetsfile = os.path.join(datadir(), 'targets', 'targets.yaml')
    # fall back to the pre-rename filename for old data checkouts
    if not os.path.exists(targetsfile):
        targetsfile = os.path.join(datadir(), 'targets', 'targets.dat')
    with open(targetsfile) as fx:
        # fixed: safe_load — yaml.load without a Loader is deprecated and
        # unsafe; plain config data needs no arbitrary-object tags.
        data = yaml.safe_load(fx)
    return data
def load_pixweight(nside):
    '''
    Loads lvmmodel/data/footprint/desi-healpix-weights.fits

    nside: after loading, the array will be resampled to the
    passed HEALPix nside
    '''
    import healpy as hp
    # ADM read in the standard pixel weights file
    weightfile = os.path.join(os.environ['LVMMODEL'], 'data', 'footprint',
                              'desi-healpix-weights.fits')
    with fits.open(weightfile) as hdulist:
        weights = hdulist[0].data
    # ADM warn when asked for a finer resolution than the file provides
    stored_nside = hp.npix2nside(len(weights))
    if stored_nside < nside:
        log.warning("downsampling is fuzzy...Passed nside={}, "
                    "but file {} is stored at nside={}".format(nside, weightfile, stored_nside))
    # ADM resample the map
    return hp.pixelfunc.ud_grade(weights, nside, order_in='NESTED', order_out='NESTED')
def findfile(filename):
    '''
    Return full path to data file $LVMMODEL/data/filename

    Note: this is a precursor for a potential future refactor where
    lvmmodel data would be installed with the package and $LVMMODEL
    would become an optional override.
    '''
    # delegate root resolution to datadir(), which honors $LVMMODEL
    return os.path.join(datadir(), filename)
def datadir():
    '''
    Returns location to lvmmodel data

    if set, $LVMMODEL overrides data installed with the package
    '''
    override = os.environ.get('LVMMODEL')
    if override is not None:
        return os.path.abspath(os.path.join(override, 'data'))
    # fall back to the data shipped with the installed package
    import pkg_resources
    return pkg_resources.resource_filename('lvmmodel', 'data')
|
nilq/baby-python
|
python
|
import glob
import bz2

# Concatenate every bz2-compressed metadata file under the objects tree
# into a single newline-delimited JSON dump.
with open('all_data.ndjson', 'w') as out:
    for division in glob.glob('./OpenAccess-master/metadata/objects/*'):
        print('Working on: ', division)
        for archive in glob.glob(f'{division}/*'):
            with bz2.open(archive, "rb") as compressed:
                out.write(compressed.read().decode())
|
nilq/baby-python
|
python
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import mars.oscar as mo
from mars.tests.core import require_ray
from mars.tests.conftest import * # noqa
from mars.utils import lazy_import
from mars.oscar.backends.ray.utils import placement_group_info_to_addresses
from mars.services.task.supervisor.task_manager import \
TaskConfigurationActor
ray = lazy_import('ray')
@require_ray
@pytest.mark.asyncio
async def test_task_manager_creation(ray_start_regular):
    """Create a TaskConfigurationActor on a Ray-backed pool and query it."""
    # map the placement group onto oscar-style addresses for 'test_cluster'
    mo.setup_cluster(address_to_resources=placement_group_info_to_addresses('test_cluster', [{'CPU': 2}]))
    # the pool is an ActorHandle, it does not have an async context.
    pool = await mo.create_actor_pool('ray://test_cluster/0/0', n_process=2,
                                      labels=[None] + ['numa-0'] * 2)
    assert pool
    # create configuration
    await mo.create_actor(TaskConfigurationActor, dict(),
                          uid=TaskConfigurationActor.default_uid(),
                          address='ray://test_cluster/0/0')
    # looking the actor up again and calling it verifies it is live
    configuration_ref = await mo.actor_ref(
        TaskConfigurationActor.default_uid(),
        address='ray://test_cluster/0/0')
    await configuration_ref.get_config()
|
nilq/baby-python
|
python
|
from django.urls import path,include
from . import views
# REST API routes for the project, profile and rating list endpoints.
urlpatterns = [
    path('api/projects/', views.ProjectList.as_view()),
    path('api/projects/profile', views.ProfileList.as_view()),
    path('api/projects/ratings', views.RatingList.as_view()),
]
|
nilq/baby-python
|
python
|
from output.models.ms_data.attribute.att_j004_xsd.att_j004 import Test
# Re-export the generated `Test` model as this package's public API.
__all__ = [
    "Test",
]
|
nilq/baby-python
|
python
|
import param
from . import API1TestCase
# TODO: I copied the tests from testobjectselector, although I
# struggled to understand some of them. Both files should be reviewed
# and cleaned up together.
# TODO: tests copied from testobjectselector could use assertRaises
# context manager (and could be updated in testobjectselector too).
class TestListParameters(API1TestCase):
    """Tests for `param.List`: item-type checking, bounds, and None handling.

    Uses `assertRaises` context managers instead of the original
    try/except/else blocks (as the module's own TODO suggests).
    """

    def setUp(self):
        super(TestListParameters, self).setUp()
        class P(param.Parameterized):
            e = param.List([5,6,7], item_type=int)
            l = param.List(["red","green","blue"], item_type=str, bounds=(0,10))
        self.P = P

    def test_default_None(self):
        # Declaring a List with an explicit empty default must not raise,
        # and the default must be preserved.
        class Q(param.Parameterized):
            r = param.List(default=[])  # Also check None
        self.assertEqual(Q().r, [])

    def test_set_object_constructor(self):
        p = self.P(e=[6])
        self.assertEqual(p.e, [6])

    def test_set_object_outside_bounds(self):
        # Lists longer than the upper bound (10) must be rejected.
        p = self.P()
        with self.assertRaises(ValueError):
            p.l = [6] * 11

    def test_set_object_wrong_type(self):
        # Elements must match item_type=int.
        p = self.P()
        with self.assertRaises(TypeError):
            p.e = ['s']

    def test_set_object_not_None(self):
        # A List without allow_None must reject None.
        p = self.P(e=[6])
        with self.assertRaises(ValueError):
            p.e = None
# Allow running this test module directly via the legacy nose runner.
if __name__ == "__main__":
    import nose
    nose.runmodule()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import url
from .views import create_view
from .views import update_view
from .views import delete_view
from .views import list_view
# CRUD routes for stagesetting objects; each named url is built separately
# so the names can be reversed individually elsewhere in the project.
stagesetting_create = url(regex=r'^add/$',
                          view=create_view,
                          name='stagesetting_create',
                          kwargs={})
stagesetting_update = url(regex=r'^(?P<pk>\d+)/update/$',
                          view=update_view,
                          name='stagesetting_update',
                          kwargs={})
stagesetting_delete = url(regex=r'^(?P<pk>\d+)/delete/$',
                          view=delete_view,
                          name='stagesetting_delete',
                          kwargs={})
stagesetting_list = url(regex=r'^$',
                        view=list_view,
                        name='stagesetting_list',
                        kwargs={})
urlpatterns = [
    stagesetting_create,
    stagesetting_update,
    stagesetting_delete,
    stagesetting_list,
]
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
from ..query_constraints_factory import QueryConstraintsFactory
from ..query_constraints import QueryConstraints
from .spatial_temporal_constraints_builder import SpatialTemporalConstraintsBuilder
class VectorQueryConstraintsFactory(QueryConstraintsFactory):
    """
    A query constraints factory with additional methods for creating spatial and/or
    temporal constraints for vector data. Do not construct this class manually, instead,
    get the constraints factory by using the `constraints_factory()` method of the
    query builder.
    """

    def spatial_temporal_constraints(self):
        """
        Creates a spatial temporal constraints builder that can be used to construct
        spatial and/or temporal constraints.
        Returns:
            A new `pygw.query.vector.spatial_temporal_constraints_builder.SpatialTemporalConstraintsBuilder`.
        """
        java_builder = self._java_ref.spatialTemporalConstraints()
        return SpatialTemporalConstraintsBuilder(java_builder)

    def filter_constraints(self, filter_constraint):
        """
        Constrain a query using a filter created by pygw.query.FilterFactory.
        Args:
            filter_constraint (filter): The filter to constrain the query by.
        Returns:
            A `pygw.query.query_constraints.QueryConstraints` with the given filter.
        """
        java_constraints = self._java_ref.filterConstraints(filter_constraint)
        return QueryConstraints(java_constraints)

    def cql_constraints(self, cql_expression):
        """
        Constrain a query using a CQL expression.
        Args:
            cql_expression (str): The CQL expression to constrain the query by.
        Returns:
            A `pygw.query.query_constraints.QueryConstraints` with the given CQL expression.
        """
        java_constraints = self._java_ref.cqlConstraints(cql_expression)
        return QueryConstraints(java_constraints)
|
nilq/baby-python
|
python
|
from insights.parsers.route import Route
from insights.tests import context_wrap
ROUTE = '''
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
10.66.208.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
169.254.0.0 0.0.0.0 255.255.0.0 U 1002 0 0 eth0
0.0.0.0 10.66.208.254 0.0.0.0 UG 0 0 0 eth0
'''
def test_route():
    """Parse the ROUTE fixture and verify the first row and membership."""
    parsed = Route(context_wrap(ROUTE))
    expected_first = {'Destination': '10.66.208.0',
                      'Gateway': '0.0.0.0',
                      'Genmask': '255.255.255.0',
                      'Flags': 'U',
                      'Metric': '0',
                      'Ref': '0',
                      'Use': '0',
                      'Iface': 'eth0'}
    # Only the first row is inspected; iteration follows table order.
    for entry in parsed:
        assert entry == expected_first
        break
    assert '169.254.0.0' in parsed
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3.7
# Copyright: Ismael Narváez Berenjeno
from datetime import datetime
def get_time_isoformat():
    """Return the current local time as an ISO-8601 string.

    :return: ISO timestamp
    :rtype: str
    """
    now = datetime.now()
    return now.isoformat()
|
nilq/baby-python
|
python
|
"""
面试题21:包含min函数的栈
题目:定义栈的数据结构,请在该类型中实现一个能够得到栈的最小元素的min函数。在该栈中,调用min、push及pop的时间复杂度都是O(1)。
https://leetcode.com/problems/min-stack/
Design a stack that supports push, pop, top, and retrieving the minimum element in constant time.
push(x) -- Push element x onto stack.
pop() -- Removes the element on top of the stack.
top() -- Get the top element.
getMin() -- Retrieve the minimum element in the stack.
Example:
MinStack minStack = new MinStack();
minStack.push(-2);
minStack.push(0);
minStack.push(-3);
minStack.getMin(); --> Returns -3.
minStack.pop();
minStack.top(); --> Returns 0.
minStack.getMin(); --> Returns -2.
"""
from collections import deque
class Stack:
    """Minimal LIFO stack backed by `collections.deque`."""

    def __init__(self):
        self._data = deque()

    def push(self, val):
        """Place *val* on top of the stack."""
        return self._data.append(val)

    def pop(self):
        """Remove and return the top element."""
        return self._data.pop()

    def empty(self):
        """Return True when the stack holds no elements."""
        return len(self._data) == 0

    def top(self):
        """Return (without removing) the top element."""
        return self._data[-1]
class MinStack:
    """Stack with O(1) push, pop, top and getMin.

    A second stack of running minima mirrors the value stack: its top is
    always the minimum of the values currently stored.
    """

    def __init__(self):
        """initialize your data structure here."""
        self.s = Stack()      # raw values
        self.mins = Stack()   # running minimum per pushed value

    def push(self, x):
        """Push *x* and record the new running minimum.

        :type x: int
        :rtype: void
        """
        self.s.push(x)
        current_min = x if self.mins.empty() else min(x, self.mins.top())
        self.mins.push(current_min)

    def pop(self):
        """Pop and return the top value; the minima stack pops in lockstep.

        :rtype: void
        """
        self.mins.pop()
        return self.s.pop()

    def top(self):
        """Return the top value without removing it.

        :rtype: int
        """
        return self.s.top()

    def getMin(self):
        """Retrieve the minimum element in the stack.

        :rtype: int
        """
        return self.mins.top()
def test():
    """Exercise MinStack with the LeetCode example sequence."""
    stack = MinStack()
    for value in (-2, 0, -3):
        stack.push(value)
    assert stack.getMin() == -3  # --> Returns -3.
    stack.pop()
    assert stack.top() == 0  # --> Returns 0.
    assert stack.getMin() == -2  # --> Returns -2.


if __name__ == '__main__':
    test()
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QFrame, QPushButton, QLabel
class LaunchScreen(QFrame):
    """Landing screen: application title plus two big navigation buttons."""

    def __init__(self, parent):  # constructor
        super().__init__(parent)
        self.windowClass = parent  # allows calling of parent class methods
        self.setStyleSheet(self._read_text('css/window.css'))
        self.initScreen()

    @staticmethod
    def _read_text(file_path):
        """Return a file's contents, closing the handle.

        The original used `open(...).read()`, which leaks the file object.
        """
        with open(file_path) as handle:
            return handle.read()

    def initScreen(self):  # gui
        """Build the layouts, title label and the two navigation buttons."""
        # Raw string: "\L" and "\s" are not valid escape sequences, so the
        # original literal only worked by accident (DeprecationWarning on
        # modern Python).
        QtGui.QFontDatabase.addApplicationFont(r"fonts\Lora\static\Lora-Regular.ttf")
        self.verticalBox = QVBoxLayout()
        self.verticalBox.setAlignment(QtCore.Qt.AlignTop)
        self.upperHBox = QHBoxLayout()
        self.upperHBox.setAlignment(QtCore.Qt.AlignCenter)
        self.lowerHBox = QHBoxLayout()
        self.lowerHBox.setAlignment(QtCore.Qt.AlignCenter)
        self.lowerHBox.setSpacing(50)
        self.lowerHBox.setContentsMargins(50, 50, 50, 50)
        self.upperHBox.addSpacing(35)
        self.titleLabel = QLabel('Inventory', self)
        self.titleLabel.setStyleSheet(self._read_text("css/titleLabels.css"))
        self.titleLabel.setFixedSize(650,195)
        self.upperHBox.addWidget(self.titleLabel)
        self.collectionsButton = QPushButton('Access Your\nCollections', self)
        self.collectionsButton.setFixedSize(350,350)
        self.collectionsButton.setStyleSheet(self._read_text('css/bigButtons.css'))
        self.collectionsButton.clicked.connect(self.collectionEvent)
        self.lowerHBox.addWidget(self.collectionsButton)
        self.newButton = QPushButton('Make A New\nCollection', self)
        self.newButton.setFixedSize(350,350)
        self.newButton.setStyleSheet(self._read_text('css/bigButtons.css'))
        self.newButton.clicked.connect(self.newEvent)
        self.lowerHBox.addWidget(self.newButton)
        self.verticalBox.addLayout(self.upperHBox)
        self.verticalBox.addLayout(self.lowerHBox)
        self.setLayout(self.verticalBox)

    # navigation events
    def collectionEvent(self):
        """Navigate to the collections screen."""
        self.windowClass.collectionScreen()

    def newEvent(self):
        """Navigate to the new-collection chooser."""
        self.windowClass.chooseScreen()
|
nilq/baby-python
|
python
|
"""Conditional Grammar."""
from sqlfluff.core.parser.segments import Indent
from sqlfluff.core.parser.match_result import MatchResult
from sqlfluff.core.parser.match_wrapper import match_wrapper
from sqlfluff.core.parser.grammar.base import (
BaseGrammar,
)
class Conditional(BaseGrammar):
    """A grammar which is conditional on the parse context.
    | NOTE: The Conditional grammar is assumed to be operating
    | within a Sequence grammar, and some of the functionality
    | may not function within a different context.
    Args:
        *args: A meta segment which is instantiated
            conditionally upon the rules set.
        config_type: The area of the config that is used
            when evaluating the status of the given rules.
        rules: A set of `rule=boolean` pairs, which are
            evaluated when understanding whether conditions
            are met for this grammar to be enabled.
    Example:
    .. code-block::
        Conditional(Dedent, config_type="indent", indented_joins=False)
    This effectively says that if `indented_joins` in the "indent" section
    of the current config is set to `True`, then this grammar will allow
    a `Dedent` segment to be matched here. If `indented_joins` is set to
    `False`, it will be as though there was no `Dedent` in this sequence.
    | NOTE: While the Conditional grammar is set up to allow different
    | sources of configuration, it relies on configuration keys being
    | available within the ParseContext. Practically speaking only the
    | "indentation" keys are currently set up.
    """

    def __init__(self, *args, config_type: str = "indentation", **rules):
        if not all(issubclass(arg, Indent) for arg in args):
            raise ValueError(
                "Conditional is only designed to work with Indent segments."
            )
        if len(args) != 1:
            raise ValueError(
                "Conditional is only designed to work with a single element."
            )
        if not config_type:
            raise ValueError("Conditional config_type must be set.")
        # BUG FIX: the original used `("indentation")`, which is a plain
        # string, so `not in` performed a *substring* test and silently
        # accepted values like "indent". A one-element tuple makes this a
        # true membership check.
        elif config_type not in ("indentation",):
            raise ValueError(
                "Only 'indentation' is supported as a Conditional config_type."
            )
        if not rules:
            raise ValueError("Conditional requires rules to be set.")
        self._config_type = config_type
        self._config_rules = rules
        super().__init__(*args)

    def is_enabled(self, parse_context):
        """Evaluate conditionals and return whether enabled."""
        # NOTE: Because only "indentation" is the only current config_type
        # supported, this code is much simpler that would be required in
        # future if multiple options are available.
        if self._config_type != "indentation":
            raise ValueError(
                "Only 'indentation' is supported as a Conditional config_type."
            )
        config_section = parse_context.indentation_config
        # If any rules fail, return no match.
        for rule, val in self._config_rules.items():
            # Assume False if not set.
            conf_val = config_section.get(rule, False)
            # Coerce to boolean.
            if val != bool(conf_val):
                return False
        return True

    @match_wrapper()
    def match(self, segments, parse_context):
        """Evaluate conditionals and return content."""
        if not self.is_enabled(parse_context):
            return MatchResult.from_unmatched(segments)
        # Instantiate the new element and return
        new_seg = self._elements[0]()
        return MatchResult((new_seg,), segments)
|
nilq/baby-python
|
python
|
from textformer.models import Seq2Seq
# Creating the Seq2Seq model
# NOTE(review): toy instantiation (single-token input/output vocabularies),
# presumably for demonstration only — confirm intended sizes before reuse.
seq2seq = Seq2Seq(n_input=1, n_output=1, n_hidden=512, n_embedding=256, n_layers=2,
                  ignore_token=None, init_weights=None, device='cpu')
|
nilq/baby-python
|
python
|
from django.conf.urls import include, url
import django.contrib.auth.views
from work_evid import views
# Routes for the work_evid app: CRUD views for Work, Firm and Todo plus
# auth login/logout.
# NOTE(review): `django.contrib.auth.views.login/logout` are the old
# function-based views (removed in Django 2.1); this file targets legacy Django.
urlpatterns = [
    url(r'^overviews/$', views.overviews, name='overviews'),
    url(r'^delete_work/$', views.delete_work, name='delete_work'),
    url(r'^work/$', views.WorkList.as_view(), name='work_list'),
    url(r'^work/add/$', views.WorkCreate.as_view(), name='work_create'),
    url(r'^work/detail/(?P<pk>\d+)/$', views.WorkDetail.as_view(), name='work_detail'),
    url(r'^work/update/(?P<pk>\d+)/$', views.WorkUpdate.as_view(), name='work_update'),
    url(r'^work/delete/(?P<pk>\d+)/$', views.WorkDelete.as_view(), name='work_delete'),
    url(r'^firm/$', views.FirmList.as_view(), name='firm_list'),
    url(r'^firm/add/$', views.FirmCreate.as_view(), name='firm_create'),
    url(r'^firm/detail/(?P<pk>\d+)/$', views.FirmDetail.as_view(), name='firm_detail'),
    url(r'^firm/update/(?P<pk>\d+)/$', views.FirmUpdate.as_view(), name='firm_update'),
    url(r'^firm/delete/(?P<pk>\d+)/$', views.FirmDelete.as_view(), name='firm_delete'),
    url(r'^todo/$', views.TodoList.as_view(), name='todo_list'),
    url(r'^todo/(?P<firm>\d+)/$', views.TodoList.as_view(), name='todo_list_firm'),
    url(r'^todo/add/$', views.TodoCreate.as_view(), name='todo_create'),
    url(r'^todo/detail/(?P<pk>\d+)/$', views.TodoDetail.as_view(), name='todo_detail'),
    url(r'^todo/update/(?P<pk>\d+)/$', views.TodoUpdate.as_view(), name='todo_update'),
    url(r'^todo/delete/(?P<pk>\d+)/$', views.TodoDelete.as_view(), name='todo_delete'),
    url(r'^accounts/login/$', django.contrib.auth.views.login, name='login'),
    url(r'^accounts/logout/$', django.contrib.auth.views.logout, name='logout'),
    url(r'^$', views.WorkList.as_view(), name='index'),
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import tensorflow as tf
import pickle
import cv2
import os
import os.path as path
from utils import predict, predict_no_tiles
from model import dilation_model_pretrained
from datasets import CONFIG
if __name__ == '__main__':
    # NOTE(review): `test` toggles a debugging path below that drops into pdb
    # and dumps intermediate tensors — presumably a development leftover.
    test = True
    # Choose between 'cityscapes' and 'camvid'
    dataset = 'cityscapes'
    # Load dict of pretrained weights
    print('Loading pre-trained weights...')
    with open(CONFIG[dataset]['weights_file'], 'rb') as f:
        w_pretrained = pickle.load(f)
    print('Done.')
    # Create checkpoint directory
    checkpoint_dir = path.join('data/checkpoint', 'dilation_' + dataset)
    if not path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    # Image in / out parameters
    input_image_path = path.join('data', dataset + '.png')
    output_image_path = path.join('data', dataset + '_out.png')
    # Build pretrained model and save it as TF checkpoint
    with tf.Session() as sess:
        # Choose input shape according to dataset characteristics
        if not test:
            input_h, input_w, input_c = CONFIG[dataset]['input_shape']
        else:
            input_h, input_w, input_c = (1452, 2292, 3)  # REVIEW: dr-eye-ve size.
        input_tensor = tf.placeholder(tf.float32, shape=(None, input_h, input_w, input_c), name='input_placeholder')
        # Create pretrained model
        model = dilation_model_pretrained(dataset, input_tensor, w_pretrained, trainable=False)
        sess.run(tf.global_variables_initializer())
        # Save both graph and weights
        saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))
        saver.save(sess, path.join(checkpoint_dir, 'dilation'))
        asdf = saver.save(sess, path.join(checkpoint_dir, 'dilation.ckpt'))
        print("saved asdf:", asdf)
    # Restore both graph and weights from TF checkpoint
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph(path.join(checkpoint_dir, 'dilation.meta'))
        saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
        graph = tf.get_default_graph()
        output = 'softmax:0'
        model = graph.get_tensor_by_name(output)
        model = tf.reshape(model, shape=(1,)+CONFIG[dataset]['output_shape'])
        # Read and predict on a test image
        input_image = cv2.imread(input_image_path)
        # import matplotlib.pyplot as plt
        # plt.imshow(input_image)
        # plt.show()
        input_tensor = graph.get_tensor_by_name('input_placeholder:0')
        if test:
            tensors = [n.name for n in tf.get_default_graph().as_graph_def().node]
            for tensor in tensors:
                print(tensor)
            import numpy as np
            import os
            # NOTE(review): rebinding `path` to a string here shadows the
            # module-level `os.path as path` alias for the rest of this scope.
            path = '/home/josephz/tmp/data/dr-eyeve/35/frames/0057.png'
            image = cv2.imread(path)
            # output = 'input_placeholder:0'
            outputs = ('conv1_1/Relu:0', 'conv1_2/Relu:0',
                       # 'pool1/MaxPool:0',
                       'conv2_1/Relu:0', 'conv2_2/Relu:0',
                       # 'conv3_1/Relu:0', 'conv3_2/Relu:0', 'conv3_3/Relu:0',
                       'conv5_3/Relu:0',
                       'fc6/Relu:0',
                       'fc7/Relu:0',
                       'final/Relu:0',
                       'ctx_pad1_1:0',
                       'ctx_conv1_1/Relu:0',
                       'ctx_conv7_1/Relu:0',
                       'ctx_fc1/Relu:0',
                       'ctx_final/BiasAdd:0',
                       'ctx_upsample/Relu:0',
                       )
            for output in outputs:
                print("Checking", output)
                # NOTE(review): interactive breakpoint on every iteration —
                # debugging leftover; remove before unattended runs.
                import pdb
                pdb.set_trace()
                model = graph.get_tensor_by_name(output)
                outp = os.path.join('/home/josephz/ws/git/ml/framework/scripts/dilation/outs/tf', output.split('/')[0])
                if not os.path.isfile(outp + '.npy'):
                    print("Saving to ", outp)
                    y = predict_no_tiles(image, input_tensor, model, dataset, sess, test=test)
                    np.save(outp, y)
            out_tensor = graph.get_tensor_by_name('softmax:0')
            out_tensor = tf.reshape(out_tensor, shape=(1,) + (1080, 1920, 19))
            y = predict_no_tiles(image, input_tensor, out_tensor, dataset, sess, test=False)
        else:
            # Convert colorspace (palette is in RGB) and save prediction result
            predicted_image = predict(input_image, input_tensor, model, dataset, sess, test=test)
            predicted_image = cv2.cvtColor(predicted_image, cv2.COLOR_BGR2RGB)
            cv2.imwrite(output_image_path, predicted_image)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Unit test package for tatortot."""
|
nilq/baby-python
|
python
|
import requests
import re
from bs4 import BeautifulSoup
import logging
import os
import json
# Root listing page for Fortify vulncat weaknesses.
base_url = "https://vulncat.fortify.com/en/weakness?q="
# Directory where per-filter JSON logs are written — assumed to exist; TODO confirm.
logpath=f'{os.getcwd()}/log'
def scrape_url(url):
    """Fetch *url* and return it parsed as a BeautifulSoup tree.

    Best-effort: failures are logged and None is returned instead of raising.
    """
    parsed = None
    try:
        response = requests.get(url)
        parsed = BeautifulSoup(response.text, 'html.parser')
    except requests.exceptions.RequestException as ex:
        logging.warning("There was an error with the request")
        logging.error(ex)
    except Exception as ex:
        logging.warning("An unknown exception has occured")
        logging.error(ex)
    finally:
        return parsed
def get_filter_list(html):
    """Return the category filter ``<input>`` element found in *html*."""
    parsed = BeautifulSoup(html, 'html.parser')
    return parsed.find("input", attrs={"data-filtername": "category"})
def scrape_filters(filtername):
    """Collect the link for every value of the given vulncat filter.

    Scrapes the base weakness page, finds each ``<input>`` whose
    ``data-filtername`` matches *filtername*, and builds a ``{name: url}``
    mapping which is persisted to ``<logpath>/<filtername>.json``.

    Returns the (possibly empty) mapping even when an error occurs.
    """
    soup = scrape_url(base_url)
    logfile = f'{logpath}/{filtername}.json'
    filter_list = {}
    try:
        for data in soup.find_all("input", attrs={"data-filtername": filtername}):
            key = data["data-name"].replace("+", " ")
            logging.info(f"found category '{key}'")
            link = f"https://vulncat.fortify.com/en/weakness?{filtername}={data['data-name']}"
            filter_list[key] = link
    except Exception as ex:
        logging.error(ex)
    finally:
        # Persist whatever was collected, even on partial failure. The
        # original rewrote the whole file on every loop iteration (and
        # pre-truncated it with an unclosed `open().close()`); writing once
        # here is equivalent but far cheaper and leak-free.
        with open(logfile, 'w') as f:
            f.write(json.dumps(filter_list))
        return filter_list
def get_issue_detail(url, soup:BeautifulSoup):
    """Follow every external issue link in *soup* and parse its detail page."""
    for anchor in soup.find_all(class_="external-link"):
        parse_issue_data(f"{url}{anchor['href']}")
def parse_issue_data(url):
    """Print the title and section details of a single weakness page."""
    try:
        page = scrape_url(url)
        print(page.find(class_="detail-title").text)
        content = page.find(class_="tab-content")
        sections = content.find_all(class_="sub-title")
        if sections:
            for section in sections:
                print(section.text + "\n")
                metadata = section.findNext()
                print(metadata.text.replace("[", "\n[").replace(". ", ".\n\n") +"\n\n")
    except Exception as err:
        print("--------ERROR!!! Unable to get explanation of vulnerability")
        print(err)
def navigatePages(soup, base_url):
    """Recursively walk vulncat result pages, scraping each in turn."""
    if soup is None: return
    get_issue_detail(base_url, soup)
    pagination = soup.find(class_="pagination")
    if pagination is None:
        print("Unable to find location of page navigation links")
        return
    active = pagination.find("li", class_="active")
    # Stop once the active entry is missing or is the ">" terminator.
    if not active or active.text == ">":
        return
    following = active.findNext("li")
    if not following:
        return
    href = following.find("a")
    target_url = f"{base_url}{href['href']}"
    print(target_url + "\n")
    r = requests.get(url=target_url)
    next_page = BeautifulSoup(r.text,"html.parser")
    if next_page:
        navigatePages(next_page, base_url)
    else:
        print("No more links")
|
nilq/baby-python
|
python
|
from fastai.vision.all import *
import fastai
from fastai.tabular.all import *
from fastai.data.load import _FakeLoader, _loaders
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import random
# CUSTOM VIS DATABLOCK FUNCTIONS
def get_npy(dataframe):
    """Get the images (.npy) that will be used as input for the model.

    Returns one image path per sample barcode in *dataframe*.
    Relies on the module-level `path` (the data root) — defined elsewhere.
    """
    # The original wrapped this in a lambda that ignored its own argument and
    # closed over the loop variable; a direct comprehension is equivalent and
    # removes the late-binding smell.
    return [path/f'images/{sp}.npy' for sp in dataframe['Barcode']]
def get_y(fname):
    """Return the target yield value for the sample encoded in *fname*."""
    # The barcode is the file stem: .../images/<Barcode>.npy
    barcode = str(fname).split(sep='/')[-1].replace('.npy', '')
    row = mixed_df[mixed_df["Barcode"] == barcode]
    return float(row['Yield'])
def mix_npy_blocks(img):
    """Build the plot image with random flip/stack augmentation.

    Cuts two 40x20 halves out of *img*, randomly flips each half vertically
    and/or horizontally, then stacks them side by side in random order,
    giving a (40, 40, C) array.
    """
    half_a = img[:40, :20, :]
    half_b = img[40:80, :20, :]
    # NOTE: the five random.choice calls below happen in a fixed order so the
    # random stream matches the original implementation exactly.
    if random.choice([True, False]):
        half_a = np.flip(half_a[:, :, :], axis=0)  # vertical flip
    if random.choice([True, False]):
        half_b = np.flip(half_b[:, :, :], axis=0)  # vertical flip
    if random.choice([True, False]):
        half_a = np.flip(half_a[:, :, :], axis=1)  # horizontal flip
    if random.choice([True, False]):
        half_b = np.flip(half_b[:, :, :], axis=1)  # horizontal flip
    if random.choice([True, False]):
        return np.hstack((half_a, half_b))
    return np.hstack((half_b, half_a))
def vegetation_idxs(img):
    """Calculate vegetation indices and append them as new bands.

    Expects a (40, 40, 5) multispectral image; returns a (40, 40, 13) array
    whose first 5 bands are the input and bands 5-12 are the indices below.
    """
    e = 0.00015  # Add a small value to avoid division by zero
    im = img
    ndvi = np.divide(np.subtract(im[:,:,4], im[:,:,2]), (np.add(im[:,:,4], im[:,:,2])+e))
    ndvi_re = (im[:,:,4] - im[:,:,3]) / ((im[:,:,4] + im[:,:,3]) + e)
    # BUG FIX: the denominator previously summed band 3 with itself
    # (im[..,3] + im[..,3]); NDRE is (NIR - RedEdge) / (NIR + RedEdge),
    # matching the numerator's bands.
    ndre = (im[:,:,3] - im[:,:,2]) / ((im[:,:,3] + im[:,:,2]) + e)
    envi = ((im[:,:,4] + im[:,:,1]) - (2 * im[:,:,0])) / (((im[:,:,4] - im[:,:,1]) + (2 * im[:,:,0])) + e)
    ccci = ndvi_re / (ndvi + e)
    gndvi = (im[:,:,4] - im[:,:,1])/ ((im[:,:,4] + im[:,:,1]) + e)
    gli = ((2* im[:,:,1]) - im[:,:,0] - im[:,:,2]) / (((2* im[:,:,1]) + im[:,:,0] + im[:,:,2]) + e)
    osavi = ((im[:,:,4] - im[:,:,3])/ ((im[:,:,4] + im[:,:,3] + 0.16)) *(1 + 0.16) + e)
    vi_list = [ndvi, ndvi_re, ndre, envi, ccci, gndvi , gli, osavi]
    # Original bands first, the eight indices stacked after them.
    vis = np.zeros((40,40,13))
    vis_stacked = np.stack(vi_list, axis=2)
    vis[:,:,:5] = im
    vis[:,:,5:] = vis_stacked
    return vis
def load_npy(fn):
    """Load a .npy plot image, augment it, and return a channels-first tensor."""
    arr = np.load(str(fn), allow_pickle=True)
    arr = arr * 3  # increase image signal
    # Zero-pad up to 100x100 before the block augmentation.
    d0, d1, _c = arr.shape
    arr = np.pad(arr, ((0, 100 - d0), (0, 100 - d1), (0, 0)), mode='constant', constant_values=0)
    arr = mix_npy_blocks(arr)    # Add transforms and stacking
    arr = vegetation_idxs(arr)   # Add vegetation indexes bands
    # Normalise bands by deleting no-data values
    for band in range(13):
        arr[:, :, band] = np.clip(arr[:, :, band], 0, 1)
    # numpy is (H, W, C) but torch wants (C, H, W); one transpose replaces
    # the original pair of swapaxes calls.
    arr = np.transpose(arr, (2, 0, 1))
    arr = np.nan_to_num(arr)
    return torch.from_numpy(arr)
class MSITensorImage(TensorImage):
    """fastai `TensorImage` subclass for 13-band multispectral .npy images."""
    # Default show() kwargs. NOTE(review): 'Rdb' looks like a typo for the
    # matplotlib 'RdBu' colormap — confirm.
    _show_args = {'cmap':'Rdb'}
    def show(self, channels=3, ctx=None, vmin=None, vmax=None, **kwargs):
        "Visualise the images"
        # channels == 3 renders an RGB composite; any other value is treated
        # as a single band index.
        if channels == 3 :
            return show_composite(self, 3, ctx=ctx, **{**self._show_args, **kwargs})
        else:
            return show_single_channel(self, channels, ctx=ctx, **{**self._show_args, **kwargs} )
    @classmethod
    def create(cls, fn:(Path, str), **kwargs) -> None:
        " Uses the load fn the array and turn into tensor"
        return cls(load_npy(fn))
    def __repr__(self): return f'{self.__class__.__name__} size={"x".join([str(d) for d in self.shape])}'
def MSITensorBlock(cls=MSITensorImage):
    """A `TransformBlock` for numpy array images."""
    # The block's type transform delegates to the class's `create` factory;
    # no batch transforms are applied.
    return TransformBlock(type_tfms=cls.create, batch_tfms=None)
def root_mean_squared_error(p, y):
    """Root-mean-squared error between flattened predictions and targets."""
    flat_p = p.view(-1)
    flat_y = y.view(-1)
    return torch.sqrt(F.mse_loss(flat_p, flat_y))
def create_rgb(img):
    """Assemble an (H, W, 3) RGB array from a channels-first image for show_batch.

    Channel order is reversed: output R comes from input band 2 and output B
    from band 0, mirroring the original implementation.
    """
    rgb = np.zeros((3, 40, 40))
    rgb[0], rgb[1], rgb[2] = img[2], img[1], img[0]
    # One transpose replaces the original pair of swapaxes calls:
    # (C, H, W) -> (H, W, C), pyplot's expected layout.
    return np.transpose(rgb, (1, 2, 0))
def show_composite(img, channels, ax=None,figsize=(3,3), title=None, scale=True,
                   ctx=None, vmin=0, vmax=1, scale_axis=(0,1), **kwargs)->plt.Axes:
    "Show three channel composite"
    ax = ifnone(ax, ctx)
    # BUG FIX: create axes when neither `ax` nor `ctx` was given — previously
    # this crashed with AttributeError on None (sibling show_single_channel
    # already handles this case the same way).
    if ax is None:
        _, ax = plt.subplots(figsize=figsize)
    RGBim = create_rgb(img)
    ax.imshow(RGBim)
    ax.axis('off')
    if title is not None: ax.set_title(title)
    return ax
def show_single_channel(img, channel, ax=None, figsize=(3,3), ctx=None,
                        title=None, **kwargs) -> plt.Axes:
    "Show a single channel of a multispectral image"
    ax = ifnone(ax, ctx)
    if ax is None: _, ax = plt.subplots(figsize=figsize)
    tempim = img.data.cpu().numpy()
    if tempim.ndim >2:
        ax.imshow(tempim[channel,:,:])
        ax.axis('off')
        # BUG FIX: the original titles interpolated an undefined global
        # `fname` (f'{fname} with {title}'), raising NameError whenever a
        # title was supplied; show the provided title instead.
        if title is not None: ax.set_title(title)
    else:
        ax.imshow(tempim)
        ax.axis('off')
        if title is not None: ax.set_title(title)
    return ax
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
import re
from cStringIO import StringIO
from datetime import date, datetime, timedelta
from psycopg2.extensions import AsIs, Binary, QuotedString
from pytz import timezone
class PostgresWriter(object):
"""Base class for :py:class:`mysql2pgsql.lib.postgres_file_writer.PostgresFileWriter`
and :py:class:`mysql2pgsql.lib.postgres_db_writer.PostgresDbWriter`.
"""
def __init__(self, file_options, tz=False):
index_prefix = file_options.get("index_prefix")
self.column_types = {}
self.log_detail = '\n%s\n'%(file_options['destination']['postgres']['database'])
self.is_gpdb = file_options.get("is_gpdb")
self.index_prefix = index_prefix if index_prefix else ''
if tz:
self.tz = timezone('UTC')
self.tz_offset = '+00:00'
else:
self.tz = None
self.tz_offset = ''
""" 'UPPER_ID' is different with '"column"' in CREATE statement:
'UPPER_ID' will create column with name 'upper_id'
'"UPPER_ID"' will create column with name 'UPPER_ID'
"""
def column_description(self, column):
return '"%s" %s' % (column['name'], self.column_type_info(column))
def column_type(self, column):
hash_key = hash(frozenset(column.items()))
self.column_types[hash_key] = self.column_type_info(column).split(" ")[0]
return self.column_types[hash_key]
    def column_type_info(self, column):
        """Map a MySQL column description to a full PostgreSQL type clause.

        Returns e.g. ``integer DEFAULT 0 NOT NULL`` — the bare type plus
        DEFAULT and NOT NULL decorations, derived from the MySQL type string
        in ``column['type']``.
        """
        null = "" if column['null'] else " NOT NULL"
        def get_type(column):
            """This in conjunction with :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader._convert_type`
            determines the PostgreSQL data type. In my opinion this is way too fugly, will need
            to refactor one day.
            """
            # t(v): "value is present" (not None); used to gate DEFAULT clauses.
            t = lambda v: not v == None
            default = (' DEFAULT %s' % QuotedString(column['default']).getquoted()) if t(column['default']) else None
            if column['type'] == 'char':
                default = ('%s::char' % default) if t(default) else None
                return default, 'character(%s)' % column['length']
            elif column['type'] == 'varchar':
                default = ('%s::character varying' % default) if t(default) else None
                return default, 'character varying(%s)' % column['length']
            elif column['type'] == 'json':
                default = None
                return default, 'json'
            elif column['type'] == 'integer':
                default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
                return default, 'integer'
            elif column['type'] == 'bigint':
                default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
                return default, 'bigint'
            elif column['type'] == 'tinyint':
                default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
                return default, 'smallint'
            elif column['type'] == 'boolean':
                default = (" DEFAULT %s" % ('true' if int(column['default']) == 1 else 'false')) if t(default) else None
                return default, 'boolean'
            elif column['type'] == 'float':
                default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
                return default, 'real'
            elif column['type'] == 'float unsigned':
                default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
                return default, 'real'
            elif column['type'] in ('numeric', 'decimal'):
                default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
                return default, 'numeric(%s, %s)' % (column['length'] or 20, column['decimals'] or 0)
            elif column['type'] == 'double precision':
                default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
                return default, 'double precision'
            elif column['type'] == 'datetime' or column['type'].startswith('datetime('):
                default = None
                if self.tz:
                    return default, 'timestamp with time zone'
                else:
                    return default, 'timestamp without time zone'
            elif column['type'] == 'date':
                default = None
                return default, 'date'
            elif column['type'] == 'timestamp':
                if column['default'] == None:
                    default = None
                elif "current_timestamp()" in column['default']:
                    default = ' DEFAULT CURRENT_TIMESTAMP'
                elif "CURRENT_TIMESTAMP" in column['default']:
                    default = ' DEFAULT CURRENT_TIMESTAMP'
                elif "0000-00-00 00:00" in column['default']:
                    # NOTE(review): the two branches below are nested under the
                    # `self.tz` check, so the non-tz zero-date fallbacks only
                    # apply when tz is set — looks mis-nested; confirm intent.
                    if self.tz:
                        default = " DEFAULT '1970-01-01T00:00:00.000000%s'" % self.tz_offset
                    elif "0000-00-00 00:00:00" in column['default']:
                        default = " DEFAULT '1970-01-01 00:00:00'"
                    else:
                        default = " DEFAULT '1970-01-01 00:00'"
                if self.tz:
                    return default, 'timestamp with time zone'
                else:
                    return default, 'timestamp without time zone'
            elif column['type'] == 'time' or column['type'].startswith('time('):
                default = " DEFAULT NOW()" if t(default) else None
                if self.tz:
                    return default, 'time with time zone'
                else:
                    return default, 'time without time zone'
            elif column['type'] in ('blob', 'binary', 'longblob', 'mediumblob', 'tinyblob', 'varbinary'):
                return default, 'bytea'
            elif column['type'].startswith('binary(') or column['type'].startswith('varbinary('):
                return default, 'bytea'
            elif column['type'] in ('tinytext', 'mediumtext', 'longtext', 'text'):
                return default, 'text'
            elif column['type'].startswith('enum'):
                default = (' %s::character varying' % default) if t(default) else None
                enum = re.sub(r'^enum\(|\)$', '', column['type'])
                # TODO: will work for "'.',',',''''" but will fail for "'.'',','.'"
                max_enum_size = max([len(e.replace("''", "'")) for e in enum.split("','")])
                return default, ' character varying(%s) check("%s" in (%s))' % (max_enum_size, column['name'], enum)
            elif column['type'].startswith('bit('):
                return ' DEFAULT %s' % column['default'].upper() if column['default'] else column['default'], 'varbit(%s)' % re.search(r'\((\d+)\)', column['type']).group(1)
            elif column['type'].startswith('set('):
                if default:
                    default = ' DEFAULT ARRAY[%s]::text[]' % ','.join(QuotedString(
                        v).getquoted() for v in re.search(r"'(.*)'", default).group(1).split(','))
                return default, 'text[]'
            else:
                raise Exception('unknown %s' % column['type'])
        default, column_type = get_type(column)
        """Refactor for GPDB."""
        # Auto-increment columns become sequence-backed defaults (not on GPDB).
        if not self.is_gpdb and column.get('auto_increment', None):
            return '%s DEFAULT nextval(\'"%s_%s_seq"\'::regclass) NOT NULL' % (
                column_type, column['table_name'], column['name'])
        return '%s%s%s' % (column_type, (default if not default == None else ''), null)
"""QuotedString API: http://initd.org/psycopg/docs/extensions.html?highlight=quotedstring#psycopg2.extensions.QuotedString
ERROR:
UnicodeEncodeError: 'latin-1' codec can't encode characters in position 18-19: ordinal not in range(256)
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe5 in position 16: ordinal not in range(128)
"""
def table_comments(self, table):
    """Generate COMMENT ON statements for a table and its columns.

    Returns a list of SQL strings. Comments are encoded to UTF-8 before
    being passed to psycopg2's QuotedString, because under Python 2 a
    unicode object triggers the UnicodeEncodeError/UnicodeDecodeError
    noted earlier in this file.
    """
    comments = []
    if table.comment:
        table_comment = QuotedString(table.comment.encode('utf8')).getquoted()
        comments.append('COMMENT ON TABLE {} is {};'.format(table.name, table_comment))
    for column in table.columns:
        if column['comment']:
            # Bug fix: column comments were previously quoted without the
            # UTF-8 encoding applied to the table comment above, which made
            # non-latin-1 column comments fail. Encode consistently.
            column_comment = QuotedString(column['comment'].encode('utf8')).getquoted()
            comments.append('COMMENT ON COLUMN {}.{} is {};'.format(
                table.name, column['name'], column_comment))
    return comments
def process_row(self, table, row):
    """Examines row data from MySQL and alters
    the values when necessary to be compatible with
    sending to PostgreSQL via the copy command
    """
    # NOTE: this is Python 2 code (`unicode`, `basestring`, `print e.message`).
    # `row` is mutated in place, one cell per column.
    for index, column in enumerate(table.columns):
        # Cache key: a column is identified by the frozenset of its metadata.
        hash_key = hash(frozenset(column.items()))
        column_type = self.column_types[hash_key] if hash_key in self.column_types else self.column_type(column)
        if row[index] == None and ('timestamp' not in column_type or not column['default']):
            # '\N' (backslash-N) is the NULL marker of the PostgreSQL COPY format.
            row[index] = '\N'
        elif row[index] == None and column['default']:
            # NULL timestamp with a column default: substitute the epoch.
            if self.tz:
                row[index] = '1970-01-01T00:00:00.000000' + self.tz_offset
            else:
                row[index] = '1970-01-01 00:00:00'
        elif 'bit' in column_type:
            # MySQL bit values arrive as single bytes; emit a binary string.
            row[index] = bin(ord(row[index]))[2:]
        elif isinstance(row[index], (str, unicode, basestring)):
            if column_type == 'bytea':
                # psycopg2 Binary quoting, with quote/escape trimming for COPY.
                row[index] = Binary(row[index]).getquoted()[1:-8] if row[index] else row[index]
            elif 'text[' in column_type:
                # MySQL SET value -> PostgreSQL array literal.
                row[index] = '{%s}' % ','.join('"%s"' % v.replace('"', r'\"') for v in row[index].split(','))
            else:
                # Escape characters that are special in the COPY text format;
                # NUL bytes are dropped since PostgreSQL text cannot store them.
                row[index] = row[index].replace('\\', r'\\').replace('\n', r'\n').replace(
                    '\t', r'\t').replace('\r', r'\r').replace('\0', '')
        elif column_type == 'boolean':
            # We got here because you used a tinyint(1), if you didn't want a bool, don't use that type
            row[index] = 't' if row[index] not in (None, 0) else 'f' if row[index] == 0 else row[index]
        elif isinstance(row[index], (date, datetime)):
            if isinstance(row[index], datetime) and self.tz:
                try:
                    if row[index].tzinfo:
                        row[index] = row[index].astimezone(self.tz).isoformat()
                    else:
                        # Naive datetime: attach the configured zone, then format.
                        row[index] = datetime(*row[index].timetuple()[:6], tzinfo=self.tz).isoformat()
                except Exception as e:
                    print e.message
            else:
                row[index] = row[index].isoformat()
        elif isinstance(row[index], timedelta):
            # MySQL TIME -> time-of-day string via seconds since midnight.
            row[index] = datetime.utcfromtimestamp(_get_total_seconds(row[index])).time().isoformat()
        else:
            # Numeric and other types: let psycopg2 render them verbatim.
            row[index] = AsIs(row[index]).getquoted()
def table_attributes(self, table):
    """Collect key metadata and column DDL for *table*.

    Returns (primary_keys, serial_key, maxval, columns_sql) where
    columns_sql is the newline/comma separated column definition list.
    """
    primary_keys = []
    serial_key = None
    maxval = None
    column_defs = []
    for column in table.columns:
        if column['auto_increment']:
            serial_key = column['name']
            # Sequences must start at >= 1.
            maxval = column['maxval'] + 1 if column['maxval'] >= 1 else 1
        if column['primary_key']:
            primary_keys.append(column['name'])
        column_defs.append(' %s' % self.column_description(column))
    return primary_keys, serial_key, maxval, ',\n'.join(column_defs)
def truncate(self, table):
    """Build SQL to empty *table*.

    Returns (truncate_sql, serial_key_sql); serial_key_sql resets the
    serial sequence to max(id)+1 (or 1 for an empty table) and is None
    when the table has no auto-increment column.
    """
    serial_key = None
    maxval = None
    for column in table.columns:
        if not column['auto_increment']:
            continue
        serial_key = column['name']
        maxval = column['maxval'] + 1 if column['maxval'] >= 1 else 1
    truncate_sql = 'TRUNCATE "%s" CASCADE;' % table.name
    if not serial_key:
        return (truncate_sql, None)
    serial_key_sql = "SELECT pg_catalog.setval(pg_get_serial_sequence(%(table_name)s, %(serial_key)s), %(maxval)s, true);" % {
        'table_name': QuotedString('"%s"' % table.name).getquoted(),
        'serial_key': QuotedString(serial_key).getquoted(),
        'maxval': maxval}
    return (truncate_sql, serial_key_sql)
"""Exclude PRIMARY KEY, create with write_indexes"""
def write_table(self, table):
    """Build DDL for one table.

    Returns (table_sql, serial_key_sql, table_comment_sql): drop/create
    statements, sequence setup for an auto-increment column, and COMMENT ON
    statements (skipped for Greenplum).
    """
    primary_keys, serial_key, maxval, columns = self.table_attributes(table)
    serial_key_sql = []
    table_sql = []
    table_comment_sql = []
    if serial_key:
        # Recreate the backing sequence and position it at max(id).
        serial_key_seq = '%s_%s_seq' % (table.name, serial_key)
        serial_key_sql.append('DROP SEQUENCE IF EXISTS "%s" CASCADE;' % serial_key_seq)
        serial_key_sql.append("""CREATE SEQUENCE "%s" INCREMENT BY 1
        NO MAXVALUE NO MINVALUE CACHE 1;""" % serial_key_seq)
        serial_key_sql.append('SELECT pg_catalog.setval(\'"%s"\', %s, true);' % (serial_key_seq, maxval))
    """ 'CREATE TABLE schema.table' is different with 'CREATE TABLE "schema.table"':
    'CREATE TABLE schema1.table1' will create table in schema1
    'CREATE TABLE "schema1.table1"' will create 'schema1.table1' in selected or public schema
    If using an SQL keyword in scripts, it must be double-quoted, like "user".
    """
    table_sql.append('DROP TABLE IF EXISTS "%s" CASCADE;' % table.name)
    # NOTE(review): CREATE TABLE encodes table.name to UTF-8 while DROP TABLE
    # above does not — confirm both work for non-ASCII names under Python 2.
    table_sql.append('CREATE TABLE "%s" (\n%s\n)\nWITHOUT OIDS;' % (table.name.encode('utf8'), columns))
    if not self.is_gpdb:
        # Greenplum path skips comments (see table_comments).
        table_comment_sql.extend(self.table_comments(table))
    return (table_sql, serial_key_sql, table_comment_sql)
def write_indexes(self, table):
    """Build index DDL for *table*.

    The primary key is added as a constraint; secondary indexes are
    dropped and recreated. On Greenplum (is_gpdb) secondary indexes are
    only logged and skipped.
    """
    index_sql = []
    primary_index = [idx for idx in table.indexes if idx.get('primary', None)]
    index_prefix = self.index_prefix
    if primary_index:
        index_sql.append('ALTER TABLE "%(table_name)s" ADD CONSTRAINT "%(index_name)s_pkey" PRIMARY KEY(%(column_names)s);' % {
            'table_name': table.name,
            'index_name': '%s%s_%s' % (index_prefix, table.name,
                                       '_'.join(primary_index[0]['columns'])),
            'column_names': ', '.join('"%s"' % col for col in primary_index[0]['columns']),
        })
        self.process_log(' create index: '+table.name+'|'+','.join(primary_index[0]['columns'])+'|PRIMARY')
    if self.is_gpdb:
        # Greenplum: log and skip all non-primary indexes, return early.
        for index in table.indexes:
            if 'primary' in index:
                continue
            unique = 'UNIQUE ' if index.get('unique', None) else ''
            self.process_log(' ignore index: '+table.name+'|'+','.join(index['columns'])+ ('|UNIQUE' if unique else ''))
        return index_sql
    '''For Greenplum Database(base on PSQL):
    psycopg2.ProgrammingError: UNIQUE index must contain all columns in the distribution key
    Detail refer to:
    https://stackoverflow.com/questions/40987460/how-should-i-deal-with-my-unique-constraints-during-my-data-migration-from-postg
    http://gpdb.docs.pivotal.io/4320/ref_guide/sql_commands/CREATE_INDEX.html
    EXCERPT: In Greenplum Database, unique indexes are allowed only if the columns of the index key are the same as (or a superset of)
    the Greenplum distribution key. On partitioned tables, a unique index is only supported within an individual partition
    - not across all partitions.
    '''
    for index in table.indexes:
        if 'primary' in index:
            continue
        unique = 'UNIQUE ' if index.get('unique', None) else ''
        index_name = '%s%s_%s' % (index_prefix, table.name, '_'.join(index['columns']))
        # Drop-then-create keeps reruns idempotent.
        index_sql.append('DROP INDEX IF EXISTS "%s" CASCADE;' % index_name)
        index_sql.append('CREATE %(unique)sINDEX "%(index_name)s" ON "%(table_name)s" (%(column_names)s);' % {
            'unique': unique,
            'index_name': index_name,
            'table_name': table.name,
            'column_names': ', '.join('"%s"' % col for col in index['columns']),
        })
        self.process_log(' create index: '+table.name+'|'+','.join(index['columns'])+ ('|UNIQUE' if unique else ''))
    return index_sql
def write_constraints(self, table):
    """Build ALTER TABLE ... ADD FOREIGN KEY statements for *table*.

    Greenplum does not support foreign keys, so they are logged and
    skipped when targeting GPDB.
    """
    constraint_sql = []
    if self.is_gpdb:
        for key in table.foreign_keys:
            self.process_log(' ignore constraints: '+table.name+'|'+key['column']+'| ref:'+key['ref_table']+'.'+key['ref_column'])
        return constraint_sql
    for key in table.foreign_keys:
        constraint_sql.append("""ALTER TABLE "%(table_name)s" ADD FOREIGN KEY ("%(column_name)s")
        REFERENCES "%(ref_table_name)s"(%(ref_column_name)s);""" % {
            'table_name': table.name,
            'column_name': key['column'],
            'ref_table_name': key['ref_table'],
            'ref_column_name': key['ref_column']})
        self.process_log(' create constraints: '+table.name+'|'+key['column']+'| ref:'+key['ref_table']+'.'+key['ref_column'])
    return constraint_sql
def write_triggers(self, table):
    """Build CREATE FUNCTION + CREATE TRIGGER statements for *table*.

    Each MySQL trigger body is wrapped in a plpgsql function named
    fn_<trigger_name>() that returns NULL. GPDB does not support
    triggers, so for GPDB they are only logged and skipped.
    """
    trigger_sql = []
    if self.is_gpdb:
        for key in table.triggers:
            self.process_log(' ignore triggers: '+table.name+'|'+key['name']+'|'+key['event']+'|'+key['timing'])
        return trigger_sql
    for key in table.triggers:
        # Extra dict keys are harmless with %-mapping formatting; only
        # fn_trigger_name, trigger_name and trigger_statement are used here.
        trigger_sql.append("""CREATE OR REPLACE FUNCTION %(fn_trigger_name)s RETURNS TRIGGER AS $%(trigger_name)s$
        BEGIN
        %(trigger_statement)s
        RETURN NULL;
        END;
        $%(trigger_name)s$ LANGUAGE plpgsql;""" % {
            'table_name': table.name,
            'trigger_time': key['timing'],
            'trigger_event': key['event'],
            'trigger_name': key['name'],
            'fn_trigger_name': 'fn_' + key['name'] + '()',
            'trigger_statement': key['statement']})
        # The trigger itself fires the generated function per row.
        trigger_sql.append("""CREATE TRIGGER %(trigger_name)s %(trigger_time)s %(trigger_event)s ON %(table_name)s
        FOR EACH ROW
        EXECUTE PROCEDURE fn_%(trigger_name)s();""" % {
            'table_name': table.name,
            'trigger_time': key['timing'],
            'trigger_event': key['event'],
            'trigger_name': key['name']})
        self.process_log(' create triggers: '+table.name+'|'+key['name']+'|'+key['event']+'|'+key['timing'])
    return trigger_sql
def process_log(self, log):
    """Echo *log* to stdout and append it to the accumulated log detail."""
    print(log)
    self.log_detail = self.log_detail + log + '\n'
def close(self):
    # Abstract hook: concrete writer subclasses must release their resources.
    raise NotImplementedError
def write_contents(self, table, reader):
    # Abstract hook: stream *table* rows from *reader* to the output target.
    raise NotImplementedError
# Original fix for Py2.6: https://github.com/mozilla/mozdownload/issues/73
def _get_total_seconds(dt):
# Keep backward compatibility with Python 2.6 which doesn't have this method
if hasattr(datetime, 'total_seconds'):
return dt.total_seconds()
else:
return (dt.microseconds + (dt.seconds + dt.days * 24 * 3600) * 10**6) / 10**6
|
nilq/baby-python
|
python
|
from app import server as user
if __name__ == "__main__":
    # Start the app's server only when this file is executed directly.
    user.run()
|
nilq/baby-python
|
python
|
"""
Faça um programa que possua um vetor denominado 'A' que armazene 6 números inteiros. O programa deve executar
os seguintes passos.
(a) Atribua os seguintes valores a esse vetor: 1, 0, 5, -2, -5, 7.
(b) Armezene em uma variável inteira (simples) a soma entre os valores das posições A[0], A[1], e A[5] do vetor
e mostre na tela esta soma
(c) Modifique o vetor da posição 4, atribuindo a esta posição o valor 100
(d) Mostre na tela cada valor do vetor A, um em cada linha
"""
A = [1, 0, 5, -2, -5, 7]
soma = A[0] + A[1] + A[5]
print(soma)
A.insert(4, 100)
for num in A:
print(num)
|
nilq/baby-python
|
python
|
"""
Django settings for coralcity project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+=mb2q!t+yg7(m$!_$iki#2*z(+ub^lcas0jx$l2-dp%bp8pt)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['koralcity.herokuapp.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pages.apps.PagesConfig',
'Ages.apps.AgesConfig',
'listings.apps.ListingsConfig',
'realtors.apps.RealtorsConfig',
'accounts.apps.AccountsConfig',
'contacts.apps.ContactsConfig',
'django.contrib.humanize'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'coralcity.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR , 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'coralcity.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR , 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS=[(os.path.join(BASE_DIR, 'coralcity/static'))]
# Media Folder Settings
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
if 'DATABASE_URL' in os.environ:
import dj_database_url
DATABASES = {'default': dj_database_url.config()}
|
nilq/baby-python
|
python
|
# https://jsonapi.org/format/#document-resource-identifier-objects
def build_resource_identifier(type, id):
    """Build a JSON:API resource identifier object from a type and an id."""
    identifier = {"type": type, "id": id}
    return identifier
#https://jsonapi.org/format/#document-meta
def build_meta(meta):
    # Meta objects are free-form per the JSON:API spec, so pass them through.
    return meta
# https://jsonapi.org/format/#document-links
def build_links_object(links):
    """Build a JSON:API links object from a mapping of link descriptions.

    *links* maps each key to a dict holding an 'href' and an optional
    'meta', e.g.:

        {
            'key': {
                'responder': ResponderClass,
                'href': 'http://example.com/comments/{posts.comments}',
                'meta': {"whatever": "data", "here": True}
            },
            "more keys": {...},
            ...
        }
    """
    links_object = {}
    for name, description in links.items():
        # A missing 'meta' key means "no meta" (build_link gets None).
        links_object[name] = build_link(
            description['href'],
            meta=description.get('meta'),
        )
    return links_object
#builds an individual link inside a links object
#returns either a string or a "link object"
# see https://jsonapi.org/format/#document-links
def build_link(url, meta=None):
    """Return *url* unchanged, or a {'href', 'meta'} link object when meta is given."""
    if meta is None:
        return url
    # build_meta() is a pass-through, so the meta value is stored as-is.
    return {'href': url, 'meta': meta}
|
nilq/baby-python
|
python
|
import os
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.rlib.rposix import is_valid_fd
from rpython.rlib.rarithmetic import widen, ovfcheck_float_to_longlong
from rpython.rlib.objectmodel import keepalive_until_here
from rpython.rtyper.annlowlevel import llhelper
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.error import exception_from_saved_errno
from pypy.interpreter.gateway import unwrap_spec
from pypy.module.faulthandler import cintf, dumper
class Handler(object):
    # Per-space faulthandler state. Wraps the C-level pypy_faulthandler_*
    # API (cintf) and keeps references to the app-level file objects whose
    # descriptors the C code writes to, so they stay alive.

    def __init__(self, space):
        "NOT_RPYTHON"
        self.space = space
        self._cleanup_()

    def _cleanup_(self):
        # Reset all held file references (also used by the RPython framework).
        self.fatal_error_w_file = None
        self.dump_traceback_later_w_file = None
        self.user_w_files = None

    def check_err(self, p_err):
        # Translate a C error string (NULL/empty means success) into RuntimeError.
        if p_err:
            raise oefmt(self.space.w_RuntimeError, 'faulthandler: %s',
                        rffi.charp2str(p_err))

    def get_fileno_and_file(self, w_file):
        """Resolve *w_file* (None, an int fd, or a file-like) to (fd, w_file).

        Returns w_file=None when a raw fd was given. Defaults to sys.stderr
        when w_file is None; a file-like is flushed first so pending output
        is not interleaved with the traceback.
        """
        space = self.space
        if space.is_none(w_file):
            w_file = space.sys.get('stderr')
            if space.is_none(w_file):
                raise oefmt(space.w_RuntimeError, "sys.stderr is None")
        elif space.isinstance_w(w_file, space.w_int):
            fd = space.c_int_w(w_file)
            if fd < 0 or not is_valid_fd(fd):
                raise oefmt(space.w_ValueError,
                            "file is not a valid file descriptor")
            return fd, None
        fd = space.c_int_w(space.call_method(w_file, 'fileno'))
        try:
            space.call_method(w_file, 'flush')
        except OperationError as e:
            # Re-raise only asynchronous exceptions (e.g. KeyboardInterrupt).
            if e.async(space):
                raise
            pass  # ignore flush() error
        return fd, w_file

    def setup(self):
        # Install the dump callback into the C-level faulthandler (idempotent
        # on the C side).
        dump_callback = llhelper(cintf.DUMP_CALLBACK, dumper._dump_callback)
        self.check_err(cintf.pypy_faulthandler_setup(dump_callback))

    def enable(self, w_file, all_threads):
        fileno, w_file = self.get_fileno_and_file(w_file)
        self.setup()
        # Keep the file object alive while the C code holds its descriptor.
        self.fatal_error_w_file = w_file
        self.check_err(cintf.pypy_faulthandler_enable(
            rffi.cast(rffi.INT, fileno),
            rffi.cast(rffi.INT, all_threads)))

    def disable(self):
        cintf.pypy_faulthandler_disable()
        self.fatal_error_w_file = None

    def is_enabled(self):
        return bool(widen(cintf.pypy_faulthandler_is_enabled()))

    def dump_traceback(self, w_file, all_threads):
        fileno, w_file = self.get_fileno_and_file(w_file)
        self.setup()
        cintf.pypy_faulthandler_dump_traceback(
            rffi.cast(rffi.INT, fileno),
            rffi.cast(rffi.INT, all_threads),
            llmemory.NULL)
        # The file object must survive until the C call above has finished.
        keepalive_until_here(w_file)

    def dump_traceback_later(self, timeout, repeat, w_file, exit):
        space = self.space
        # Convert seconds to microseconds for the C API, with overflow check.
        timeout *= 1e6
        try:
            microseconds = ovfcheck_float_to_longlong(timeout)
        except OverflowError:
            raise oefmt(space.w_OverflowError, "timeout value is too large")
        if microseconds <= 0:
            raise oefmt(space.w_ValueError, "timeout must be greater than 0")
        fileno, w_file = self.get_fileno_and_file(w_file)
        self.setup()
        self.check_err(cintf.pypy_faulthandler_dump_traceback_later(
            rffi.cast(rffi.LONGLONG, microseconds),
            rffi.cast(rffi.INT, repeat),
            rffi.cast(rffi.INT, fileno),
            rffi.cast(rffi.INT, exit)))
        self.dump_traceback_later_w_file = w_file

    def cancel_dump_traceback_later(self):
        cintf.pypy_faulthandler_cancel_dump_traceback_later()
        self.dump_traceback_later_w_file = None

    def check_signum(self, signum):
        # C helper returns <0 for invalid signals; -1 means "reserved for
        # enable()" (SIGSEGV etc.), other negatives mean out of range.
        err = rffi.cast(lltype.Signed,
                        cintf.pypy_faulthandler_check_signum(signum))
        if err < 0:
            space = self.space
            if err == -1:
                raise oefmt(space.w_RuntimeError,
                            "signal %d cannot be registered, "
                            "use enable() instead", signum)
            else:
                raise oefmt(space.w_ValueError, "signal number out of range")

    def register(self, signum, w_file, all_threads, chain):
        self.check_signum(signum)
        fileno, w_file = self.get_fileno_and_file(w_file)
        self.setup()
        self.check_err(cintf.pypy_faulthandler_register(
            rffi.cast(rffi.INT, signum),
            rffi.cast(rffi.INT, fileno),
            rffi.cast(rffi.INT, all_threads),
            rffi.cast(rffi.INT, chain)))
        # Track the file per signal so it outlives the registration.
        if self.user_w_files is None:
            self.user_w_files = {}
        self.user_w_files[signum] = w_file

    def unregister(self, signum):
        self.check_signum(signum)
        change = cintf.pypy_faulthandler_unregister(
            rffi.cast(rffi.INT, signum))
        if self.user_w_files is not None:
            self.user_w_files.pop(signum, None)
        # True when a handler was actually removed.
        return rffi.cast(lltype.Signed, change) == 1

    def finish(self):
        cintf.pypy_faulthandler_teardown()
        self._cleanup_()
# App-level entry points: each delegates to the per-space Handler instance.
def finish(space):
    "Finalize the faulthandler logic (called from shutdown())"
    space.fromcache(Handler).finish()

@unwrap_spec(all_threads=int)
def enable(space, w_file=None, all_threads=0):
    "enable(file=sys.stderr, all_threads=True): enable the fault handler"
    space.fromcache(Handler).enable(w_file, all_threads)

def disable(space):
    "disable(): disable the fault handler"
    space.fromcache(Handler).disable()

def is_enabled(space):
    "is_enabled()->bool: check if the handler is enabled"
    return space.newbool(space.fromcache(Handler).is_enabled())

@unwrap_spec(all_threads=int)
def dump_traceback(space, w_file=None, all_threads=0):
    """dump the traceback of the current thread into file
    including all threads if all_threads is True"""
    space.fromcache(Handler).dump_traceback(w_file, all_threads)

@unwrap_spec(timeout=float, repeat=int, exit=int)
def dump_traceback_later(space, timeout, repeat=0, w_file=None, exit=0):
    """dump the traceback of all threads in timeout seconds,
    or each timeout seconds if repeat is True. If exit is True,
    call _exit(1) which is not safe."""
    space.fromcache(Handler).dump_traceback_later(timeout, repeat, w_file, exit)

def cancel_dump_traceback_later(space):
    """cancel the previous call to dump_traceback_later()."""
    space.fromcache(Handler).cancel_dump_traceback_later()

@unwrap_spec(signum=int, all_threads=int, chain=int)
def register(space, signum, w_file=None, all_threads=1, chain=0):
    # Register a traceback dump on an arbitrary (non-fatal) signal.
    space.fromcache(Handler).register(signum, w_file, all_threads, chain)

@unwrap_spec(signum=int)
def unregister(space, signum):
    # Returns True when a previously registered handler was removed.
    return space.newbool(space.fromcache(Handler).unregister(signum))
# for tests...
# Each helper deliberately triggers a crash condition so the test suite can
# verify the installed handler reports it.
@unwrap_spec(release_gil=int)
def read_null(space, release_gil=0):
    # Dereference a NULL pointer (optionally with the GIL released).
    if release_gil:
        cintf.pypy_faulthandler_read_null_releasegil()
    else:
        cintf.pypy_faulthandler_read_null()

@unwrap_spec(release_gil=int)
def sigsegv(space, release_gil=0):
    # Raise a segmentation fault.
    if release_gil:
        cintf.pypy_faulthandler_sigsegv_releasegil()
    else:
        cintf.pypy_faulthandler_sigsegv()

def sigfpe(space):
    # Raise a floating-point exception (SIGFPE).
    cintf.pypy_faulthandler_sigfpe()

def sigabrt(space):
    # Raise SIGABRT.
    cintf.pypy_faulthandler_sigabrt()

@unwrap_spec(levels=int)
def stack_overflow(space, levels=100000000):
    # Recurse on the C stack; levels is passed to the C helper as a float.
    levels = float(levels)
    return space.newfloat(cintf.pypy_faulthandler_stackoverflow(levels))
|
nilq/baby-python
|
python
|
from .CreateSource import CreateSource
from .DropSource import DropSource
from .InputKeys import InputKeys
from .OutputKeys import OutputKeys
from .ProducedKeys import (
ProducedKeys,
ProducedLinkKeys,
ProducedHubKeys
)
from .SatelliteQuery import SatelliteQuery
from .SerialiseSatellite import SerialiseSatellite
from .SerialiseSatelliteOwner import SerialiseSatelliteOwner
from .StarData import StarData
from .SatelliteOwnerKeys import SatelliteOwnerKeys
from .StarMerge import StarMerge
|
nilq/baby-python
|
python
|
"""Array with time epochs
"""
# Standard library imports
from collections import namedtuple
from datetime import datetime, timedelta
from typing import Callable, Dict, List, Optional, Tuple, Any, TypeVar
from functools import lru_cache
try:
import importlib.resources as importlib_resources # Python >= 3.7
except ImportError:
import importlib_resources # Python <= 3.6: pip install importlib_resources
# Third party imports
import numpy as np
# Midgard imports
from midgard.dev import exceptions
from midgard.math.unit import Unit
from midgard.math.constant import constant
# Module-level registries: outer key is the class-family name (cls_name),
# inner key is the scale/format name or a (from, to) scale pair.
_SCALES: Dict[str, Dict[str, Callable]] = dict()  # Populated by register_scale()
_CONVERSIONS: Dict[str, Dict[Tuple[str, str], Callable]] = dict()  # Populated by register_scale()
_CONVERSION_HOPS: Dict[str, Dict[Tuple[str, str], List[str]]] = dict()  # Cache for to_scale()
_FORMATS: Dict[str, Dict[str, Callable]] = dict()  # Populated by register_format()
_FORMAT_UNITS: Dict[str, Dict[str, str]] = dict()  # Populated by register_format()
# Type specification: scalar float or numpy array
np_float = TypeVar("np_float", float, np.ndarray)
#######################################################################################################################
# Module functions
#######################################################################################################################
def read_tai_utc():
    """Read the TAI-UTC leap second table bundled with this package.

    Returns a numpy structured array with the fields start/end/offset/
    ref_epoch/factor, parsed from the `_taiutc.txt` resource file.
    """
    package, _, _ = __name__.rpartition(".")
    with importlib_resources.path(package, "_taiutc.txt") as path:
        return np.genfromtxt(
            path,
            names=["start", "end", "offset", "ref_epoch", "factor"],
            comments="#",
            dtype=("f8", "f8", "f8", "f8", "f8"),
            autostrip=True,
        )
def register_scale(
    convert_to: Dict[str, Callable] = None, convert_from: Dict[str, Callable] = None
) -> Callable[[Callable], Callable]:
    """Decorator used to register new time scales

    The scale name is read from the .scale attribute of the Time class.

    Args:
        convert_to:    Functions used to convert to other scales.
        convert_from:  Functions used to convert from other scales.

    Returns:
        Decorator registering scale.
    """
    def wrapper(cls: Callable) -> Callable:
        scale_name = cls.scale
        # Register the scale class itself, then its pairwise conversions.
        _SCALES[cls.cls_name][scale_name] = cls
        conversions = _CONVERSIONS.setdefault(cls.cls_name, dict())
        for to_scale, converter in (convert_to or {}).items():
            conversions[(scale_name, to_scale)] = converter
        for from_scale, converter in (convert_from or {}).items():
            conversions[(from_scale, scale_name)] = converter
        return cls

    return wrapper
def register_format(cls: Callable) -> Callable:
    """Decorator used to register new time formats

    The format name is read from the .fmt attribute of the TimeFormat class.
    """
    name = cls.fmt
    # Record both the format class and its unit, keyed by the format-class
    # family name (cls_name) and then the format name.
    _FORMATS[cls.cls_name][name] = cls
    _FORMAT_UNITS[cls.cls_name][name] = cls.unit
    return cls
def _find_conversion_hops(cls: str, hop: Tuple[str, str]) -> List[Tuple[str, str]]:
    """Calculate the hops needed to convert between scales using breadth first search"""
    start_scale, target_scale = hop
    if start_scale == target_scale:
        return [hop]

    visited = set()
    queue = [(start_scale, [])]
    while queue:
        from_scale, path = queue.pop(0)
        # Expand every registered conversion leaving from_scale.
        for to_scale in [t for f, t in _CONVERSIONS[cls] if f == from_scale]:
            edge = (from_scale, to_scale)
            if to_scale == target_scale:
                return path + [edge]
            if edge not in visited:
                visited.add(edge)
                queue.append((to_scale, path + [edge]))

    raise exceptions.UnknownConversionError(f"Can't convert TimeArray from {start_scale!r} to {target_scale!r}")
######################################################################################################################
# Time classes
######################################################################################################################
class TimeBase(np.ndarray):
"""Base class for TimeArray and TimeDeltaArray"""
scale = None
def __new__(cls, val, fmt, val2=None, _jd1=None, _jd2=None):
    """Create a new TimeArray

    *val* is interpreted according to *fmt*; _jd1/_jd2 optionally carry a
    precomputed two-part Julian date. The resulting array is frozen
    (read-only) after construction.
    """
    if cls.scale is None:
        raise ValueError(f"{cls.__name__} cannot be instantiated. Use Time(val=..., scale={cls.scale!r}) instead")
    if fmt not in cls._formats():
        formats = ", ".join(cls._formats())
        raise exceptions.UnknownSystemError(f"Format {fmt!r} unknown. Use one of {formats}")
    # Convert to numpy array and read format
    fmt_values = cls._formats()[fmt](val, val2, cls.scale)
    val = np.asarray(val)
    if val2 is not None:
        val2 = np.asarray(val2)
        if val2.shape != val.shape:
            raise ValueError(f"'val2' must have the same shape as 'val': {val.shape}")
        if val2.ndim == 0:
            val2 = val2.item()
    if val.ndim == 0:
        val = val.item()
    # Store values on array
    obj = np.asarray(fmt_values.value).view(cls)
    jd1 = fmt_values.jd1 if _jd1 is None else _jd1
    jd2 = fmt_values.jd2 if _jd2 is None else _jd2
    # Validate shape
    fmt_ndim = cls._formats()[fmt].ndim
    if obj.ndim > fmt_ndim:
        # Bug fix: this message previously referenced `self` and `obj_fmt`,
        # neither of which exists inside __new__, so reaching this branch
        # raised NameError instead of the intended ValueError.
        raise ValueError(
            f"{cls.__name__!r} must be a {fmt_ndim - 1} or {fmt_ndim}-dimensional array for format type {fmt}"
        )
    # Freeze: time arrays are immutable, so the jd buffers and the array
    # itself are made read-only.
    if isinstance(jd1, np.ndarray):
        jd1.flags.writeable = False
    if isinstance(jd2, np.ndarray):
        jd2.flags.writeable = False
    super(TimeBase, obj).__setattr__("fmt", fmt)
    super(TimeBase, obj).__setattr__("jd1", jd1)
    super(TimeBase, obj).__setattr__("jd2", jd2)
    if isinstance(obj, np.ndarray):
        obj.flags.writeable = False
    return obj
def __array_finalize__(self, obj):
    """Called automatically when a new TimeArray is created"""
    # `obj` is None for explicit construction (handled in __new__);
    # otherwise this is a view cast or slice of an existing array.
    if obj is None:
        return
    obj_fmt = getattr(obj, "fmt", None)
    # Copy attributes from the original object
    super().__setattr__("fmt", obj_fmt)
    # Prefer pre-sliced jd buffers when the parent prepared them
    # (presumably set by slicing machinery elsewhere — not visible here).
    jd1_sliced = getattr(obj, "_jd1_sliced", None)
    if jd1_sliced is not None:
        super().__setattr__("jd1", jd1_sliced)
    else:
        super().__setattr__("jd1", getattr(obj, "jd1", None))
    jd2_sliced = getattr(obj, "_jd2_sliced", None)
    if jd2_sliced is not None:
        super().__setattr__("jd2", jd2_sliced)
    else:
        super().__setattr__("jd2", getattr(obj, "jd2", None))
    # Validate shape for arrays not created with __new__
    if obj_fmt and self.ndim > 1:
        fmt_ndim = self._formats()[obj_fmt].ndim
        if self.ndim > fmt_ndim:
            raise ValueError(
                f"{type(self).__name__!r} must be a {fmt_ndim - 1} or {fmt_ndim}-dimensional array for format type {obj_fmt}"
            )
    # Freeze: keep views immutable like their parents.
    self.flags.writeable = False
    if self.jd1 is not None and self.jd2 is not None:
        if isinstance(self.jd1, np.ndarray):
            self.jd1.flags.writeable = False
        if isinstance(self.jd2, np.ndarray):
            self.jd2.flags.writeable = False
def __lt__(self, other):
    # Order by Julian date; assumes `other` also exposes a .jd property
    # (defined elsewhere in this class family — not visible here).
    return self.jd < other.jd
# NOTE(review): lru_cache on an instance method is keyed on self (relying on
# __hash__/__eq__ below) and keeps each instance alive for the cache's
# lifetime (ruff B019) — confirm this trade-off is intentional.
@lru_cache()
def to_scale(self, scale: str) -> "TimeBase":
    """Convert to a different scale

    Returns a new array with the same time in the new scale.

    Args:
        scale:  Name of new scale.

    Returns:
        TimeBase representing the same times in the new scale.
    """
    # Don't convert if not necessary
    if scale == self.scale:
        return self
    # Raise error for unknown scales
    if scale not in self._scales():
        scales = ", ".join(self._scales())
        raise exceptions.UnknownSystemError(f"Scale {scale!r} unknown. Use one of {scales}")
    # Simplified conversion if time is None
    if self.shape == () and self.item() == None:  # time is None (`is None` would be the idiomatic test)
        return _SCALES[self.cls_name][scale](val=None, fmt=self.fmt, _jd1=None, _jd2=None)
    # Convert to new scale: direct single-hop conversion when registered
    hop = (self.scale, scale)
    if hop in _CONVERSIONS[self.cls_name]:
        jd1, jd2 = _CONVERSIONS[self.cls_name][hop](self)
        try:
            return self._scales()[scale].from_jds(jd1, jd2, self.fmt)
        except ValueError:
            # Given format does not exist for selected time scale, use default jd
            return self._scales()[scale].from_jds(jd1, jd2, "jd")
    # Otherwise find (and cache) a multi-hop path, then convert stepwise
    if hop not in _CONVERSION_HOPS.setdefault(self.cls_name, {}):
        _CONVERSION_HOPS[self.cls_name][hop] = _find_conversion_hops(self.cls_name, hop)
    converted_time = self
    for one_hop in _CONVERSION_HOPS[self.cls_name][hop]:
        jd1, jd2 = _CONVERSIONS[self.cls_name][one_hop](converted_time)
        try:
            converted_time = self._scales()[one_hop[-1]].from_jds(jd1, jd2, self.fmt)
        except ValueError:
            # Given format does not exist for selected time scale, use default jd
            converted_time = self._scales()[one_hop[-1]].from_jds(jd1, jd2, "jd")
    return converted_time
def subset(self, idx, memo):
    """Create a subset of this time array, memoised by object identity."""
    key = id(self)
    if key in memo:
        return memo[key]
    scale_cls = self._scales()[self.scale]
    sub = scale_cls(
        np.asarray(self)[idx], fmt=self.fmt, _jd1=self.jd1[idx], _jd2=self.jd2[idx]
    )
    memo[key] = sub
    return sub
@classmethod
def insert(cls, a, pos, b, memo):
    """Insert b into a at index pos, memoised by object identity."""
    if id(a) in memo:
        return memo[id(a)][-1]
    if id(b) in memo:
        return memo[id(b)][-1]
    # Align b with a's scale and format before merging.
    b = b if a.scale == b.scale else getattr(b, a.scale)
    b_values = np.asarray(b) if a.fmt == b.fmt else getattr(b, a.fmt)
    new_time = cls._scales()[a.scale](
        np.insert(np.asarray(a), pos, b_values),
        fmt=a.fmt,
        _jd1=np.insert(a.jd1, pos, b.jd1),
        _jd2=np.insert(a.jd2, pos, b.jd2),
    )
    memo[id(a)] = (a, new_time)
    memo[id(b)] = (b, new_time)
    return new_time
@property
def val(self):
    # Plain ndarray view of the stored format values (drops the subclass).
    return np.asarray(self)
@classmethod
def _cls_scale(cls, scale: str) -> "Type[TimeArray]":
    """Check that scale is valid and return corresponding type"""
    registered = cls._scales()
    if scale in registered:
        return registered[scale]
    scales = ", ".join(sorted(registered))
    raise exceptions.UnknownSystemError(f"Scale {scale!r} unknown. Use one of {scales}")
@classmethod
def create(
    cls,
    val: np.ndarray,
    scale: str,
    fmt: str,
    val2: Optional[np.ndarray] = None,
    _jd1: Optional[np.ndarray] = None,
    _jd2: Optional[np.ndarray] = None,
) -> "TimeArray":
    """Factory for creating TimeArrays for different scales

    See each time class for exact optional parameters.

    Args:
        val:    Array of time values.
        scale:  Name of time scale.
        fmt:    Name of time format used to interpret val.
        val2:   Optional second value array (for two-part formats).
        _jd1:   Internal: precomputed first part of the Julian date.
        _jd2:   Internal: precomputed second part of the Julian date.

    Returns:
        Array with times in the given scale.
    """
    return cls._cls_scale(scale)(val, val2=val2, fmt=fmt, _jd1=_jd1, _jd2=_jd2)
@classmethod
def from_jds(cls, jd1: np.ndarray, jd2: np.ndarray, fmt: str) -> "TimeArray":
    """Create a new time array with given Julian dates and format, keep scale
    """
    # Render the two-part Julian date in the requested format, then build
    # the array without re-deriving jd1/jd2.
    fmt_value = cls._formats()[fmt].from_jds(jd1, jd2, cls.scale)
    return cls(val=fmt_value, fmt=fmt, _jd1=jd1, _jd2=jd2)
@classmethod
def _scales(cls):
    # Registered scale classes for this class family (see register_scale).
    return _SCALES.get(cls.cls_name, dict())
@classmethod
def _conversions(cls):
    # Registered pairwise scale conversions for this class family.
    return _CONVERSIONS.get(cls.cls_name, dict())
@property
def SCALES(self):
    # Names of all registered scales.
    return list(self._scales().keys())
@property
def FORMATS(self):
    # Names of all registered formats.
    return list(self._formats().keys())
@property
def CONVERSIONS(self):
    # All registered (from_scale, to_scale) conversion pairs.
    return list(self._conversions().keys())
    def fieldnames(self):
        """Return list of valid attributes for this object

        Produces dotted names of the form "scale", "scale.fmt" and
        "scale.fmt.field" for every scale reachable by conversion and every
        registered format. Named-tuple formats contribute one entry per field.
        """
        # Pick one element to avoid doing calculations on a large array
        obj = self if len(self) == 1 else self[0]
        scales_and_formats = []
        for scale in obj._scales():
            try:
                # Raises UnknownConversionError when no conversion path exists
                _find_conversion_hops(self.cls_name, (obj.scale, scale))
                # Add scales
                scales_and_formats.append(scale)
                scale_time = getattr(obj, scale)
                fmt_cls = obj.cls_name.replace("Array", "Format")
                for fmt in _FORMATS.get(fmt_cls, {}):
                    # Add system fields
                    try:
                        fmt_time = getattr(scale_time, fmt)
                        # Named tuples (e.g. gps week/sec) expose each field separately
                        if isinstance(fmt_time, tuple) and hasattr(fmt_time, "_fields"):
                            for f in fmt_time._fields:
                                scales_and_formats.append(f"{scale}.{fmt}.{f}")
                        else:
                            scales_and_formats.append(f"{scale}.{fmt}")
                    except ValueError:
                        pass  # Skip formats that are invalid for that scale
            except exceptions.UnknownConversionError:
                pass  # Skip systems that cannot be converted to
        return scales_and_formats
    @lru_cache()
    def plot_fields(self):
        """Returns list of attributes that can be plotted

        Filters string-valued fields out of fieldnames(), since text cannot be
        plotted numerically.
        NOTE(review): lru_cache on an instance method keeps every instance alive
        for the lifetime of the cache (ruff B019) — confirm this is intended.
        """
        # Pick one element to avoid doing calculations on a large array
        obj = self if len(self) == 1 else self[0]
        fieldnames = set(self.fieldnames())
        text_fields = set()
        for f in fieldnames:
            attr_value = getattr(obj, f)
            # Exclude numpy string arrays and plain strings
            if isinstance(attr_value, np.ndarray) and attr_value.dtype.type is np.str_:
                text_fields.add(f)
            elif isinstance(attr_value, str):
                text_fields.add(f)
        return list(fieldnames - text_fields)
    def unit(self, field: str = "") -> Tuple[str, ...]:
        """Unit of field

        Defaults to the unit of this object's own format when *field* is empty.
        """
        # mainfield, _, subfield = field.partition(".")
        # Units of formats
        field = self.fmt if not field else field
        if field in self._formats():
            # NOTE(review): _FORMAT_UNITS is initialized per format class elsewhere
            # but indexed by format name here — verify the key layout is consistent
            return _FORMAT_UNITS[field]
        # Units of properties
        else:
            return self._unit
    @lru_cache()
    def to_format(self, fmt: str):
        """Convert this object's Julian dates to the given format, keeping scale."""
        return self._formats()[fmt].from_jds(self.jd1, self.jd2, scale=self.scale)
    def __hash__(self):
        """Hash based on the underlying two-part Julian date data."""
        try:
            return hash(self.jd1.data.tobytes()) + hash(self.jd2.data.tobytes())
        except AttributeError:
            # jd1/jd2 are scalars without a buffer interface; fall back to strings
            return hash(str(self.jd1)) + hash(str(self.jd2))
def __eq__(self, other):
if isinstance(other, self.__class__):
return np.all(self.jd1 == other.jd1) and np.all(self.jd2 == other.jd2)
else:
return NotImplemented
    def __getattr__(self, key):
        """Get attributes with dot notation

        Add time scales and formats to attributes on Time arrays. Dotted keys
        like "scale.fmt" are resolved recursively.
        """
        if "." in key:
            mainfield, _, subfield = key.partition(".")
            return getattr(getattr(self, mainfield), subfield)
        # Convert to a different scale
        if key in self._scales():
            return self.to_scale(key)
        # Convert to a different format
        elif key in self._formats():
            return self.to_format(key)
        # Raise error for unknown attributes
        else:
            raise AttributeError(f"{type(self).__name__!r} has no attribute {key!r}") from None
def __len__(self):
fmt_ndim = self._formats()[self.fmt].ndim
return int(self.size / fmt_ndim)
def __setattr__(self, name, value):
raise AttributeError(f"{self.__class__.__name__} object does not support item assignment ")
def __copy__(self):
return self.create(val=self.val.copy(), fmt=self.fmt, scale=self.scale, _jd1=self.jd1, _jd2=self.jd2)
    def __deepcopy__(self, memo):
        """Deep copy a TimeArray

        The copy is registered in *memo* so shared references are preserved.
        """
        time = self.create(val=self.val.copy(), fmt=self.fmt, scale=self.scale, _jd1=self.jd1, _jd2=self.jd2)
        memo[id(time)] = time
        return time

    # Override numpys copy method # Might not be needed for numpy 1.16 or higher
    copy = __copy__
    def __getitem__(self, item):
        """Update _jd*_sliced with correct shape, used by __array_finalize__

        Uses super().__setattr__ because __setattr__ is overridden to raise.
        Integer indexing returns a new scalar time object instead of a view.
        """
        fmt_ndim = self._formats()[self.fmt].ndim
        if isinstance(item, tuple) and fmt_ndim > 1 and len(item) > 1:
            # Only use item row to slice jds
            # NOTE(review): item[-1] takes the last element of the index tuple —
            # confirm this matches the epoch axis for multi-dimensional formats
            super().__setattr__("_jd1_sliced", self.jd1[item[-1]])
            super().__setattr__("_jd2_sliced", self.jd2[item[-1]])
        else:
            if isinstance(self.jd1, np.ndarray):
                super().__setattr__("_jd1_sliced", self.jd1[item])
            if isinstance(self.jd2, np.ndarray):
                super().__setattr__("_jd2_sliced", self.jd2[item])
        if isinstance(item, (int, np.int_)):
            return self._scales()[self.scale].from_jds(self._jd1_sliced, self._jd2_sliced, self.fmt)
        return super().__getitem__(item)
    @classmethod
    def _read(cls, h5_group, memo):
        """Instantiate a time array from an HDF5 group (scale/fmt attrs + jd datasets)."""
        scale = h5_group.attrs["scale"]
        fmt = h5_group.attrs["fmt"]
        jd1 = h5_group["jd1"][...]
        jd2 = h5_group["jd2"][...]
        time = cls._cls_scale(scale).from_jds(jd1, jd2, fmt)
        # Remember the instance so repeated fields can share one object
        memo[f"{h5_group.attrs['fieldname']}"] = time
        return time
def _write(self, h5_group, memo):
h5_group.attrs["scale"] = self.scale
h5_group.attrs["fmt"] = self.fmt
h5_field = h5_group.create_dataset("jd1", self.jd1.shape, dtype=self.jd1.dtype)
h5_field[...] = self.jd1
h5_field = h5_group.create_dataset("jd2", self.shape, dtype=self.jd2.dtype)
h5_field[...] = self.jd2
def __dir__(self):
"""List all fields and attributes on the Time array"""
return super().__dir__() + list(self._scales()) + list(self._formats())
def __repr__(self):
cls_name = type(self).__name__
repr_str = super().__repr__()
return repr_str.replace(f"{cls_name}(", f"{cls_name}(scale={self.scale!r}, fmt={self.fmt!r}, ")
#
# The main Time class
#
class TimeArray(TimeBase):
    """Base class for time objects. Is immutable to allow the data to be hashable"""

    cls_name = "TimeArray"
    type = "time"
    _SCALES.setdefault(cls_name, dict())
    _unit = None

    @classmethod
    def now(cls, scale="utc", fmt="datetime") -> "TimeArray":
        """Create a new time representing now"""
        # NOTE(review): datetime.now() is local time but is labelled utc here —
        # confirm whether datetime.utcnow() was intended
        jd1, jd2 = cls._formats()["datetime"].to_jds(datetime.now(), scale=scale)
        return cls._cls_scale("utc").from_jds(jd1, jd2, fmt=fmt).to_scale(scale)

    @classmethod
    def empty_from(cls, other: "TimeArray") -> "TimeArray":
        """Create a new time of the same type as other but with empty(datetime.min) values
        """
        # Bug fix: _SCALES is keyed by class name first, then scale, so
        # _SCALES[other.scale] raised KeyError; use the scale registry instead
        return cls._cls_scale(other.scale)(np.full(other.shape, fill_value=datetime.min), fmt="datetime")

    @classmethod
    def _formats(cls):
        """Registered time formats (format name -> format class)"""
        return _FORMATS["TimeFormat"]

    # NOTE(review): decorator order differs from the other properties below
    # (Unit.register before lru_cache) — confirm whether this is intentional
    @property
    @Unit.register(("year",))
    @lru_cache()
    def year(self):
        """Year of each epoch (int for scalar, int array otherwise)"""
        if isinstance(self.datetime, datetime):
            return self.datetime.year
        return np.array([d.year for d in self.datetime])

    @property
    @lru_cache()
    @Unit.register(("month",))
    def month(self):
        """Month of each epoch"""
        if isinstance(self.datetime, datetime):
            return self.datetime.month
        return np.array([d.month for d in self.datetime])

    @property
    @lru_cache()
    @Unit.register(("day",))
    def day(self):
        """Day of month of each epoch"""
        if isinstance(self.datetime, datetime):
            return self.datetime.day
        return np.array([d.day for d in self.datetime])

    @property
    @lru_cache()
    @Unit.register(("hour",))
    def hour(self):
        """Hour of each epoch"""
        if isinstance(self.datetime, datetime):
            return self.datetime.hour
        return np.array([d.hour for d in self.datetime])

    @property
    @lru_cache()
    @Unit.register(("minute",))
    def minute(self):
        """Minute of each epoch"""
        if isinstance(self.datetime, datetime):
            return self.datetime.minute
        return np.array([d.minute for d in self.datetime])

    @property
    @lru_cache()
    @Unit.register(("second",))
    def second(self):
        """Second of each epoch"""
        if isinstance(self.datetime, datetime):
            return self.datetime.second
        return np.array([d.second for d in self.datetime])

    @property
    @lru_cache()
    @Unit.register(("day",))
    def doy(self):
        """Day of year of each epoch"""
        if isinstance(self.datetime, datetime):
            return self.datetime.timetuple().tm_yday
        return np.array([d.timetuple().tm_yday for d in self.datetime])

    @property
    @lru_cache()
    @Unit.register(("second",))
    def sec_of_day(self):
        """Seconds since midnight

        Note  -  Does not support leap seconds

        Returns:
            Seconds since midnight
        """
        if isinstance(self.datetime, datetime):
            return self.datetime.hour * 60 * 60 + self.datetime.minute * 60 + self.datetime.second
        return np.array([d.hour * 60 * 60 + d.minute * 60 + d.second for d in self.datetime])

    @property
    @lru_cache()
    def mean(self):
        """Mean time

        Returns:
            Time: Time object containing the mean time
        """
        if self.size == 1:
            return self
        # NOTE(review): the mean is computed on utc Julian dates but the result is
        # created in self.scale — confirm this cross-scale construction is intended
        return self._cls_scale(self.scale)(np.mean(self.utc.jd), fmt="jd")

    @property
    @lru_cache()
    def min(self):
        """Epoch with the smallest Julian date"""
        return self[np.argmin(self.jd)]

    @property
    @lru_cache()
    def max(self):
        """Epoch with the largest Julian date"""
        return self[np.argmax(self.jd)]

    @property
    @lru_cache()
    def jd_int(self):
        """Integer part of Julian Day

        To ensure consistency, we therefore add two properties `jd_int` and `jd_frac` where the integer part is
        guaranteed to be a "half-integer" (e.g. 2457617.5) and the fractional part is guaranteed to be a float in the
        range [0., 1.). The parts are calculated from `jd1` and `jd2` to preserve precision.

        Returns:
            Numpy-float scalar or array with (half-)integer part of Julian Day.
        """
        return self.jd1 - self._jd_delta

    @property
    @lru_cache()
    def jd_frac(self):
        """Fractional part of Julian Day

        See the docstring of `jd_int` for more information.

        Returns:
            Numpy-float scalar or array with fractional part of Julian Day, in the range [0., 1.).
        """
        return self.jd2 + self._jd_delta

    @property
    @lru_cache()
    def _jd_delta(self):
        """Delta between jd1 and jd_int

        This is a helper function used by `jd_int` and `jd_frac` to find the difference to `jd1` and `jd2`
        respectively. See the docstring of `jd_int` for more information.

        Returns:
            Numpy-float scalar or array with difference between `jd1` and the integer part of Julian Day.
        """
        return self.jd1 - (np.floor(self.jd - 0.5) + 0.5)

    @property
    @lru_cache()
    def mjd_int(self):
        """Integer part of Modified Julian Day

        In general, we have that MJD = JD - 2400000.5. See the docstring of `jd_int` for more information.

        Returns:
            Numpy-float scalar or array with the integer part of Modified Julian Day.
        """
        return self.jd_int - 2_400_000.5

    @property
    @lru_cache()
    def mjd_frac(self):
        """Fractional part of Modified Julian Day

        See the docstring of `jd_int` for more information. The way we have defined `jd_int` and `jd_frac` means that
        `mjd_frac` will be equal to `jd_frac`.

        Returns:
            Numpy-float scalar or array with the fractional part of Modified Julian Day, in the range [0., 1.).
        """
        return self.jd_frac

    def __add__(self, other):
        """self + other"""
        if self.scale != other.scale:
            return NotImplemented
        if isinstance(other, TimeDeltaArray):
            # time + timedelta
            jd2 = self.jd2 + other.days
            return self.from_jds(self.jd1, jd2, self.fmt)
        elif isinstance(other, TimeArray):
            # time1 + time2 does not make sense
            return NotImplemented
        return NotImplemented

    def __sub__(self, other):
        """self - other"""
        if self.scale != other.scale:
            return NotImplemented
        if isinstance(other, TimeDeltaArray):
            # time - timedelta -> time
            jd1 = self.jd1 - other.jd1
            jd2 = self.jd2 - other.jd2
            # Bug fix: the computed jd1 was discarded and self.jd1 passed instead,
            # so whole-day parts of the timedelta were silently ignored
            return self.from_jds(jd1, jd2, self.fmt)
        elif isinstance(other, TimeArray):
            # time - time -> timedelta
            jd1 = self.jd1 - other.jd1
            jd2 = self.jd2 - other.jd2
            fmt = "timedelta" if self.fmt == other.fmt == "datetime" else "jd"
            return _SCALES["TimeDeltaArray"][self.scale].from_jds(jd1, jd2, fmt)
        return NotImplemented

    # Turn off remaining arithmetic operations
    def __rsub__(self, _):
        """ other - self"""
        return NotImplemented

    def __radd__(self, _):
        """other + self"""
        return NotImplemented

    def __iadd__(self, _):
        """Immutable object does not support this operation"""
        return NotImplemented

    def __isub__(self, _):
        """Immutable object does not support this operation"""
        return NotImplemented
class TimeDeltaArray(TimeBase):
    """Base class for time delta objects. Is immutable to allow the data to be hashable"""

    cls_name = "TimeDeltaArray"
    type = "time_delta"
    _SCALES.setdefault(cls_name, dict())
    _unit = None

    @classmethod
    def empty_from(cls, other: "TimeDeltaArray") -> "TimeDeltaArray":
        """Create a new time delta of the same type as other but with zero values
        """
        # Bug fix: _SCALES is keyed by class name first, then scale, so
        # _SCALES[other.scale] raised KeyError; use the scale registry instead
        return cls._cls_scale(other.scale)(np.full(other.shape, fill_value=timedelta(seconds=0)), fmt="timedelta")

    @lru_cache()
    def plot_fields(self):
        """Returns list of attributes that can be plotted"""
        # Pick one element to avoid doing calculations on a large array
        obj = self if len(self) == 1 else self[0]
        scales_and_formats = []
        try:
            # Add scale
            scales_and_formats.append(obj.scale)
            fmt_cls = obj.cls_name.replace("Array", "Format")
            for fmt in _FORMATS.get(fmt_cls, {}):
                # Add system fields
                try:
                    fmt_time = getattr(obj, fmt)
                    if isinstance(fmt_time, np.ndarray) and fmt_time.dtype.type is np.str_:
                        # Skip string formats
                        continue
                    if isinstance(fmt_time, str):
                        # Skip string formats
                        continue
                    scales_and_formats.append(f"{obj.scale}.{fmt}")
                except ValueError:
                    pass  # Skip formats that are invalid for that scale
        except exceptions.UnknownConversionError:
            pass  # Skip systems that cannot be converted to
        return scales_and_formats

    @classmethod
    def _formats(cls):
        """Registered time delta formats (format name -> format class)"""
        return _FORMATS["TimeDeltaFormat"]

    def __add__(self, other):
        """self + other """
        if self.scale != other.scale:
            return NotImplemented
        if isinstance(other, TimeDeltaArray):
            # timedelta + timedelta -> timedelta
            # Bug fix: both parts were computed as self.jd1 + other.jd2,
            # mixing the integer and fractional Julian date parts
            jd1 = self.jd1 + other.jd1
            jd2 = self.jd2 + other.jd2
            return self.from_jds(jd1, jd2, fmt=self.fmt)
        elif isinstance(other, TimeArray):
            # timedelta + time -> time
            jd1 = self.jd1 + other.jd1
            jd2 = self.jd2 + other.jd2
            return other.from_jds(jd1, jd2, fmt=other.fmt)
        return NotImplemented

    def __sub__(self, other):
        """self - other"""
        if self.scale != other.scale:
            return NotImplemented
        if isinstance(other, TimeArray):
            # timedelta - time -> time
            # NOTE(review): "timedelta - time" is an unusual operation — confirm callers rely on it
            jd1 = self.jd1 - other.jd1
            jd2 = self.jd2 - other.jd2
            return other.from_jds(jd1, jd2, fmt=other.fmt)
        elif isinstance(other, TimeDeltaArray):
            # timedelta - timedelta -> timedelta
            # Bug fix: the fractional part was computed from self.jd1 instead of self.jd2
            jd1 = self.jd1 - other.jd1
            jd2 = self.jd2 - other.jd2
            return self.from_jds(jd1, jd2, fmt=self.fmt)
        return NotImplemented

    # Turn off remaining arithmetic operations
    def __radd__(self, _):
        """other + self"""
        return NotImplemented

    def __rsub__(self, _):
        """other - self"""
        return NotImplemented

    def __iadd__(self, _):
        """Immutable object does not support this operation"""
        return NotImplemented

    def __isub__(self, _):
        """Immutable object does not support this operation"""
        return NotImplemented
#######################################################################################################################
# Time scales
#######################################################################################################################
# Time deltas
def delta_tai_utc(time: "TimeArray") -> "np_float":
    """Difference TAI - UTC (leap seconds) in days for the given epochs

    Looks up the applicable interval in the _TAIUTC leap-second table for each
    epoch. The sign is chosen so the returned delta can be added to *time* to
    move towards the other scale.
    """
    # Find the table row whose [start, end) interval contains each epoch
    try:
        idx = [np.argmax(np.logical_and(t.jd >= _TAIUTC["start"], t.jd < _TAIUTC["end"])) for t in time]
    except TypeError:
        idx = np.argmax(np.logical_and(time.jd >= _TAIUTC["start"], time.jd < _TAIUTC["end"]))
    delta = _TAIUTC["offset"][idx] + (time.mjd - _TAIUTC["ref_epoch"][idx]) * _TAIUTC["factor"][idx]
    if time.scale == "utc":
        return delta * Unit.seconds2day
    else:
        # time.scale is tai
        # The table is indexed by UTC epochs: estimate UTC from TAI first,
        # then redo the lookup at the estimated UTC epoch
        tmp_utc_jd = time.tai.jd - delta * Unit.seconds2day
        tmp_utc_mjd = time.tai.mjd - delta * Unit.seconds2day
        try:
            idx = [np.argmax(np.logical_and(t >= _TAIUTC["start"], t < _TAIUTC["end"])) for t in tmp_utc_jd]
        except TypeError:
            idx = np.argmax(np.logical_and(tmp_utc_jd >= _TAIUTC["start"], tmp_utc_jd < _TAIUTC["end"]))
        delta = _TAIUTC["offset"][idx] + (tmp_utc_mjd - _TAIUTC["ref_epoch"][idx]) * _TAIUTC["factor"][idx]
        return -delta * Unit.seconds2day
def delta_tai_tt(time: "TimeArray") -> "np_float":
    """Constant offset between TAI and TT (TT = TAI + 32.184 s), in days.

    The sign is chosen so the delta can be added to *time* to move towards the
    other scale.
    """
    offset = 32.184 * Unit.seconds2day
    # Negative when converting away from tt, positive when starting from tai
    return -offset if time.scale == "tt" else offset


def delta_tcg_tt(time: "TimeArray") -> "np_float":
    """Offset between TCG and TT in days, linear in time since the reference epoch."""
    dt = time.jd1 - constant.T_0_jd1 + time.jd2 - constant.T_0_jd2
    if time.scale == "tt":
        return constant.L_G / (1 - constant.L_G) * dt
    # time.scale is tcg
    return -constant.L_G * dt


def delta_gps_tai(time: "TimeArray") -> "np_float":
    """Constant offset between GPS and TAI (TAI = GPS + 19 s), in days."""
    offset = 19 * Unit.seconds2day
    return offset if time.scale == "gps" else -offset
#
# Time scale conversions
#
def _utc2tai(utc: "TimeArray") -> ("np_float", "np_float"):
    """Convert UTC to TAI"""
    return utc.jd1, utc.jd2 + delta_tai_utc(utc)


def _tai2utc(tai: "TimeArray") -> ("np_float", "np_float"):
    """Convert TAI to UTC"""
    return tai.jd1, tai.jd2 + delta_tai_utc(tai)


def _tai2tt(tai: "TimeArray") -> ("np_float", "np_float"):
    """Convert TAI to TT"""
    return tai.jd1, tai.jd2 + delta_tai_tt(tai)


def _tt2tai(tt: "TimeArray") -> ("np_float", "np_float"):
    """Convert TT to TAI"""
    return tt.jd1, tt.jd2 + delta_tai_tt(tt)


def _tt2tcg(tt: "TimeArray") -> ("np_float", "np_float"):
    """Convert TT to TCG"""
    return tt.jd1, tt.jd2 + delta_tcg_tt(tt)


def _tcg2tt(tcg: "TimeArray") -> ("np_float", "np_float"):
    """Convert TCG to TT"""
    return tcg.jd1, tcg.jd2 + delta_tcg_tt(tcg)


def _gps2tai(gps: "TimeArray") -> ("np_float", "np_float"):
    """Convert GPS to TAI"""
    return gps.jd1, gps.jd2 + delta_gps_tai(gps)


def _tai2gps(tai: "TimeArray") -> ("np_float", "np_float"):
    """Convert TAI to GPS"""
    return tai.jd1, tai.jd2 + delta_gps_tai(tai)
#
# Time scales
#
@register_scale(convert_to=dict(tai=_utc2tai))
class UtcTime(TimeArray):
    """Coordinated Universal Time (UTC) time scale"""
    scale = "utc"


@register_scale(convert_to=dict(utc=_tai2utc, tt=_tai2tt, gps=_tai2gps))
class TaiTime(TimeArray):
    """International Atomic Time (TAI) time scale"""
    scale = "tai"


@register_scale(convert_to=dict(tt=_tcg2tt))
class TcgTime(TimeArray):
    """Geocentric Coordinate Time (TCG) time scale"""
    scale = "tcg"


@register_scale(convert_to=dict(tai=_gps2tai))
class GpsTime(TimeArray):
    """GPS time scale"""
    scale = "gps"


@register_scale(convert_to=dict(tai=_tt2tai, tcg=_tt2tcg))
class TtTime(TimeArray):
    """Terrestrial Time (TT) time scale"""
    scale = "tt"
#
# Time Delta scales
#
@register_scale(convert_to=dict())
class UtcTimeDelta(TimeDeltaArray):
    """Time differences in the UTC scale"""
    scale = "utc"


@register_scale(convert_to=dict())
class TaiTimeDelta(TimeDeltaArray):
    """Time differences in the TAI scale"""
    scale = "tai"


@register_scale(convert_to=dict())
class TcgTimeDelta(TimeDeltaArray):
    """Time differences in the TCG scale"""
    scale = "tcg"


@register_scale(convert_to=dict())
class GpsTimeDelta(TimeDeltaArray):
    """Time differences in the GPS scale"""
    scale = "gps"


@register_scale(convert_to=dict())
class TtTimeDelta(TimeDeltaArray):
    """Time differences in the TT scale"""
    scale = "tt"
######################################################################################################################
# Formats
######################################################################################################################
#
# Time formats
#
class TimeFormat:
    """Base class for time formats

    A format converts between user-facing time values and the internal
    two-part Julian date representation (jd1, jd2). Subclasses implement
    _to_jds and _from_jds.
    """

    cls_name = "TimeFormat"
    _FORMATS.setdefault(cls_name, dict())
    _FORMAT_UNITS.setdefault(cls_name, dict())
    fmt = None
    unit = None
    ndim = 1  # Number of value components per epoch (e.g. 3 for week/sec/day)
    day2seconds = Unit.day2seconds
    week2days = Unit.week2days

    def __init__(self, val, val2=None, scale=None):
        """Convert val and val2 to Julian days"""
        self.scale = scale
        if val is None:
            self.jd1 = None
            self.jd2 = None
        elif np.asarray(val).size == 0 and np.asarray(val).ndim == 1:  # Empty array
            self.jd1 = np.array([])
            self.jd2 = np.array([])
        else:
            self.jd1, self.jd2 = self.to_jds(val, val2=val2, scale=scale)

    @classmethod
    def to_jds(cls, val, val2=None, scale=None):
        """Convert val and val2 to a two-part Julian date (jd1, jd2)"""
        if val is None and val2 is None:
            return None, None
        return cls._to_jds(val, val2, scale)

    @classmethod
    def _to_jds(cls, val, val2, scale):
        """Format-specific conversion to Julian days; implemented by subclasses"""
        raise NotImplementedError

    @classmethod
    def from_jds(cls, jd1, jd2, scale):
        """Convert Julian days to the right format"""
        if jd1 is None and jd2 is None:
            return None
        return cls._from_jds(jd1, jd2, scale)

    @classmethod
    def _from_jds(cls, jd1, jd2, scale):
        """Format-specific conversion from Julian days; implemented by subclasses"""
        raise NotImplementedError

    @property
    def value(self):
        """Time value(s) in this format, or None when uninitialized"""
        # Bug fix: the second operand tested jd1 twice; check jd2 as intended
        if self.jd1 is None and self.jd2 is None:
            return None
        return self.from_jds(self.jd1, self.jd2, self.scale)
class TimeDeltaFormat(TimeFormat):
    """Base class for Time Delta formats"""
    cls_name = "TimeDeltaFormat"
    # Separate registries from the absolute-time formats
    _FORMATS.setdefault(cls_name, dict())
    _FORMAT_UNITS.setdefault(cls_name, dict())
@register_format
class TimeJD(TimeFormat):
    """Julian Date: continuous count of days, split into a precise two-part form"""
    fmt = "jd"
    unit = ("day",)
    @classmethod
    def _to_jds(cls, val, val2, scale=None):
        """Split val (+ optional val2) so jd1 is a half-integer and jd2 is in [0, 1)"""
        if val2 is None:
            try:
                val2 = np.zeros(val.shape)
            except AttributeError:
                # val is a scalar without a shape
                val2 = 0
        val = np.asarray(val)
        # Shift the split point so jd1 lands on .5 boundaries (precision-preserving)
        _delta = val - (np.floor(val + val2 - 0.5) + 0.5)
        jd1 = val - _delta
        jd2 = val2 + _delta
        return jd1, jd2
    @classmethod
    def _from_jds(cls, jd1, jd2, scale=None):
        """Recombine the two-part Julian date into a single float"""
        return jd1 + jd2
@register_format
class TimeMJD(TimeFormat):
    """Modified Julian Date time format.

    This represents the number of days since midnight on November 17, 1858.
    For example, 51544.0 in MJD is midnight on January 1, 2000.
    """
    fmt = "mjd"
    unit = ("day",)
    # MJD = JD - 2400000.5
    _mjd0 = 2_400_000.5
    @classmethod
    def _to_jds(cls, val, val2, scale=None):
        """Convert MJD value(s) to a two-part Julian date"""
        if val2 is None:
            try:
                val2 = np.zeros(val.shape)
            except AttributeError:
                # val is a scalar without a shape
                val2 = 0
        val = np.asarray(val)
        # Same precision-preserving split as TimeJD, shifted by the MJD epoch
        _delta = val - (np.floor(val + val2 - 0.5) + 0.5)
        jd1 = cls._mjd0 + val - _delta
        jd2 = val2 + _delta
        return jd1, jd2
    @classmethod
    def _from_jds(cls, jd1, jd2, scale=None):
        """Convert a two-part Julian date back to MJD"""
        return jd1 - cls._mjd0 + jd2
@register_format
class TimeDateTime(TimeFormat):
    """Time as Python datetime objects"""
    fmt = "datetime"
    unit = None
    # Julian date and datetime of the 2000-01-01 reference epoch
    _jd2000 = 2_451_544.5
    _dt2000 = datetime(2000, 1, 1)
    @classmethod
    def _to_jds(cls, val, val2=None, scale=None):
        """Convert datetime(s) to two-part Julian date(s); val2 is an optional timedelta offset"""
        try:
            if val2 is not None:
                val = np.asarray(val) + np.asarray(val2)
            # .T turns the array of (jd1, jd2) pairs into a jd1 array and a jd2 array
            return np.array([cls._dt2jd(dt) for dt in val]).T
        except TypeError:
            # val is a single datetime, not iterable
            if val2 is not None:
                val = val + val2
            return cls._dt2jd(val)
    @classmethod
    @lru_cache()
    def _dt2jd(cls, dt):
        """Convert one datetime to one Julian date pair"""
        delta = dt - cls._dt2000
        jd1 = cls._jd2000 + delta.days
        delta -= timedelta(days=delta.days)
        jd2 = delta.total_seconds() / cls.day2seconds
        return jd1, jd2
    @classmethod
    def _from_jds(cls, jd1, jd2, scale=None):
        """Convert two-part Julian date(s) to datetime(s)"""
        try:
            return np.array([cls._jd2dt(j1, j2) for j1, j2 in zip(jd1, jd2)])
        except TypeError:
            # jd1/jd2 are scalars
            return cls._jd2dt(jd1, jd2)
    @classmethod
    @lru_cache()
    def _jd2dt(cls, jd1, jd2):
        """Convert one Julian date to a datetime"""
        return cls._dt2000 + timedelta(days=jd1 - cls._jd2000) + timedelta(days=jd2)
# @register_format
# class TimePlotDate(TimeFormat):
# """Matplotlib date format
#
# Matplotlib represents dates using floating point numbers specifying the number
# of days since 0001-01-01 UTC, plus 1. For example, 0001-01-01, 06:00 is 1.25,
# not 0.25. Values < 1, i.e. dates before 0001-01-01 UTC are not supported.
#
# Warning: This requires matplotlib version 3.2.2 or lower
# """
#
# fmt = "plot_date"
# unit = None
# _jd0001 = 1721424.5 # julian day 2001-01-01 minus 1
#
# def __init__(self, val, val2=None, scale=None):
# """Convert val and val2 to Julian days"""
# print(f"Warning: TimeFormat {self.fmt} is deprecated and requires matplotlib version 3.2.2 or lower. Will be removed in future versions.")
# super().__init__(val, val2, scale)
#
# @classmethod
# def _to_jds(cls, val, val2=None, scale=None):
# print(f"Warning: TimeFormat {cls.fmt} is deprecated and requires matplotlib version 3.2.2 or lower. Will be removed in future versions.")
# if val2 is None:
# try:
# val2 = np.zeros(val.shape)
# except AttributeError:
# val2 = 0
#
# _delta = val - (np.floor(val + val2 - 0.5) + 0.5)
# jd1 = cls._jd0001 + val - _delta
# jd2 = val2 + _delta
# return jd1, jd2
#
# @classmethod
# def _from_jds(cls, jd1, jd2, scale=None):
# print(f"Warning: TimeFormat {cls.fmt} is deprecated and requires matplotlib version 3.2.2 or lower. Will be removed in future versions.")
# return jd1 - cls._jd0001 + jd2
@register_format
class TimeGPSWeekSec(TimeFormat):
    """GPS weeks and seconds."""
    fmt = "gps_ws"
    unit = ("week", "second")
    # Julian date of the GPS epoch 1980-01-06
    _jd19800106 = 2_444_244.5
    WeekSec = namedtuple("week_sec", ["week", "seconds", "day"])
    ndim = len(WeekSec._fields)
    @classmethod
    def _to_jds(cls, val, val2, scale=None):
        """Convert (week, seconds) to a two-part Julian date; gps scale only"""
        if scale != "gps":
            raise ValueError(f"Format {cls.fmt} is only available for time scale gps")
        if isinstance(val, cls.WeekSec):
            week = np.asarray(val.week)
            sec = np.asarray(val.seconds)
        elif val2 is None:
            raise ValueError(f"val2 should be seconds (not {val2}) for format {cls.fmt}")
        else:
            week = np.asarray(val)
            sec = np.asarray(val2)
        # Determine GPS day
        wd = np.floor((sec + 0.5 * cls.day2seconds) / cls.day2seconds)  # 0.5 d = 43200.0 s
        # Determine remainder
        fracSec = sec + 0.5 * cls.day2seconds - wd * cls.day2seconds
        # Conversion GPS week and day to from Julian Date (JD)
        jd_day = week * Unit.week2days + wd + cls._jd19800106 - 0.5
        jd_frac = fracSec / cls.day2seconds
        return jd_day, jd_frac
    @classmethod
    def _from_jds(cls, jd1, jd2, scale=None):
        """Convert a two-part Julian date to a WeekSec named tuple; gps scale only"""
        if scale != "gps":
            raise ValueError(f"Format {cls.fmt} is only available for time scale gps")
        if np.any(jd1 + jd2 < cls._jd19800106):
            raise ValueError(f"Julian Day exceeds the GPS time start date of 6-Jan-1980 (JD {cls._jd19800106})")
        # See Time.jd_int for explanation
        _delta = jd1 - (np.floor(jd1 + jd2 - 0.5) + 0.5)
        jd_int = jd1 - _delta
        jd_frac = jd2 + _delta
        # .. Conversion from Julian Date (JD) to GPS week and day
        wwww = np.floor((jd_int - cls._jd19800106) / cls.week2days)
        wd = np.floor(jd_int - cls._jd19800106 - wwww * cls.week2days)
        gpssec = (jd_frac + wd) * cls.day2seconds
        return cls.WeekSec(wwww, gpssec, wd)
@register_format
class TimeGPSSec(TimeFormat):
    """Number of seconds since the GPS epoch 1980-01-06 00:00:00 UTC."""

    fmt = "gps_seconds"
    # Consistency fix: units are tuples everywhere else (see TimeBase.unit's
    # Tuple[str, ...] return type); this was a bare string
    unit = ("second",)
    # Julian date of the GPS epoch 1980-01-06
    _jd19800106 = 2_444_244.5

    @classmethod
    def _to_jds(cls, val, val2, scale=None):
        """Convert GPS seconds to a two-part Julian date; gps scale only"""
        if scale != "gps":
            raise ValueError(f"Format {cls.fmt} is only available for time scale gps")
        if val2 is not None:
            raise ValueError(f"val2 should be None (not {val2}) for format {cls.fmt}")
        days = np.asarray(val) * Unit.second2day
        days_int = np.floor(days)
        days_frac = days - days_int
        return cls._jd19800106 + days_int, days_frac

    @classmethod
    def _from_jds(cls, jd1, jd2, scale=None):
        """Convert a two-part Julian date to GPS seconds; gps scale only"""
        if scale != "gps":
            raise ValueError(f"Format {cls.fmt} is only available for time scale gps")
        if np.any(jd1 + jd2 < cls._jd19800106):
            raise ValueError(f"Julian Day exceeds the GPS time start date of 6-Jan-1980 (JD {cls._jd19800106})")
        # See Time.jd_int for explanation
        _delta = jd1 - (np.floor(jd1 + jd2 - 0.5) + 0.5)
        days_int = jd1 - _delta - cls._jd19800106
        days_frac = jd2 + _delta
        return (days_int + days_frac) * Unit.day2second
@register_format
class TimeJulianYear(TimeFormat):
    """ Time as year with decimal number. (Ex: 2000.0). Fixed year length."""

    fmt = "jyear"
    unit = ("julian_year",)
    # Julian date and year number of the J2000.0 epoch
    _jd2000 = 2_451_545.0
    _j2000 = 2000

    @classmethod
    def _to_jds(cls, val, val2=None, scale=None):
        """Based on epj2jd.for from SOFA library"""
        if val2 is not None:
            # Bug fix: the message referenced an undefined name `fmt` (NameError
            # on the error path); use cls.fmt
            raise ValueError(f"val2 should be None (not {val2}) for format {cls.fmt}")
        int_part, fraction = np.divmod((val - cls._j2000) * Unit.julian_year2day, 1)
        return cls._jd2000 + int_part, fraction

    @classmethod
    def _from_jds(cls, jd1, jd2, scale=None):
        """Based on epj.for from SOFA library"""
        return cls._j2000 + ((jd1 - cls._jd2000) + jd2) * Unit.day2julian_year
@register_format
class TimeDecimalYear(TimeFormat):
    """Time as year with decimal number. (Ex: 2000.0). Variable year length."""

    fmt = "decimalyear"
    unit = None  # Year length is variable so this does not make sense to apply one value

    @classmethod
    def _to_jds(cls, val, val2=None, scale=None):
        """Convert decimal year(s) to two-part Julian date(s)"""
        if val2 is not None:
            # Bug fix: these messages referenced an undefined name `fmt`
            # (NameError on the error path); use cls.fmt
            raise ValueError(f"val2 should be None (not {val2}) for format {cls.fmt}")
        if scale is None:
            raise ValueError(f"scale must be defined for format {cls.fmt}")
        try:
            return np.array([cls._dy2jd(t, scale) for t in val]).T
        except TypeError:
            # val is a scalar
            return cls._dy2jd(val, scale)

    @classmethod
    @lru_cache()
    def _dy2jd(cls, decimalyear, scale):
        """Convert one decimal year to a Julian date pair"""
        year_int = int(decimalyear)
        year_frac = decimalyear - year_int
        t_start_of_year = TimeArray.create(datetime(year_int, 1, 1), scale=scale, fmt="datetime")
        days = year_frac * cls._year2days(year_int, scale)
        jd = t_start_of_year.jd1 + days  # t_start.jd2 is zero for start of year
        # NOTE(review): int(jd) gives a whole integer, not the half-integer jd1
        # convention used elsewhere — confirm this is acceptable here
        jd1 = int(jd)
        jd2 = jd - jd1
        return jd1, jd2

    @classmethod
    def _from_jds(cls, jd1, jd2, scale=None):
        """Convert two-part Julian date(s) to decimal year(s)"""
        try:
            return np.array([cls._jd2dy(j1, j2, scale) for j1, j2 in zip(jd1, jd2)]).T
        except TypeError:
            return cls._jd2dy(jd1, jd2, scale)

    @classmethod
    @lru_cache()
    def _jd2dy(cls, jd1, jd2, scale):
        """Convert one Julian date pair to a decimal year"""
        year = TimeDateTime._jd2dt(jd1, jd2).year
        t_start_of_year = TimeArray.create(datetime(year, 1, 1), scale=scale, fmt="datetime")
        year2days = cls._year2days(year, scale)
        days = jd1 - t_start_of_year.jd1 + jd2  # t_start.jd2 is zero for start of year
        decimalyear = year + days / year2days
        return decimalyear

    @classmethod
    @lru_cache()
    def _year2days(cls, year, scale):
        """Computes number of days in year, including leap seconds"""
        t_start = TimeArray.create(datetime(year, 1, 1), scale=scale, fmt="datetime")
        t_end = TimeArray.create(datetime(year + 1, 1, 1), scale=scale, fmt="datetime")
        if scale == "utc":
            # Account for leap seconds in UTC by differencing one TAI year
            t_start = getattr(t_start, "tai")
            t_end = getattr(t_end, "tai")
        return (t_end - t_start).days
@register_format
class TimeYyDddSssss(TimeFormat):
    """ Time as 2 digit year, doy and second of day.

    Text based format "yy:ddd:sssss"
        yy    - decimal year without century
        ddd   - zero padded decimal day of year
        sssss - zero padded seconds since midnight

    Note  -  Does not support leap seconds

    Returns:
        Time converted to yydddssss format
    """

    fmt = "yydddsssss"
    unit = None

    @classmethod
    def _to_jds(cls, val, val2=None, scale=None):
        """Convert 'yy:ddd:sssss' string(s) to two-part Julian date(s)"""
        if val2 is not None:
            # Bug fix: the message referenced an undefined name `fmt`; use cls.fmt
            raise ValueError(f"val2 should be None (not {val2}) for format {cls.fmt}")
        try:
            # .T turns the array of (jd1, jd2) pairs into a jd1 array and a jd2 array
            return np.array([cls._yds2jd(v) for v in val]).T
        except TypeError:
            # Bug fix: the scalar branch computed the result but never returned it
            return cls._yds2jd(val)

    @classmethod
    @lru_cache()
    def _yds2jd(cls, val):
        """Convert one 'yy:ddd:sssss' string to a (jd1, jd2) pair"""
        # Bug fixes: timedelta takes `seconds` (not `sec`), and _to_jds must
        # return Julian date pairs, so convert the parsed datetime via _dt2jd
        dt = datetime.strptime(val[:7], "%y:%j:") + timedelta(seconds=float(val[7:]))
        return TimeDateTime._dt2jd(dt)

    @classmethod
    def _from_jds(cls, jd1, jd2, scale=None):
        """Convert two-part Julian date(s) to 'yy:ddd:sssss' string(s)"""
        try:
            return np.array([cls._jd2yds(j1, j2) for j1, j2 in zip(jd1, jd2)])
        except TypeError:
            return cls._jd2yds(jd1, jd2)

    @classmethod
    @lru_cache()
    def _jd2yds(cls, jd1, jd2):
        """Convert one Julian date pair to a 'yy:ddd:sssss' string"""
        dt = TimeDateTime._jd2dt(jd1, jd2)
        delta = (dt - datetime(dt.year, dt.month, dt.day)).seconds
        return dt.strftime("%y:%j:") + str(delta).zfill(5)
@register_format
class TimeYyyyDddSssss(TimeFormat):
    """ Time as 4-digit year, doy and second of day.

    Text based format "yyyy:ddd:sssss"
        yyyy  - decimal year with century
        ddd   - zero padded decimal day of year
        sssss - zero padded seconds since midnight

    Note  -  Does not support leap seconds

    Returns:
        Time converted to yydddssss format
    """

    fmt = "yyyydddsssss"
    unit = None

    @classmethod
    def _to_jds(cls, val, val2=None, scale=None):
        """Convert 'yyyy:ddd:sssss' string(s) to two-part Julian date(s)"""
        if val2 is not None:
            # Bug fix: the message referenced an undefined name `fmt`; use cls.fmt
            raise ValueError(f"val2 should be None (not {val2}) for format {cls.fmt}")
        try:
            # .T turns the array of (jd1, jd2) pairs into a jd1 array and a jd2 array
            return np.array([cls._yds2jd(v) for v in val]).T
        except TypeError:
            # Bug fix: the scalar branch computed the result but never returned it
            return cls._yds2jd(val)

    @classmethod
    @lru_cache()
    def _yds2jd(cls, val):
        """Convert one 'yyyy:ddd:sssss' string to a (jd1, jd2) pair"""
        # Bug fixes: timedelta takes `seconds` (not `sec`), and _to_jds must
        # return Julian date pairs, so convert the parsed datetime via _dt2jd
        dt = datetime.strptime(val[:9], "%Y:%j:") + timedelta(seconds=float(val[9:]))
        return TimeDateTime._dt2jd(dt)

    @classmethod
    def _from_jds(cls, jd1, jd2, scale=None):
        """Convert two-part Julian date(s) to 'yyyy:ddd:sssss' string(s)"""
        try:
            return np.array([cls._jd2yds(j1, j2) for j1, j2 in zip(jd1, jd2)])
        except TypeError:
            return cls._jd2yds(jd1, jd2)

    @classmethod
    @lru_cache()
    def _jd2yds(cls, jd1, jd2):
        """Convert one Julian date pair to a 'yyyy:ddd:sssss' string"""
        dt = TimeDateTime._jd2dt(jd1, jd2)
        delta = (dt - datetime(dt.year, dt.month, dt.day)).seconds
        return dt.strftime("%Y:%j:") + str(delta).zfill(5)
# Text based time formats
class TimeStr(TimeFormat):
    """ Base class for text based time. """

    unit = None
    _dt_fmt = None  # strftime/strptime pattern, set by subclasses

    @classmethod
    def _to_jds(cls, val, val2=None, scale=None):
        """Convert time string(s) to two-part Julian date(s)"""
        if val2 is not None:
            raise ValueError(f"val2 should be None (not {val2}) for format {cls.fmt}")
        if isinstance(val, str):
            return TimeDateTime._dt2jd(cls._str2dt(val))
        else:
            return np.array([TimeDateTime._dt2jd(cls._str2dt(isot)) for isot in val]).T

    @classmethod
    def _from_jds(cls, jd1, jd2, scale=None):
        """Convert two-part Julian date(s) to time string(s)"""
        try:
            return np.array([cls._dt2str(TimeDateTime._jd2dt(j1, j2)) for j1, j2 in zip(jd1, jd2)])
        except TypeError:
            return cls._dt2str(TimeDateTime._jd2dt(jd1, jd2))

    @classmethod
    @lru_cache()
    def _dt2str(cls, dt):
        """Format one datetime as a string"""
        return dt.strftime(cls._dt_fmt)

    @classmethod
    @lru_cache()
    def _str2dt(cls, time_str):
        """Parse one time string, truncating fractional seconds to microseconds"""
        # fractional parts are optional
        main_str, _, fraction = time_str.partition(".")
        # Bug fix: `set(fraction) != "0"` compared a set to a string and was
        # always True; compare against the set {"0"} so all-zero fractions take
        # the simpler fraction-less branch
        if fraction and set(fraction) != {"0"}:
            # Truncate fraction to 6 digits due to limits of datetime
            frac = float(f"0.{fraction}")
            fraction = f"{frac:8.6f}"[2:]
            time_str = f"{main_str}.{fraction}"
            return datetime.strptime(time_str, cls._dt_fmt)
        else:
            fmt_str, _, _ = cls._dt_fmt.partition(".")
            return datetime.strptime(main_str, fmt_str)
@register_format
class TimeIsot(TimeStr):
    """ISO 8601 compliant date-time format “YYYY-MM-DDTHH:MM:SS.sss…” """
    fmt = "isot"
    _dt_fmt = "%Y-%m-%dT%H:%M:%S.%f"


@register_format
class TimeIso(TimeStr):
    """ISO 8601 compliant date-time format “YYYY-MM-DD HH:MM:SS.sss…” without the T"""
    fmt = "iso"
    _dt_fmt = "%Y-%m-%d %H:%M:%S.%f"


@register_format
class TimeYearDoy(TimeStr):
    """Date-time format “YYYY:DDD:HH:MM:SS.sss…” with day of year"""
    fmt = "yday"
    _dt_fmt = "%Y:%j:%H:%M:%S.%f"


@register_format
class TimeDate(TimeStr):
    """Date-only format “YYYY-MM-DD”"""
    fmt = "date"
    _dt_fmt = "%Y-%m-%d"
# Time Delta Formats
@register_format
class TimeDeltaJD(TimeDeltaFormat):
    """Time delta as Julian days"""
    fmt = "jd"
    unit = ("day",)
    @classmethod
    def _to_jds(cls, val, val2, scale=None):
        """Split val (+ optional val2) into integer jd1 and fractional jd2 parts"""
        if val2 is None:
            try:
                val2 = np.zeros(val.shape)
            except AttributeError:
                # val is a scalar without a shape
                val2 = 0
        # Deltas split on whole-day boundaries (no half-day shift as for epochs)
        _delta = val - (np.floor(val + val2))
        jd1 = val - _delta
        jd2 = val2 + _delta
        return jd1, jd2
    @classmethod
    def _from_jds(cls, jd1, jd2, scale=None):
        """Recombine the two-part day delta into a single float"""
        return jd1 + jd2
@register_format
class TimeDeltaSec(TimeDeltaFormat):
    """Time delta in seconds"""

    fmt = "seconds"
    unit = ("second",)

    @classmethod
    def _to_jds(cls, val, val2, scale=None):
        """Convert seconds (val + optional val2) to a two-part day delta"""
        if val2 is None:
            try:
                val2 = np.zeros(val.shape)
            except AttributeError:
                # val is a scalar without a shape
                val2 = 0
        # Bug fix: `val *= Unit.second2day` mutated a caller-owned ndarray in
        # place (and failed for lists); build new arrays instead
        val = np.asarray(val) * Unit.second2day
        val2 = np.asarray(val2) * Unit.second2day
        _delta = val - (np.floor(val + val2))
        jd1 = val - _delta
        jd2 = val2 + _delta
        return jd1, jd2

    @classmethod
    def _from_jds(cls, jd1, jd2, scale=None):
        """Convert a two-part day delta back to seconds"""
        return (jd1 + jd2) * Unit.day2second
@register_format
class TimeDeltaDay(TimeDeltaFormat):
    """Time delta in days."""

    fmt = "days"
    unit = ("day",)

    @classmethod
    def _to_jds(cls, val, val2, scale=None):
        """Split a day count into a floor-aligned part and a small remainder."""
        if val2 is None:
            val2 = np.zeros(val.shape) if hasattr(val, "shape") else 0
        remainder = val - np.floor(val + val2)
        return val - remainder, val2 + remainder

    @classmethod
    def _from_jds(cls, jd1, jd2, scale=None):
        """Recombine the two-part value into a single day count."""
        return jd1 + jd2
@register_format
class TimeDeltaDateTime(TimeDeltaFormat):
    """Time delta as datetime's timedelta."""

    fmt = "timedelta"
    unit = None

    @classmethod
    def _to_jds(cls, val, val2, scale=None):
        """Convert timedelta value(s) to a two-part day count.

        ``val``/``val2`` may each be a single ``timedelta`` or a sequence of
        them; the try/except pairs select the scalar or vector path (EAFP).
        """
        if val2 is None:
            # len() raises TypeError on a scalar timedelta -> scalar zero.
            try:
                val2 = [timedelta(seconds=0)] * len(val)
            except TypeError:
                val2 = timedelta(seconds=0)
        try:
            # Scalar path: timedelta + timedelta has .total_seconds().
            days = (val + val2).total_seconds() * Unit.second2day
        except AttributeError:
            # Vector path: lists concatenate rather than add, so sum pairwise.
            seconds = [v1.total_seconds() + v2.total_seconds() for v1, v2 in zip(val, val2)]
            days = np.array(seconds) * Unit.second2day
        jd1 = np.floor(days)
        jd2 = days - jd1
        return jd1, jd2

    @classmethod
    def _from_jds(cls, jd1, jd2, scale=None):
        """Convert two-part day count(s) back into timedelta object(s)."""
        try:
            return timedelta(days=jd1 + jd2)
        except TypeError:
            # Array inputs: build one timedelta per element.
            return np.array([timedelta(days=j1 + j2) for j1, j2 in zip(jd1, jd2)])
#######################################################################################################################
# Execute on import
#######################################################################################################################
# Load the TAI-UTC table once at import time (presumably the leap-second
# table, given the name — confirm in read_tai_utc) so scale conversions can
# use it without re-reading the file.
_TAIUTC = read_tai_utc()
|
nilq/baby-python
|
python
|
import sys
from collections import deque

sys.setrecursionlimit(10 ** 7)

read_line = sys.stdin.readline

n = int(read_line())
values = [int(tok) for tok in read_line().split()]

seen = set()
candidates = deque(range(1, 200000 + 2))
mex = 0
for value in values:
    seen.add(value)
    if mex in seen:
        # Advance to the smallest never-seen candidate; a popped candidate
        # can be discarded for good because the seen set only grows.
        while candidates:
            nxt = candidates.popleft()
            if nxt not in seen:
                mex = nxt
                break
    # Emit the running MEX after each prefix of the sequence.
    print(mex)
|
nilq/baby-python
|
python
|
class Solution:
    """LeetCode 66 — Plus One."""

    def plusOne(self, digits):
        """Add one to the integer whose decimal digits are *digits* and
        return the resulting digit list (mutates *digits* in place when no
        extra leading digit is needed)."""
        # Walk from the least significant digit, carrying through 9s.
        for pos in reversed(range(len(digits))):
            if digits[pos] < 9:
                digits[pos] += 1
                return digits
            digits[pos] = 0
        # Every digit was 9: the result gains one extra leading digit.
        return [1] + [0] * len(digits)
|
nilq/baby-python
|
python
|
# coding: utf-8
# **Appendix D – Autodiff**
# _This notebook contains toy implementations of various autodiff techniques, to explain how they works._
# # Setup
# First, let's make sure this notebook works well in both python 2 and 3:
# In[1]:
# To support both python 2 and python 3
from __future__ import absolute_import, division, print_function, unicode_literals
# # Introduction
# Suppose we want to compute the gradients of the function $f(x,y)=x^2y + y + 2$ with regards to the parameters x and y:
# In[2]:
def f(x, y):
    """The example function f(x, y) = x²y + y + 2."""
    x_squared = x * x
    return x_squared * y + y + 2
# One approach is to solve this analytically:
#
# $\dfrac{\partial f}{\partial x} = 2xy$
#
# $\dfrac{\partial f}{\partial y} = x^2 + 1$
# In[3]:
def df(x, y):
    """Analytic gradient of f(x, y) = x²y + y + 2: (∂f/∂x, ∂f/∂y)."""
    dfdx = 2 * x * y
    dfdy = x * x + 1
    return dfdx, dfdy
# So for example $\dfrac{\partial f}{\partial x}(3,4) = 24$ and $\dfrac{\partial f}{\partial y}(3,4) = 10$.
# In[4]:
df(3, 4)
# Perfect! We can also find the equations for the second order derivatives (also called Hessians):
#
# $\dfrac{\partial^2 f}{\partial x \partial x} = \dfrac{\partial (2xy)}{\partial x} = 2y$
#
# $\dfrac{\partial^2 f}{\partial x \partial y} = \dfrac{\partial (2xy)}{\partial y} = 2x$
#
# $\dfrac{\partial^2 f}{\partial y \partial x} = \dfrac{\partial (x^2 + 1)}{\partial x} = 2x$
#
# $\dfrac{\partial^2 f}{\partial y \partial y} = \dfrac{\partial (x^2 + 1)}{\partial y} = 0$
# At x=3 and y=4, these Hessians are respectively 8, 6, 6, 0. Let's use the equations above to compute them:
# In[5]:
def d2f(x, y):
    """Analytic Hessian rows of f: ([f_xx, f_xy], [f_yx, f_yy])."""
    row_x = [2 * y, 2 * x]
    row_y = [2 * x, 0]
    return row_x, row_y
# In[6]:
d2f(3, 4)
# Perfect, but this requires some mathematical work. It is not too hard in this case, but for a deep neural network, it is pratically impossible to compute the derivatives this way. So let's look at various ways to automate this!
# # Numeric differentiation
# Here, we compute an approxiation of the gradients using the equation: $\dfrac{\partial f}{\partial x} = \displaystyle{\lim_{\epsilon \to 0}}\dfrac{f(x+\epsilon, y) - f(x, y)}{\epsilon}$ (and there is a similar definition for $\dfrac{\partial f}{\partial y}$).
# In[7]:
def gradients(func, vars_list, eps=0.0001):
    """Numerically approximate every partial derivative of *func*.

    Uses the one-sided finite difference (f(x+eps) - f(x)) / eps for each
    variable in turn, so *func* is called len(vars_list) + 1 times.

    :param func:      callable taking len(vars_list) positional arguments.
    :param vars_list: the point at which to differentiate.
    :param eps:       finite-difference step size.
    :returns: list of approximate partial derivatives, one per variable.
    """
    base = func(*vars_list)
    partials = []
    for idx in range(len(vars_list)):
        shifted = list(vars_list)
        shifted[idx] += eps
        partials.append((func(*shifted) - base) / eps)
    return partials
# In[8]:
def df(x, y):
    """Numeric gradient of f at (x, y) via the finite-difference gradients()."""
    return gradients(f, [x, y])
# In[9]:
df(3, 4)
# It works well!
# The good news is that it is pretty easy to compute the Hessians. First let's create functions that compute the first order derivatives (also called Jacobians):
# In[10]:
def dfdx(x, y):
    """Numeric ∂f/∂x at (x, y)."""
    return gradients(f, [x, y])[0]


def dfdy(x, y):
    """Numeric ∂f/∂y at (x, y)."""
    return gradients(f, [x, y])[1]


dfdx(3., 4.), dfdy(3., 4.)  # ≈ (24, 10)
# Now we can simply apply the `gradients()` function to these functions:
# In[11]:
def d2f(x, y):
    """Numeric Hessian rows of f at (x, y), by differentiating the numeric
    first derivatives dfdx and dfdy.

    BUGFIX: the original always differentiated at the hard-coded point
    [3., 4.], silently ignoring its x and y arguments (the demo call
    d2f(3, 4) masked this). Pass the arguments through instead.
    """
    return [gradients(dfdx, [x, y]), gradients(dfdy, [x, y])]
# In[12]:
d2f(3, 4)
# So everything works well, but the result is approximate, and computing the gradients of a function with regards to $n$ variables requires calling that function $n$ times. In deep neural nets, there are often thousands of parameters to tweak using gradient descent (which requires computing the gradients of the loss function with regards to each of these parameters), so this approach would be much too slow.
# ## Implementing a Toy Computation Graph
# Rather than this numerical approach, let's implement some symbolic autodiff techniques. For this, we will need to define classes to represent constants, variables and operations.
# In[13]:
class Const(object):
    """Computation-graph leaf wrapping a fixed numeric value."""

    def __init__(self, value):
        self.value = value

    def evaluate(self):
        """A constant evaluates to the value it wraps."""
        return self.value

    def __str__(self):
        return str(self.value)
class Var(object):
    """Computation-graph leaf for a named variable; set .value before use."""

    def __init__(self, name, init_value=0):
        self.value = init_value
        self.name = name

    def evaluate(self):
        """A variable evaluates to its currently assigned value."""
        return self.value

    def __str__(self):
        return self.name
class BinaryOperator(object):
    """Base for graph nodes with two operand sub-expressions ``a`` and ``b``."""

    def __init__(self, a, b):
        self.a = a
        self.b = b


class Add(BinaryOperator):
    """Sum node: evaluates to a + b."""

    def evaluate(self):
        return self.a.evaluate() + self.b.evaluate()

    def __str__(self):
        return "{} + {}".format(self.a, self.b)


class Mul(BinaryOperator):
    """Product node: evaluates to a * b (operands parenthesized when printed)."""

    def evaluate(self):
        return self.a.evaluate() * self.b.evaluate()

    def __str__(self):
        return "({}) * ({})".format(self.a, self.b)
# Good, now we can build a computation graph to represent the function $f$:
# In[14]:
# Build the computation graph for f out of the toy Const/Var/Add/Mul nodes.
x = Var("x")
y = Var("y")
f = Add(Mul(Mul(x, x), y), Add(y, Const(2))) # f(x,y) = x²y + y + 2
# And we can run this graph to compute $f$ at any point, for example $f(3, 4)$.
# In[15]:
x.value = 3
y.value = 4
f.evaluate()  # -> 42
# Perfect, it found the ultimate answer.
# ## Computing gradients
# The autodiff methods we will present below are all based on the *chain rule*.
# Suppose we have two functions $u$ and $v$, and we apply them sequentially to some input $x$, and we get the result $z$. So we have $z = v(u(x))$, which we can rewrite as $z = v(s)$ and $s = u(x)$. Now we can apply the chain rule to get the partial derivative of the output $z$ with regards to the input $x$:
#
# $ \dfrac{\partial z}{\partial x} = \dfrac{\partial s}{\partial x} \cdot \dfrac{\partial z}{\partial s}$
# Now if $z$ is the output of a sequence of functions which have intermediate outputs $s_1, s_2, ..., s_n$, the chain rule still applies:
#
# $ \dfrac{\partial z}{\partial x} = \dfrac{\partial s_1}{\partial x} \cdot \dfrac{\partial s_2}{\partial s_1} \cdot \dfrac{\partial s_3}{\partial s_2} \cdot \dots \cdot \dfrac{\partial s_{n-1}}{\partial s_{n-2}} \cdot \dfrac{\partial s_n}{\partial s_{n-1}} \cdot \dfrac{\partial z}{\partial s_n}$
# In forward mode autodiff, the algorithm computes these terms "forward" (i.e., in the same order as the computations required to compute the output $z$), that is from left to right: first $\dfrac{\partial s_1}{\partial x}$, then $\dfrac{\partial s_2}{\partial s_1}$, and so on. In reverse mode autodiff, the algorithm computes these terms "backwards", from right to left: first $\dfrac{\partial z}{\partial s_n}$, then $\dfrac{\partial s_n}{\partial s_{n-1}}$, and so on.
#
# For example, suppose you want to compute the derivative of the function $z(x)=\sin(x^2)$ at x=3, using forward mode autodiff. The algorithm would first compute the partial derivative $\dfrac{\partial s_1}{\partial x}=\dfrac{\partial x^2}{\partial x}=2x=6$. Next, it would compute $\dfrac{\partial z}{\partial x}=\dfrac{\partial s_1}{\partial x}\cdot\dfrac{\partial z}{\partial s_1}= 6 \cdot \dfrac{\partial \sin(s_1)}{\partial s_1}=6 \cdot \cos(s_1)=6 \cdot \cos(3^2)\approx-5.46$.
# Let's verify this result using the `gradients()` function defined earlier:
# In[16]:
from math import sin


def z(x):
    """z(x) = sin(x²) — the worked forward/reverse-mode example."""
    return sin(x**2)


gradients(z, [3])  # ≈ 6·cos(9) ≈ -5.46
# Look good. Now let's do the same thing using reverse mode autodiff. This time the algorithm would start from the right hand side so it would compute $\dfrac{\partial z}{\partial s_1} = \dfrac{\partial \sin(s_1)}{\partial s_1}=\cos(s_1)=\cos(3^2)\approx -0.91$. Next it would compute $\dfrac{\partial z}{\partial x}=\dfrac{\partial s_1}{\partial x}\cdot\dfrac{\partial z}{\partial s_1} \approx \dfrac{\partial s_1}{\partial x} \cdot -0.91 = \dfrac{\partial x^2}{\partial x} \cdot -0.91=2x \cdot -0.91 = 6\cdot-0.91=-5.46$.
# Of course both approaches give the same result (except for rounding errors), and with a single input and output they involve the same number of computations. But when there are several inputs or outputs, they can have very different performance. Indeed, if there are many inputs, the right-most terms will be needed to compute the partial derivatives with regards to each input, so it is a good idea to compute these right-most terms first. That means using reverse-mode autodiff. This way, the right-most terms can be computed just once and used to compute all the partial derivatives. Conversely, if there are many outputs, forward-mode is generally preferable because the left-most terms can be computed just once to compute the partial derivatives of the different outputs. In Deep Learning, there are typically thousands of model parameters, meaning there are lots of inputs, but few outputs. In fact, there is generally just one output during training: the loss. This is why reverse mode autodiff is used in TensorFlow and all major Deep Learning libraries.
# There's one additional complexity in reverse mode autodiff: the value of $s_i$ is generally required when computing $\dfrac{\partial s_{i+1}}{\partial s_i}$, and computing $s_i$ requires first computing $s_{i-1}$, which requires computing $s_{i-2}$, and so on. So basically, a first pass forward through the network is required to compute $s_1$, $s_2$, $s_3$, $\dots$, $s_{n-1}$ and $s_n$, and then the algorithm can compute the partial derivatives from right to left. Storing all the intermediate values $s_i$ in RAM is sometimes a problem, especially when handling images, and when using GPUs which often have limited RAM: to limit this problem, one can reduce the number of layers in the neural network, or configure TensorFlow to make it swap these values from GPU RAM to CPU RAM. Another approach is to only cache every other intermediate value, $s_1$, $s_3$, $s_5$, $\dots$, $s_{n-4}$, $s_{n-2}$ and $s_n$. This means that when the algorithm computes the partial derivatives, if an intermediate value $s_i$ is missing, it will need to recompute it based on the previous intermediate value $s_{i-1}$. This trades off CPU for RAM (if you are interested, check out [this paper](https://pdfs.semanticscholar.org/f61e/9fd5a4878e1493f7a6b03774a61c17b7e9a4.pdf)).
# ### Forward mode autodiff
# In[17]:
# Forward-mode autodiff: attach a symbolic .gradient(var) method to each
# node type. Each lambda mirrors the corresponding derivative rule and
# returns a new graph, so gradients can themselves be differentiated.
Const.gradient = lambda self, var: Const(0)  # d(c)/dv = 0
Var.gradient = lambda self, var: Const(1) if self is var else Const(0)  # d(v)/dv = 1
Add.gradient = lambda self, var: Add(self.a.gradient(var), self.b.gradient(var))  # sum rule
Mul.gradient = lambda self, var: Add(Mul(self.a, self.b.gradient(var)), Mul(self.a.gradient(var), self.b))  # product rule
x = Var(name="x", init_value=3.)
y = Var(name="y", init_value=4.)
f = Add(Mul(Mul(x, x), y), Add(y, Const(2))) # f(x,y) = x²y + y + 2
dfdx = f.gradient(x) # 2xy
dfdy = f.gradient(y) # x² + 1
# In[18]:
dfdx.evaluate(), dfdy.evaluate()
# Since the output of the `gradient()` method is fully symbolic, we are not limited to the first order derivatives, we can also compute second order derivatives, and so on:
# In[19]:
d2fdxdx = dfdx.gradient(x) # 2y
d2fdxdy = dfdx.gradient(y) # 2x
d2fdydx = dfdy.gradient(x) # 2x
d2fdydy = dfdy.gradient(y) # 0
# In[20]:
[[d2fdxdx.evaluate(), d2fdxdy.evaluate()],
[d2fdydx.evaluate(), d2fdydy.evaluate()]]
# Note that the result is now exact, not an approximation (up to the limit of the machine's float precision, of course).
# ### Forward mode autodiff using dual numbers
# A nice way to apply forward mode autodiff is to use [dual numbers](https://en.wikipedia.org/wiki/Dual_number). In short, a dual number $z$ has the form $z = a + b\epsilon$, where $a$ and $b$ are real numbers, and $\epsilon$ is an infinitesimal number, positive but smaller than all real numbers, and such that $\epsilon^2=0$.
# It can be shown that $f(x + \epsilon) = f(x) + \dfrac{\partial f}{\partial x}\epsilon$, so simply by computing $f(x + \epsilon)$ we get both the value of $f(x)$ and the partial derivative of $f$ with regards to $x$.
# Dual numbers have their own arithmetic rules, which are generally quite natural. For example:
#
# **Addition**
#
# $(a_1 + b_1\epsilon) + (a_2 + b_2\epsilon) = (a_1 + a_2) + (b_1 + b_2)\epsilon$
#
# **Subtraction**
#
# $(a_1 + b_1\epsilon) - (a_2 + b_2\epsilon) = (a_1 - a_2) + (b_1 - b_2)\epsilon$
#
# **Multiplication**
#
# $(a_1 + b_1\epsilon) \times (a_2 + b_2\epsilon) = (a_1 a_2) + (a_1 b_2 + a_2 b_1)\epsilon + b_1 b_2\epsilon^2 = (a_1 a_2) + (a_1b_2 + a_2b_1)\epsilon$
#
# **Division**
#
# $\dfrac{a_1 + b_1\epsilon}{a_2 + b_2\epsilon} = \dfrac{a_1 + b_1\epsilon}{a_2 + b_2\epsilon} \cdot \dfrac{a_2 - b_2\epsilon}{a_2 - b_2\epsilon} = \dfrac{a_1 a_2 + (b_1 a_2 - a_1 b_2)\epsilon - b_1 b_2\epsilon^2}{{a_2}^2 + (a_2 b_2 - a_2 b_2)\epsilon - {b_2}^2\epsilon} = \dfrac{a_1}{a_2} + \dfrac{a_1 b_2 - b_1 a_2}{{a_2}^2}\epsilon$
#
# **Power**
#
# $(a + b\epsilon)^n = a^n + (n a^{n-1}b)\epsilon$
#
# etc.
# Let's create a class to represent dual numbers, and implement a few operations (addition and multiplication). You can try adding some more if you want.
# In[21]:
class DualNumber(object):
    """A dual number a + b·ε with ε² = 0, for forward-mode autodiff.

    ``value`` holds the real part a, ``eps`` the infinitesimal coefficient b.
    Arithmetic on duals propagates derivatives through the ε coefficient.
    """

    def __init__(self, value=0.0, eps=0.0):
        self.value = value
        self.eps = eps

    def __add__(self, b):
        other = self.to_dual(b)
        return DualNumber(self.value + other.value, self.eps + other.eps)

    def __radd__(self, a):
        # number + dual: promote the left operand and reuse __add__.
        return self.to_dual(a).__add__(self)

    def __mul__(self, b):
        # (a1 + b1ε)(a2 + b2ε) = a1a2 + (b1a2 + a1b2)ε, since ε² = 0.
        other = self.to_dual(b)
        return DualNumber(self.value * other.value,
                          self.eps * other.value + self.value * other.eps)

    def __rmul__(self, a):
        return self.to_dual(a).__mul__(self)

    def __str__(self):
        if self.eps:
            return "{:.1f} + {:.1f}ε".format(self.value, self.eps)
        return "{:.1f}".format(self.value)

    def __repr__(self):
        return str(self)

    @classmethod
    def to_dual(cls, n):
        """Return *n* unchanged when already dual-like, else wrap it."""
        return n if hasattr(n, "value") else cls(n)
# $3 + (3 + 4 \epsilon) = 6 + 4\epsilon$
# In[22]:
3 + DualNumber(3, 4)
# $(3 + 4ε)\times(5 + 7ε)$ = $3 \times 5 + 3 \times 7ε + 4ε \times 5 + 4ε \times 7ε$ = $15 + 21ε + 20ε + 28ε^2$ = $15 + 41ε + 28 \times 0$ = $15 + 41ε$
# In[23]:
DualNumber(3, 4) * DualNumber(5, 7)
# Now let's see if the dual numbers work with our toy computation framework:
# In[24]:
x.value = DualNumber(3.0)
y.value = DualNumber(4.0)
f.evaluate()
# Yep, sure works. Now let's use this to compute the partial derivatives of $f$ with regards to $x$ and $y$ at x=3 and y=4:
# In[25]:
x.value = DualNumber(3.0, 1.0) # 3 + ε
y.value = DualNumber(4.0) # 4
dfdx = f.evaluate().eps
x.value = DualNumber(3.0) # 3
y.value = DualNumber(4.0, 1.0) # 4 + ε
dfdy = f.evaluate().eps
# In[26]:
dfdx
# In[27]:
dfdy
# Great! However, in this implementation we are limited to first order derivatives.
# Now let's look at reverse mode.
# ### Reverse mode autodiff
# Let's rewrite our toy framework to add reverse mode autodiff:
# In[28]:
class Const(object):
    """Constant leaf for reverse-mode autodiff; contributes no gradient."""

    def __init__(self, value):
        self.value = value

    def evaluate(self):
        return self.value

    def backpropagate(self, gradient):
        """Constants absorb incoming gradients — nothing to accumulate."""
        pass

    def __str__(self):
        return str(self.value)
class Var(object):
    """Variable leaf; accumulates its gradient during backpropagation."""

    def __init__(self, name, init_value=0):
        self.value = init_value
        self.name = name
        self.gradient = 0

    def evaluate(self):
        return self.value

    def backpropagate(self, gradient):
        # Gradients arriving along every graph path sum up here.
        self.gradient += gradient

    def __str__(self):
        return self.name
class BinaryOperator(object):
    """Base for two-operand reverse-mode nodes; stores the sub-expressions."""

    def __init__(self, a, b):
        self.a = a
        self.b = b


class Add(BinaryOperator):
    """Sum node for reverse-mode autodiff."""

    def evaluate(self):
        # Cache the result: parent Mul nodes read .value during backprop.
        self.value = self.a.evaluate() + self.b.evaluate()
        return self.value

    def backpropagate(self, gradient):
        # d(a+b)/da = d(a+b)/db = 1: pass the gradient through unchanged.
        self.a.backpropagate(gradient)
        self.b.backpropagate(gradient)

    def __str__(self):
        return "{} + {}".format(self.a, self.b)


class Mul(BinaryOperator):
    """Product node for reverse-mode autodiff."""

    def evaluate(self):
        self.value = self.a.evaluate() * self.b.evaluate()
        return self.value

    def backpropagate(self, gradient):
        # d(ab)/da = b, d(ab)/db = a; requires evaluate() to have run first
        # so the operands' .value caches are populated.
        self.a.backpropagate(gradient * self.b.value)
        self.b.backpropagate(gradient * self.a.value)

    def __str__(self):
        return "({}) * ({})".format(self.a, self.b)
# In[29]:
# Build f(x,y) = x²y + y + 2, run the forward pass, then backpropagate a
# seed gradient of 1.0 from the output to accumulate df/dx and df/dy.
x = Var("x", init_value=3)
y = Var("y", init_value=4)
f = Add(Mul(Mul(x, x), y), Add(y, Const(2))) # f(x,y) = x²y + y + 2
result = f.evaluate()
f.backpropagate(1.0)
# In[30]:
print(f)
# In[31]:
result  # 42
# In[32]:
x.gradient  # df/dx = 2xy = 24
# In[33]:
y.gradient  # df/dy = x² + 1 = 10
# Again, in this implementation the outputs are just numbers, not symbolic expressions, so we are limited to first order derivatives. However, we could have made the `backpropagate()` methods return symbolic expressions rather than values (e.g., return `Add(2,3)` rather than 5). This would make it possible to compute second order gradients (and beyond). This is what TensorFlow does, as do all the major libraries that implement autodiff.
# ### Reverse mode autodiff using TensorFlow
# In[34]:
import tensorflow as tf
# In[35]:
# TensorFlow 1.x graph mode: define f symbolically and let tf.gradients
# build the reverse-mode gradient ops with respect to x and y.
tf.reset_default_graph()
x = tf.Variable(3., name="x")
y = tf.Variable(4., name="y")
f = x*x*y + y + 2
jacobians = tf.gradients(f, [x, y])
init = tf.global_variables_initializer()
with tf.Session() as sess:
    init.run()
    f_val, jacobians_val = sess.run([f, jacobians])
f_val, jacobians_val
# Since everything is symbolic, we can compute second order derivatives, and beyond. However, when we compute the derivative of a tensor with regards to a variable that it does not depend on, instead of returning 0.0, the `gradients()` function returns None, which cannot be evaluated by `sess.run()`. So beware of `None` values. Here we just replace them with zero tensors.
# In[36]:
hessians_x = tf.gradients(jacobians[0], [x, y])
hessians_y = tf.gradients(jacobians[1], [x, y])
def replace_none_with_zero(tensors):
    """Substitute a 0.0 constant tensor for every None entry in *tensors*.

    tf.gradients() returns None for variables an op does not depend on, and
    sess.run() cannot evaluate None — so swap those entries for zeros.
    """
    return [tf.constant(0.) if tensor is None else tensor for tensor in tensors]
hessians_x = replace_none_with_zero(hessians_x)
hessians_y = replace_none_with_zero(hessians_y)
init = tf.global_variables_initializer()
with tf.Session() as sess:
init.run()
hessians_x_val, hessians_y_val = sess.run([hessians_x, hessians_y])
hessians_x_val, hessians_y_val
# And that's all folks! Hope you enjoyed this notebook.
|
nilq/baby-python
|
python
|
from django.db import models
# from django.contrib.auth.models import User
from django.contrib.auth import get_user_model as user_model
User = user_model()
from apps.lobby.main.models import Lobby
class Room(models.Model):
    """A room inside a Lobby, with member/admin/online/request user sets."""

    # The title doubles as the primary key, so room titles are globally unique.
    title = models.CharField(max_length=30, primary_key=True)
    description = models.CharField(max_length=200)
    # Distinct related_names keep the five User reverse accessors apart.
    members = models.ManyToManyField(User, related_name='room_members')
    creator = models.ForeignKey(User, on_delete=models.CASCADE, related_name='room_creator')
    admins = models.ManyToManyField(User, blank=True, related_name='room_admins')
    onlineUsers = models.ManyToManyField(User, blank=True, related_name='room_online_users')
    requests = models.ManyToManyField(User, blank=True, related_name='room_requests')
    # NOTE(review): the field name "Lobby" shadows the Lobby model class and
    # breaks the lowercase field convention — renaming requires a migration.
    Lobby = models.ForeignKey(Lobby, on_delete=models.CASCADE, related_name='room_lobby')

    def __str__(self):
        return self.title
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2016 Aarón Abraham Velasco Alvarez
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import state_table, reserved
def print_error(verbose, message, line):
    """Print a lexer error with its line number when verbose == 1 (Python 2)."""
    if verbose == 1:
        print "{} on line {}".format(message, line)
def get_event(input):
    """Map one input character to its event column in the state table.

    Letters collapse to the "alpha" event, digits to "numeric"; anything
    else is looked up literally. Unknown characters fall back to
    len(state_table.input), i.e. one past the last real column.
    NOTE(review): the parameter name shadows the builtin input().
    (Python 2 module: relies on unicode() and str.decode.)
    """
    try:
        if input.isalpha():
            key = "alpha"
        elif unicode(input).isnumeric():
            key = "numeric"
        else:
            key = input
    except UnicodeDecodeError:
        # Retry after interpreting escape sequences in the raw byte string.
        key = get_event(input.decode('unicode_escape'))
    return state_table.input.get(key, len(state_table.input))
def get_tokens(string, verbose):
    """Tokenize *string* with the table-driven scanner in ``state_table``.

    Returns (tokens, result) where tokens is a list of
    (token_id, lexeme, line) triples and result is False when any lexical
    error was reported. State 0 is the start state; positive states are
    in-progress lexemes; negative states appear to drive lookahead and
    backtracking (inferred from the handling below — confirm against
    state_table). (Python 2 module: uses print statements.)
    """
    tokens = []
    result = True
    # Accessors into a transition-table row:
    # row = (transitions, is_final, token_id, error_id)
    next_state = lambda state, event : state_table.table[state][0][event]
    is_final = lambda state : state_table.table[state][1]
    token = lambda state : state_table.table[state][2]
    error = lambda state : state_table.table[state][3]  # NOTE(review): unused
    error_message = lambda state : state_table.errors[state_table.table[state][3]]
    current_state = 0
    lexeme = ''
    index = 0
    count = 0        # characters consumed while in a negative (lookahead) state
    checkpoint = 0   # last positive state reached, used when backtracking
    line = 1
    new_lines = 0    # newlines seen mid-token, applied once the token closes
    while index < len(string):
        char = string[index]
        if char == "\n":
            if current_state == 0:
                line += 1
            else:
                new_lines += 1
        event = get_event(char)
        if verbose >= 3:
            print "debug:", line, current_state, char, next_state(current_state, event)
        if next_state(current_state, event) > 0:
            # Normal transition: extend the current lexeme.
            current_state = next_state(current_state, event)
            index += 1
            lexeme += char
            checkpoint = current_state
        elif next_state(current_state, event) < 0:
            # Lookahead transition: consume but remember how far we went.
            current_state = next_state(current_state, event)
            index += 1
            count += 1
        else:
            # No transition available: close the current token or report.
            if current_state > 0:
                if is_final(current_state):
                    # Keywords win over the generic token id for this state.
                    tokens.append((reserved.words.get(lexeme, token(current_state)), lexeme, line))
                else:
                    print_error(verbose, error_message(current_state), line)
                    result = False
                current_state = 0
                lexeme = ''
                line += new_lines
                new_lines = 0
                if char.isspace():
                    index += 1
            elif current_state < 0:
                # Failed lookahead: emit via the checkpoint state, then
                # rewind the input by the characters consumed speculatively.
                if is_final(current_state):
                    tokens.append((reserved.words.get(lexeme, token(checkpoint)), lexeme, line))
                    lexeme = ''
                current_state = token(current_state)
                index -= count
                count = 0
                checkpoint = 0
                new_lines = 0
            else:
                # Start state with no transition: emit a single-char token.
                if not char.isspace():
                    tokens.append((reserved.words.get(char, None), char, line))
                index += 1
    # Flush whatever token was still in progress at end of input.
    if current_state > 0 and not is_final(current_state):
        print_error(verbose, error_message(current_state), line)
        result = False
    elif current_state > 0 and is_final(current_state):
        tokens.append((reserved.words.get(lexeme, token(current_state)), lexeme, line))
    return tokens, result
|
nilq/baby-python
|
python
|
""" Npy module
This module implements all npy file management class and methods
...
Classes
-------
Npy
Class that manages npy file's reading and writing functionalities
"""
import numpy as np
import os
class Npy(object):
    """Reader/writer helper for folders of ``.npy`` files.

    Methods
    -------
    read(path)
        Load every npy file in *path* (named ``XXXX_<name>.npy``) and stack
        them into one 2-D array.
    write(path, npy_fname, npy)
        Save *npy* under *path*/*npy_fname*, creating *path* if needed.
    count_npy_files(path)
        Number of npy files inside *path* (0 when the folder is missing).
    """

    def read(self, path: str) -> np.ndarray:
        """Load and concatenate all npy files found in *path*.

        Files must be named ``XXXX_<name>.npy`` where XXXX is a numeric
        ordering prefix, and the folder must contain nothing else.

        :returns: array of shape (N * rows-per-file, cols) — assumes every
            file holds a 2-D array of identical shape (TODO confirm callers
            guarantee this).
        """
        entries = os.listdir(path)
        assert len(entries) != 0, "No files in path {0}".format(path)
        npy_only = [name for name in entries if self.__assert_is_npy(name)]
        assert len(npy_only) == len(os.listdir(path)), "There must be only npy files in path {0}".format(path)
        # Order the files by their numeric index prefix.
        npy_only.sort(key=lambda name: int(name.split("_")[0]))
        stacked = np.array([np.load(os.path.join(path, name)) for name in npy_only])
        flat_rows = stacked.shape[0] * stacked.shape[1]
        return stacked.reshape((flat_rows, stacked.shape[2]))

    def write(self, path: str, npy_fname: str, npy: np.ndarray) -> None:
        """Save *npy* as *npy_fname* inside *path* (folder created if absent).

        NOTE(review): the extension check's return value is ignored here,
        so a non-npy name is not actually rejected — same as the original.
        """
        self.__assert_is_npy(npy_fname)
        if not os.path.isdir(path):
            os.mkdir(path)
        np.save(os.path.join(path, npy_fname), npy)

    def count_npy_files(self, path: str) -> int:
        """Return how many npy files *path* contains (0 for a missing folder)."""
        if not os.path.isdir(path):
            return 0
        return sum(1 for name in os.listdir(path) if self.__assert_is_npy(name))

    def __assert_is_npy(self, fname: str):
        """Return True when *fname* has the ``npy`` extension."""
        return fname.split(".")[-1] == "npy"
|
nilq/baby-python
|
python
|
# Smoke-test script for the LIS2MDL magnetometer over I2C (MicroPython).
from machine import I2C
import LIS2MDL

i2c = I2C(1)  # I2C bus 1
mdl = LIS2MDL.LIS2MDL(i2c)
mdl.x()    # NOTE(review): presumably reads the X axis — confirm driver API
mdl.get()  # presumably reads a full measurement — confirm driver API
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#==================
# gmail_pycamera
#==================
import os
import json
import datetime
import shutil
from devices import CameraMount
from h264tomp4 import h264tomp4
from gmail import Gmail
from command import parse_command
class GmailPiCamera:
    """Gmail-driven pan/tilt Pi camera.

    Polls Gmail for command messages, records a short video with the camera
    mount, optionally archives it under ./videos/<year>/<month>/, and mails
    the clip back to the requesting user.
    """

    def __init__(self, video_setting=None, gmail_setting=None, command_setting=None):
        """Load the three JSON setting files (each falls back to defaults)."""
        self.vsetting = self._load_video_setting(video_setting)
        self.gsetting = self._load_gmail_setting(gmail_setting)
        self.csetting = self._load_command_setting(command_setting)
        self.history_json = './history.json'
        self.history = self._load_history()
        self.fname = './video.mp4'
        self.tfname = './tmp.h264'
        self.video_store = './videos'
        self.now = None

    @staticmethod
    def _load_json(setting_json, default):
        """Return the parsed JSON file when it exists, else *default*.

        Consolidates the four duplicated loader bodies of the original
        ``_load_*`` methods into one place.
        """
        if setting_json is not None and os.path.isfile(setting_json):
            with open(setting_json) as f:
                return json.load(f)
        return default

    def _load_video_setting(self, setting_json):
        """Video settings: resolution and whether to archive clips."""
        return self._load_json(setting_json, {
            "width": 240,
            "height": 320,
            "store": False
        })

    def _load_gmail_setting(self, setting_json):
        """Gmail account/addressing settings."""
        return self._load_json(setting_json, {
            "sender_address": "SENDER_ADDRESS",
            "user_addresses": [
                "USER_ADDRESS1",
                "USER_ADDRESS2"
            ],
            "credential": "CREDENTIAL",
            "subject": "SUBJECT",
            "message": "MESSAGE"
        })

    def _load_command_setting(self, setting_json):
        """Mapping from mail text to camera commands."""
        return self._load_json(setting_json, {
            "execute": "EXECUTE_COMMAND",
            "pan": "PAN_COMMAND",
            "tilt": "TILT_COMMAND"
        })

    def _load_history(self):
        """Last-seen message date per sender (empty when no history file)."""
        return self._load_json(self.history_json, {})

    def save_history(self):
        """Persist the per-sender message history to ``self.history_json``."""
        with open(self.history_json, 'w') as f:
            json.dump(self.history, f)

    def video(self, motion):
        """Record a pan or tilt video; optionally archive it under ./videos.

        :param motion: "pan" or "tilt".
        :raises ValueError: for any other motion value.
        """
        self.now = None
        width, height, store = self._get_video_setting()
        with CameraMount() as camera:
            if motion == 'pan':
                camera.video_pan(width, height, self.tfname)
            elif motion == 'tilt':
                camera.video_tilt(width, height, self.tfname)
            else:
                raise ValueError("Invalid motion value!")
            camera.center()
        h264tomp4(self.tfname, self.fname)
        d = datetime.datetime.today()
        now = d.strftime("%Y%m%d%H%M%S")
        # Use the unpacked ``store`` value (the original unpacked it but then
        # read self.vsetting["store"] again; the unused ``day`` is dropped).
        if store is True:
            # makedirs creates the videos/<year>/<month> chain in one call
            # (the original issued three conditional mkdir calls).
            month_dir = os.path.join(self.video_store, d.strftime("%Y"), d.strftime("%m"))
            os.makedirs(month_dir, exist_ok=True)
            shutil.copyfile(self.fname, os.path.join(month_dir, now + ".mp4"))
        self.now = now

    def _get_video_setting(self):
        """Return (width, height, store) from the video settings."""
        width = self.vsetting["width"]
        height = self.vsetting["height"]
        store = self.vsetting["store"]
        return (width, height, store)

    def send(self, to_index=None):
        """Mail the last recorded clip; no-op when nothing was recorded."""
        if self.now is not None:
            gmail = Gmail(self.gsetting)
            gmail.send(to_index, self.fname, self.now + '.mp4')

    def receive(self, from_address=None):
        """Fetch the newest message (date, text) from *from_address*."""
        gmail = Gmail(self.gsetting)
        date, message = gmail.receive(from_address)
        return date, message

    def parse(self, message=None):
        """Translate mail text into a camera command via the command settings."""
        return parse_command(self.csetting, message)
if __name__ == '__main__':
    gcamera = GmailPiCamera('./video_setting.json', './gmail_setting.json', './command_setting.json')
    # Poll every configured user address for new command mails.
    for index, address in enumerate(gcamera.gsetting['user_addresses']):
        # receive the newest message from this sender
        date, message = gcamera.receive(address)
        # skip messages already handled (same sender + same date in history)
        if not (address in gcamera.history and date == gcamera.history[address]):
            # save history before acting, so a crash won't re-run the command
            gcamera.history[address] = date
            gcamera.save_history()
            # parse command text into a camera motion
            command = gcamera.parse(message)
            print(command)
            # execute command: record the requested motion and mail it back
            if command:
                gcamera.video(command)
                gcamera.send(index)
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.8 on 2021-11-02 22:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.2.8): drops ImageGroup.group_size and adds
    unique=True to image_name, group_name, organization_name and UNI."""

    dependencies = [
        ('annotations', '0004_auto_20211102_1819'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='imagegroup',
            name='group_size',
        ),
        migrations.AlterField(
            model_name='image',
            name='image_name',
            field=models.CharField(max_length=200, unique=True),
        ),
        migrations.AlterField(
            model_name='imagegroup',
            name='group_name',
            field=models.CharField(max_length=200, unique=True),
        ),
        migrations.AlterField(
            model_name='organization',
            name='organization_name',
            field=models.CharField(max_length=40, unique=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='UNI',
            field=models.CharField(max_length=8, unique=True),
        ),
    ]
|
nilq/baby-python
|
python
|
import unittest
import solver
from solver import sort_colors
class TestSolver(unittest.TestCase):
    """Unit tests for solver.sort_colors (Dutch national flag problem)."""

    def test_sort_colors(self):
        # Table-driven: (input, expected) pairs covering uniform, single-swap
        # and mixed arrangements.
        cases = [
            ([0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]),
            ([2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]),
            ([0, 0, 2, 0, 0, 0], [0, 0, 0, 0, 0, 2]),
            ([0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]),
            ([2, 2, 1, 2, 2, 2], [1, 2, 2, 2, 2, 2]),
            ([0, 2, 2, 0, 2, 0], [0, 0, 0, 2, 2, 2]),
            ([2, 0, 2, 1, 1, 0], [0, 0, 1, 1, 2, 2]),
        ]
        for given, expected in cases:
            self.assertEqual(sort_colors(given), expected)


if __name__ == "__main__":
    unittest.main()
|
nilq/baby-python
|
python
|
from django import template
from raids.utils import get_instances
register = template.Library()
def get_loot_history(character):
    """Template filter: per-instance loot counts for a character's main spec.

    Only items whose entitlement matches the character's specialization are
    counted. Returns a list of (instance, count) tuples covering every known
    instance (zero counts included).
    """
    # Dict for O(1) membership checks; seeded with every instance at count 0.
    instances = {instance: 0 for instance in get_instances()}
    for acquisition in character.loot_history.all():
        for entitlement in acquisition.item.entitlement.all():
            if entitlement.specialization == character.specialization:
                # Hoist the repeated queryset evaluation: the original called
                # acquisition.item.encounter.all()[0] twice, hitting the
                # database once per call.
                instance = acquisition.item.encounter.all()[0].instance
                if instance in instances:
                    instances[instance] += 1
    # The template expects a list of (instance, count) pairs.
    return list(instances.items())


register.filter('get_loot_history', get_loot_history)
|
nilq/baby-python
|
python
|
# Copyright (c) Open-MMLab. All rights reserved.
from .checkpoint import (_load_checkpoint, load_checkpoint, load_state_dict,
save_checkpoint, weights_to_cpu)
from .dist_utils import get_dist_info, init_dist, master_only
from .hooks import (CheckpointHook, ClosureHook, DistSamplerSeedHook, Hook,
IterTimerHook, LoggerHook, LrUpdaterHook, OptimizerHook,
TensorboardLoggerHook, TextLoggerHook, WandbLoggerHook)
from .log_buffer import LogBuffer
from .parallel_test import parallel_test
from .priority import Priority, get_priority
from .runner import Runner
from .utils import get_host_info, get_time_str, obj_from_dict
# Explicit public API of the runner package; keep in sync with the imports above.
__all__ = [
    'Runner', 'LogBuffer', 'Hook', 'CheckpointHook', 'ClosureHook',
    'LrUpdaterHook', 'OptimizerHook', 'IterTimerHook', 'DistSamplerSeedHook',
    'LoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook', 'WandbLoggerHook',
    '_load_checkpoint', 'load_state_dict', 'load_checkpoint', 'weights_to_cpu',
    'save_checkpoint', 'parallel_test', 'Priority', 'get_priority',
    'get_host_info', 'get_time_str', 'obj_from_dict', 'init_dist',
    'get_dist_info', 'master_only'
]
|
nilq/baby-python
|
python
|
# Standard imports
from types import SimpleNamespace
import numpy as np
from scipy import optimize
from scipy import stats
from scipy import random
from scipy.stats import beta
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
# Grids of q and x over their ranges (np.linspace default: 50 points each).
q_values = np.linspace(0.0, 0.9)
x_values = np.linspace(0.01, 0.9)
# Known model parameters, bundled in a namespace.
par = SimpleNamespace()
par.theta = -2  # degree of risk aversion
par.y = 1  # total assets
par.p = 0.2  # probability that the loss is incurred
#Utility function
def u(z, par):
    """CRRA utility of assets.

    Args:
        z (float or int): asset level fed into the utility function
        par (SimpleNamespace): uses par.theta, the degree of risk aversion

    Returns:
        float: utility of assets
    """
    exponent = 1 + par.theta
    return z ** exponent / exponent
#Premium policy
def pi(q, par):
    """Actuarially fair premium for coverage q.

    Args:
        q (float): insurance coverage
        par (SimpleNamespace): uses par.p, the probability of incurring the loss

    Returns:
        float: premium for the given coverage
    """
    premium = par.p * q
    return premium
#Expected utility function if not insured
def V0(x, par):
    """Expected utility of an uninsured agent facing a possible loss x.

    Args:
        x (float): monetary loss in the bad outcome
        par (SimpleNamespace): uses par.y (total assets) and par.p (loss probability)

    Returns:
        float: expected utility from not buying insurance
    """
    utility_if_loss = u(par.y - x, par)
    utility_if_no_loss = u(par.y, par)
    return par.p * utility_if_loss + (1 - par.p) * utility_if_no_loss
#Expected utility function if insured
def V(q, x, par):
    """Expected utility of an agent insured with coverage q, facing loss x.

    Args:
        q (float): insurance coverage
        x (float): monetary loss in the bad outcome
        par (SimpleNamespace): uses par.y (total assets) and par.p (loss probability)

    Returns:
        float: expected utility from buying insurance
    """
    premium = pi(q, par)  # pi is pure, so computing it once is equivalent
    utility_if_loss = u(par.y - x + q - premium, par)
    utility_if_no_loss = u(par.y - premium, par)
    return par.p * utility_if_loss + (1 - par.p) * utility_if_no_loss
# Grid of coverage levels q over [0.01, 0.6] (np.linspace default: 50 points).
q_vec = np.linspace(0.01,0.6)
# Parameters for the case with the loss fixed at x = 0.6.
par1 = SimpleNamespace()
par1.theta = -2  # degree of risk aversion
par1.y = 1  # total assets
par1.p = 0.2  # probability that the loss is incurred
par1.x = 0.6  # monetary loss in the bad outcome
#Expected utility function if not insured with x=0.6
def V0_function(par1):
    """Expected utility of staying uninsured when the loss is fixed at par1.x.

    Args:
        par1 (SimpleNamespace): uses par1.x (loss), par1.y (total assets)
            and par1.p (loss probability)

    Returns:
        float: expected utility from not buying insurance
    """
    utility_if_loss = u(par1.y - par1.x, par1)
    utility_if_no_loss = u(par1.y, par1)
    return par1.p * utility_if_loss + (1 - par1.p) * utility_if_no_loss
#Expected utility function if insured with x=0.6
def V_function(q, pi, par1):
    """Expected utility of buying coverage q at premium pi (loss fixed at par1.x).

    Args:
        q (float): insurance coverage
        pi (float): insurance premium
        par1 (SimpleNamespace): uses par1.x (loss), par1.y (total assets)
            and par1.p (loss probability)

    Returns:
        float: expected utility from buying insurance
    """
    utility_if_loss = u(par1.y - par1.x + q - pi, par1)
    utility_if_no_loss = u(par1.y - pi, par1)
    return par1.p * utility_if_loss + (1 - par1.p) * utility_if_no_loss
#Defining a function that subtracts the expected utility from buying an insurance with the expected utility from not buying an insurance
def pi_tilde_acc(q, pi, par1):
    """Utility gain from insuring: V_function(q, pi) minus V0_function.

    The root of this expression in pi is the maximal acceptable premium.

    Args:
        q (float): insurance coverage
        pi (float): insurance premium
        par1 (SimpleNamespace): model parameters

    Returns:
        float: difference in expected utility, insured minus uninsured
    """
    gain = V_function(q, pi, par1) - V0_function(par1)
    return gain
# Known model parameters for the Monte Carlo comparison.
par2 = SimpleNamespace()
par2.theta = -2  # degree of risk aversion
par2.y = 1  # total assets
par2.p = 0.2  # probability that the loss is incurred
# Parameters of the two insurance policies under comparison.
par3 = SimpleNamespace()
par3.gamma1 = 0.9  # coverage ratio, policy 1
par3.gamma2 = 0.2  # coverage ratio, policy 2
par3.pi1 = 0.45  # premium, policy 1
par3.pi2 = 0.1  # premium, policy 2
def g1(x, par2, par3):
    """Agent's utility value under insurance policy 1.

    Args:
        x (float): loss drawn from the beta distribution X
        par2 (SimpleNamespace): uses par2.y (total assets)
        par3 (SimpleNamespace): uses par3.gamma1 (coverage ratio) and
            par3.pi1 (premium) of policy 1

    Returns:
        float: utility of end-of-period assets under policy 1
    """
    assets = par2.y - (1 - par3.gamma1) * x - par3.pi1
    return u(assets, par2)
def MC1(N, g1, x):
    """Monte Carlo estimate of the agent's expected value of policy 1.

    Args:
        N (int): number of random draws
        g1 (callable): value function of policy 1
        x: unused; kept for interface compatibility with the original signature

    Returns:
        float: mean of g1 over N draws from Beta(2, 7)
    """
    draws = np.random.beta(2, 7, size=N)  # X ~ Beta(2, 7)
    # par2 and par3 are module-level parameter namespaces.
    return np.mean(g1(draws, par2, par3))
def g2(x, par2, par3):
    """Agent's utility value under insurance policy 2.

    Args:
        x (float): loss drawn from the beta distribution X
        par2 (SimpleNamespace): uses par2.y (total assets)
        par3 (SimpleNamespace): uses par3.gamma2 (coverage ratio) and
            par3.pi2 (premium) of policy 2

    Returns:
        float: utility of end-of-period assets under policy 2
    """
    assets = par2.y - (1 - par3.gamma2) * x - par3.pi2
    return u(assets, par2)
def MC2(N, g2, x):
    """Monte Carlo estimate of the agent's expected value of policy 2.

    Args:
        N (int): number of random draws
        g2 (callable): value function of policy 2
        x: unused; kept for interface compatibility with the original signature

    Returns:
        float: mean of g2 over N draws from Beta(2, 7)
    """
    draws = np.random.beta(2, 7, size=N)  # X ~ Beta(2, 7)
    # par2 and par3 are module-level parameter namespaces.
    return np.mean(g2(draws, par2, par3))
# Globals used by the Monte Carlo estimator below.
N = 10_000  # number of draws
# NOTE(review): a and b look like bounds for a later premium search;
# they are not used anywhere in this chunk — confirm against the notebook.
a = 0
b = 1
# Known model parameters for the premium-pricing problem.
par4 = SimpleNamespace()
par4.gamma = 0.95  # coverage ratio
par4.y = 1  # total assets
par4.theta = -2  # degree of risk aversion
def g3(x, pi, N, par4):
    """Monte Carlo estimate of the agent's value of a policy at premium pi.

    Args:
        x (float): unused draw argument kept for signature compatibility
        pi (float): insurance premium
        N (int): number of random draws
        par4 (SimpleNamespace): uses par4.y (total assets) and
            par4.gamma (coverage ratio)

    Returns:
        float: mean utility of end-of-period assets over N Beta(2, 7) draws
    """
    draws = np.random.beta(2, 7, size=N)
    assets = par4.y - (1 - par4.gamma) * draws - pi
    # V(gamma, pi) approximated as the sample mean of u(assets).
    return np.mean(u(assets, par4))
|
nilq/baby-python
|
python
|
from core.config.setting import static_setting
from core.resource.pool import ResourceSetting
# Point the static-settings registry at a local directory, then load the
# resource settings stored there.
static_setting.setting_path = "/Users/lilen/mySetting"
ResourceSetting.load()
print(f"资源文件路径{ResourceSetting.resource_path}")
print(f"配置文件路径{ResourceSetting.setting_path}")
# Change the resource path in memory and persist every registered setting.
ResourceSetting.resource_path = "/User/user/new_resource"
static_setting.save_all()
|
nilq/baby-python
|
python
|
import pygame
from settings import *
class Entity:
    """Base game object: position, health bar and frame-based animations."""

    def __init__(self, x, y, w, h, speed, begin_act):
        # Raw spawn geometry (kept alongside the pygame rects).
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        self.rect = pygame.Rect(x, y, w, h)
        # Health: 100 points, drawn as a 4-px-high bar above the sprite.
        self.life = 100
        self.life_rect = pygame.Rect(x, y, w, 4)
        # Animation state: current action key, frame index, loaded frames.
        self.action = begin_act
        self.animations_database = {}
        self.frame = 0
        self.flip = False
        self.velocity = [0, 0]
        self.speed = speed

    @staticmethod
    def animations(path, frame_length):
        """Load numbered sprite images; repeat each one per its frame length."""
        frames = []
        for index, length in enumerate(frame_length):
            image = pygame.transform.scale2x(
                pygame.image.load(path + "_" + str(index) + ".png"))
            image.set_colorkey(WHITE_COLOR)
            # Duplicate the surface reference so each image lasts `length` ticks.
            frames.extend([image] * length)
        return frames

    def move_right(self):
        """Shift the hitbox horizontally by the current x-velocity."""
        self.rect.x += self.velocity[0]

    def move_left(self):
        """Shift the hitbox horizontally by the current x-velocity."""
        self.rect.x += self.velocity[0]

    def move_up(self):
        """Shift the hitbox vertically by the current y-velocity."""
        self.rect.y += self.velocity[1]

    def move_down(self):
        """Shift the hitbox vertically by the current y-velocity."""
        self.rect.y += self.velocity[1]

    def update(self):
        """Advance the animation frame (wrapping) and pin the HP bar above the sprite."""
        self.frame += 1
        if self.frame >= len(self.animations_database[self.action]):
            self.frame = 0
        self.life_rect.x = self.rect.x
        self.life_rect.y = self.rect.y - 26

    def damage(self, life_decrease):
        """Subtract life points and shrink the HP bar proportionally."""
        self.life -= life_decrease
        self.life_rect.width -= self.w / (100 / life_decrease)

    def check_die(self):
        """Return True once life is depleted (None otherwise)."""
        if self.life <= 0:
            return True

    def draw(self, window):
        """Blit the current (possibly mirrored) frame 20 px above the hitbox."""
        current = self.animations_database[self.action][self.frame]
        mirrored = pygame.transform.flip(current, self.flip, False)
        window.blit(mirrored, (self.rect.x, self.rect.y - 20))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
mail.py: email server utils
'''
import sys
import argparse
import getpass
from os import getenv
try:
import mysql.connector as mariadb
except ImportError:
print('找不到 mysql 客户端,请安装: pip install mysql-connector-python')
sys.exit(2)
_version = '2.1'

# CLI: the only option is -v/--version. The description and help text are
# user-facing (Chinese) and are left untouched.
parser = argparse.ArgumentParser(description='作用: 插入邮件、删除邮件、查看邮箱、修改密码')
parser.add_argument(
    '-v',
    '--version',
    action='version',
    version=f"%(prog)s version: {_version}",
    help='显示版本并退出'
)
args = parser.parse_args()

# Database credentials come from the environment; any of these may be None
# when unset, in which case connect() below raises and the script exits.
username = getenv('DB_USERNAME')
password = getenv('DB_PASSWORD')
dbname = getenv('DB_DATABASE')
dbhost = getenv('DB_HOST')
try:
    conn = mariadb.connect(
        host=dbhost,
        user=username,
        passwd=password,
        database=dbname,
        use_pure=True
    )
except mariadb.Error as err:
    print("Error: {}".format(err))
    sys.exit(0)

# Cache all known mailbox addresses up front (first column of `users`);
# the delete branch of the menu loop below checks against this list.
all_mails = []
cursor = conn.cursor()
cursor.execute("SELECT * FROM users")
myresult = cursor.fetchall()
for x in myresult:
    all_mails.append(x[0])

# Menu text shown before every prompt (user-facing, kept in Chinese).
prompt_text = """\
0.退出
1.新建邮箱
2.删除邮箱
3.显示所有邮箱
4.修改密码
5.显示可用邮箱域名
输入数字:
"""
# Interactive menu loop: dispatch on the user's numeric choice until "0" (quit).
var1 = input(prompt_text)
while var1 != "0":
    # 1: create a new mailbox
    if var1 == "1":
        str0 = input("输入新邮箱: ")
        print("输入的内容是: ", str0)
        print("输入密码: ")
        str1 = getpass.getpass()
        print("再次输入密码: ")
        str_1 = getpass.getpass()
        if str1 != str_1:
            print("再次密码不一样")
            continue
        try:
            cursor = conn.cursor(prepared=True)
            val = (str0, str1)
            cursor.execute("INSERT INTO users (email, password) VALUES (?, ENCRYPT(?))", val)
            conn.commit()
            print(cursor.rowcount, "个邮箱已插入")
            # BUGFIX: keep the in-memory address cache in sync so a mailbox
            # created in this session can also be deleted in this session.
            all_mails.append(str0)
        except mariadb.Error as error:
            print("Error: {}".format(error))
        input("Press Enter to continue...")
    # 2: delete a mailbox
    elif var1 == "2":
        str2 = input("输入删除的邮箱: ")
        if str2 not in all_mails:
            print(str2, "不存在")
            input("Press Enter to continue...")
        else:
            try:
                cursor = conn.cursor(prepared=True)
                sql1 = "DELETE FROM users WHERE email = %s"
                val1 = (str2)
                cursor.execute(sql1, (val1,))
                conn.commit()
                print(str2, "已删除")
                # BUGFIX: drop the address from the cache after deletion.
                all_mails.remove(str2)
            except mariadb.Error as error:
                print("Error: {}".format(error))
            input("Press Enter to continue...")
    # 3: list every mailbox
    elif var1 == "3":
        cursor = conn.cursor()
        cursor.execute("SELECT email FROM users")
        myresult = cursor.fetchall()
        for x in myresult:
            print(x[0])
    # 4: change a mailbox password
    elif var1 == "4":
        str3 = input("输入邮箱: ")
        print("输入的内容是: ", str3)
        str4 = getpass.getpass("输入新密码: ")
        str_4 = getpass.getpass("再次输入密码: ")
        if str4 != str_4:
            print("两次密码不一样")
            continue
        try:
            cursor = conn.cursor(prepared=True)
            val2 = (str4, str3)
            cursor.execute("UPDATE users SET password = ENCRYPT(?) WHERE email = ?", val2)
            conn.commit()
            print(str3, "密码已修改")
        except mariadb.Error as error:
            # BUGFIX: error message previously read "Errot".
            print("Error: {}".format(error))
        input("Press Enter to continue...")
    # 5: list available mail domains
    elif var1 == "5":
        cursor = conn.cursor()
        cursor.execute("SELECT domain FROM domains")
        myresult = cursor.fetchall()
        for x in myresult:
            print(x[0])
    else:
        print("请输入有效数字")
        input("Press Enter to continue...")
    # Pause, then show the menu again.
    input("Press Enter to continue...")
    var1 = input(prompt_text)
|
nilq/baby-python
|
python
|
import os
import json
from .android_component_builder import AndroidComponentBuilder
class LabelBuilder(AndroidComponentBuilder):
    """Builds the XML for an Android TextView from a label component spec."""

    def __init__(self, options, component):
        super().__init__(options, component)
        # Defaults below; load_attributes() (inherited from the base class)
        # is expected to override them from the component definition —
        # TODO confirm against AndroidComponentBuilder.
        self.constraints = {}
        self.name = ''
        self.text = ''
        self.text_align = 'left'
        self.font = {
            'face': 'Arial',
            'size': 20,
            'color': '#000000',
            'weight': 'normal'
        }
        self.lines = []
        self.load_attributes()

    def load_lines(self):
        """Assemble self.lines and return the TextView element as one string."""
        self.lines = [
            '    <TextView',
            f"        android:id=\"@+id/{self.name}\"",
            f"        android:text=\"{self.text}\"",
        ]
        # 'left' is the Android default, so only emit a non-default alignment.
        if self.text_align != 'left':
            self.lines += [f"        android:textAlignment=\"{self.text_align}\""]
        self.lines += self.constraint_lines()
        self.lines += self.text_styling_lines()
        self.lines += ["    />\n"]
        return ("\n").join(self.lines)

    def text_styling_lines(self):
        """Return the font-related XML attribute lines."""
        return [
            f"        fontPath=\"font/{self.font['face']}.ttf\"",
            f"        android:textStyle=\"{self.font['weight']}\"",
            f"        android:textSize=\"{self.font['size']}sp\"",
            f"        android:textColor=\"{self.font['color']}\""
        ]
|
nilq/baby-python
|
python
|
import http
import secrets
import pytest
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils import timezone
from django.utils.http import urlencode
from guildmaster import views
from guildmaster.utils import reverse
@pytest.fixture()
def user():
    # Reusable test user; get_or_create keeps the fixture idempotent.
    user, __ = User.objects.get_or_create(username='henry', email='henry@aie-guild.org')
    return user


def test_authorization(rf, user, tf_client):
    # The authorization view must redirect (302) to the client's authorization URL.
    request = rf.get(
        reverse('guildmaster:authorization', kwargs={'client_name': tf_client.name}), username=user.username
    )
    response = views.AuthorizationView.as_view()(request, client_name=tf_client.name)
    assert response.status_code == 302
    assert response.url.startswith(tf_client.authorization_url)


def test_authorization_state(rf, user, tf_client, settings):
    # The OAuth2 "state" token stored in the session must be echoed in the redirect URL.
    request = rf.get(
        reverse('guildmaster:authorization', kwargs={'client_name': tf_client.name}), username=user.username
    )
    response = views.AuthorizationView.as_view()(request, client_name=tf_client.name)
    assert request.session[settings.GUILDMASTER_SESSION_STATE_KEY] in response.url


def test_authorization_return_url(rf, user, tf_client, settings):
    # Without an explicit return field, the configured default return URL is stored.
    request = rf.get(
        reverse('guildmaster:authorization', kwargs={'client_name': tf_client.name}), username=user.username
    )
    views.AuthorizationView.as_view()(request, client_name=tf_client.name)
    assert request.session[settings.GUILDMASTER_SESSION_RETURN_KEY] == settings.GUILDMASTER_RETURN_URL
    # When the return field is supplied as a query parameter, it wins.
    request = rf.get(
        '{}?{}'.format(
            reverse('guildmaster:authorization', kwargs={'client_name': tf_client.name}),
            urlencode({f'{settings.GUILDMASTER_RETURN_FIELD_NAME}': '/other'}),
        ),
        username=user.username,
    )
    views.AuthorizationView.as_view()(request, client_name=tf_client.name)
    assert request.session[settings.GUILDMASTER_SESSION_RETURN_KEY] == '/other'
def test_token(rf, settings, user, tf_client, requests_mock):
    # Happy path: the token view exchanges the code for tokens, fetches the
    # user info, and redirects on success.
    expected = {
        'access_token': secrets.token_urlsafe(64),
        'refresh_token': secrets.token_urlsafe(64),
        'token_type': 'bearer',
        'expires_in': 3600,
    }
    # Mock the provider's token endpoint; a Date header accompanies the
    # response as it would from a real server.
    requests_mock.post(
        tf_client.token_url, json=expected, headers={'Date': timezone.now().strftime('%a, %d %b %Y %H:%M:%S %Z')}
    )
    userinfo = {'username': 'henry', 'discriminator': '1234', 'battletag': 'henry#1234'}
    requests_mock.get(tf_client.userinfo_url, json=userinfo)
    code = secrets.token_urlsafe(64)
    state = secrets.token_urlsafe(64)
    request = rf.get(
        reverse('guildmaster:token', kwargs={'client_name': tf_client.name}),
        {'code': code, 'state': state},
        username=user.username,
    )
    request.user = user
    # Session state must match the state query parameter for the exchange to proceed.
    request.session[settings.GUILDMASTER_SESSION_STATE_KEY] = state
    response = views.TokenView.as_view()(request, client_name=tf_client.name)
    assert response.status_code == 302
    # Exactly two upstream calls: POST token exchange, then GET user info.
    assert requests_mock.called
    assert requests_mock.call_count == 2
    assert requests_mock.request_history[0].method == 'POST'
    assert requests_mock.request_history[0].url == tf_client.token_url
    assert requests_mock.request_history[1].method == 'GET'
    assert requests_mock.request_history[1].url == tf_client.userinfo_url
def test_token_error(rf, settings, user, tf_client):
    """An OAuth2 error callback (access_denied) must be rejected with 403."""
    # Removed a dead statement: the original called secrets.token_urlsafe(64)
    # and discarded the result (no code is needed on the error path).
    state = secrets.token_urlsafe(64)
    request = rf.get(
        reverse('guildmaster:token', kwargs={'client_name': tf_client.name}),
        {'error': 'access_denied', 'state': state},
        username=user.username,
    )
    # Matching session state so the failure is the error param, not the state check.
    request.session[settings.GUILDMASTER_SESSION_STATE_KEY] = state
    response = views.TokenView.as_view()(request, client_name=tf_client.name)
    assert response.status_code == 403
def test_token_bogus(rf, settings, user, tf_client):
    # A state mismatch between session and query string must be rejected (403).
    code = secrets.token_urlsafe(64)
    state = secrets.token_urlsafe(64)
    request = rf.get(
        reverse('guildmaster:token', kwargs={'client_name': tf_client.name}),
        {'code': code, 'state': state},
        username=user.username,
    )
    # Deliberately store a *different* state token than the one sent above.
    request.session[settings.GUILDMASTER_SESSION_STATE_KEY] = secrets.token_urlsafe(64)
    response = views.TokenView.as_view()(request, client_name=tf_client.name)
    assert response.status_code == 403


def test_discord_list(settings, client, user):
    # Logged-in users can fetch the Discord account list page.
    client.force_login(user)
    response = client.get(reverse('guildmaster:discord-list'))
    assert response.status_code == http.HTTPStatus.OK
|
nilq/baby-python
|
python
|
# coding: utf-8
"""
Json-serializers for books-rest-api.
"""
from rest_framework.serializers import (
ModelSerializer, ReadOnlyField
)
from books.models import *
__author__ = "Vladimir Gerasimenko"
__copyright__ = "Copyright (C) 2017, Vladimir Gerasimenko"
__version__ = "0.0.1"
__maintainer__ = "Vladimir Gerasimenko"
__email__ = "vladworldss@yandex.ru"
class BookSerializer(ModelSerializer):
    """
    Book serializer class.

    Serializes every field of ``Book``; ``owner`` is read-only and is
    rendered as the owning user's username.
    """
    owner = ReadOnlyField(source='owner.username')

    class Meta:
        model = Book
        fields = "__all__"
class CategorySerializer(ModelSerializer):
    """
    Category serializer class.

    Serializes every field of ``Category``; ``owner`` is read-only and is
    rendered as the owning user's username.
    """
    owner = ReadOnlyField(source='owner.username')

    class Meta:
        model = Category
        fields = "__all__"
|
nilq/baby-python
|
python
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from faker import Faker
from members.models.members import Member
from members.models.services import Service
from members.models.countries import City
from django.template.defaultfilters import slugify
import random
from django_countries import countries
from members.models.countries import City
import os
import csv
module_dir = os.path.dirname(__file__) # get current directory
file_path = os.path.join(module_dir, 'members_city.csv')
class Command(BaseCommand):
    """Seed the database: admin user, cities from CSV, and one fake member."""

    help = "Command information"

    def handle(self, *args, **kwargs):
        # Reuse the admin account if it exists; otherwise create it.
        # Narrowed from a bare `except:` which would also have swallowed
        # database errors and KeyboardInterrupt.
        try:
            user = User.objects.get(username='admin')
        except User.DoesNotExist:
            user = User.objects.create_superuser(
                username='admin',
                email='admin@website.com',
                password='testpass123'
            )
        fake = Faker()
        # Load cities from the bundled CSV (city name is column index 3).
        with open(file_path) as f:
            reader = csv.reader(f)
            for row in reader:
                _, created = City.objects.get_or_create(
                    name=row[3],
                    user=user,
                    country='EG'
                )
        # Create one fake member attached to the admin user.
        member_obj = Member.objects.create(
            first_name=fake.first_name(),
            last_name=fake.last_name(),
            email=fake.unique.email(),
            mobile=fake.unique.phone_number(),
            address=fake.address(),
            birthday=fake.date_of_birth(),
            user=user,
            city=City.objects.filter(name='New Cairo').first(),
            job="Engineer",
            marital_status="S",
            gender="F",
            height='175'
        )
        # for _ in range(10):
        #     membership_obj = Membership.objects.create(
        #         name=fake.word(),
        #         type=random.choice(['Clinic', 'Online']),
        #         period=random.randint(1, 12),
        #         sessions=random.randint(12, 120),
        #         price=random.randint(1000, 5000),
        #         user=user
|
nilq/baby-python
|
python
|
q1 = []
q2 = []
n = 5
def push(data):
    """Push onto whichever of the two module-level queues currently holds
    the stack; report overflow once the capacity ``n`` is reached."""
    if q1 and len(q1) == n:
        print("Overflow")
        return
    if q2 and len(q2) == n:
        print("Overflow")
        return
    # Elements live wholly in one queue at a time; q1 is used while q2 is empty.
    target = q1 if not q2 else q2
    target.append(data)
def pop():
    """Remove the top element by draining the active queue into the other,
    then discarding the last remaining item."""
    if not q1 and not q2:
        print("Underflow")
    if q1:
        while len(q1) > 1:
            q2.append(q1.pop(0))
        q1.pop()
        return
    if q2:
        while len(q2) > 1:
            q1.append(q2.pop(0))
        q2.pop()
        return
def Top():
    """Return the top element without removing it; as a side effect the
    whole stack ends up rotated into the other queue."""
    if not q1 and not q2:
        print("Underflow")
    if q1:
        while len(q1) > 1:
            q2.append(q1.pop(0))
        top = q1.pop(0)
        q2.append(top)
        return top
    if q2:
        while len(q2) > 1:
            q1.append(q2.pop(0))
        top = q2.pop(0)
        q1.append(top)
        return top
if __name__ == '__main__':
    # Demo: fill the stack to capacity (n == 5), inspect both queues,
    # pop once, then peek at the new top.
    push(1)
    push(2)
    push(3)
    push(4)
    push(5)
    print(q1)
    print(q2)
    pop()
    print(q1)
    print(q2)
    print(Top())
|
nilq/baby-python
|
python
|
import requests, time
import sys,time,socket
from Sensor import Sensor
if __name__ == "__main__":
    # Continuously poll the PM2.5 sensor on the serial port and print readings.
    sensor = Sensor("/dev/ttyUSB0", 9600)
    while True:
        data = sensor.PM25_Hex(10).split(" ")
        # data[3] is the high byte and data[2] the low byte of the raw value;
        # the sensor reports tenths of a microgram, hence /10. (The original
        # Python 2 integer division dropped the decimal.)
        pm = int(data[3] + data[2], 16) / 10
        # Converted the Python 2 print statement to the print() function so
        # the snippet runs under Python 3 like the rest of the codebase.
        print(str(time.strftime("%H:%M:%S", time.localtime())) + ' PM2.5: ', pm)
|
nilq/baby-python
|
python
|
from test_helper import run_common_tests, failed, passed, check_tests_pass
from maximum_salary import largest_number
def reference(numbers):
    """Brute-force oracle for the largest-number problem.

    Bubble-sorts the numbers as strings using the concatenation comparison
    (a+b vs b+a), then joins them into a single integer.
    """
    digits = [str(value) for value in numbers]
    for _ in digits:
        for i in range(len(digits) - 1):
            # Swap when putting digits[i] first yields a smaller concatenation.
            if digits[i] + digits[i + 1] < digits[i + 1] + digits[i]:
                digits[i], digits[i + 1] = digits[i + 1], digits[i]
    return int("".join(digits))
if __name__ == '__main__':
    # Run the shared test harness first, then stress largest_number against
    # the brute-force reference on hand-picked tricky inputs.
    run_common_tests()
    check_tests_pass("maximum_salary_unit_tests.py")
    all_tests_passed = True
    for numbers in [
        [2, 21, 23, 211, 213, 231, 232],
        [56, 5, 6, 556, 566, 666, 665, 656]
    ]:
        if reference(numbers) != largest_number(numbers):
            all_tests_passed = False
            failed("Wrong answer for n={}".format(numbers))
            break
    if all_tests_passed:
        passed()
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.