id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1665298 | <filename>InstagramBotComment/bot.py
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import random
class InstagramBot:
# Bot that logs in to Instagram and posts giveaway ("sorteio") comments.
def __init__(self, username, password):
# ------------------------
# Firefox initialisation
# ------------------------
# username/password are typed into the login form by login()
self.username = username
self.password = password
firefoxProfile = webdriver.FirefoxProfile()
# Request a Portuguese UI — the bot later matches the Portuguese
# 'Publicar' button text when publishing comments.
firefoxProfile.set_preference("intl.accept_languages", "pt,pt-BR")
# Disable web-notification pop-ups that would block clicks.
firefoxProfile.set_preference("dom.webnotifications.enabled", False)
# Expects the geckodriver binary next to the script (relative path).
self.driver = webdriver.Firefox(
firefox_profile=firefoxProfile, executable_path=r"geckodriver"
)
def login(self):
    """Log in to Instagram with the credentials given at construction time,
    dismiss the "save login info" dialog, then start commenting.
    """
    driver = self.driver
    driver.get("https://www.instagram.com")
    time.sleep(3)
    # (The old auth_switcher login-button click was already disabled; its
    # extra wait is kept so the overall timing is unchanged.)
    time.sleep(3)
    user_element = driver.find_element_by_xpath(
        "//input[@name='username']")
    user_element.clear()
    user_element.send_keys(self.username)
    time.sleep(3)
    password_element = driver.find_element_by_xpath(
        "//input[@name='password']")
    password_element.clear()
    # FIX: the source contained a redacted "<PASSWORD>" placeholder here,
    # which is not even valid Python; the stored credential must be typed.
    password_element.send_keys(self.password)
    time.sleep(3)
    password_element.send_keys(Keys.RETURN)
    time.sleep(5)
    # Dismiss the "save your login info?" dialog ("Agora não" = "Not now").
    agora_nao = driver.find_element_by_class_name("cmbtv")
    agora_nao.click()
    self.comenta_fotos()
@staticmethod
def type_like_a_person(sentence, single_input_field):
    """Simulate a human typist: send one character at a time into the
    input field, pausing a random 1-3 seconds between keystrokes."""
    print("Digitando comentário...")
    for character in sentence:
        single_input_field.send_keys(character)
        time.sleep(random.randint(1, 3))
def _escolhe_marcados(self, comments_pool, quantidade):
    """Pick `quantidade` (0-3) mutually distinct comments from the pool.

    The chosen entries are removed from `comments_pool` (so each comment is
    used once per giveaway) and returned joined into a single string in the
    same order the original code used.  Returns '' when `quantidade` is 0.
    """
    pessoa_1 = random.choice(comments_pool)
    pessoa_2 = random.choice(comments_pool)
    pessoa_3 = random.choice(comments_pool)
    # Re-draw until the required number of picks are all different.
    if quantidade == 2:
        while pessoa_1 == pessoa_2:
            pessoa_1 = random.choice(comments_pool)
            pessoa_2 = random.choice(comments_pool)
    elif quantidade == 3:
        while pessoa_3 == pessoa_2 or pessoa_3 == pessoa_1 or pessoa_1 == pessoa_2:
            pessoa_1 = random.choice(comments_pool)
            pessoa_2 = random.choice(comments_pool)
            pessoa_3 = random.choice(comments_pool)
    marcados = ''
    if quantidade == 1:
        marcados = pessoa_1
        comments_pool.remove(pessoa_1)
    elif quantidade == 2:
        marcados = f'{pessoa_2} {pessoa_1}'
        comments_pool.remove(pessoa_1)
        comments_pool.remove(pessoa_2)
    elif quantidade == 3:
        marcados = f'{pessoa_2} {pessoa_1} {pessoa_3}'
        comments_pool.remove(pessoa_1)
        comments_pool.remove(pessoa_2)
        comments_pool.remove(pessoa_3)
    return marcados


def comenta_fotos(self):
    """Comment on up to five giveaway posts, round-robin at random, until
    every giveaway's comment pool is exhausted.

    The original implementation copy-pasted the same branch five times
    (one per giveaway); the logic is factored into _escolhe_marcados and
    driven by the tables below, with identical observable behaviour.
    """
    vezes_comentadas = 0
    # ---------------------------------------------------
    # Manually fill in the list of comments shared by all giveaways.
    # ---------------------------------------------------
    comments = [
        '',
    ]
    # Insert the links of the giveaways ("sorteios") to comment on.
    links = {1: '', 2: '', 3: '', 4: '', 5: ''}
    # How many people to tag per comment in each giveaway (0-3).
    quantidades = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0}
    # Each giveaway gets its own independent copy of the comment list.
    pools = {n: list(comments) for n in links}
    # Only giveaways with a link AND a non-empty comment pool take part.
    sorteios = [link for n, link in links.items() if link != '' and pools[n]]
    while True:
        try:
            # Pick which giveaway to comment on this round.
            # (random.choice raises IndexError on an empty list; as in the
            # original code that is swallowed by the handler below.)
            sorteio_da_vez = random.choice(sorteios)
            driver = self.driver
            time.sleep(5)
            driver.get(sorteio_da_vez)
            driver.execute_script(
                "window.scrollTo(0, document.body.scrollHeight);")
            # Grab the comment input box.
            driver.find_element_by_class_name("Ypffh").click()
            comment_input_box = driver.find_element_by_class_name("Ypffh")
            time.sleep(random.randint(1, 20))
            # Stop when every pool has fewer comments than its giveaway needs.
            # FIX: the original chained the comparisons with bitwise '&',
            # which binds tighter than '<' and evaluated garbage; 'and'
            # semantics (via all()) is what was intended.
            if all(len(pools[n]) < quantidades[n] for n in links):
                break
            # Map the chosen link back to its giveaway number.
            numero = next(n for n, link in links.items()
                          if link == sorteio_da_vez)
            if len(pools[numero]) >= quantidades[numero]:
                marcados = self._escolhe_marcados(pools[numero],
                                                  quantidades[numero])
                self.type_like_a_person(marcados, comment_input_box)
                print("Comentei: ", marcados, " no post: ", sorteio_da_vez, "")
            else:
                # Pool exhausted for this giveaway: stop visiting it.
                sorteios.remove(sorteio_da_vez)
            # All giveaways exhausted: done.
            if len(sorteios) == 0:
                break
            # Publish the comment.
            time.sleep(random.randint(1, 15))
            driver.find_element_by_xpath(
                "//button[contains(text(), 'Publicar')]"
            ).click()
            vezes_comentadas += 1
            print('Vezes comentadas:')
            print(vezes_comentadas)
            # Pause between comments; shrinking this risks an Instagram block.
            time.sleep(random.randint(40, 55))
        # FIX: bare 'except:' also swallowed KeyboardInterrupt/SystemExit,
        # making the loop impossible to stop with Ctrl-C.
        except Exception:
            print('Um erro ocorreu, mas sem problemas vamos continuar')
def _run():
    """Script entry point: announce start, run the bot, announce finish."""
    print('Script Iniciado')
    print('-' * 20)
    # Enter the Instagram username and password here.
    bot = InstagramBot("user", "password")
    bot.login()
    print('-' * 20)
    print('Script Finalizado')


# Guard the entry point so importing this module no longer launches a
# browser session as a side effect.
if __name__ == '__main__':
    _run()
| StarcoderdataPython |
1612697 | from unittest import TestCase
import unittest
import pkgutil
from os import walk
from os import path
class TestPackage(TestCase):
    """Smoke-imports the ezyrb handlers and checks that ``__all__``
    matches the modules actually present in the ``ezyrb`` folder."""

    def test_import_ez_1(self):
        import ezyrb as ez
        # Constructing with a non-existent file must not raise at init time.
        ez.filehandler.FileHandler('inexistent.vtk')

    def test_import_ez_2(self):
        import ezyrb as ez
        ez.matlabhandler.MatlabHandler('inexistent.mat')

    def test_import_ez_3(self):
        import ezyrb as ez
        ez.vtkhandler.VtkHandler('inexistent.vtk')

    def test_modules_name(self):
        """``__all__`` must list every .py module under ezyrb/ (recursively),
        excluding ``__init__``."""
        import ezyrb
        modules = []
        for _dirpath, _dirnames, filenames in walk('ezyrb'):
            for filename in filenames:
                stem, ext = path.splitext(filename)
                if stem != '__init__' and ext == '.py':
                    modules.append(stem)
        # unittest assertion (not a bare assert) so failures report both lists.
        self.assertEqual(sorted(ezyrb.__all__), sorted(modules))
| StarcoderdataPython |
9658301 | import setuptools
# Use the project README verbatim as the PyPI long description.
with open("README.md", "r") as readme:
    long_description = readme.read()

setuptools.setup(
    name="pyenhancer",
    version="1.0dev1",
    author="<NAME>",
    author_email="<EMAIL>",
    description="A tiny NumPy-based library for image color enhancement",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/yuki-koyama/pyenhancer",
    packages=["pyenhancer"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Runtime dependencies: array math plus image I/O / colour conversion.
    install_requires=["numpy", "scikit-image"],
)
| StarcoderdataPython |
8309 | <filename>SelfTests.py
import os
import unittest
from Logger import Logger
class TestLogger(unittest.TestCase):
    """Exercises file handling and line persistence of the Logger class."""

    def test_file_handling(self):
        log = Logger("testLog")
        # Creating the logger must create and open its backing file.
        self.assertTrue(log.opened)
        # First close: clears the `opened` flag and returns 0 (success).
        result = log.close()
        self.assertFalse(log.opened)
        self.assertEqual(result, 0)
        # Second close: returns 1 because the file is already closed.
        result = log.close()
        self.assertEqual(result, 1)
        # Clean up the file created for this test.
        os.remove(log.name)

    def test_logging(self):
        log = Logger("testLog")
        phrase = "TestLine\r\n"
        log.save_line(phrase)
        log.close()
        with open(log.name) as handle:
            content = handle.read()
        # Each stored line has the shape "<prefix> : <payload>";
        # the payload must round-trip unchanged.
        self.assertEqual(content.split(" : ")[1], phrase)
        # Clean up.
        os.remove(log.name)
from gpsNavigation import gpsModule,gpsPoint
class TestGPSNavigation(unittest.TestCase):
    """Checks distance/azimuth computation for the eight compass directions
    around a fixed origin point (lat=10, lon=10)."""

    def test_gps_angles(self):
        gps_mod = gpsModule()
        origin = gpsPoint(10, 10)
        # (dest lat, dest lon, expected distance [m], expected azimuth [deg]).
        # The original repeated the same assert triple eight times; a
        # parametrized loop with subTest reports every failing direction.
        cases = [
            (10.1, 10.1, 15623.0, 45.0),
            (10.0, 10.1, 10963.0, 90.0),
            (9.9, 10.1, 15625.0, 135.0),
            (9.9, 10.0, 11132.0, 180.0),
            (9.9, 9.9, 15625.0, 225.0),
            (10.0, 9.9, 10963.0, 270.0),
            (10.1, 9.9, 15623.0, 315.0),
            (10.1, 10.0, 11132.0, 0),
        ]
        for lat, lon, expected_distance, expected_azimut in cases:
            with self.subTest(lat=lat, lon=lon):
                distance, azimut = gps_mod.GPSData.getDirAndDist(
                    origin, gpsPoint(lat, lon))
                self.assertEqual(distance, expected_distance)
                self.assertEqual(azimut, expected_azimut)
# Run all self-tests when this file is executed directly.
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
# Script to find three-letter words whose last two letters are the same.


def find_three_letter_doubles(lines):
    """Return the words from *lines* (one word per line, trailing whitespace
    ignored) that have exactly three letters and a doubled last letter,
    e.g. 'abb' or 'xyy'.
    """
    words = []
    for line in lines:
        word = line.rstrip()
        # FIX: the original tested len(line) > 2 on the raw line (which still
        # carries its newline) and so also matched words longer than three
        # letters, contradicting the script's stated purpose.
        if len(word) == 3 and word[1] == word[2]:
            words.append(word)
    return words


if __name__ == '__main__':
    # FIX: Python-2 print statements converted to Python 3, and the file is
    # now closed deterministically via a context manager.
    with open('C:/Users/dennis/Desktop/test2.txt', 'r') as fp:
        words = find_three_letter_doubles(fp)
    print(str(len(words)) + " results")
    for w in words:
        print(w)
| StarcoderdataPython |
11277256 | <filename>software/python/basics/L1_motors.py
# This example drives the right and left motors.
# Intended for Beaglebone Blue hardware.
# This example uses rcpy library. Documentation: guitar.ucsd.edu/rcpy/rcpy.pdf
# Import external libraries
import rcpy
import rcpy.motor as motor
import time # only necessary if running this program as a loop
import numpy as np # for clip function
# Motor cape channel numbers used by MotorL/MotorR below.
motor_l = 1 # Left Motor (ch1)
motor_r = 2 # Right Motor (ch2)
# NOTE: THERE ARE 4 OUTPUTS. 3 & 4 ACCESSIBLE THROUGH diode & accy functions
rcpy.set_state(rcpy.RUNNING) # initialize the rcpy library
# define functions to command motors, effectively controlling PWM
# Set the left motor (channel 1) duty cycle; the demo loop below drives
# negative values for reverse.
def MotorL(speed): # takes argument in range [-1,1]
motor.set(motor_l, speed)
# Set the right motor (channel 2) duty cycle; same convention as MotorL.
def MotorR(speed): # takes argument in range [-1,1]
motor.set(motor_r, speed)
def diode(state, channel):  # takes argument in range [0,1]
    """Drive a diode/accessory channel, clamping the duty cycle to [0, 1]
    so a negative voltage can never be commanded."""
    # FIX: np.clip returns the clipped value; it does not modify its
    # argument in place, so the original discarded the clamp entirely.
    state = np.clip(state, 0, 1)  # limit the output, disallow negative voltages
    motor.set(channel, state)
# Pass-through PWM control for an accessory output channel (3 or 4, per the
# NOTE above); the value is not clamped here.
def accy(state, channel): # takes argument in range [-1,1]
motor.set(channel, state)
# Demo loop: alternate both wheels forward and reverse for 4 s each until
# rcpy leaves the RUNNING state.
if __name__ == "__main__":
while rcpy.get_state() != rcpy.EXITING: # exit loop if rcpy not ready
if rcpy.get_state() == rcpy.RUNNING: # execute loop when rcpy is ready
print("motors.py: driving fwd")
MotorL(0.6) # gentle speed for testing program. 0.3 PWM may not spin the wheels.
MotorR(0.6)
time.sleep(4) # run fwd for 4 seconds
print("motors.py: driving reverse")
MotorL(-0.6)
MotorR(-0.6)
time.sleep(4) # run reverse for 4 seconds
63562 | <reponame>deepparrot/vision
import PIL
import torch
import torchvision
import tqdm
from torchbench.image_classification import ImageNet
import torchvision.transforms as transforms
import PIL
import torchvision.models as models
# Benchmark the torchvision classification model zoo on ImageNet.
# Every model is evaluated with the same preprocessing the original
# script used for all entries (224x224 centre crop, ImageNet statistics).
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
input_transform = transforms.Compose([
    transforms.Resize(256, PIL.Image.BICUBIC),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])

# (model constructor, paper model name, arXiv id, paper-reported results).
# A None in the last column means the original call omitted paper_results.
# The original file repeated the identical ImageNet.benchmark(...) call
# ~30 times; this table plus one loop performs the same calls in the same
# order with the same arguments.
BENCHMARKS = [
    # Deep residual learning
    (models.resnet18, 'ResNet-18', '1512.03385', {'Top 1 Accuracy': 0.7212}),
    (models.resnet34, 'ResNet-34 A', '1512.03385',
     {'Top 1 Accuracy': 0.7497, 'Top 5 Accuracy': 0.9224}),
    (models.resnet50, 'ResNet-50', '1512.03385', None),
    (models.resnet101, 'ResNet-101', '1512.03385', None),
    (models.resnet152, 'ResNet-152', '1512.03385', None),
    # AlexNet
    (models.alexnet, 'AlexNet (single)', '1404.5997',
     {'Top 1 Accuracy': 0.5714}),
    # VGG
    (models.vgg11, 'VGG-11', '1409.1556',
     {'Top 1 Accuracy': 0.704, 'Top 5 Accuracy': 0.896}),
    (models.vgg13, 'VGG-13', '1409.1556',
     {'Top 1 Accuracy': 0.713, 'Top 5 Accuracy': 0.901}),
    (models.vgg16, 'VGG-16', '1409.1556', None),
    (models.vgg19, 'VGG-19', '1409.1556', None),
    (models.vgg11_bn, 'VGG-11 (batch-norm)', '1409.1556',
     {'Top 1 Accuracy': 0.704, 'Top 5 Accuracy': 0.896}),
    (models.vgg13_bn, 'VGG-13 (batch-norm)', '1409.1556',
     {'Top 1 Accuracy': 0.713, 'Top 5 Accuracy': 0.901}),
    (models.vgg16_bn, 'VGG-16 (batch-norm)', '1409.1556', None),
    (models.vgg19_bn, 'VGG-19 (batch-norm)', '1409.1556', None),
    # SqueezeNet
    (models.squeezenet1_0, 'SqueezeNet', '1602.07360',
     {'Top 1 Accuracy': 0.575, 'Top 5 Accuracy': 0.803}),
    (models.squeezenet1_1, 'SqueezeNet 1.1', '1602.07360', None),
    # DenseNet
    (models.densenet121, 'DenseNet-121', '1608.06993', None),
    (models.densenet161, 'DenseNet-161', '1608.06993', None),
    (models.densenet169, 'DenseNet-169', '1608.06993', None),
    (models.densenet201, 'DenseNet-201', '1608.06993', None),
    # Inception V3
    (models.inception_v3, 'Inception V3', '1512.00567', None),
    # Inception V1 (GoogLeNet)
    (models.googlenet, 'Inception V1', '1409.4842', None),
    # ShuffleNet V2
    (models.shufflenet_v2_x1_0, 'ShuffleNet V2 (1x)', '1807.11164',
     {'Top 1 Accuracy': 0.694}),
    (models.shufflenet_v2_x0_5, 'ShuffleNet V2 (0.5x)', '1807.11164',
     {'Top 1 Accuracy': 0.603}),
    # MobileNet
    (models.mobilenet_v2, 'MobileNetV2', '1801.04381',
     {'Top 1 Accuracy': 0.72}),
    # ResNeXt
    (models.resnext50_32x4d, 'ResNeXt-50 32x4d', '1611.05431', None),
    (models.resnext101_32x8d, 'ResNeXt-101 32x8d', '1611.05431', None),
    # Wide ResNet
    (models.wide_resnet50_2, 'WRN-50-2-bottleneck', '1605.07146', None),
    (models.wide_resnet101_2, 'WRN-101-2-bottleneck', '1605.07146', None),
    # MnasNet
    (models.mnasnet0_5, 'MnasNet-A1 (depth multiplier=0.5)', '1807.11626',
     {'Top 1 Accuracy': 0.689}),
    (models.mnasnet1_0, 'MnasNet-A1', '1807.11626', None),
]

for constructor, model_name, arxiv_id, paper_results in BENCHMARKS:
    # Only pass paper_results when the original call supplied it, so the
    # benchmark harness sees exactly the same keyword arguments as before.
    extra = {'paper_results': paper_results} if paper_results else {}
    ImageNet.benchmark(
        model=constructor(pretrained=True),
        paper_model_name=model_name,
        paper_arxiv_id=arxiv_id,
        input_transform=input_transform,
        batch_size=256,
        num_gpu=1,
        **extra,
    )
| StarcoderdataPython |
6510938 | <filename>gensim2/pearson.py
"""
@Project : DuReader
@Module : pearson.py
@Author : Deco [<EMAIL>]
@Created : 5/17/18 10:25 AM
@Desc :
"""
import os
import numpy as np
import pandas
import scipy.stats
import tensorflow as tf
def load_sts_dataset(filename):
    """Load a subset of the STS benchmark file into a DataFrame with both
    sentences of each pair and their human-rated similarity score."""
    pairs = []
    with tf.gfile.GFile(filename, "r") as handle:
        for raw_line in handle:
            fields = raw_line.strip().split("\t")
            # Tab-separated layout: column 4 is the gold score,
            # columns 5 and 6 are the two sentences.
            pairs.append((fields[5], fields[6], float(fields[4])))
    return pandas.DataFrame(pairs, columns=["sent_1", "sent_2", "sim"])
def download_and_load_sts_data():
    """Download + extract the STS benchmark archive (cached by Keras) and
    return the dev and test splits as DataFrames."""
    archive = tf.keras.utils.get_file(
        fname="Stsbenchmark.tar.gz",
        origin="http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz",
        extract=True)
    base_dir = os.path.dirname(archive)
    print('Dirname of sts_dataset:', base_dir)
    # location of the downloaded files
    dev_path = os.path.join(base_dir, "stsbenchmark", "sts-dev.csv")
    test_path = os.path.join(base_dir, "stsbenchmark", "sts-test.csv")
    return load_sts_dataset(dev_path), load_sts_dataset(test_path)
def cal_pearson_correlation(scores, dev_scores):
    """Print and return the Pearson correlation between two score lists.

    Parameters
    ----------
    scores, dev_scores : sequences of floats of equal length.

    Returns
    -------
    tuple (correlation coefficient, two-sided p-value) — previously the
    function only printed these, forcing callers to re-run pearsonr;
    returning them is backward compatible (the return value was None).
    """
    # Pearson's r: least-squares linear association between the two
    # score distributions, with its two-sided significance p-value.
    correlation, p_value = scipy.stats.pearsonr(scores, dev_scores)
    print('Pearson correlation coefficient = {0}\np-value = {1}'.format(
        correlation, p_value))
    return correlation, p_value
if __name__ == '__main__':
# Sanity demo: random scores against the human dev-set similarity ratings
# should show roughly zero correlation.
sts_dev0, sts_test0 = download_and_load_sts_data()
dev_scores0 = sts_dev0['sim'].tolist()
scores1 = np.random.uniform(low=0, high=5, size=len(dev_scores0))
print(len(scores1), len(dev_scores0))
cal_pearson_correlation(scores1, dev_scores0)
# Sample output from a previous run (random scores => negligible correlation):
# Pearson correlation coefficient = 0.02144320357998576
# p - value = 0.406598277363874
| StarcoderdataPython |
330068 | #!/usr/local/bin/python3
import os
import logging
import json
import time
import signal
from pprint import pformat
from http import client
from urllib.parse import urlparse
from pddns import providers
from pddns.pddns import get_ip
# Runtime configuration, all overridable through environment variables.
CACHE_PATH = os.getenv('CACHE_PATH', '/var/lib/dyndns.cache')
CONSOLE_AUTH_TOKEN = os.getenv('CONSOLE_AUTH_TOKEN')
CONSOLE_URL = os.getenv('CONSOLE_URL')
INTERVAL = int(os.getenv('INTERVAL', '300'))
LOG_LEVEL = os.getenv('LOG_LEVEL', 'INFO').upper()
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.StreamHandler())
LOGGER.setLevel(getattr(logging, LOG_LEVEL))
# Maps a console "provider" key to (pddns config section name, handler class).
PROVIDERS = {
'afraid': ('Afraid', providers.Afraid),
'cloudflare': ('Cloudflare', providers.Cloudflare),
'hurricane': ('Hurricane Electric', providers.HurricaneElectric),
'strato': ('Strato', providers.Strato),
}
# NOTE: lifted from source code.
# TODO: refactor this library so version can be imported.
PDDNS_VERSION = "v2.1.0"
def _load_cache():
    """Load the persisted domain->IP cache from CACHE_PATH.

    Returns an empty dict when the cache file is missing, empty, or
    unreadable, so callers can always treat the result as a mapping.
    """
    # FIX: os.path.getsize raises FileNotFoundError on first run (the cache
    # file has never been written); treat that exactly like an empty cache.
    if not os.path.exists(CACHE_PATH) or os.path.getsize(CACHE_PATH) == 0:
        return {}
    LOGGER.info('Loading cache')
    try:
        with open(CACHE_PATH, 'r') as f:
            cache = json.load(f)
        LOGGER.debug('Loaded %i items from cache', len(cache))
        return cache
    except Exception:
        # A corrupt cache is recoverable: start fresh rather than crash.
        LOGGER.exception('Error loading cache')
        return {}
# SIGTERM handler: persist IP_CACHE to disk and terminate.
# Accepts *args because signal handlers are called with (signum, frame).
def _save_cache(*args):
LOGGER.info('Saving cache')
try:
with open(CACHE_PATH, 'w') as f:
json.dump(IP_CACHE, f)
except Exception:
LOGGER.exception('Error saving cache')
finally:
# NOTE(review): exit(0) in `finally` always terminates the process here,
# even after a failed save — confirm this is the intended shutdown path.
exit(0)
# NOTE(review): cache loading is disabled, leaving IP_CACHE = None;
# update_dns's IP_CACHE.get(...) would then raise on every run. Presumably
# this should read IP_CACHE = _load_cache() — confirm before relying on it.
IP_CACHE = None # _load_cache()
def request(path, headers=None):
    """GET *path* from the console at CONSOLE_URL with bearer-token auth
    and return the parsed 'objects' payload.

    Raises AssertionError on any non-200 response.
    """
    urlp = urlparse(CONSOLE_URL)
    headers = headers.copy() if headers else {}
    headers.update({
        'Authorization': f'Bearer {CONSOLE_AUTH_TOKEN}',
    })
    c = client.HTTPConnection(urlp.hostname, urlp.port)
    try:
        c.request('GET', path, headers=headers)
        r = c.getresponse()
        assert r.status == 200, f'Invalid HTTP status {r.status}'
        return json.loads(r.read().decode())['objects']
    finally:
        # FIX: the connection was previously leaked on every call (and on
        # every raised exception); close it unconditionally.
        c.close()
# For every domain registered in the console, push *ip* to the domain's DNS
# provider via its pddns handler — skipping domains whose IP is unchanged
# since the last run (per IP_CACHE).
def update_dns(ip):
try:
domains = request('/api/domains/')
except Exception:
# Console unreachable: log and try again next interval.
LOGGER.exception('Error getting domain list')
return
for domain in domains:
# pddns handlers expect a config dict keyed by the provider's
# human-readable section name (see PROVIDERS).
config = {}
try:
provider = domain['provider']
config_name, klass = PROVIDERS[provider]
options = config[config_name] = domain['options']
domain_name = options['Name'] = domain['name']
# if 'nameservers' in domain:
#     options['Nameservers'] = domain['Nameservers']
except Exception:
# Unknown provider or malformed record: skip just this domain.
LOGGER.exception('Error initializing client: %s', pformat(domain))
continue
# NOTE(review): IP_CACHE is None at module load (loading disabled), so
# this .get() would raise unless the cache is re-enabled — confirm.
if ip == IP_CACHE.get(domain_name):
# If the ip has not changed since last run, don't update it.
LOGGER.debug(
'Skipping: %s, provider=%s, no ip change',
domain_name, provider)
continue
LOGGER.info(
'Updating: %s, provider=%s', domain_name, provider)
try:
# NOTE: if ip address is defined, it is a static record, use that
# ip rather than the detected one.
client_ip = options.get('ip address') or ip
klass(config, PDDNS_VERSION).main(client_ip, None)
# Remember the detected ip so unchanged runs are skipped next time.
IP_CACHE[domain_name] = ip
except Exception:
LOGGER.exception('Error updating ip.')
continue
# Entry point: install the SIGTERM cache-persisting hook, then poll the
# detected public IP and refresh DNS records every INTERVAL seconds forever.
def main():
signal.signal(signal.SIGTERM, _save_cache)
LOGGER.info('Starting dyndns client.')
while True:
update_dns(get_ip())
LOGGER.info('Slumbering for %i seconds...', INTERVAL)
time.sleep(INTERVAL)
if __name__ == '__main__':
main()
| StarcoderdataPython |
3523398 | """ UI Controller interaction between the UI and the core """
import numpy as np
from PyQt5.QtCore import pyqtSignal, QThread, pyqtSlot
from torpido.controller import Controller as MainController
class Controller(QThread):
"""
Middleware between the UI and the main core; all interactions reside here.
With slots and signals, and since this is a derived class of QThread, it can
operate without any locking mechanism and does not affect the main gui thread.
Attributes
----------
Controller.percentComplete : pyqtSignal(float)
signal for progress bar on the UI, progress value comes from torpido.Visual
Controller.percentMem : pyqtSignal(float)
signal for the percentage of the memory usage, value comes from torpido.Watcher
Controller.percentCpu : pyqtSignal(float)
signal for the percentage of the cpu usage, value comes from torpido.Watcher
Controller.logger : pyqtSignal(str)
signal for logs to the ui, all logs are redirected to the ui log window from torpido.Log
Controller.videoFrame : pyqtSignal(np.ndarray)
signal to send frames from the video processing, straight to gui thread for display, frames are coming
from torpido.Visual
Controller.videoClose : pyqtSignal()
signal to close the video output
controller : MainController
object of the controller class in the core
videoFile : str
path of the input video file
"""
# Class-level Qt signal declarations (PyQt requires these on the class).
percentComplete = pyqtSignal(float)
percentMem = pyqtSignal(float)
percentCpu = pyqtSignal(float)
logger = pyqtSignal(str)
videoFrame = pyqtSignal(np.ndarray)
videoClose = pyqtSignal()
def __init__(self):
super().__init__()
self.controller = MainController()
self.videoFile = self.intro = self.outro = None
# NOTE(review): the parameter is named 'extro' but stored as 'outro';
# kept as-is since callers use positional/keyword 'extro'.
def set_video(self, videoFile, intro, extro):
""" Set the video file for processing """
self.videoFile, self.intro, self.outro = videoFile, intro, extro
def run(self):
""" Start the processing on the input video file """
self.controller.start_processing(self, self.videoFile, intro=self.intro, outro=self.outro)
def set_video_display(self, value):
""" Set up the video display request """
self.controller.set_video_display(value)
def set_spec_plot(self, value):
""" Set up the plotting for SNR audio """
self.controller.set_spec_plot(value)
def set_ranking_plot(self, value):
""" Set up the plotting of the analytics """
self.controller.set_ranking_plot(value)
def set_save_logs(self, value):
""" Set up logging to use file to save logs"""
self.controller.set_save_logs(value)
@pyqtSlot()
def set_percent_complete(self, value):
""" Emits the signal with the percent for the progress bar """
self.percentComplete.emit(value)
# NOTE(review): 95 is treated as "video output finished" rather than
# 100 — confirm this threshold against the core's progress reporting.
if value == 95:
self.videoClose.emit()
@pyqtSlot()
def set_cpu_complete(self, value: float):
""" Emits the signal with the cpu usage value """
self.percentCpu.emit(value)
@pyqtSlot()
def set_mem_complete(self, value: float):
""" Emits the signal with the memory usage value """
self.percentMem.emit(value)
@pyqtSlot()
def set_message_log(self, message):
""" Emits the signal with the log to the ui """
self.logger.emit(str(message))
@pyqtSlot()
def set_video_frame(self, frame):
""" Emits the signal with the video frame to display """
self.videoFrame.emit(frame)
@pyqtSlot()
def set_video_close(self):
""" Emits the signal to end the video display """
self.videoClose.emit()
def terminate(self) -> None:
""" Clean up """
self.controller.clean()
# NOTE(review): this deletes the signal attributes from the CLASS, not
# the instance, so any Controller created afterwards would be missing
# its signals — confirm this one-shot teardown is intentional.
del (Controller.percentComplete, Controller.percentMem, Controller.percentCpu, Controller.logger)
| StarcoderdataPython |
1751131 | <filename>trainer/show.py
import matplotlib.pyplot as plt
#-------Immagini di esempio---------------#
def show_data(X_train, Y_train, datagen):
    """Display a 4x10 grid of augmented 28x28 sample images.

    Each cell shows the first image of a freshly drawn augmented batch
    from ``datagen.flow(X_train, Y_train)``.
    """
    plt.figure(figsize=(15, 6))
    for cell in range(1, 41):
        plt.subplot(4, 10, cell)
        # A new flow is started per cell so every panel is an independent draw.
        batch_images, _batch_labels = datagen.flow(X_train, Y_train).next()
        plt.imshow(batch_images[0].reshape((28, 28)), cmap=plt.cm.binary)
        plt.axis('off')
    plt.subplots_adjust(wspace=-0.1, hspace=-0.1)
    plt.show()
| StarcoderdataPython |
296791 | import forum
from forum.tests.auth import *
from forum.tests.models import * | StarcoderdataPython |
5111615 | <filename>RobustART/metrics/imageneto_evaluator.py
import json
import yaml
import numpy as np
from .base_evaluator import Evaluator, Metric
import RobustART.metrics.calibration_tools as calibration_tools
class ImageNetOEvaluator(Evaluator):
    """Evaluator for the ImageNet-O out-of-distribution (OOD) benchmark.

    Aggregates per-sample confidence scores from an in-distribution and an
    out-of-distribution result file and reports the AUPR of OOD detection.
    """
    def __init__(self):
        super(ImageNetOEvaluator, self).__init__()
        # Accumulates named results across eval() calls.
        self.metric = Metric()
    def load_res(self, res_file):
        """
        Load results from file.

        Each line of ``res_file`` is a JSON object with a 'predictions'
        field; returns those predictions as a list of numpy arrays.
        """
        pre_res = []
        with open(res_file) as f:
            for line in f:
                pre_res.append(np.array(json.loads(line)['predictions']))
        return pre_res
    @staticmethod
    def _accumulate(res_file):
        """Concatenate the 'confidence'/'correct' lists and sum 'num_correct'
        over all JSON lines of ``res_file``."""
        confidence, correct, num_correct = [], [], 0
        with open(res_file) as f:
            for line in f:
                obj = json.loads(line)
                confidence += obj['confidence']
                correct += obj['correct']
                num_correct += obj['num_correct']
        return confidence, correct, num_correct
    def eval(self, res_file_in=None, res_file_out=None):
        """Compute the OOD-detection AUPR from the two result files.

        Returns a dict ``{'AUPR': value}`` (percentage) and records it in
        ``self.metric``.
        """
        assert res_file_in is not None and res_file_out is not None
        confidence_in, _, _ = self._accumulate(res_file_in)
        confidence_out, _, _ = self._accumulate(res_file_out)
        # Negate so a *higher* score means "more likely out-of-distribution".
        in_score = -np.array(confidence_in)
        out_score = -np.array(confidence_out)
        # get_measures returns (auroc, aupr, fpr); only AUPR is reported here.
        measures = calibration_tools.get_measures(out_score, in_score)
        auprs = measures[1]
        result_dict = {'AUPR': (100 * auprs)}
        self.metric.update(result_dict)
        return result_dict
    def get_mean(self):
        """Average all stored metric values and record the result as 'Mean'."""
        result_dict = self.metric.metric
        # BUG FIX: iterate .items() -- iterating the dict directly yields bare
        # keys, so the original two-name unpacking raised ValueError.
        total = 0
        count = 0
        for _key, value in result_dict.items():
            count += 1
            total += value
        # Raises ZeroDivisionError when no metrics are stored (as before).
        mean = total / count
        self.metric.update({'Mean': mean})
        self.metric.set_cmp_key('Mean')
        return {'Mean': mean}
    def clear(self):
        """Drop all accumulated metric results."""
        self.metric.metric = {}
    @staticmethod
    def add_subparser(name, subparsers):
        """Register the command-line subparser for this evaluator."""
        # NOTE(review): the help text mentions "Top-1/5 accuracy" -- looks like
        # a copy-paste from another evaluator; confirm before changing.
        subparser = subparsers.add_parser(
            name, help='subcommand for ImageNet of Top-1/5 accuracy metric')
        subparser.add_argument('--config', dest='config', required=True,
                               help='settings of classification in yaml format')
        subparser.add_argument('--res_file', required=True, action='append',
                               help='results file of classification')
        return subparser
    @classmethod
    def from_args(cls, args):
        """Build an evaluator from parsed CLI args (reads the YAML config)."""
        with open(args.config) as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        kwargs = config['data']['evaluator']['kwargs']
        return cls(**kwargs)
| StarcoderdataPython |
11268421 | <reponame>labomics/MetaLogo<gh_stars>1-10
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_bio as dashbio
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from ..app import app
from ..config import CONFIG_PATH, SQLITE3_DB, PNG_PATH, FA_PATH
import os
import re
# Fullscreen spinners shown while the two long-running callbacks execute.
loading_spinner = html.Div(
    [
        dbc.Spinner(html.Div(id="loading-output3"), fullscreen=True,
                    fullscreen_style={"opacity": "0.8"}),
        dbc.Spinner(html.Div(id="loading-output4"), fullscreen=True,
                    fullscreen_style={"opacity": "0.8"}),
    ]
)
# Checkboxes toggling the optional AlignmentChart panels (consumed by
# get_values()/display_page() below).
checklist = dbc.FormGroup(
    [
        dbc.Label("Panels"),
        dbc.Checklist(
            options=[
                {"label": "Conservation", "value": 'conservation'},
                {"label": "Gaps", "value": 'gaps'},
                {"label": "Overview", "value": 'overview'},
                {"label": "Consensus", "value": 'consensus'},
            ],
            value=[],
            id="checklist",
            inline=True,
        ),
    ]
)
# Residue color scales supported by dash-bio's AlignmentChart.
COLORSCALES_DICT = [
    {'value': 'buried', 'label': 'Buried'},
    {'value': 'cinema', 'label': 'Cinema'},
    {'value': 'clustal2', 'label': 'Clustal2'},
    {'value': 'clustal', 'label': 'Clustal'},
    {'value': 'helix', 'label': 'Helix'},
    {'value': 'hydro', 'label': 'Hydrophobicity'},
    {'value': 'lesk', 'label': 'Lesk'},
    {'value': 'mae', 'label': 'Mae'},
    {'value': 'nucleotide', 'label': 'Nucleotide'},
    {'value': 'purine', 'label': 'Purine'},
    {'value': 'strand', 'label': 'Strand'},
    {'value': 'taylor', 'label': 'Taylor'},
    {'value': 'turn', 'label': 'Turn'},
    {'value': 'zappo', 'label': 'Zappo'},
]
# Dropdown for selecting the color scale (wired up by change_color()).
colorscale_dropdown = dbc.FormGroup(
    [
        dbc.Label("Color scale", html_for="dropdown"),
        dcc.Dropdown(
            id="color_scale_dropdown",
            options=COLORSCALES_DICT,
            value='buried',
            searchable=False,
            clearable=False,
        ),
    ],
    style={'width':'200px','marginRight':'50px'}
)
# Page layout: header with task id, the control row, the alignment viewer
# (with a hidden debug div) and the loading spinners.
layout = dbc.Container([
    html.H3([html.Span("MSA result for task "),html.A(id="uid")]),
    dbc.Col([
        dbc.Row([
            colorscale_dropdown,
            checklist
        ]),
    ]
    ),
    dbc.Col(
        [
            dbc.Row([
                dashbio.AlignmentChart(
                    id='my-default-alignment-viewer',
                    data='>a\nA',
                    height=1200,
                    width="100%",
                    showgap=False,
                    #showconservation=False,
                    #showconsensus=False,
                    tilewidth=30,
                    overview='slider'
                )]),
            dbc.Row([html.Div(id='default-alignment-viewer-output',style={'display': 'none'})]),
        ]
    ),
    loading_spinner
])
def get_values(checklist):
    """Translate checked panel names into AlignmentChart options.

    Returns ``[showgap, showconservation, showconsensus, overview]`` where
    the first three entries are booleans and the last is ``'slider'`` when
    the overview panel is enabled, otherwise ``'none'``.
    """
    # Simple membership tests replace the original four if/else chains.
    return [
        'gaps' in checklist,
        'conservation' in checklist,
        'consensus' in checklist,
        'slider' if 'overview' in checklist else 'none',
    ]
@app.callback(
    Output('my-default-alignment-viewer','colorscale'),
    Input('color_scale_dropdown','value')
)
def change_color(val):
    """Forward the selected dropdown value straight to the viewer's colorscale."""
    return val
@app.callback(
    [
        Output('my-default-alignment-viewer', 'data'),
        Output('my-default-alignment-viewer', 'height'),
        Output("loading-output3", "children"),
        Output("uid", "children"),
        Output("uid", "href"),
        Output('my-default-alignment-viewer', 'showgap'),
        Output('my-default-alignment-viewer', 'showconservation'),
        Output('my-default-alignment-viewer', 'showconsensus'),
        Output('my-default-alignment-viewer', 'overview')
    ],
    [
        Input('url', 'pathname'),
        Input('checklist', 'value')
    ],
    [
        State('my-default-alignment-viewer', 'data'),
        State('my-default-alignment-viewer', 'height'),
    ]
)
def display_page(pathname, checklist, data, height):
    """Populate the MSA viewer when the URL or the panel checklist changes.

    On navigation to ``/msa/<uid>`` the task's alignment fasta is loaded
    (all panels hidden); when only the checklist fired, the current data and
    height are kept and just the panel options are updated.
    """
    arrs = pathname.split('/msa/')
    if len(arrs) <= 1:
        # Not an /msa/<uid> URL: blank every output.
        return '', '', '', '', '', '', '', '', ''
    uid = arrs[-1]
    # Id of the component that fired this callback ('' on initial load).
    ctx = dash.callback_context
    trigger_id = ''
    if ctx.triggered:
        trigger_id = ctx.triggered[0]['prop_id'].split('.')[0]
    if trigger_id != 'checklist':
        # URL navigation: (re)load the alignment for this task.
        msa_file = f'{FA_PATH}/server.{uid}.msa.rawid.fa'
        if not os.path.exists(msa_file):
            return_arrs = ["", 100, '', uid, '/results/' + uid]
        else:
            with open(msa_file, encoding='utf-8') as data_file:
                data = data_file.read()
            # Fasta has two lines per record -> one viewer row per sequence,
            # 20px each.
            line_no = data.count('\n') / 2
            return_arrs = [data, line_no * 20, '', uid, '/results/' + uid]
        # Panels default to hidden on a fresh load.
        return_arrs += [False, False, False, 'none']
    else:
        # Checklist toggled: keep current data/height, update panel options.
        # (The original also counted the enabled panels into an unused local;
        # that dead code is removed.)
        return_arrs = [data, height, '', uid, '/results/' + uid] + get_values(checklist)
    return return_arrs
else:
return '','','','','','','','',''
@app.callback(
    Output('default-alignment-viewer-output', 'children'),
    Input('my-default-alignment-viewer', 'eventDatum')
)
def update_output(value):
    """Mirror the latest viewer event datum into the hidden debug div."""
    return 'No data.' if value is None else str(value)
| StarcoderdataPython |
208247 | <filename>release/stubs/System/ComponentModel/Design/Serialization.py
# encoding: utf-8
# module System.ComponentModel.Design.Serialization calls itself Serialization
# from System, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# no functions
# classes
class ComponentSerializationService(object):
    """ Provides the base class for serializing a set of components or serializable objects into a serialization store. """
    # Auto-generated IronPython stub mirroring the CLR type
    # System.ComponentModel.Design.Serialization.ComponentSerializationService;
    # every body below is a placeholder -- the .NET runtime supplies behavior.
    def CreateStore(self):
        """
        CreateStore(self: ComponentSerializationService) -> SerializationStore
        Creates a new System.ComponentModel.Design.Serialization.SerializationStore.
        Returns: A new System.ComponentModel.Design.Serialization.SerializationStore.
        """
        pass
    def Deserialize(self, store, container=None):
        """
        Deserialize(self: ComponentSerializationService, store: SerializationStore, container: IContainer) -> ICollection
        Deserializes the given store and populates the given System.ComponentModel.IContainer with
        deserialized System.ComponentModel.IComponent objects.
        store: The System.ComponentModel.Design.Serialization.SerializationStore to deserialize.
        container: The System.ComponentModel.IContainer to which System.ComponentModel.IComponent objects will be
        added.
        Returns: A collection of objects created according to the stored state.
        Deserialize(self: ComponentSerializationService, store: SerializationStore) -> ICollection
        Deserializes the given store to produce a collection of objects.
        store: The System.ComponentModel.Design.Serialization.SerializationStore to deserialize.
        Returns: A collection of objects created according to the stored state.
        """
        pass
    def DeserializeTo(
        self, store, container, validateRecycledTypes=None, applyDefaults=None
    ):
        """
        DeserializeTo(self: ComponentSerializationService, store: SerializationStore, container: IContainer, validateRecycledTypes: bool)
        Deserializes the given System.ComponentModel.Design.Serialization.SerializationStore to the
        given container, optionally validating recycled types.
        store: The System.ComponentModel.Design.Serialization.SerializationStore to deserialize.
        container: The container to which System.ComponentModel.IComponent objects will be added.
        validateRecycledTypes: true to guarantee that the deserialization will only work if applied to an object of the same
        type.
        DeserializeTo(self: ComponentSerializationService, store: SerializationStore, container: IContainer)
        Deserializes the given System.ComponentModel.Design.Serialization.SerializationStore to the
        given container.
        store: The System.ComponentModel.Design.Serialization.SerializationStore to deserialize.
        container: The container to which System.ComponentModel.IComponent objects will be added.
        DeserializeTo(self: ComponentSerializationService, store: SerializationStore, container: IContainer, validateRecycledTypes: bool, applyDefaults: bool)
        Deserializes the given System.ComponentModel.Design.Serialization.SerializationStore to the
        given container, optionally applying default property values.
        store: The System.ComponentModel.Design.Serialization.SerializationStore to deserialize.
        container: The container to which System.ComponentModel.IComponent objects will be added.
        validateRecycledTypes: true to guarantee that the deserialization will only work if applied to an object of the same
        type.
        applyDefaults: true to indicate that the default property values should be applied.
        """
        pass
    def LoadStore(self, stream):
        """
        LoadStore(self: ComponentSerializationService, stream: Stream) -> SerializationStore
        Loads a System.ComponentModel.Design.Serialization.SerializationStore from a stream.
        stream: The System.IO.Stream from which the store will be loaded.
        Returns: A new System.ComponentModel.Design.Serialization.SerializationStore instance.
        """
        pass
    def Serialize(self, store, value):
        """
        Serialize(self: ComponentSerializationService, store: SerializationStore, value: object)
        Serializes the given object to the given
        System.ComponentModel.Design.Serialization.SerializationStore.
        store: The System.ComponentModel.Design.Serialization.SerializationStore to which the state of value
        will be written.
        value: The object to serialize.
        """
        pass
    def SerializeAbsolute(self, store, value):
        """
        SerializeAbsolute(self: ComponentSerializationService, store: SerializationStore, value: object)
        Serializes the given object, accounting for default property values.
        store: The System.ComponentModel.Design.Serialization.SerializationStore to which the state of value
        will be serialized.
        value: The object to serialize.
        """
        pass
    def SerializeMember(self, store, owningObject, member):
        """
        SerializeMember(self: ComponentSerializationService, store: SerializationStore, owningObject: object, member: MemberDescriptor)
        Serializes the given member on the given object.
        store: The System.ComponentModel.Design.Serialization.SerializationStore to which the state of member
        will be serialized.
        owningObject: The object to which member is attached.
        member: A System.ComponentModel.MemberDescriptor specifying the member to serialize.
        """
        pass
    def SerializeMemberAbsolute(self, store, owningObject, member):
        """
        SerializeMemberAbsolute(self: ComponentSerializationService, store: SerializationStore, owningObject: object, member: MemberDescriptor)
        Serializes the given member on the given object, accounting for the default property value.
        store: The System.ComponentModel.Design.Serialization.SerializationStore to which the state of member
        will be serialized.
        owningObject: The object to which member is attached.
        member: The member to serialize.
        """
        pass
class ContextStack(object):
    """
    Provides a stack object that can be used by a serializer to make information available to nested serializers.
    ContextStack()
    """
    # Auto-generated IronPython stub for the CLR type; bodies are placeholders.
    def Append(self, context):
        """
        Append(self: ContextStack, context: object)
        Appends an object to the end of the stack, rather than pushing it onto the top of the stack.
        context: A context object to append to the stack.
        """
        pass
    def Pop(self):
        """
        Pop(self: ContextStack) -> object
        Removes the current object off of the stack, returning its value.
        Returns: The object removed from the stack; null if no objects are on the stack.
        """
        pass
    def Push(self, context):
        """
        Push(self: ContextStack, context: object)
        Pushes, or places, the specified object onto the stack.
        context: The context object to push onto the stack.
        """
        pass
    def __getitem__(self, *args): # cannot find CLR method
        """ x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y] """
        pass
    Current = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets the current object on the stack.
    Get: Current(self: ContextStack) -> object
    """
class DefaultSerializationProviderAttribute(Attribute, _Attribute):
    """
    The System.ComponentModel.Design.Serialization.DefaultSerializationProviderAttribute attribute is placed on a serializer to indicate the class to use as a default provider of that type of serializer.
    DefaultSerializationProviderAttribute(providerType: Type)
    DefaultSerializationProviderAttribute(providerTypeName: str)
    """
    # Auto-generated IronPython stub for the CLR attribute; bodies are placeholders.
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod # known case of __new__
    def __new__(self, *__args):
        """
        __new__(cls: type, providerType: Type)
        __new__(cls: type, providerTypeName: str)
        """
        pass
    ProviderTypeName = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets the type name of the serialization provider.
    Get: ProviderTypeName(self: DefaultSerializationProviderAttribute) -> str
    """
class DesignerLoader(object):
    """ Provides a basic designer loader interface that can be used to implement a custom designer loader. """
    # Auto-generated IronPython stub for the CLR type; bodies are placeholders.
    def BeginLoad(self, host):
        """
        BeginLoad(self: DesignerLoader, host: IDesignerLoaderHost)
        Begins loading a designer.
        host: The loader host through which this loader loads components.
        """
        pass
    def Dispose(self):
        """
        Dispose(self: DesignerLoader)
        Releases all resources used by the System.ComponentModel.Design.Serialization.DesignerLoader.
        """
        pass
    def Flush(self):
        """
        Flush(self: DesignerLoader)
        Writes cached changes to the location that the designer was loaded from.
        """
        pass
    Loading = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets a value indicating whether the loader is currently loading a document.
    Get: Loading(self: DesignerLoader) -> bool
    """
class DesignerSerializerAttribute(Attribute, _Attribute):
    """
    Indicates a serializer for the serialization manager to use to serialize the values of the type this attribute is applied to. This class cannot be inherited.
    DesignerSerializerAttribute(serializerType: Type, baseSerializerType: Type)
    DesignerSerializerAttribute(serializerTypeName: str, baseSerializerType: Type)
    DesignerSerializerAttribute(serializerTypeName: str, baseSerializerTypeName: str)
    """
    # Auto-generated IronPython stub for the CLR attribute; bodies are placeholders.
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod # known case of __new__
    def __new__(self, *__args):
        """
        __new__(cls: type, serializerType: Type, baseSerializerType: Type)
        __new__(cls: type, serializerTypeName: str, baseSerializerType: Type)
        __new__(cls: type, serializerTypeName: str, baseSerializerTypeName: str)
        """
        pass
    SerializerBaseTypeName = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets the fully qualified type name of the serializer base type.
    Get: SerializerBaseTypeName(self: DesignerSerializerAttribute) -> str
    """
    SerializerTypeName = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets the fully qualified type name of the serializer.
    Get: SerializerTypeName(self: DesignerSerializerAttribute) -> str
    """
    TypeId = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Indicates a unique ID for this attribute type.
    Get: TypeId(self: DesignerSerializerAttribute) -> object
    """
class IDesignerLoaderHost(IDesignerHost, IServiceContainer, IServiceProvider):
    """ Provides an interface that can extend a designer host to support loading from a serialized state. """
    # Auto-generated IronPython stub for the CLR interface; bodies are placeholders.
    def EndLoad(self, baseClassName, successful, errorCollection):
        """
        EndLoad(self: IDesignerLoaderHost, baseClassName: str, successful: bool, errorCollection: ICollection)
        Ends the designer loading operation.
        baseClassName: The fully qualified name of the base class of the document that this designer is designing.
        successful: true if the designer is successfully loaded; otherwise, false.
        errorCollection: A collection containing the errors encountered during load, if any. If no errors were
        encountered, pass either an empty collection or null.
        """
        pass
    def Reload(self):
        """
        Reload(self: IDesignerLoaderHost)
        Reloads the design document.
        """
        pass
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
class IDesignerLoaderHost2(
    IDesignerLoaderHost, IDesignerHost, IServiceContainer, IServiceProvider
):
    """ Provides an interface that extends System.ComponentModel.Design.Serialization.IDesignerLoaderHost to specify whether errors are tolerated while loading a design document. """
    # Auto-generated IronPython stub for the CLR interface; bodies are placeholders.
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    CanReloadWithErrors = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets or sets a value indicating whether it is possible to reload with errors.
    Get: CanReloadWithErrors(self: IDesignerLoaderHost2) -> bool
    Set: CanReloadWithErrors(self: IDesignerLoaderHost2) = value
    """
    IgnoreErrorsDuringReload = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets or sets a value indicating whether errors should be ignored when System.ComponentModel.Design.Serialization.IDesignerLoaderHost.Reload is called.
    Get: IgnoreErrorsDuringReload(self: IDesignerLoaderHost2) -> bool
    Set: IgnoreErrorsDuringReload(self: IDesignerLoaderHost2) = value
    """
class IDesignerLoaderService:
    """ Provides an interface that can extend a designer loader to support asynchronous loading of external components. """
    # Auto-generated IronPython stub for the CLR interface; bodies are placeholders.
    def AddLoadDependency(self):
        """
        AddLoadDependency(self: IDesignerLoaderService)
        Registers an external component as part of the load process managed by this interface.
        """
        pass
    def DependentLoadComplete(self, successful, errorCollection):
        """
        DependentLoadComplete(self: IDesignerLoaderService, successful: bool, errorCollection: ICollection)
        Signals that a dependent load has finished.
        successful: true if the load of the designer is successful; false if errors prevented the load from
        finishing.
        errorCollection: A collection of errors that occurred during the load, if any. If no errors occurred, pass either
        an empty collection or null.
        """
        pass
    def Reload(self):
        """
        Reload(self: IDesignerLoaderService) -> bool
        Reloads the design document.
        Returns: true if the reload request is accepted, or false if the loader does not allow the reload.
        """
        pass
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
class IDesignerSerializationManager(IServiceProvider):
    """ Provides an interface that can manage design-time serialization. """
    # Auto-generated IronPython stub for the CLR interface; bodies are placeholders.
    def AddSerializationProvider(self, provider):
        """
        AddSerializationProvider(self: IDesignerSerializationManager, provider: IDesignerSerializationProvider)
        Adds the specified serialization provider to the serialization manager.
        provider: The serialization provider to add.
        """
        pass
    def CreateInstance(self, type, arguments, name, addToContainer):
        """
        CreateInstance(self: IDesignerSerializationManager, type: Type, arguments: ICollection, name: str, addToContainer: bool) -> object
        Creates an instance of the specified type and adds it to a collection of named instances.
        type: The data type to create.
        arguments: The arguments to pass to the constructor for this type.
        name: The name of the object. This name can be used to access the object later through
        System.ComponentModel.Design.Serialization.IDesignerSerializationManager.GetInstance(System.Strin
        g). If null is passed, the object is still created but cannot be accessed by name.
        addToContainer: If true, this object is added to the design container. The object must implement
        System.ComponentModel.IComponent for this to have any effect.
        Returns: The newly created object instance.
        """
        pass
    def GetInstance(self, name):
        """
        GetInstance(self: IDesignerSerializationManager, name: str) -> object
        Gets an instance of a created object of the specified name, or null if that object does not
        exist.
        name: The name of the object to retrieve.
        Returns: An instance of the object with the given name, or null if no object by that name can be found.
        """
        pass
    def GetName(self, value):
        """
        GetName(self: IDesignerSerializationManager, value: object) -> str
        Gets the name of the specified object, or null if the object has no name.
        value: The object to retrieve the name for.
        Returns: The name of the object, or null if the object is unnamed.
        """
        pass
    def GetSerializer(self, objectType, serializerType):
        """
        GetSerializer(self: IDesignerSerializationManager, objectType: Type, serializerType: Type) -> object
        Gets a serializer of the requested type for the specified object type.
        objectType: The type of the object to get the serializer for.
        serializerType: The type of the serializer to retrieve.
        Returns: An instance of the requested serializer, or null if no appropriate serializer can be located.
        """
        pass
    def GetType(self, typeName):
        """
        GetType(self: IDesignerSerializationManager, typeName: str) -> Type
        Gets a type of the specified name.
        typeName: The fully qualified name of the type to load.
        Returns: An instance of the type, or null if the type cannot be loaded.
        """
        pass
    def RemoveSerializationProvider(self, provider):
        """
        RemoveSerializationProvider(self: IDesignerSerializationManager, provider: IDesignerSerializationProvider)
        Removes a custom serialization provider from the serialization manager.
        provider: The provider to remove. This object must have been added using
        System.ComponentModel.Design.Serialization.IDesignerSerializationManager.AddSerializationProvider
        (System.ComponentModel.Design.Serialization.IDesignerSerializationProvider).
        """
        pass
    def ReportError(self, errorInformation):
        """
        ReportError(self: IDesignerSerializationManager, errorInformation: object)
        Reports an error in serialization.
        errorInformation: The error to report. This information object can be of any object type. If it is an exception,
        the message of the exception is extracted and reported to the user. If it is any other type,
        System.Object.ToString is called to display the information to the user.
        """
        pass
    def SetName(self, instance, name):
        """
        SetName(self: IDesignerSerializationManager, instance: object, name: str)
        Sets the name of the specified existing object.
        instance: The object instance to name.
        name: The name to give the instance.
        """
        pass
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    Context = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets a stack-based, user-defined storage area that is useful for communication between serializers.
    Get: Context(self: IDesignerSerializationManager) -> ContextStack
    """
    Properties = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Indicates custom properties that can be serializable with available serializers.
    Get: Properties(self: IDesignerSerializationManager) -> PropertyDescriptorCollection
    """
    # Event placeholders (ResolveNameEventHandler / EventHandler on the CLR side).
    ResolveName = None
    SerializationComplete = None
class IDesignerSerializationProvider:
    """ Provides an interface that enables access to a serializer. """
    # Auto-generated IronPython stub for the CLR interface; bodies are placeholders.
    def GetSerializer(self, manager, currentSerializer, objectType, serializerType):
        """
        GetSerializer(self: IDesignerSerializationProvider, manager: IDesignerSerializationManager, currentSerializer: object, objectType: Type, serializerType: Type) -> object
        Gets a serializer using the specified attributes.
        manager: The serialization manager requesting the serializer.
        currentSerializer: An instance of the current serializer of the specified type. This can be null if no serializer
        of the specified type exists.
        objectType: The data type of the object to serialize.
        serializerType: The data type of the serializer to create.
        Returns: An instance of a serializer of the type requested, or null if the request cannot be satisfied.
        """
        pass
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
class IDesignerSerializationService:
    """ Provides an interface that can invoke serialization and deserialization. """
    # Auto-generated IronPython stub for the CLR interface; bodies are placeholders.
    def Deserialize(self, serializationData):
        """
        Deserialize(self: IDesignerSerializationService, serializationData: object) -> ICollection
        Deserializes the specified serialization data object and returns a collection of objects
        represented by that data.
        serializationData: An object consisting of serialized data.
        Returns: An System.Collections.ICollection of objects rebuilt from the specified serialization data
        object.
        """
        pass
    def Serialize(self, objects):
        """
        Serialize(self: IDesignerSerializationService, objects: ICollection) -> object
        Serializes the specified collection of objects and stores them in a serialization data object.
        objects: A collection of objects to serialize.
        Returns: An object that contains the serialized state of the specified collection of objects.
        """
        pass
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
class INameCreationService:
    """ Provides a service that can generate unique names for objects. """
    # Auto-generated IronPython stub for the CLR interface; bodies are placeholders.
    def CreateName(self, container, dataType):
        """
        CreateName(self: INameCreationService, container: IContainer, dataType: Type) -> str
        Creates a new name that is unique to all components in the specified container.
        container: The container where the new object is added.
        dataType: The data type of the object that receives the name.
        Returns: A unique name for the data type.
        """
        pass
    def IsValidName(self, name):
        """
        IsValidName(self: INameCreationService, name: str) -> bool
        Gets a value indicating whether the specified name is valid.
        name: The name to validate.
        Returns: true if the name is valid; otherwise, false.
        """
        pass
    def ValidateName(self, name):
        """
        ValidateName(self: INameCreationService, name: str)
        Gets a value indicating whether the specified name is valid.
        name: The name to validate.
        """
        pass
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
class InstanceDescriptor(object):
    """
    Provides the information necessary to create an instance of an object. This class cannot be inherited.
    InstanceDescriptor(member: MemberInfo, arguments: ICollection, isComplete: bool)
    InstanceDescriptor(member: MemberInfo, arguments: ICollection)
    """
    # Auto-generated IronPython stub for the CLR type; bodies are placeholders.
    def Invoke(self):
        """
        Invoke(self: InstanceDescriptor) -> object
        Invokes this instance descriptor and returns the object the descriptor describes.
        Returns: The object this instance descriptor describes.
        """
        pass
    @staticmethod # known case of __new__
    def __new__(self, member, arguments, isComplete=None):
        """
        __new__(cls: type, member: MemberInfo, arguments: ICollection)
        __new__(cls: type, member: MemberInfo, arguments: ICollection, isComplete: bool)
        """
        pass
    Arguments = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets the collection of arguments that can be used to reconstruct an instance of the object that this instance descriptor represents.
    Get: Arguments(self: InstanceDescriptor) -> ICollection
    """
    IsComplete = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets a value indicating whether the contents of this System.ComponentModel.Design.Serialization.InstanceDescriptor completely identify the instance.
    Get: IsComplete(self: InstanceDescriptor) -> bool
    """
    MemberInfo = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets the member information that describes the instance this descriptor is associated with.
    Get: MemberInfo(self: InstanceDescriptor) -> MemberInfo
    """
class MemberRelationship(object):
    """
    Represents a single relationship between an object and a member.
    MemberRelationship(owner: object, member: MemberDescriptor)
    """
    # Auto-generated IronPython stub for the CLR value type; bodies are placeholders.
    def Equals(self, obj):
        """
        Equals(self: MemberRelationship, obj: object) -> bool
        Determines whether two System.ComponentModel.Design.Serialization.MemberRelationship instances
        are equal.
        obj: The System.ComponentModel.Design.Serialization.MemberRelationship to compare with the current
        System.ComponentModel.Design.Serialization.MemberRelationship.
        Returns: true if the specified System.ComponentModel.Design.Serialization.MemberRelationship is equal to
        the current System.ComponentModel.Design.Serialization.MemberRelationship; otherwise, false.
        """
        pass
    def GetHashCode(self):
        """
        GetHashCode(self: MemberRelationship) -> int
        Returns the hash code for this instance.
        Returns: A hash code for the current System.ComponentModel.Design.Serialization.MemberRelationship.
        """
        pass
    def __eq__(self, *args): # cannot find CLR method
        """ x.__eq__(y) <==> x==y """
        pass
    @staticmethod # known case of __new__
    def __new__(self, owner, member):
        """
        __new__[MemberRelationship]() -> MemberRelationship
        __new__(cls: type, owner: object, member: MemberDescriptor)
        """
        pass
    def __ne__(self, *args): # cannot find CLR method
        pass
    IsEmpty = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets a value indicating whether this relationship is equal to the System.ComponentModel.Design.Serialization.MemberRelationship.Empty relationship.
    Get: IsEmpty(self: MemberRelationship) -> bool
    """
    Member = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets the related member.
    Get: Member(self: MemberRelationship) -> MemberDescriptor
    """
    Owner = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets the owning object.
    Get: Owner(self: MemberRelationship) -> object
    """
    # Placeholder for the CLR's MemberRelationship.Empty sentinel value.
    Empty = None
class MemberRelationshipService(object):
    """ Provides the base class for relating one member to another. """
    # Auto-generated IronPython stub; indexer access maps to __getitem__/__setitem__.
    def GetRelationship(self, *args): # cannot find CLR method
        """
        GetRelationship(self: MemberRelationshipService, source: MemberRelationship) -> MemberRelationship
        Gets a relationship to the given source relationship.
        source: The source relationship.
        Returns: A relationship to source, or System.ComponentModel.Design.Serialization.MemberRelationship.Empty
        if no relationship exists.
        """
        pass
    def SetRelationship(self, *args): # cannot find CLR method
        """
        SetRelationship(self: MemberRelationshipService, source: MemberRelationship, relationship: MemberRelationship)
        Creates a relationship between the source object and target relationship.
        source: The source relationship.
        relationship: The relationship to set into the source.
        """
        pass
    def SupportsRelationship(self, source, relationship):
        """
        SupportsRelationship(self: MemberRelationshipService, source: MemberRelationship, relationship: MemberRelationship) -> bool
        Gets a value indicating whether the given relationship is supported.
        source: The source relationship.
        relationship: The relationship to set into the source.
        Returns: true if a relationship between the given two objects is supported; otherwise, false.
        """
        pass
    def __getitem__(self, *args): # cannot find CLR method
        """ x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y] """
        pass
    def __setitem__(self, *args): # cannot find CLR method
        """ x.__setitem__(i, y) <==> x[i]=x.__setitem__(i, y) <==> x[i]= """
        pass
class ResolveNameEventArgs(EventArgs):
    """
    Provides data for the System.ComponentModel.Design.Serialization.IDesignerSerializationManager.ResolveName event.
    ResolveNameEventArgs(name: str)
    """
    # Auto-generated IronPython stub; properties are placeholder descriptors.
    @staticmethod # known case of __new__
    def __new__(self, name):
        """ __new__(cls: type, name: str) """
        pass
    Name = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets the name of the object to resolve.
    Get: Name(self: ResolveNameEventArgs) -> str
    """
    Value = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets or sets the object that matches the name.
    Get: Value(self: ResolveNameEventArgs) -> object
    Set: Value(self: ResolveNameEventArgs) = value
    """
class ResolveNameEventHandler(MulticastDelegate, ICloneable, ISerializable):
    """
    Represents the method that handles the System.ComponentModel.Design.Serialization.IDesignerSerializationManager.ResolveName event of a serialization manager.
    ResolveNameEventHandler(object: object, method: IntPtr)
    """
    # Auto-generated IronPython stub for the CLR delegate type; bodies are placeholders.
    def BeginInvoke(self, sender, e, callback, object):
        """ BeginInvoke(self: ResolveNameEventHandler, sender: object, e: ResolveNameEventArgs, callback: AsyncCallback, object: object) -> IAsyncResult """
        pass
    def CombineImpl(self, *args): # cannot find CLR method
        """
        CombineImpl(self: MulticastDelegate, follow: Delegate) -> Delegate
        Combines this System.Delegate with the specified System.Delegate to form a new delegate.
        follow: The delegate to combine with this delegate.
        Returns: A delegate that is the new root of the System.MulticastDelegate invocation list.
        """
        pass
    def DynamicInvokeImpl(self, *args): # cannot find CLR method
        """
        DynamicInvokeImpl(self: Delegate, args: Array[object]) -> object
        Dynamically invokes (late-bound) the method represented by the current delegate.
        args: An array of objects that are the arguments to pass to the method represented by the current
        delegate.-or- null, if the method represented by the current delegate does not require
        arguments.
        Returns: The object returned by the method represented by the delegate.
        """
        pass
    def EndInvoke(self, result):
        """ EndInvoke(self: ResolveNameEventHandler, result: IAsyncResult) """
        pass
    def GetMethodImpl(self, *args): # cannot find CLR method
        """
        GetMethodImpl(self: MulticastDelegate) -> MethodInfo
        Returns a static method represented by the current System.MulticastDelegate.
        Returns: A static method represented by the current System.MulticastDelegate.
        """
        pass
    def Invoke(self, sender, e):
        """ Invoke(self: ResolveNameEventHandler, sender: object, e: ResolveNameEventArgs) """
        pass
    def RemoveImpl(self, *args): # cannot find CLR method
        """
        RemoveImpl(self: MulticastDelegate, value: Delegate) -> Delegate
        Removes an element from the invocation list of this System.MulticastDelegate that is equal to
        the specified delegate.
        value: The delegate to search for in the invocation list.
        Returns: If value is found in the invocation list for this instance, then a new System.Delegate without
        value in its invocation list; otherwise, this instance with its original invocation list.
        """
        pass
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod # known case of __new__
    def __new__(self, object, method):
        """ __new__(cls: type, object: object, method: IntPtr) """
        pass
    def __reduce_ex__(self, *args): # cannot find CLR method
        # Stub for pickling support; no CLR mapping was found by the generator.
        pass
class RootDesignerSerializerAttribute(Attribute, _Attribute):
    """
    Indicates the base serializer to use for a root designer object. This class cannot be inherited.
    RootDesignerSerializerAttribute(serializerTypeName: str, baseSerializerTypeName: str, reloadable: bool)
    RootDesignerSerializerAttribute(serializerType: Type, baseSerializerType: Type, reloadable: bool)
    RootDesignerSerializerAttribute(serializerTypeName: str, baseSerializerType: Type, reloadable: bool)
    """
    # Auto-generated IronPython stub; overload resolution happens on the CLR side.
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod # known case of __new__
    def __new__(self, *__args):
        """
        __new__(cls: type, serializerType: Type, baseSerializerType: Type, reloadable: bool)
        __new__(cls: type, serializerTypeName: str, baseSerializerType: Type, reloadable: bool)
        __new__(cls: type, serializerTypeName: str, baseSerializerTypeName: str, reloadable: bool)
        """
        pass
    Reloadable = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets a value indicating whether the root serializer supports reloading of the design document without first disposing the designer host.
    Get: Reloadable(self: RootDesignerSerializerAttribute) -> bool
    """
    SerializerBaseTypeName = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets the fully qualified type name of the base type of the serializer.
    Get: SerializerBaseTypeName(self: RootDesignerSerializerAttribute) -> str
    """
    SerializerTypeName = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets the fully qualified type name of the serializer.
    Get: SerializerTypeName(self: RootDesignerSerializerAttribute) -> str
    """
    TypeId = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets a unique ID for this attribute type.
    Get: TypeId(self: RootDesignerSerializerAttribute) -> object
    """
class SerializationStore(object, IDisposable):
    """ Provides the base class for storing serialization data for the System.ComponentModel.Design.Serialization.ComponentSerializationService. """
    # Auto-generated IronPython stub; IDisposable support surfaces as __enter__/__exit__.
    def Close(self):
        """
        Close(self: SerializationStore)
        Closes the serialization store.
        """
        pass
    def Dispose(self, *args): # cannot find CLR method
        """
        Dispose(self: SerializationStore, disposing: bool)
        Releases the unmanaged resources used by the
        System.ComponentModel.Design.Serialization.SerializationStore and optionally releases the
        managed resources.
        disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
        """
        pass
    def Save(self, stream):
        """
        Save(self: SerializationStore, stream: Stream)
        Saves the store to the given stream.
        stream: The stream to which the store will be serialized.
        """
        pass
    def __enter__(self, *args): # cannot find CLR method
        """
        __enter__(self: IDisposable) -> object
        Provides the implementation of __enter__ for objects which implement IDisposable.
        """
        pass
    def __exit__(self, *args): # cannot find CLR method
        """
        __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object)
        Provides the implementation of __exit__ for objects which implement IDisposable.
        """
        pass
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __repr__(self, *args): # cannot find CLR method
        """ __repr__(self: object) -> str """
        pass
    Errors = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Gets a collection of errors that occurred during serialization or deserialization.
    Get: Errors(self: SerializationStore) -> ICollection
    """
| StarcoderdataPython |
3324671 | <reponame>jameshensman/deepGPy
# Copyright <NAME> and <NAME> 2014
# Licensed under the GNU GPL version 3.0
import numpy as np
from scipy import weave
try:
from scipy.linalg.lapack import dpotri
except:
from scipy import linalg
dpotri = linalg.lapack.clapack.dpotri
import GPy
def safe_root(N):
    """Return the exact integer square root of N.

    Raises
    ------
    ValueError
        If N is not a perfect square.
    """
    # Fixed: the original used the Python-2-only `raise ValueError, "..."` form,
    # which is a SyntaxError on Python 3 and made the whole module unimportable.
    root = int(np.sqrt(N))
    # np.sqrt can be off by one ULP for large perfect squares; verify exactly.
    for candidate in (root - 1, root, root + 1):
        if candidate >= 0 and candidate * candidate == N:
            return candidate
    raise ValueError("N is not square!")
def flat_to_triang(flat):
    """Take an N x D matrix and return an M x M x D array where N = M(M+1)/2.

    The lower-triangular portion of the d'th slice of the result is filled by
    the d'th column of `flat`, walking the triangle in row-major order.
    """
    N, D = flat.shape
    # Recover M from the triangular number N = M(M+1)/2 and validate it
    # (this replaces the old safe_root() check on 8N+1).
    M = int(round((np.sqrt(8 * N + 1) - 1) / 2))
    if M * (M + 1) // 2 != N:
        raise ValueError("N is not a triangular number!")
    # Vectorised replacement for the original scipy.weave kernel (weave is
    # Python-2-only and no longer available). np.tril_indices enumerates the
    # lower triangle in exactly the row-major order the C loop used.
    ret = np.zeros((M, M, D))
    rows, cols = np.tril_indices(M)
    ret[rows, cols, :] = flat
    return ret
def triang_to_flat(L):
    """Inverse of flat_to_triang: pack the lower triangle of each of the D
    slices of an M x M x D array into an N x D matrix, N = M(M+1)/2.
    """
    M, _, D = L.shape
    # Vectorised replacement for the original scipy.weave kernel (weave is
    # Python-2-only). np.tril_indices yields the same row-major triangle order.
    rows, cols = np.tril_indices(M)
    # np.empty((N, D)) keeps the original's float64 output dtype.
    flat = np.empty((M * (M + 1) // 2, D))
    flat[:] = L[rows, cols, :]
    return flat
def multiple_dpotri_old(Ls):
    # Legacy weave-based variant: scipy.weave is Python-2-only and has been
    # removed from SciPy, so this path is dead on modern installs (kept for
    # reference; see multiple_dpotri for the maintained version).
    M, _, D = Ls.shape
    Kis = np.rollaxis(Ls, -1).copy()
    # dpotri fills only the lower triangle in place ...
    [dpotri(Kis[i,:,:], overwrite_c=1, lower=1) for i in range(D)]
    # ... and this C kernel mirrors it into the upper triangle per slice.
    code = """
    for(int d=0; d<D; d++)
    {
        for(int m=0; m<M; m++)
        {
            for(int mm=0; mm<m; mm++)
            {
                Kis[d*M*M + mm*M + m ] = Kis[d*M*M + m*M + mm];
            }
        }
    }
    """
    weave.inline(code, ['Kis', 'D', 'M'])
    # Move the stacked axis back to the end: (D, M, M) -> (M, M, D).
    Kis = np.rollaxis(Kis, 0, 3) #wtf rollaxis?
    return Kis
def multiple_dpotri(Ls):
    """Invert each M x M slice of the M x M x D stack `Ls` via LAPACK dpotri
    (Ls[:, :, i] is assumed to be a lower Cholesky factor)."""
    inverses = []
    for i in range(Ls.shape[-1]):
        slice_inv, _ = GPy.util.linalg.dpotri(np.asfortranarray(Ls[:, :, i]), lower=1)
        inverses.append(slice_inv)
    return np.dstack(inverses)
def indexes_to_fix_for_low_rank(rank, size):
    """
    work out which indexes of the flattened array should be fixed if we want the cholesky to represent a low rank matrix
    """
    #first we'll work out what to keep, and then do the set difference.
    #here are the indexes of the first column, which are the triangular numbers
    n = np.arange(size)
    # Floor division keeps the integer dtype: with Python 3's true division the
    # original produced float "indices", breaking downstream integer indexing.
    triangulars = (n**2 + n) // 2
    keep = []
    for i in range(rank):
        keep.append(triangulars[i:] + i)
    #add the diagonal
    keep.append(triangulars[1:] - 1)
    keep.append((size**2 + size) // 2 - 1)# the very last element
    keep = np.hstack(keep)
    return np.setdiff1d(np.arange((size**2 + size) // 2), keep)
class cholchecker(GPy.core.Model):
    """Tiny GPy model for gradient-checking the stacked-Cholesky utilities:
    optimises flattened Cholesky factors L against sum(log|diag(L)|)."""
    def __init__(self, L, name='cholchecker'):
        # L: flat (N x D) parameterisation of D lower-triangular factors.
        super(cholchecker, self).__init__(name)
        self.L = GPy.core.Param('L',L)
        self.add_parameter(self.L)
    def parameters_changed(self):
        # Unflatten, invert each slice, then set the analytic gradient
        # d/dL sum_i log|diag(LL_i)| = 2 * K_i^{-1} LL_i per slice.
        LL = flat_to_triang(self.L)
        Ki = multiple_dpotri(LL)
        self.L.gradient = 2*np.einsum('ijk,jlk->ilk', Ki, LL)
        self._loglik = np.sum([np.sum(np.log(np.abs(np.diag(LL[:,:,i])))) for i in range(self.L.shape[-1])])
| StarcoderdataPython |
class RobotPlayer(object):
    """Toy player that repeatedly prints and yields control to its controller.

    The loop runs until `rc.yield_execution()` raises (cooperative scheduling).
    """
    def __init__(self, rc):
        # rc: controller object exposing yield_execution().
        self.rc = rc

    def run(self):
        """Print 'DO RE ME' and yield control, forever."""
        while True:
            # Fixed: `print 'DO RE ME'` is Python-2-only statement syntax
            # (SyntaxError on Python 3); the call form prints identically on both.
            print('DO RE ME')
            self.rc.yield_execution()
| StarcoderdataPython |
9662487 | import torch
from pepper_variant.modules.python.models.simple_model import TransducerGRU
class ModelHandler:
    """Static helpers for saving/creating/loading TransducerGRU checkpoints (CPU-side by default)."""
    @staticmethod
    def save_checkpoint(state, filename):
        # Serialise an arbitrary checkpoint dict to disk.
        torch.save(state, filename)
    @staticmethod
    def get_new_gru_model(image_features, gru_layers, hidden_size, num_classes, num_classes_type):
        # get a new (untrained) bidirectional TransducerGRU model
        transducer_model = TransducerGRU(image_features, gru_layers, hidden_size, num_classes, num_classes_type,
                                         bidirectional=True)
        return transducer_model
    @staticmethod
    def load_simple_model_for_training(model_path, image_features, num_classes, num_type_classes):
        # Load on CPU regardless of where the checkpoint was written.
        checkpoint = torch.load(model_path, map_location='cpu')
        hidden_size = checkpoint['hidden_size']
        gru_layers = checkpoint['gru_layers']
        epochs = checkpoint['epochs']
        transducer_model = ModelHandler.get_new_gru_model(image_features=image_features,
                                                          gru_layers=gru_layers,
                                                          hidden_size=hidden_size,
                                                          num_classes=num_classes,
                                                          num_classes_type=num_type_classes)
        model_state_dict = checkpoint['model_state_dict']
        from collections import OrderedDict
        new_model_state_dict = OrderedDict()
        # Strip the 'module.' prefix that DataParallel-wrapped training adds to keys.
        for k, v in model_state_dict.items():
            name = k
            if k[0:7] == 'module.':
                name = k[7:] # remove `module.`
            new_model_state_dict[name] = v
        transducer_model.load_state_dict(new_model_state_dict)
        transducer_model.cpu()
        return transducer_model, hidden_size, gru_layers, epochs
    @staticmethod
    def load_simple_optimizer(transducer_optimizer, checkpoint_path, gpu_mode):
        # Restore optimizer state; in GPU mode move all tensor state onto the GPU,
        # since load_state_dict keeps tensors on the device they were saved from.
        if gpu_mode:
            checkpoint = torch.load(checkpoint_path)
            transducer_optimizer.load_state_dict(checkpoint['model_optimizer'])
            for state in transducer_optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.cuda()
        else:
            checkpoint = torch.load(checkpoint_path, map_location='cpu')
            transducer_optimizer.load_state_dict(checkpoint['model_optimizer'])
        return transducer_optimizer
| StarcoderdataPython |
1952941 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from tempest.common import rest_client
from tempest import exceptions
from tempest.openstack.common.fixture import mockpatch
from tempest.tests import base
from tempest.tests import fake_config
from tempest.tests import fake_http
class BaseRestClientTestClass(base.TestCase):
    """Shared fixture: builds a RestClient against fake config/HTTP and stubs
    out auth and response logging."""
    def _set_token(self):
        self.rest_client.token = '<PASSWORD>'
    def setUp(self):
        super(BaseRestClientTestClass, self).setUp()
        self.rest_client = rest_client.RestClient(fake_config.FakeConfig(),
                                                  'fake_user', 'fake_pass',
                                                  'http://fake_url/v2.0')
        self.stubs.Set(httplib2.Http, 'request', self.fake_http.request)
        # Bug fix: pass the bound method itself. The original wrote
        # `side_effect=self._set_token()`, which CALLED the method during
        # setUp (setting the token immediately) and installed None as the
        # mock's side effect, so _set_auth never refreshed the token.
        self.useFixture(mockpatch.PatchObject(self.rest_client, '_set_auth',
                                              side_effect=self._set_token))
        self.useFixture(mockpatch.PatchObject(self.rest_client,
                                              '_log_response'))
class TestRestClientHTTPMethods(BaseRestClientTestClass):
    """Checks each RestClient verb dispatches with the matching HTTP method
    (the fake httplib2 echoes the request back in return_dict)."""
    def setUp(self):
        self.fake_http = fake_http.fake_httplib2()
        super(TestRestClientHTTPMethods, self).setUp()
        # Response-status checking is irrelevant here; stub it out.
        self.useFixture(mockpatch.PatchObject(self.rest_client,
                                              '_error_checker'))
    def test_post(self):
        __, return_dict = self.rest_client.post('fake_endpoint', {},
                                                {})
        self.assertEqual('POST', return_dict['method'])
    def test_get(self):
        __, return_dict = self.rest_client.get('fake_endpoint')
        self.assertEqual('GET', return_dict['method'])
    def test_delete(self):
        __, return_dict = self.rest_client.delete('fake_endpoint')
        self.assertEqual('DELETE', return_dict['method'])
    def test_patch(self):
        __, return_dict = self.rest_client.patch('fake_endpoint', {},
                                                 {})
        self.assertEqual('PATCH', return_dict['method'])
    def test_put(self):
        __, return_dict = self.rest_client.put('fake_endpoint', {},
                                               {})
        self.assertEqual('PUT', return_dict['method'])
    def test_head(self):
        # HEAD additionally runs response_checker; stub it for this test only.
        self.useFixture(mockpatch.PatchObject(self.rest_client,
                                              'response_checker'))
        __, return_dict = self.rest_client.head('fake_endpoint')
        self.assertEqual('HEAD', return_dict['method'])
    def test_copy(self):
        __, return_dict = self.rest_client.copy('fake_endpoint')
        self.assertEqual('COPY', return_dict['method'])
class TestRestClientNotFoundHandling(BaseRestClientTestClass):
    """Checks that a 404 from the fake HTTP layer surfaces as exceptions.NotFound."""
    def setUp(self):
        # Fake transport that always answers with HTTP status 404.
        self.fake_http = fake_http.fake_httplib2(404)
        super(TestRestClientNotFoundHandling, self).setUp()
    def test_post(self):
        self.assertRaises(exceptions.NotFound, self.rest_client.post,
                          'fake_endpoint', {}, {})
5196789 | <reponame>jkpubsrc/python-module-jk-php-version-parser<gh_stars>0
#from __future__ import annotations
import typing
import jk_typing
import jk_version
from .ComposerToken import ComposerToken
class _ComposerTokenPattern(object):
    """Matcher for a single composer token: the token type must match, and the
    text must match too unless the pattern's text is None (wildcard)."""

    @jk_typing.checkFunctionSignature()
    def __init__(self, tokenType:str, text:str = None):
        # text=None means "any text of this token type".
        self.__tokenType = tokenType
        self.__text = text
    #

    def __fmt(self):
        # repr() with single quotes swapped for double quotes, as in the original output.
        type_repr = repr(self.__tokenType).replace("'", "\"")
        text_repr = repr(self.__text).replace("'", "\"")
        return "ComposerTokenPattern<{}, {}>".format(type_repr, text_repr)
    #

    def __str__(self):
        return self.__fmt()
    #

    def __repr__(self):
        return self.__fmt()
    #

    def tryMatch(self, token:ComposerToken) -> bool:
        """Return True if *token* matches this pattern."""
        assert isinstance(token, ComposerToken)
        if token.tokenType != self.__tokenType:
            return False
        return (self.__text is None) or (self.__text == token.text)
    #
#
3571044 | from office365.entity_collection import EntityCollection
from office365.onedrive.listitems.field_value_set import FieldValueSet
from office365.onedrive.versions.base_item_version import BaseItemVersion
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
class ListItemVersion(BaseItemVersion):
    """The listItemVersion resource represents a previous version of a ListItem resource."""
    def restore_version(self):
        # Queues a "restoreVersion" service call on the client context; the
        # request is sent when the context's queries are executed (deferred
        # query pattern), hence returning self for chaining.
        qry = ServiceOperationQuery(self, "restoreVersion")
        self.context.add_query(qry)
        return self
    @property
    def fields(self):
        """A collection of the fields and values for this version of the list item.
        :rtype: EntityCollection
        """
        return self.get_property('fields',
                                 EntityCollection(self.context, FieldValueSet,
                                                  ResourcePath("fields", self.resource_path)))
| StarcoderdataPython |
8173374 | # -*- coding: utf-8 -*-
from pyecore.ecore import EClassifier, EAnnotation, EClass, EPackage, EAttribute
import pyecore.ecore as ecore
from pyecore.innerutils import ignored
from .uml import Element
UML_20_URI = "http://www.eclipse.org/uml2/2.0.0/UML"
EXTENSION_PREFIX = "base_"
def patch_ecore_metamodel():
    """Placeholder hook for patching the pyecore Ecore metamodel; currently a no-op."""
    pass
def get_stereotype_from_application(obj):
    """Return the UML stereotype applied by *obj*, or None if *obj* is not a
    stereotype application (i.e. has no 'base_*' extension reference)."""
    eclass = obj.eClass
    for ref in eclass.eAllReferences():
        # Stereotype-definition classes carry an EXTENSION_PREFIX ('base_')
        # reference pointing at the extended UML metaclass.
        if ref.name.startswith(EXTENSION_PREFIX) and issubclass(ref.eType, Element):
            return get_stereotype(eclass, obj)
def get_stereotype(definition, application):
    """Return the stereotype stored in *definition*'s UML 2.0 EAnnotation, or None."""
    # NOTE(review): `application` is unused here — presumably kept for API symmetry; confirm.
    if isinstance(definition, EClassifier):
        # `ignored` deliberately swallows any failure (missing annotation,
        # empty references), producing an implicit None.
        with ignored(Exception):
            return definition.getEAnnotation(UML_20_URI).references[0]
def get_definition_reference_matching(stereotype, element):
    """Find the stereotype-definition classifier and its 'base_*' reference
    whose type matches *element*; returns (None, None) if no match."""
    for o, r in stereotype._inverse_rels:
        # Only inverse relations recorded through EAnnotation.references point
        # back at the classifiers that define this stereotype.
        if r is EAnnotation.references:
            classifier = o.eModelElement
            for reference in classifier.eAllReferences():
                if reference.name.startswith(EXTENSION_PREFIX) and isinstance(
                    element, reference.eType
                ):
                    return (classifier, reference)
    return (None, None)
def get_application_for(element, definition):
    """Return the stereotype application of type *definition* attached to
    *element* via a 'base_*' extension reference, or None."""
    for related_obj, relation in element._inverse_rels:
        if relation.name.startswith(EXTENSION_PREFIX) and isinstance(related_obj, definition):
            return related_obj
    return None
def define_profile(profile):
    """Build the EPackage definition of *profile*: one EClass per owned stereotype."""
    package = _profile2epackage(profile)
    for owned in profile.ownedStereotype:
        package.eClassifiers.append(_stereotype2eclass(owned))
    return package
def _profile2epackage(profile):
    """Create a bare EPackage mirroring the profile's name and URI."""
    package = EPackage(profile.name)
    package.nsURI = profile.URI
    package.nsPrefix = profile.name
    return package
def _stereotype2eclass(stereotype):
    """Translate a UML stereotype into an EClass, one EAttribute per tagged value."""
    eclass = EClass(stereotype.name)
    eclass.abstract = stereotype.isAbstract
    for tagged_value in stereotype.ownedAttribute:
        eclass.eStructuralFeatures.append(_taggedValue2eattribute(tagged_value))
    return eclass
def _taggedValue2eattribute(tvalue):
    """Translate a stereotype tagged value into an EAttribute (name, bounds, type)."""
    eattribute = EAttribute(tvalue.name)
    eattribute.lowerBound = tvalue.lower
    eattribute.upperBound = tvalue.upper
    # Best-effort type resolution: find the metamodel registered for the tagged
    # value's model and look the type up by name; `ignored` leaves eType unset
    # if any step fails (e.g. untyped attribute or unregistered metamodel).
    with ignored(Exception):
        tvalue_type = tvalue.type
        model = tvalue_type.get_model()
        p = get_application_for(model, EPackage)
        metamodel = p.eResource.resource_set.get_resource(p.nsURI).contents[0]
        eattribute.eType = metamodel.getEClassifier(tvalue_type.name)
    return eattribute
| StarcoderdataPython |
8147870 | <reponame>yandong2023/The-sword-pointing-to-offer-code<gh_stars>1-10
# -*- coding:UTF-8 -*-
import functools
# Python3.x和Python2.x对于map、reduce、filter的处理变得不同
# Python3.x中map和filter的输出是一个map型和filter型, 需要从里面取出需要的值
# Python2.x中map和filter输出的直接是一个list
# Python3.x中使用reduce需要引入functools
# Use map() to turn the ints in a list into strs.
# NOTE(review): the result is discarded, and in Python 3 map() is lazy, so this
# statement does no visible work — presumably kept as a minimal usage example.
map(str, [1, 2, 3, 4, 5, 6, 7, 8, 9])
# Use map() to normalise names: first letter upper-case, the rest lower-case.
def standardName(s):
    """Return *s* with its first letter capitalised and the rest lower-cased."""
    return s.capitalize()
print(list(map(standardName, ['adam', 'LISA', 'barT'])))
# In Python 2.x one would write print(map(standardName, ['adam', 'LISA', 'barT'])) instead,
# since map() returned a list there.
# Use reduce() to compute the product of all numbers in a list.
def prod(aList):
    """Return the product of the numbers in *aList* (1 for an empty list)."""
    # The initializer 1 makes the empty list well-defined instead of raising
    # TypeError from reduce() on an empty sequence.
    return functools.reduce(lambda x, y: x * y, aList, 1)
print(prod([1, 2, 3, 4, 5]))
# Use filter() to print the primes up to 100.
def isPrime(n):
    """Return n if n is prime, otherwise None (so filter() keeps only primes)."""
    if n <= 1:
        return None
    i = 2
    while i * i <= n:
        if n % i == 0:
            return None
        i += 1
    return n

# Bug fix: in Python 3 filter() returns a lazy filter object, so the original
# print showed "<filter object at ...>" instead of the primes. Materialise it.
print(list(filter(isPrime, range(101))))
1878110 | <gh_stars>10-100
"""Miscellaneous utils."""
import os
import re
import subprocess
import signal
import fnmatch
from contextlib import contextmanager
from anytree import PreOrderIter
import numpy as np
import pandas as pd
import vtk
import psutil
from tqdm import tqdm
try:
from credentials import TNAV_LICENSE_URL
except ImportError:
TNAV_LICENSE_URL = None
@contextmanager
def _dummy_with():
"""Dummy statement."""
yield
def kill(proc_pid):
    """Kill the process with PID *proc_pid* and all of its children."""
    # Kill the whole tree: descendants first, then the parent itself.
    process = psutil.Process(proc_pid)
    for proc in process.children(recursive=True):
        proc.kill()
    process.kill()
def signal_handler(signum, frame):
    """SIGALRM handler: abort the surrounding work by raising."""
    del signum, frame  # unused; required by the signal-handler signature
    raise Exception("Timed out!")
def execute_tnav_models(base_script_path, models, license_url=TNAV_LICENSE_URL, logfile=None,
                        global_timeout=None, process_timeout=None):
    """Execute a bash script for each model in a set of models.
    Parameters
    ----------
    base_script_path : str
        Path to script to execute.
    models : str, list of str
        A path to model or list of pathes.
    license_url : str
        A license server url.
    logfile : str
        A path to file where to point stdout and stderr.
    global_timeout : int
        Global timeout in seconds.
    process_timeout : int
        Process timeout. Kill process that exceeds the timeout and go to the next model.
    """
    if license_url is None:
        raise ValueError('License url is not defined.')
    models = np.atleast_1d(models)
    base_args = ['bash', base_script_path, license_url]
    # SIGALRM implements the global timeout across all models.
    signal.signal(signal.SIGALRM, signal_handler)
    # NOTE(review): signal.alarm expects a non-negative count and 0 means
    # "disable"; passing -1 for "no timeout" looks wrong — confirm.
    signal.alarm(-1 if global_timeout is None else global_timeout)
    with (open(logfile, 'w') if logfile is not None else _dummy_with()) as f:#pylint:disable=consider-using-with
        for model in tqdm(models):
            try:
                p = subprocess.Popen(base_args + [model], stdout=f, stderr=f)#pylint:disable=consider-using-with
                try:
                    # Per-model timeout: kill the tree and move to the next model.
                    p.wait(timeout=process_timeout)
                except subprocess.TimeoutExpired:
                    kill(p.pid)
            except Exception as err:
                # Any other failure (including the global SIGALRM) also kills
                # the current process tree, then propagates.
                kill(p.pid)
                raise err
def recursive_insensitive_glob(path, pattern, return_relative=False):
    """Recursively collect files under *path* whose names match *pattern*
    (fnmatch-style), ignoring case. Returns absolute-style walk paths, or
    paths relative to *path* when *return_relative* is True."""
    regex = re.compile(fnmatch.translate(pattern), re.IGNORECASE)
    matches = []
    for root, _, filenames in os.walk(path, topdown=True):
        for name in filenames:
            if not re.match(regex, name):
                continue
            full_path = os.path.join(root, name)
            if return_relative:
                matches.append(os.path.relpath(full_path, start=path))
            else:
                matches.append(full_path)
    return matches
def get_single_path(dir_path, filename, logger=None):
    """Locate exactly one *filename* inside *dir_path* (case-insensitive,
    recursive). Returns None (with an optional warning) when absent; raises
    ValueError when several candidates exist."""
    matches = recursive_insensitive_glob(dir_path, filename)
    if len(matches) > 1:
        raise ValueError('Directory {} contains multiple {} files.'.format(dir_path, filename))
    if matches:
        return matches[0]
    if logger is not None:
        logger.warning("{} file was not found.".format(filename))
    return None
def hasnested(container, *chain):
    """Checks if chain contains in container.
    Parameters
    ----------
    container: `collections.abc.Container`
    chain: tuple
        List of keywords.
    Returns
    -------
    out: bool
        True if `chain[0]` in container and `chain[1]` in `container[chain[0]]` etc.
        or if chain is empty, else False.
    """
    # Iterative walk. Bug fix: the recursive original raised IndexError on an
    # empty chain, although the docstring promises True in that case.
    current = container
    for key in chain:
        if key not in current:
            return False
        current = current[key]
    return True
def rolling_window(a, strides):
    """Non-overlapping rolling-window view of *a* with block shape *strides*
    (a zero-copy view built via stride tricks; do not write through it)."""
    strides = np.asarray(strides)
    window_counts = (np.array(a.shape) - strides) // strides + 1
    out_shape = tuple(window_counts) + tuple(strides)
    out_strides = tuple(strides * np.asarray(a.strides)) + a.strides
    return np.lib.stride_tricks.as_strided(a, shape=out_shape, strides=out_strides)
def mk_vtk_id_list(id_list):
    """Build a vtkIdList from an iterable of integer ids (VTK complementary helper)."""
    vil = vtk.vtkIdList()
    for i in id_list:
        vil.InsertNextId(int(i))
    return vil
def length_segment(p1, p2):
    """Euclidean distance between two 3D points *p1* and *p2*."""
    diff = np.asarray(p1) - np.asarray(p2)
    return np.sqrt((diff ** 2).sum())
def get_point_on_line_at_distance(p1, p2, distance):
    """Point on the line through *p1* and *p2*, located *distance* away from
    *p1* in the direction of *p2*."""
    start = np.asarray(p1)
    end = np.asarray(p2)
    fraction = distance / length_segment(start, end)
    return start + fraction * (end - start)
def get_well_mask(field):
    """Get the model's well mask in a spatial form.
    Parameters
    ----------
    field: Field
    Returns
    -------
    well_mask: np.array
        Array with well-names in cells which are registered as well-blocks and empty strings everywhere else.
    """
    # Spatial state -> dense (i, j, k) grid; otherwise a flat vector over active cells.
    # 'U32' = fixed-width 32-char unicode; longer well names would be truncated.
    if field.state.spatial:
        well_mask = np.zeros(field.grid.dimens, dtype='U32')
    else:
        well_mask = np.zeros(field.grid.actnum.sum(), dtype='U32')
    for node in field.wells:
        if node.is_main_branch:
            # Walk the whole well tree so branch blocks are labelled with the
            # branch's own name.
            for branch in PreOrderIter(node):
                ind = branch.blocks
                if ind.shape[0]:
                    if field.wells.state.spatial:
                        # blocks are stored as (n_blocks, 3); transpose to index i/j/k.
                        ind = ind.T
                        well_mask[ind[0], ind[1], ind[2]] = branch.name
                    else:
                        well_mask[ind] = branch.name
    return well_mask
def full_ind_to_active_ind(ind, grid):
    """Transforms 1D indices with respect to all cells to the indices with respect to only active cells.
    Parameters
    ----------
    ind: array-like
        Indices to be transformed.
    grid: Grid
        Grid component of a model.
    Returns
    -------
    ind: array-like
        Transformed indices; inactive cells map to -1.
    """
    ind = ind.copy()
    # `int` instead of `np.int`: the alias was deprecated in NumPy 1.20 and
    # removed in 1.24, so the original raised AttributeError on modern NumPy.
    f2a = grid.ravel(attr='actnum', inplace=False).astype(int)
    # Mark indices that land on inactive cells.
    ind[f2a[ind] == 0] = -1
    # Replace the 1-entries with running active-cell positions; the trailing -1
    # sentinel makes f2a[-1] resolve to -1 for the masked indices.
    f2a[f2a == 1] = np.arange(f2a.sum())
    f2a = np.concatenate([f2a, [-1]])
    return f2a[ind]
def active_ind_to_full_ind(ind, grid):
    """Transforms 1D indices with respect to active to the indices with respect to all cells.
    Parameters
    ----------
    ind: array-like
        Indices to be transformed.
    grid: Grid
        Grid component of a model.
    Returns
    -------
    ind: array-like
        Transformed indices.
    """
    # `int` instead of `np.int`: the alias was deprecated in NumPy 1.20 and
    # removed in 1.24, so the original raised AttributeError on modern NumPy.
    actnum = grid.ravel(attr='actnum', inplace=False).astype(int)
    # Positions of the active cells in full-grid numbering, plus a -1 sentinel
    # so that an input index of -1 stays -1.
    a2f = np.arange(len(actnum))[actnum == 1]
    a2f = np.concatenate([a2f, [-1]])
    return a2f[ind]
def get_control_interval_mask(control_dates, time_interval):
    """Boolean mask over *control_dates* selecting the controls that affect
    *time_interval*: every control inside the interval plus the last control
    set at or before its start (whose value is still active when it begins)."""
    start, end = time_interval[0], time_interval[1]
    before_start = control_dates <= start
    if not before_start.any():
        raise ValueError('First control date {} is later than the beginning of time interval ({}, {}).'
                         .format(control_dates[0], start, end))
    last_before = control_dates[before_start][-1]
    return (control_dates >= last_before) & (control_dates < end)
def get_control_interval_dates(field, time_interval=None):
    """Get the dates of the control changes in the given interval.

    Returns a pair (prehistory_dates, dates): `prehistory_dates` holds the
    interval start when no control predates it (empty otherwise), and `dates`
    are the control dates affecting the interval (all dates if no interval).
    """
    dates = field.wells.event_dates
    prehistory_dates = []
    if time_interval is not None:
        mask_prehistory = dates <= time_interval[0]
        if any(mask_prehistory):
            # The last control at/before the interval start is still in force.
            first_control_date = dates[mask_prehistory][-1]
        else:
            # No control before the interval: mark the interval start as "prehistory".
            first_control_date = dates[0]
            prehistory_dates.append(time_interval[0])
        mask = (dates >= first_control_date) & (dates < time_interval[1])
        dates = dates[mask]
    return pd.to_datetime(prehistory_dates), dates
def get_spatial_well_control(field, attrs, date_range=None, fill_shut=0., fill_outside=0.):
    """Get the model's control in a spatial. Also returns control dates relative to model start date.

    Parameters
    ----------
    field: Field
        Geological model.
    attrs: tuple or list
        Control attributes to get data from.
    date_range: tuple
        Minimal and maximal dates for control events.
    fill_shut: float
        Value to fill closed perforations
    fill_outside:
        Value to fill non-perforated cells

    Returns
    -------
    control: np.array
        Dict with 'control' of shape (n_dates, n_attrs) + spatial dims and 't'
        — dates in days relative to the model start.
    """
    spatial = field.state.spatial
    well_mask = field.well_mask
    attrs = [k.upper() for k in attrs]
    prehistory_dates, dates = get_control_interval_dates(field, date_range)
    # Spatial output is (nx, ny, nz); otherwise a flat vector of active cells.
    spatial_dims = tuple(field.grid.dimens) if spatial else (np.sum(field.grid.actnum),)
    control = np.full((len(prehistory_dates) + len(dates), len(attrs)) + spatial_dims, fill_outside)
    for node in field.wells:
        if node.is_main_branch and 'EVENTS' in node:
            # One row per control date; cells default to the "shut" value until
            # an event defines them.
            df = pd.DataFrame(fill_shut, index=dates, columns=attrs)
            df.loc[node.events['DATE'], attrs] = node.events[attrs].values
            df = df.fillna(fill_shut)
            if fill_shut:
                # Explicit zeros in events are treated as shut as well.
                df = df.replace(0, fill_shut)
            for branch in PreOrderIter(node):
                # Broadcast the per-date control values over all cells of the branch.
                control[len(prehistory_dates):, ..., well_mask == branch.name] = np.expand_dims(df.values, -1)
                # Prehistory timesteps (before the first event) are shut.
                control[:len(prehistory_dates), ..., well_mask == branch.name] = fill_shut
    sec_in_day = 86400
    dates = prehistory_dates.union(dates)
    rel_dates = (pd.to_datetime(dates) - field.start).total_seconds().values / sec_in_day
    return {'control': control, 't': rel_dates}
def _remove_repeating_blocks(blocks, values=None):
if not len(blocks):
if values is not None:
return blocks, values
return blocks
new_blocks = []
if values is not None:
new_values = []
for i, p in enumerate(blocks):
if blocks.ndim == 1:
occurrences = np.where((p == blocks))[0]
else:
occurrences = np.where((p == blocks).all(axis=1))[0]
if len(occurrences) > 1:
if i != occurrences[0]:
continue
if values is not None:
for ind in occurrences[1:]:
i = ind if values[ind] > values[i] else i
new_blocks.append(p)
if values is not None:
new_values.append(values[i])
new_blocks = np.stack(new_blocks)
new_values = np.stack(new_values)
return new_blocks, new_values
# pylint: disable=too-many-nested-blocks
def get_spatial_perf(field, subset=None, mode=None):
    """Get model's perforation ratios in a spatial form.

    Parameters
    ----------
    field: Field
    subset: array-like or None
        Subset of timesteps to pick. If None, picks all timesteps available.
    mode: str, None
        If not None, pick the blocks only with specified mode.

    Returns
    -------
    perf_ratio: np.array
        Array of shape (n_timesteps, 1) + spatial dims; zeros outside
        perforated blocks.
    """
    spatial = field.state.spatial
    full_perforation = field.wells.state.full_perforation
    if subset is None:
        n_ts = len(field.wells.event_dates)
    else:
        # NOTE(review): `subset` seems to hold timestep boundaries, hence the
        # -1 — confirm against callers.
        n_ts = len(subset) - 1
    # Spatial output is (nx, ny, nz); otherwise a flat vector of active cells.
    spatial_dims = tuple(field.grid.dimens) if spatial else (np.sum(field.grid.actnum),)
    perf = np.zeros((n_ts, 1) + spatial_dims)
    event_dates = field.wells.event_dates
    for t in range(n_ts):
        # Activate the perforations that are open at this event date.
        field.wells.apply_perforations(event_dates[t])
        for well in field.wells:
            if well.is_main_branch:
                if mode is not None:
                    if not hasattr(well, 'events'):
                        continue
                    # Keep the well only if all its events at this date match `mode`.
                    mode_at_ts = well.events[well.events['DATE'] == event_dates[t]]['MODE']
                    if len(mode_at_ts) == 0 or not (mode_at_ts == mode.upper()).all():
                        continue
                for branch in PreOrderIter(well):
                    perf_ind_mask = branch.perforated_indices()
                    perf_ind = branch.blocks[perf_ind_mask]
                    perf_ratio = branch.blocks_info.PERF_RATIO[perf_ind_mask].values
                    # Duplicated blocks keep the maximal perforation ratio.
                    perf_ind, perf_ratio = _remove_repeating_blocks(perf_ind, perf_ratio)
                    if perf_ind.shape[0]:
                        if spatial:
                            perf[t, 0, perf_ind[:, 0], perf_ind[:, 1], perf_ind[:, 2]] = perf_ratio
                        else:
                            perf[t, 0, perf_ind] = perf_ratio
    if full_perforation:
        # Restore the model's "all perforations open" state.
        field.wells.apply_perforations()
    return perf
def get_spatial_cf_and_perf(field, date_range=None, mode=None):
    """Get model's connection factors and perforation ratios in a spatial form.

    Parameters
    ----------
    field: Field
    date_range: tuple
        Minimal and maximal dates for events.
    mode: str, None
        If not None, pick the blocks only with specified mode.

    Returns
    -------
    connection_factors: np.array
    perf_ratio: np.array
    """
    spatial = field.state.spatial
    full_perforation = field.wells.state.full_perforation
    prehistory, dates = get_control_interval_dates(field, date_range)
    spatial_dims = tuple(field.grid.dimens) if spatial else (np.sum(field.grid.actnum),)
    # Output arrays carry a singleton channel axis: (n_dates, 1) + spatial dims.
    cf = np.zeros((len(prehistory) + len(dates), 1) + spatial_dims)
    perf = np.zeros((len(prehistory) + len(dates), 1) + spatial_dims)
    for i, date in enumerate(dates):
        field.wells.apply_perforations(date)
        field.wells.calculate_cf(field.rock, field.grid, units=field.meta.get('UNITS', 'METRIC'))
        for well in field.wells:
            if well.is_main_branch:
                if mode is not None:
                    if not hasattr(well, 'events'):
                        continue
                    # Keep the well only if all its events at this date match `mode`.
                    mode_at_ts = well.events[well.events['DATE'] == date]['MODE']
                    if len(mode_at_ts) == 0 or not (mode_at_ts == mode.upper()).all():
                        continue
                for branch in PreOrderIter(well):
                    perf_ind_mask = branch.perforated_indices()
                    perf_ind = branch.blocks[perf_ind_mask]
                    perf_ratio = branch.blocks_info.PERF_RATIO[perf_ind_mask].values
                    # Duplicated blocks keep the maximal perforation ratio.
                    perf_ind, perf_ratio = _remove_repeating_blocks(perf_ind, perf_ratio)
                    cf_ind = branch.blocks
                    connection_factors = branch.blocks_info.CF.values
                    cf_ind, connection_factors = _remove_repeating_blocks(cf_ind, connection_factors)
                    if perf_ind.shape[0]:
                        # Mask of cf rows whose block appears among the perforated blocks.
                        cf_mask = np.stack([(ind.reshape(1, -1) == perf_ind).all(axis=1).any() for ind in cf_ind])
                        if spatial:
                            perf[i + len(prehistory), 0, perf_ind[:, 0], perf_ind[:, 1], perf_ind[:, 2]] = perf_ratio
                            cf[i + len(prehistory), 0, perf_ind[:, 0], perf_ind[:, 1], perf_ind[:, 2]] = \
                                connection_factors[cf_mask]
                        else:
                            # BUG FIX: index the singleton channel axis explicitly
                            # (consistent with get_spatial_perf). The previous
                            # `perf[i + len(prehistory), perf_ind]` indexed the
                            # size-1 channel axis with cell indices.
                            perf[i + len(prehistory), 0, perf_ind] = perf_ratio
                            cf[i + len(prehistory), 0, perf_ind] = connection_factors[cf_mask]
    if full_perforation:
        field.wells.apply_perforations()
    return cf, perf
def get_n_control_ts(model):
    """Get number of timesteps in the model's control variable."""
    event_dates = model.wells.event_dates
    return len(event_dates)
def overflow_safe_mean(arr, axis=None):
    """Computes mean values across an array with reduced overflow risk (NO WARRANTIES, THOUGH - STILL MAY OVERFLOW).

    Parameters
    ----------
    arr: array-like
    axis: int or tuple, optional

    Returns
    -------
    mean: array-like
    """
    # Single-axis (or full) reduction maps directly onto ndarray.mean.
    if axis is None or isinstance(axis, int):
        return arr.mean(axis=axis)
    # Tuple of axes: reduce one axis at a time, highest axis first, so that
    # the remaining axis numbers stay valid after each reduction.
    result = arr
    for current_axis in sorted(axis, reverse=True):
        result = result.mean(axis=current_axis)
    return result
| StarcoderdataPython |
71760 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" ib.ext.cfg.CommissionReport -> config module for CommissionReport.java.
"""
| StarcoderdataPython |
1856229 | #!/usr/bin/python
#: ----------------------------------------------------------------------------
#: Copyright (C) 2017 Verizon. All Rights Reserved.
#: All Rights Reserved
#:
#: file: transform.py
#: details: memsql pipline transform python script
#: author: <NAME>
#: date: 04/27/2017
#:
#: Licensed under the Apache License, Version 2.0 (the "License");
#: you may not use this file except in compliance with the License.
#: You may obtain a copy of the License at
#:
#: http://www.apache.org/licenses/LICENSE-2.0
#:
#: Unless required by applicable law or agreed to in writing, software
#: distributed under the License is distributed on an "AS IS" BASIS,
#: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#: See the License for the specific language governing permissions and
#: limitations under the License.
#: ----------------------------------------------------------------------------
import json
import struct
import sys
import time
def transform_records():
    """Generator yielding length-prefixed records read from stdin.

    Each record is preceded by an 8-byte length header unpacked with the
    native unsigned-long struct format "L" (8 bytes on LP64 platforms).
    NOTE(review): written for Python 2 — on Python 3, sys.stdin.read()
    returns str, not bytes, so struct.unpack would need sys.stdin.buffer;
    confirm the target interpreter.
    """
    while True:
        # Fixed-size binary length prefix.
        byte_len = sys.stdin.read(8)
        if len(byte_len) == 8:
            byte_len = struct.unpack("L", byte_len)[0]
            result = sys.stdin.read(byte_len)
            yield result
        else:
            # Clean EOF is a 0-byte read; anything else means a truncated header.
            assert len(byte_len) == 0, byte_len
            return
# Main pipeline loop: decode each JSON-encoded flow batch from stdin, flatten
# every flow's fields into a TSV line, and emit it on stdout for MemSQL.
# NOTE(review): the byte-string %-formatting and sys.stdout.write(bytes) below
# only work on Python 2 — confirm the target interpreter.
for records in transform_records():
    flows = json.loads(records)
    exported_time = time.strftime('%Y-%m-%d %H:%M:%S',
                                  time.localtime(flows["Header"]["ExportTime"]))
    try:
        for flow in flows["DataSets"]:
            # Defaults for fields that may be absent from a data set.
            sourceIPAddress = "unknown"
            destinationIPAddress = "unknown"
            bgpSourceAsNumber = "unknown"
            bgpDestinationAsNumber = "unknown"
            protocolIdentifier = 0
            sourceTransportPort = 0
            destinationTransportPort = 0
            tcpControlBits = "unknown"
            ipNextHopIPAddress = "unknown"
            octetDeltaCount = 0
            ingressInterface = 0
            egressInterface = 0
            for field in flow:
                # "I" is the field's numeric element id, "V" its value.
                if field["I"] in [214]:
                    # NOTE(review): element 214 is treated as a poison field —
                    # the bare raise aborts the whole batch via the except
                    # below; confirm this is intentional.
                    raise
                elif field["I"] in [8, 27]:
                    sourceIPAddress = field["V"]
                elif field["I"] in [12, 28]:
                    destinationIPAddress = field["V"]
                elif field["I"] in [15, 62]:
                    ipNextHopIPAddress = field["V"]
                elif field["I"] == 16:
                    bgpSourceAsNumber = field["V"]
                elif field["I"] == 17:
                    bgpDestinationAsNumber = field["V"]
                elif field["I"] == 14:
                    ingressInterface = field["V"]
                elif field["I"] == 10:
                    egressInterface = field["V"]
                elif field["I"] == 7:
                    sourceTransportPort = field["V"]
                elif field["I"] == 11:
                    destinationTransportPort = field["V"]
                elif field["I"] == 4:
                    protocolIdentifier = field["V"]
                elif field["I"] == 6:
                    tcpControlBits = field["V"]
                elif field["I"] == 1:
                    octetDeltaCount = field["V"]
            # 14 tab-separated columns, one line per flow.
            out = b"%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" \
                  % (
                      flows["AgentID"],
                      sourceIPAddress,
                      destinationIPAddress,
                      ipNextHopIPAddress,
                      bgpSourceAsNumber,
                      bgpDestinationAsNumber,
                      protocolIdentifier,
                      sourceTransportPort,
                      destinationTransportPort,
                      tcpControlBits,
                      ingressInterface,
                      egressInterface,
                      octetDeltaCount,
                      exported_time,
                  )
            sys.stdout.write(out)
    except:
        # NOTE(review): bare except silently drops the whole batch on any
        # error (including the deliberate raise for element 214) — no logging.
        continue
| StarcoderdataPython |
6562496 | from tests.common_tests.ether import (
listening_to_node,
ether_block_number
)
def test_parity_listening_to_node(parity_settings):
    """Delegate to the shared ether check; it raises on failure."""
    listening_to_node(parity_settings)
def test_parity_ether_block_number(parity_settings):
    """Delegate to the shared ether check; it raises on failure."""
    ether_block_number(parity_settings)
| StarcoderdataPython |
6485481 | from datetime import datetime
from contextlib import contextmanager
from functools import wraps
from sqlalchemy import create_engine
from sqlalchemy.engine.row import Row
from sqlalchemy.exc import InvalidRequestError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, class_mapper, Session
BASE = declarative_base()
def decompose_fields(fields: list):
    """
    Split each field name on '.' and report whether any field addresses a
    relationship expressed as <rel_name>.<rel_property>.

    :return Tuple (A, B) where A is the list of fields split into their parts
    and B is a boolean expressing if there is at least one relation in these fields
    Ex: ([['name'], ['base']], False) ---> no relation
        ([['source', 'title'], ['name']], True) ---> there is a relation with 'source'
    """
    if not fields:
        return [], False
    # A field with more than one part after splitting addresses a relationship
    # property.
    parts = [field.split('.') for field in fields]
    return parts, any(len(p) > 1 for p in parts)
def get_uris(db_type, db_host_or_path, db_port, db_name, db_user, db_passwd):
    """
    Build a {name: connection_uri} mapping for the requested database(s).

    :param db_type: e.g. 'sqlite', 'postgresql' or 'mysql'
    :param db_host_or_path: folder path for sqlite, hostname/ip otherwise
    :param db_port: server port, or None
    :param db_name: a single name (str) or a collection of names; when a
        collection has no 'default' entry, its first element becomes 'default'
    :param db_user: login user (ignored for sqlite URIs here)
    :param db_passwd: login password
    :return: dict mapping each db name (plus 'default') to its URI
    :raise ValueError: on missing required data or an invalid db_name type
    """
    if not db_type or not db_host_or_path or not db_name:
        raise ValueError("Not enough data")
    if db_type == "sqlite":
        # ensure that no trailing '/' is present
        if db_host_or_path[-1] == '/':
            db_host_or_path = db_host_or_path[:-1]
        uri = f"sqlite:///{db_host_or_path}"
    else:
        uri = f"{db_type}://"
        if db_user:
            uri += db_user
        if db_passwd:
            uri += ":" + db_passwd
        uri += "@"
        uri += db_host_or_path
        if db_port:
            uri += ":" + str(db_port)
    # if db_name is a collection and db name 'default' is not specified,
    # the default database would be the first in the db_name collection
    if isinstance(db_name, (list, tuple, set)) and len(db_name) > 0:
        uri_dict = {name: uri + "/" + name for name in db_name}
        if 'default' not in uri_dict:
            # BUG FIX: next(iter(...)) also works for sets, which pass the
            # isinstance check above but do not support indexing (db_name[0]).
            uri_dict['default'] = uri + "/" + next(iter(db_name))
    elif isinstance(db_name, str):
        uri_dict = {'default': uri + "/" + db_name}
    else:
        raise ValueError("db_name invalid value")
    return uri_dict
def manage_session(function):
    """Ensure correct session management in transactions."""
    @wraps(function)
    def manager(*args, **kwargs):
        # Caller already provided a session: just forward the call.
        if 'session' in kwargs:
            return function(*args, **kwargs)
        # Otherwise open a scoped session on the connector (args[0]) using the
        # optional db_name / schema_name keyword arguments.
        engine_name = kwargs.get('db_name') or 'default'
        schema = kwargs.get('schema_name')
        with args[0].session_scope(engine_name=engine_name, schema_name=schema) as session:
            kwargs['session'] = session
            return function(*args, **kwargs)
    return manager
def to_dict(obj, found=None, recursive=False):
    """Convert an ORM-mapped object (or a SQLAlchemy Row) into a plain dict.

    Datetime columns are serialized with isoformat(). With recursive=True,
    relationships are followed; the `found` set of already-visited relations
    prevents infinite cycles.
    """
    if isinstance(obj, Row):
        return obj._asdict()
    if found is None:
        found = set()
    mapper = class_mapper(obj.__class__)

    def _serialize(column_key):
        value = getattr(obj, column_key)
        return value.isoformat() if isinstance(value, datetime) else value

    out = {column.key: _serialize(column.key) for column in mapper.columns}
    if recursive:
        for name, relation in mapper.relationships.items():
            if relation in found:
                continue
            found.add(relation)
            related = getattr(obj, name)
            if related is None:
                continue
            if relation.uselist:
                out[name] = [to_dict(child, found, True) for child in related]
            else:
                out[name] = to_dict(related, found, True)
    return out
class SQLConnector:
    def __init__(self, db_type, db_host_or_path, db_name, db_port=None, db_schemas=None, db_user=None, db_passwd=None,
                 session_autoflush=True, session_autocommit=False):
        """
        Creates an object with necessary parameters for connecting to a sql database
        :param db_type: One of 'sqlite', 'postgresql' or 'mysql'
        :param db_host_or_path: If db_type=='sqlite', it is the absolute path of the folder containing the file, otherwise it is a hostname or ip
        :param db_name: If just one database will be used, it is a single db name (a file name if db_name='sqlite').
                        If multiple databases, it would be a list of db names or file names.
        :param db_port: Port where db server is listening. None if db_type='sqlite'
        :param db_schemas: List of schemas used on every specified database
        :param db_user: Db server login user. None if db_type='sqlite'
        :param db_passwd: Db server login password. None if db_type='sqlite'
        """
        allowed_types = ("sqlite", "postgresql", "mysql")
        if not db_name:
            raise AttributeError("Must specify at least one db_name")
        if db_type in allowed_types:
            if db_type != 'sqlite' and not (db_name and db_user and db_passwd):
                raise AttributeError(f"db_user and db_password must be declared for {db_type}")
            self.connection_uris = get_uris(db_type, db_host_or_path, db_port, db_name, db_user, db_passwd)
        else:
            raise ValueError(f"{db_type} not in {str(allowed_types)}")
        # sqlite has no schema support.
        self.schemas = db_schemas if not db_type == 'sqlite' else None
        if isinstance(self.schemas, str):
            self.schemas = [self.schemas]
        self.engines = {
            name: create_engine(uri) for name, uri in self.connection_uris.items()
        }
        self.Session = sessionmaker(autoflush=session_autoflush, autocommit=session_autocommit)

    def create_tables(self, schemas: [] = None):
        """Create all declared tables on every engine, per schema if given."""
        schemas = schemas or self.schemas
        if isinstance(schemas, str):
            schemas = [schemas]
        self._create_schemas(schemas)
        for _, engine in self.engines.items():
            if schemas is not None:
                # BUG FIX: iterate the resolved local `schemas` (previously
                # `self.schemas`, which ignored an explicit argument and broke
                # when self.schemas was None).
                for sc in schemas:
                    BASE.metadata.create_all(
                        bind=engine.connect().execution_options(
                            schema_translate_map={None: sc}
                        )
                    )
            else:
                BASE.metadata.create_all(engine)

    def _create_schemas(self, schemas: [] = None):
        """Issue CREATE SCHEMA IF NOT EXISTS for every schema on every engine."""
        schemas = schemas or self.schemas
        if schemas is None:
            return
        if isinstance(schemas, str):
            schemas = [schemas]
        for engine_name, _ in self.engines.items():
            for sc in schemas:
                self.execute_query("CREATE SCHEMA IF NOT EXISTS " + sc, engine_name)

    def _dynamic_relations(self, resource_orm_class: BASE, rel_deep_list: list):
        """Resolve a chained attribute path (['rel', 'prop', ...]) on an object."""
        chained = getattr(resource_orm_class, rel_deep_list[0])
        if len(rel_deep_list) > 1:
            return self._dynamic_relations(chained, rel_deep_list[1:])
        return chained

    def execute_query(self, query: str, engine_name: str = None):
        """Execute a raw query on database 'engine_name'.
        If any schema will be used, it must be specified in the sql statement"""
        if engine_name is None:
            engine_name = 'default'
        engine = self.engines.get(engine_name)
        if engine is None:
            raise ValueError(f"No engine with name {engine_name}")
        connection = engine.connect()
        response = connection.execute(query)
        returnable = False
        # BUG FIX: the previous check was hasattr(response, '.fetch_all()'),
        # which can never name an attribute, so result rows were always
        # dropped (and the method is spelled `fetchall`). SQLAlchemy results
        # expose `returns_rows` to tell row-returning statements apart.
        if getattr(response, 'returns_rows', False):
            response = response.fetchall()
            returnable = True
        connection.close()
        if returnable:
            return response

    @manage_session
    def compose_filter_query(self,
                             resource_orm_class: BASE, resource_query_binding_class, filter_and_sort_dict: dict = None,
                             fields: list = None, limit: int = 1000, offset: int = 0, *, session: Session = None):
        """
        Same as 'list_resources' but only returns the total count and query itself, not evaluated
        :return: SQLAlchemy Query object
        """
        _, are_relations = decompose_fields(fields)
        if filter_and_sort_dict:
            query = resource_query_binding_class(session=session).evaluate_params(filter_and_sort_dict)
        else:
            query = session.query(resource_orm_class)
        if fields and not are_relations:
            columns = [getattr(resource_orm_class, f) for f in fields]
            query = query.with_entities(*columns)
        total_count = 0
        if limit or offset:
            # Count before pagination so callers can page through results.
            total_count = query.count()
        # slice operation was kept with documentation purposes
        if limit and offset:
            end_index = offset + limit
            query = query.slice(offset, end_index)
        elif limit:
            query = query.limit(limit)
        elif offset:
            query = query.offset(offset)
        return total_count, query

    @manage_session
    def create_resource(self, resource_orm_class: BASE, resource_fields: dict, *, return_id: bool = False,
                        session: Session = None, **kwargs):
        """
        Add a resource. Doesn't check for integrity errors. Valid for resources without foreign keys.
        :param resource_orm_class: ORM class related to the resource
        :param resource_fields: Dictionary with column names of the new object as keys and their respective values
        :param return_id: If it needs to commit this query to catch the new autocreated 'id' and returning it
        :param session: Session to be used to execute query
        :param kwargs: Additional keyword arguments for session (eg: db_name or schema_name)
        :return: True (or resource 'id' if return_id is True) if the operation succeeded
        """
        resource = resource_orm_class(**resource_fields)
        session.add(resource)
        if return_id:
            # Flush + refresh so the autogenerated primary key is populated
            # before the surrounding session scope commits.
            session.flush()
            session.refresh(resource)
            return resource.id
        return True

    @manage_session
    def delete_resource(self, resource_orm_class: BASE, pk, *, session: Session = None, **kwargs):
        """
        Deletes a resource
        :param resource_orm_class: ORM class related to the resource
        :param pk: Primary key value
        :param session: Session to be used to execute query
        :param kwargs: Additional keyword arguments for session (eg: db_name or schema_name)
        :return: True if the operation succeeded
        """
        resource = session.query(resource_orm_class).get(pk)
        if resource is not None:
            session.delete(resource)
        return True

    @manage_session
    def get_resource(self, resource_orm_class: BASE, pk, pk_fieldname: str = None, fields: list = None, *,
                     just_check_existence: bool = False, session: Session = None, **kwargs):
        """
        Get details about a specific resource.
        :param resource_orm_class: ORM class related to the resource
        :param pk: Primary key value
        :param pk_fieldname: Primary key column name.
        :param fields: Desired columns to be returned.
        :param just_check_existence: If this method is invoked just to check resource existence
        :param session: Session to be used to execute query
        :param kwargs: Additional keyword arguments for session (eg: db_name or schema_name)
        :return: A dictionary with the resource information
        :raise: ValueError if no resource with 'pk' primary key value is found
        """
        splitted_fields, are_relations = decompose_fields(fields)
        if not pk_fieldname or are_relations:
            resource = session.query(resource_orm_class).get(pk)
        else:
            # retrieving specific fields is a much more efficient way to query
            fields = [getattr(resource_orm_class, f) for f in fields]
            resource = session.query(*fields).filter(getattr(resource_orm_class, pk_fieldname) == pk).one_or_none()
        if just_check_existence:
            return resource is not None
        if resource is None:
            raise ValueError(f"Resource '{resource_orm_class.__tablename__}' with pk='{pk}' not found")
        if fields:
            return {'.'.join(sf): self._dynamic_relations(resource, sf) for sf in splitted_fields}
        return to_dict(resource)

    @manage_session
    def list_resources(self, resource_orm_class: BASE, resource_query_binding_class, filter_and_sort_dict: dict = None,
                       fields: list = None, limit: int = 1000, offset: int = 0, *, session: Session = None, **kwargs):
        """
        Get a list of resources that meet a set of parameters
        :param resource_orm_class: ORM class related to the resource
        :param resource_query_binding_class: QueryBinding-based class (from 'sqlalchemy-filterparams')
        :param filter_and_sort_dict: Dictionary of options specified by 'filterparams' library
        :param fields: Columns to be selected
        :param limit: Max number of rows fetched
        :param offset: Number of rows to skip before starting to return rows from the query
        :param session: Session to be used to execute the query
        :param kwargs: Additional keyword arguments for session (eg: db_name or schema_name)
        :return: A dictionary with shape {"total": total_count, "resources": [resources_list]}
        """
        if limit > 1000:
            raise ValueError("Limit out of bounds")
        total_count, query = self.compose_filter_query(resource_orm_class, resource_query_binding_class,
                                                       filter_and_sort_dict, fields, limit, offset, session=session)
        # if are_relations, returned query just ignored fields
        splitted_fields, are_relations = decompose_fields(fields)
        resources_list = query.all()
        if not total_count:
            total_count = len(resources_list)
        if fields:
            response = [
                {'.'.join(sf): self._dynamic_relations(resource, sf) for sf in splitted_fields} for resource in
                resources_list
            ]
        else:
            response = [to_dict(rsc) for rsc in resources_list]
        # returns a list of sources, but first element is the amount of sources without pagination
        return {"total": total_count, "resources": response}

    @manage_session
    def update_resource(self, resource_orm_class: BASE, pk, updated_fields: dict, *, raise_if_bad_field: bool = False,
                        session: Session = None, **kwargs):
        """
        Update a resource. Valid for resources without foreign keys
        :param resource_orm_class: ORM class related to the resource
        :param pk: Primary key of the existing resource
        :param updated_fields: Dictionary with column names of the updated object as keys and their respective new values
        :param raise_if_bad_field: True if you want to raise an exception when a non-existent field is specified for update
        :param session: Session to be used to execute the query
        :param kwargs: Additional keyword arguments for session (eg: db_name or schema_name)
        :return: True if everything goes well
        :raise ValueError: if no record matches `pk`, or (with raise_if_bad_field) on an unknown column
        """
        resource = session.query(resource_orm_class).get(pk)
        if resource is None:
            raise ValueError(f"No record in table '{resource_orm_class.__tablename__}' with pk='{pk}'")
        for field, new_value in updated_fields.items():
            if not hasattr(resource, field):
                if raise_if_bad_field:
                    raise ValueError(f"Table '{resource_orm_class.__tablename__}' has no '{field}' column")
                # fails silently by default
                continue
            setattr(resource, field, new_value)
        # nothing else is needed because the execution of session.commit() is made out of this method
        return True

    @contextmanager
    def session_scope(self, engine_name: str = None, schema_name: str = None):
        """Provide a transactional scope around a series of operations."""
        engine_name = engine_name or 'default'
        engine = self.engines.get(engine_name)
        if engine is None:
            raise ValueError(f"No engine with name {engine_name}")
        if schema_name:
            connection = engine.connect().execution_options(
                schema_translate_map={None: schema_name}
            )
            session = self.Session(bind=connection)
        else:
            session = self.Session(bind=engine)
        try:
            yield session
            session.commit()
        except InvalidRequestError:
            # NOTE(review): InvalidRequestError is rolled back but swallowed
            # (not re-raised), unlike other exceptions — confirm intent.
            session.rollback()
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()

    def kill(self):
        """Dispose every engine's connection pool."""
        for engine in self.engines:
            self.engines[engine].dispose()
| StarcoderdataPython |
3239997 | <filename>online_judge/blueprints/auth.py
import json
from flask import (render_template, Blueprint, request, session, current_app, redirect, url_for)
from online_judge.db.user import User
from online_judge.helpers.session import redirect_if_authenticated
auth = Blueprint('auth', __name__)
def validate_form():
    """Pull username/password from the POSTed form.

    Returns (username, password, error) where error is a JSON error payload
    (or None). When both fields are empty, only the password error survives,
    matching the original check order.
    """
    username = request.form['username']
    password = request.form['password']
    error = None
    for value, message in ((username, 'Username absent'), (password, 'Password absent')):
        if not value:
            error = json.dumps({'error': message})
    return username, password, error
@auth.route('/', methods=['GET'])
@redirect_if_authenticated
def display_login_form():
    """Render the combined login/registration page for anonymous users."""
    return render_template('login_register.html')
@auth.route('/login', methods=['POST'])
def login():
    """Authenticate the posted credentials and start a session."""
    username, password, error = validate_form()
    if error:
        return error, 400
    # Same generic message whether the user is unknown or the password is
    # wrong, to avoid leaking which accounts exist.
    if not User.exists(username):
        return json.dumps({'error': 'Invalid credentials'}), 400
    user = User(username)
    if not user.verify(password):
        return json.dumps({'error': 'Invalid credentials'}), 400
    session['username'] = username
    try:
        return redirect(request.args['next'])
    except KeyError:
        return redirect(url_for('home_page.display_problem_list'))
@auth.route('/signup', methods=['POST'])
def signup():
    """Register a new user account from the posted form."""
    username, password, error = validate_form()
    if error:
        return error, 400
    if User.exists(username):
        return json.dumps({'error': 'Username exists'}), 400
    User(username, password).save()
    return json.dumps({'status': 'success'}), 200
@auth.route('/logout', methods=['GET'])
def logout():
    """End the user's session and return to the login page."""
    # Delete the persisted session record by its session id, then clear the
    # in-memory session. NOTE(review): `store.remove({'sid': ...})` looks like
    # a MongoDB-backed session store — confirm against the session interface.
    current_app.session_interface.store.remove({'sid': session.sid})
    session.clear()
    return redirect(url_for('.display_login_form'))
| StarcoderdataPython |
9798913 | <filename>PyMOTW/source/tempfile/tempfile_TemporaryFile_binary.py
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 <NAME> All rights reserved.
#
"""
"""
#end_pymotw_header
import os
import tempfile
# Write to an anonymous temporary file and read the data back; the file is
# deleted automatically when the context manager exits.
payload = b'Some data'
with tempfile.TemporaryFile() as scratch:
    scratch.write(payload)
    scratch.seek(0)
    print(scratch.read())
| StarcoderdataPython |
3458180 | from hyperopt import fmin, tpe, space_eval, hp, Trials, STATUS_OK, STATUS_FAIL
from hyperopt.pyll import stochastic, scope
from os.path import join
import os
import pandas as pd
import numpy as np
@scope.define
def round_n(x, n=3):
    """Round `x` to `n` decimals; registered as a hyperopt pyll scope op."""
    return np.round(x, n)
def monitor_callback(params, scores, name=''):
    """Append one row of discovery/clustering parameters and scores to the
    experiment CSV at <exp_root>/results/<name>_expresults.csv."""
    row = {'NED': scores['ned'],
           'Coverage': scores['coverageNS'],
           'scores': scores}
    row = {**params['disc'], **params['clustering'], **row}
    # Reuse the shared CSV-append helper instead of duplicating its
    # exists/append-or-create logic.
    save_csv(params, row, name)
def save_csv(params, tmp, name):
    """Append the dict `tmp` as one CSV row to
    <exp_root>/results/<name>_expresults.csv, writing a header on first use."""
    outfile = join(params['exp_root'], 'results', name + '_expresults.csv')
    frame = pd.DataFrame([tmp])
    if os.path.exists(outfile):
        frame.to_csv(outfile, mode='a', header=False)
    else:
        frame.to_csv(outfile, mode='w', header=True)
| StarcoderdataPython |
196543 | # NOTIFY CODE
# NOTIFY CODE
# Characters that look like (or render as) whitespace but are not matched by
# string.whitespace; stripped during phone-number normalisation.
OBSCURE_WHITESPACE = (
    '\u180E'  # Mongolian vowel separator
    '\u200B'  # zero width space
    '\u200C'  # zero width non-joiner
    '\u200D'  # zero width joiner
    '\u2060'  # word joiner
    '\u00A0'  # non breaking space
    '\uFEFF'  # zero width non-breaking space
)

# International dialling prefix for the United Kingdom.
uk_prefix = '44'
class InvalidPhoneError(Exception):
    """Raised when a phone number fails validation."""

    def __init__(self, message=None):
        # Fall back to a generic message; also keep it on .msg for callers.
        self.msg = message or 'Not a valid phone number'
        super().__init__(self.msg)
def validate_notify_compatible_uk_mobile_number(number):
    """Normalise `number` and validate it as a UK mobile.

    Returns the canonical form '44' followed by the 10 national digits, or
    raises InvalidPhoneError.
    """
    digits = normalise_phone_number(number).lstrip(uk_prefix).lstrip('0')
    # UK mobile national numbers start with 7.
    if not digits.startswith('7'):
        raise InvalidPhoneError('Not a UK mobile number')
    if len(digits) != 10:
        message = 'Too many digits' if len(digits) > 10 else 'Not enough digits'
        raise InvalidPhoneError(message)
    return '{}{}'.format(uk_prefix, digits)
def normalise_phone_number(number):
    """Strip separators and whitespace, require digits only, drop leading zeros."""
    import string
    separators = string.whitespace + OBSCURE_WHITESPACE + '()-+'
    for character in separators:
        number = number.replace(character, '')
    try:
        # int() accepts exactly the characters the original validation allowed.
        [int(digit) for digit in number]
    except ValueError:
        raise InvalidPhoneError('Must not contain letters or symbols')
    return number.lstrip('0')
| StarcoderdataPython |
9631668 | <reponame>Tanmoy741127/Intellup<gh_stars>1-10
# Generated by Django 3.2.5 on 2021-07-04 13:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable `difficulty` choice field to the Contest model."""

    dependencies = [
        ('datahandler', '0004_contesthistory_points'),
    ]

    operations = [
        migrations.AddField(
            model_name='contest',
            name='difficulty',
            field=models.CharField(choices=[('beginner', 'Beginner'), ('intermediate', 'Intermediate'), ('expert', 'Expert')], max_length=20, null=True),
        ),
    ]
| StarcoderdataPython |
9785855 | <reponame>7Rocky/plantilla-informe-LaTeX
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Comment, Generic, Keyword, String, Text
__all__ = ['DockerfileLexer']
class DockerfileLexer(RegexLexer):
    """Minimal Dockerfile lexer: underlines the image name in FROM lines and
    highlights common instruction keywords and double-quoted strings.

    NOTE(review): aliases=[''] and filenames=['*'] register this lexer as a
    catch-all — confirm that is intended.
    """
    name = 'Dockerfile'
    aliases = ['']
    filenames = ['*']
    # Token-to-colour mapping used by the target style:
    # Text: black
    # String.Simple: brown
    # Keyword: blue
    # Generic.Underline: underline
    tokens = {
        'root': [
            # FROM <image>:<tag> — keyword, space, underlined image, plain tag.
            (r'^(FROM)(\s)([\w\d\-/]+?)(:.*?)$', bygroups(Keyword, Text, Generic.Underline, Text)),
            (r'(ADD|AS|CMD|COPY|ENTRYPOINT|ENV|LABEL|RUN|USER|VOLUME|WORKDIR)', Keyword),
            (r'".*?"', String.Simple),
            (r'.', Text)
        ]
    }
| StarcoderdataPython |
3372649 | <gh_stars>0
# -*- coding: utf-8 -*
from __future__ import print_function, unicode_literals, division
from eight import *
import numpy as np
def gini_coefficient(x):
    """
    Return computed Gini coefficient.

    See https://en.wikipedia.org/wiki/Gini_coefficient
    Adapted from econpy library.
    copyright: 2005-2009 <NAME>
    license: MIT license
    contact: aisaac AT american.edu

    Args:
        *x* (list or array): Data

    Returns:
        Gini coefficient (float)
    """
    data = np.sort(np.array(x))
    cumulative = np.cumsum(data)
    n = float(data.shape[0])
    # Normalized area under the Lorenz curve.
    lorenz_area = cumulative.sum() / (cumulative[-1] * n)
    return float(1.0 + 1 / n - 2 * lorenz_area)
def herfindahl_index(x, normalize=True):
    """
    Return computed Herfindahl index.

    See https://en.wikipedia.org/wiki/Herfindahl_index
    Normalized scores are bounded [0, 1]; non-normalized scores are
    [1/len(x), 1]. Normalization only counts non-zero values.

    Args:
        *x* (list or array): Data
        *normalize* (bool, default=True): Flag to normalize scores.

    Returns:
        Herfindahl index (float)
    """
    # Market shares summing to 1.
    shares = np.array(x) / np.sum(x)
    score = float((shares ** 2).sum())
    if not normalize:
        return score
    # Minimum possible score given the number of non-zero participants.
    lower_bound = 1 / (shares != 0).sum()
    return float((score - lower_bound) / (1 - lower_bound))
def concentration_ratio(x, number=4):
    """
    Return computed concentration ratio.

    See https://en.wikipedia.org/wiki/Concentration_ratio
    The concentration ratio measures the share of the market controlled by
    the top *number* firms. Returned ratio values vary from 0 to 1.

    Args:
        *x* (list or array): Data
        *number* (int, default=4): Number of values to consider. 4 and 8 are commonly used.

    Returns:
        Concentration ratio (float)
    """
    # Shares summing to 1, smallest first; the tail holds the top firms.
    shares = np.sort(np.array(x) / np.sum(x))
    return float(shares[-number:].sum())
def theil_index(x):
    """
    Return Theil entropy index.

    See https://en.wikipedia.org/wiki/Theil_Index
    The Theil index is a measure of economic inequality based on information
    theory: the difference between a dataset's maximum possible entropy and
    its observed entropy.

    Args:
        *x* (list or array): Data

    Returns:
        Theil index (float)
    """
    data = np.array(x)
    # Zeros must be dropped before the log transform; negatives are folded in
    # by absolute value.
    data = np.abs(data[data != 0])
    mean_value = np.average(data)
    count = data.shape[0]
    ratios = data / mean_value
    return float((ratios * np.log(ratios)).sum() / count)
| StarcoderdataPython |
5120506 | <gh_stars>1-10
"""
Common pytest configuration for the test suites
"""
import os
import sys
import pytest
from docutils.parsers.rst.states import RSTStateMachine, Struct, Inliner, state_classes
from docutils.parsers.rst.languages import en
from docutils.statemachine import StringList
from docutils.utils import new_document
from sphinx.testing.path import path
from sphinx.util.docutils import sphinx_domains
# SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
# sys.path.append(os.path.join(SCRIPT_DIR, '..', 'src'))
pytest_plugins = "sphinx.testing.fixtures"
@pytest.fixture()
def local_app(make_app):
    """
    Creates a sphinx app specific to this environment.
    The main thing that is set up is the path to the conf.py file.

    Yields:
        SphinxApp: The sphinx app.
    """
    # Provide sphinx with the path to the documentation directory
    # (tests/assets, resolved relative to this conftest file).
    conf_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "assets"))
    # Note the sphinx fixture expects a :class:`path` object, not a string
    yield make_app(srcdir=path(conf_dir))
@pytest.fixture()
def sphinx_state(local_app):
    """
    Fixture which will provide a sphinx state for use in testing sphinx
    directives.
    Yields:
        :class:`docutils.parsers.rst.states.State`: A state for use in testing
        directive functionality.
    """
    # Get the environment and decorate it with what sphinx may need for the
    # parsing.
    env = local_app.env
    env.temp_data["docname"] = "test"  # A fake document name
    # Create a document and inliner object, to be perfectly honest not sure
    # exactly what these are or do, but needed to get the directive to run.
    document = new_document(__file__)
    document.settings.pep_references = 1
    document.settings.rfc_references = 1
    document.settings.env = env
    document.settings.tab_width = 4
    inliner = Inliner()
    inliner.init_customizations(document.settings)
    # Create a state machine so that we can get a state to pass back.
    # The dummy 40-line input buffer keeps line-number lookups from failing.
    statemachine = RSTStateMachine(state_classes=state_classes, initial_state="Body")
    statemachine.input_lines = StringList([""] * 40)
    state = statemachine.get_state()
    state.document = document
    # memo carries the parse context that nested directives/roles expect.
    state.memo = Struct(
        inliner=inliner,
        language=en,
        title_styles=[],
        reporter=document.reporter,
        document=document,
        section_level=0,
        section_bubble_up_kludge=False,
    )
    state.memo.reporter.get_source_and_line = statemachine.get_source_and_line
    # The environment isn't normally available on the state in sphinx, but it's
    # done here to make testing easier.
    state.env = env
    # Sphinx monkeypatches docutils when run. This is how it gets
    # monkeypatched so that the python directives and roles can be found.
    with sphinx_domains(env):
        # Provide the state back to the test.
        yield state
| StarcoderdataPython |
11316776 | <filename>tests/line_break_test.py
from unicodedata_reader.reader import UnicodeDataReader
from unicodedata_reader import *
# This function tests reading property values using the actual data.
# Please see `entry_test.py` for tests using test data.
def test_line_break_value():
    """Exercise the three supported ways of reading Line_Break values."""
    # Entries for testing, copied from:
    # https://www.unicode.org/Public/UNIDATA/LineBreak.txt
    expects = {
        0x22: 'QU',
        0x39: 'NU',
        0x3A: 'IS',
        0x3B: 'IS',
        0x3C: 'AL',
        0x378: 'XX',  # missing value.
    }
    lb = UnicodeDataReader.default.line_break()
    # There are 3 ways to read values.
    # 1. `value(code)` returns the value for the code point.
    # This is the most memory-friendly, but slower to read values than other
    # methods.
    for code, value_expected in expects.items():
        assert lb.value(code) == value_expected
    # 2. `values_for_code()` returns a list of values in the code point order.
    # This creates an item for each Unicode code point (~1M items,) but the
    # fastest way to read values once the tuple was created.
    values_for_code = tuple(lb.values_for_code())
    for code, value_expected in expects.items():
        assert values_for_code[code] == value_expected
    # 3. `to_dict()` creates a dict of values, keyed by code points.
    # (Renamed from `dict` so the builtin is not shadowed.)
    value_by_code = lb.to_dict()
    for code, value_expected in expects.items():
        value = value_by_code.get(code, lb.missing_value(code))
        assert value == value_expected
    # When integer values are easier to handle, `map_values_to_int` can do this.
    lb.map_values_to_int()
    for code, value_expected in expects.items():
        value = lb.value(code)
        if value == 'XX':
            # Missing values are computed that they are not mapped.
            assert value == value_expected
        else:
            assert isinstance(value, int)
            assert lb.values_for_int()[value] == value_expected
    # Use `fill_missing_values()` to fill entries for missing values.
    # Then missing values are also mapped to integers.
    lb = UnicodeDataReader.default.line_break()
    lb.fill_missing_values()
    lb.map_values_to_int()
    for code, value_expected in expects.items():
        value = lb.value(code)
        assert isinstance(value, int)
        assert lb.values_for_int()[value] == value_expected
| StarcoderdataPython |
8045072 | <reponame>kiss2u/google-research
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for strip_pruning_vars."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow.compat.v1 as tf
from model_pruning.python import pruning
from model_pruning.python import strip_pruning_vars_lib
from model_pruning.python.layers import rnn_cells
def _get_number_pruning_vars(graph_def):
number_vars = 0
for node in graph_def.node:
if re.match(
r"^.*(mask$)|(threshold$)|(old_weight$)|(old_old_weight$)|(gradient$)",
node.name):
number_vars += 1
return number_vars
def _get_node_names(tensor_names):
  """Maps each tensor name to its underlying node name."""
  return list(map(strip_pruning_vars_lib._node_name, tensor_names))
class StripPruningVarsTest(tf.test.TestCase):
  """Tests that strip_pruning_vars removes pruning bookkeeping variables.

  For convolutional, fully-connected and LSTM models the suite checks that
  (a) pruning variables exist in the trained graph, (b) none remain after
  stripping, and (c) the stripped graph produces identical outputs.
  """

  def setUp(self):
    super(StripPruningVarsTest, self).setUp()
    # Pruning schedule/hyperparameters shared by every test model.
    param_list = [
        "pruning_frequency=1", "begin_pruning_step=1", "end_pruning_step=10",
        "nbins=2048", "threshold_decay=0.0", "prune_option=first_order_gradient"
    ]
    self.initial_graph = tf.Graph()
    self.initial_graph_def = None
    self.final_graph = tf.Graph()
    self.final_graph_def = None
    self.pruning_spec = ",".join(param_list)
    with self.initial_graph.as_default():
      self.sparsity = tf.Variable(0.5, name="sparsity")
      self.global_step = tf.train.get_or_create_global_step()
      self.increment_global_step = tf.assign_add(self.global_step, 1)
      self.mask_update_op = None

  def _build_convolutional_model(self, number_of_layers):
    """Builds a stack of masked conv2d layers; returns the top tensor."""
    # Create a graph with several conv2d layers
    base_depth = 64
    height, width = 7, 9
    input_tensor = tf.ones((8, height, width, base_depth))
    top_layer = input_tensor
    prev_depth = base_depth
    depth_step = 32
    with tf.variable_scope("conv_model"):
      for ix in range(number_of_layers):
        layer_name = "layer" + str(ix)
        with tf.variable_scope(layer_name) as scope:
          cur_depth = prev_depth + depth_step
          kernel = tf.Variable(
              tf.truncated_normal([3, 3, prev_depth, cur_depth],
                                  dtype=tf.float32),
              name="weights")
          top_layer = tf.nn.conv2d(
              top_layer,
              pruning.apply_mask(kernel, scope, "first_order_gradient"),
              [1, 1, 1, 1],
              padding="SAME")
          prev_depth = cur_depth
    return top_layer

  def _build_fully_connected_model(self, number_of_layers):
    """Builds a stack of masked fully-connected layers; returns top tensor."""
    base_depth = 128
    input_tensor = tf.ones((8, base_depth))
    top_layer = input_tensor
    prev_depth = base_depth
    depth_step = 128
    with tf.variable_scope("fc_model"):
      for ix in range(number_of_layers):
        layer_name = "layer" + str(ix)
        with tf.variable_scope(layer_name) as scope:
          cur_depth = prev_depth + depth_step
          kernel = tf.Variable(
              tf.truncated_normal([prev_depth, cur_depth], dtype=tf.float32),
              name="weights")
          bias = tf.Variable(
              tf.truncated_normal([cur_depth], dtype=tf.float32), name="biases")
          top_layer = tf.nn.relu_layer(
              top_layer,
              pruning.apply_mask(kernel, scope, "first_order_gradient"),
              bias,
              name=scope.name)
          prev_depth = cur_depth
    return top_layer

  def _build_lstm_model(self, number_of_layers):
    """Builds a multi-layer masked LSTM; returns the static_rnn outputs."""
    batch_size = 8
    dim = 10
    inputs = tf.Variable(tf.random_normal([batch_size, dim]))

    def lstm_cell():
      # Masked cell so its weights participate in pruning.
      return rnn_cells.MaskedBasicLSTMCell(
          dim, forget_bias=0.0, state_is_tuple=True, reuse=False)

    cell = tf.nn.rnn_cell.MultiRNNCell(
        [lstm_cell() for _ in range(number_of_layers)], state_is_tuple=True)
    outputs = tf.nn.static_rnn(
        cell, [inputs], initial_state=cell.zero_state(batch_size, tf.float32))
    return outputs

  def _prune_model(self, session):
    """Runs 20 steps of conditional mask updates on the current graph."""
    pruning_hparams = pruning.get_pruning_hparams().parse(self.pruning_spec)
    p = pruning.Pruning(pruning_hparams, sparsity=self.sparsity)
    self.mask_update_op = p.conditional_mask_update_op()
    tf.global_variables_initializer().run()
    for _ in range(20):
      session.run(self.mask_update_op)
      session.run(self.increment_global_step)

  def _get_outputs(self, session, input_graph, tensors_list, graph_prefix=None):
    """Evaluates the named tensors, optionally under an import name prefix."""
    outputs = []
    for output_tensor in tensors_list:
      if graph_prefix:
        output_tensor = graph_prefix + "/" + output_tensor
      outputs.append(
          session.run(session.graph.get_tensor_by_name(output_tensor)))
    return outputs

  def _get_initial_outputs(self, output_tensor_names_list):
    """Prunes the model, records outputs and freezes the initial graph."""
    with self.session(graph=self.initial_graph) as sess1:
      self._prune_model(sess1)
      reference_outputs = self._get_outputs(sess1, self.initial_graph,
                                            output_tensor_names_list)
      self.initial_graph_def = tf.graph_util.convert_variables_to_constants(
          sess1, sess1.graph.as_graph_def(),
          _get_node_names(output_tensor_names_list))
    return reference_outputs

  def _get_final_outputs(self, output_tensor_names_list):
    """Strips pruning vars, re-imports the graph and records its outputs."""
    self.final_graph_def = strip_pruning_vars_lib.strip_pruning_vars_fn(
        self.initial_graph_def, _get_node_names(output_tensor_names_list))
    _ = tf.graph_util.import_graph_def(self.final_graph_def, name="final")
    with self.test_session(self.final_graph) as sess2:
      final_outputs = self._get_outputs(
          sess2,
          self.final_graph,
          output_tensor_names_list,
          graph_prefix="final")
    return final_outputs

  def _check_removal_of_pruning_vars(self, number_masked_layers):
    """Asserts pruning vars existed before stripping and are gone after."""
    self.assertEqual(
        _get_number_pruning_vars(self.initial_graph_def), number_masked_layers)
    self.assertEqual(_get_number_pruning_vars(self.final_graph_def), 0)

  def _check_output_equivalence(self, initial_outputs, final_outputs):
    """Asserts the stripped graph reproduces the original outputs exactly."""
    for initial_output, final_output in zip(initial_outputs, final_outputs):
      self.assertAllEqual(initial_output, final_output)

  def testConvolutionalModel(self):
    with self.initial_graph.as_default():
      number_masked_conv_layers = 5
      top_layer = self._build_convolutional_model(number_masked_conv_layers)
      output_tensor_names = [top_layer.name]
      initial_outputs = self._get_initial_outputs(output_tensor_names)
    # Remove pruning-related nodes.
    with self.final_graph.as_default():
      final_outputs = self._get_final_outputs(output_tensor_names)
    # Check that the final graph has no pruning-related vars
    self._check_removal_of_pruning_vars(number_masked_conv_layers)
    # Check that outputs remain the same after removal of pruning-related nodes
    self._check_output_equivalence(initial_outputs, final_outputs)

  def testFullyConnectedModel(self):
    with self.initial_graph.as_default():
      number_masked_fc_layers = 3
      top_layer = self._build_fully_connected_model(number_masked_fc_layers)
      output_tensor_names = [top_layer.name]
      initial_outputs = self._get_initial_outputs(output_tensor_names)
    # Remove pruning-related nodes.
    with self.final_graph.as_default():
      final_outputs = self._get_final_outputs(output_tensor_names)
    # Check that the final graph has no pruning-related vars
    self._check_removal_of_pruning_vars(number_masked_fc_layers)
    # Check that outputs remain the same after removal of pruning-related nodes
    self._check_output_equivalence(initial_outputs, final_outputs)

  def testLSTMModel(self):
    with self.initial_graph.as_default():
      number_masked_lstm_layers = 2
      outputs = self._build_lstm_model(number_masked_lstm_layers)
      output_tensor_names = [outputs[0][0].name]
      initial_outputs = self._get_initial_outputs(output_tensor_names)
    # Remove pruning-related nodes.
    with self.final_graph.as_default():
      final_outputs = self._get_final_outputs(output_tensor_names)
    # Check that the final graph has no pruning-related vars
    self._check_removal_of_pruning_vars(number_masked_lstm_layers)
    # Check that outputs remain the same after removal of pruning-related nodes
    self._check_output_equivalence(initial_outputs, final_outputs)
if __name__ == "__main__":
  # Run the test suite when executed directly.
  tf.test.main()
| StarcoderdataPython |
134933 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#import tensorflow as tf
import numpy as np
import math
try:
import cPickle as pickle
except:
import pickle
import os
class BlockScramble:
    """Reversible block-wise image scrambling keyed by a random permutation.

    Images are split into blocks of `blockSize` = [height, width, channels];
    each block's pixels are split into 4-bit nibbles, selectively negated and
    permuted by `key`. `Decramble` applies the exact inverse (`invKey`).
    """

    def __init__( self, blockSize_filename ):
        # Accepts either a path to a saved key pickle (str) or a block size
        # [height, width, channels]; in the latter case a fresh key is made.
        if( isinstance( blockSize_filename, str ) ):
            self.load( blockSize_filename )
        else:
            self.blockSize = blockSize_filename
            key = self.genKey()
            self.setKey( key )

    def setKey( self, key ):
        """Installs a permutation key, its negation mask and its inverse."""
        self.key = key
        # Nibbles whose key index falls in the upper half get bit-negated
        # before and after the permutation.
        self.rev = ( key > key.size/2 )
        # argsort of a permutation yields its inverse permutation.
        self.invKey = np.argsort(key)

    def load( self, filename ):
        """Restores blockSize and key from a pickle written by `save`."""
        fin = open(filename, 'rb')
        self.blockSize, self.key = pickle.load( fin )
        fin.close()
        self.setKey( self.key )

    def save( self, filename ): # pkl
        """Persists blockSize and key as a pickle."""
        fout = open(filename, 'wb')
        pickle.dump( [self.blockSize, self.key], fout )
        fout.close()

    def genKey( self ):
        """Returns a random permutation over twice the block's nibble count."""
        key = self.blockSize[0] * self.blockSize[1]*self.blockSize[2]
        # x2 because every 8-bit value is split into two 4-bit nibbles.
        key = np.arange(key*2, dtype=np.uint32)
        np.random.shuffle(key)
        return key

    def padding( self, X ): # X is [datanum, width, height, channel]
        """Edge-replicates X so height/width are multiples of the block size."""
        s = X.shape
        t = s[1] / self.blockSize[0]
        d = t - math.floor(t)
        if( d > 0 ):
            paddingSize = self.blockSize[0] * ( math.floor(t) + 1 ) - s[1]
            padding = X[:,-1:,:,:]
            padding = np.tile( padding, (1, paddingSize, 1, 1 ) )
            X = np.concatenate( (X, padding), axis = 1 )
        t = s[2] / self.blockSize[1]
        d = t - math.floor(t)
        if( d > 0 ):
            paddingSize = self.blockSize[1] * ( math.floor(t) + 1 ) - s[2]
            padding = X[:,:,-1:,:]
            padding = np.tile( padding, (1, 1, paddingSize, 1 ) )
            X = np.concatenate( (X, padding), axis = 2 )
        return X

    def Scramble(self, X):
        """Scrambles a float image batch in [0,1]; returns float32 in [0,1]."""
        XX = (X * 255).astype(np.uint8)
        XX = self.doScramble(XX, self.key, self.rev)
        return XX.astype('float32')/255.0

    def Decramble(self, X):
        """Inverts `Scramble` using the inverse permutation."""
        XX = (X * 255).astype(np.uint8)
        XX = self.doScramble(XX, self.invKey, self.rev)
        return XX.astype('float32')/255.0

    def doScramble(self, X, ord, rev): # X should be uint8
        """Applies negate-permute-negate over block nibbles using order `ord`.

        Because the negation step is its own inverse, calling this with the
        inverse permutation exactly undoes a previous call with `self.key`.
        """
        s = X.shape
        #print(s)
        # print(self.blockSize)
        assert( X.dtype == np.uint8 )
        assert( s[1] % self.blockSize[0] == 0 )
        assert( s[2] % self.blockSize[1] == 0 )
        assert( s[3] == self.blockSize[2] )
        numBlock = np.int32( [ s[1] / self.blockSize[0], s[2] / self.blockSize[1] ] );
        numCh = self.blockSize[2];
        # Reshape so that each block's pixels form one flat trailing axis.
        X = np.reshape( X, ( s[0], numBlock[0], self.blockSize[0], numBlock[1], self.blockSize[1], numCh ) )
        X = np.transpose( X, (0, 1, 3, 2, 4, 5) )
        X = np.reshape( X, ( s[0], numBlock[0], numBlock[1], self.blockSize[0] * self.blockSize[1] * numCh ) )
        d = self.blockSize[0] * self.blockSize[1] * numCh;
        # print(X)
        # print(0xF)
        X0 = X & 0xF  # low nibble (remainder of division by 16)
        # print(X0)
        X1 = X >> 4  # high nibble (quotient of division by 16)
        # print(X1)
        X = np.concatenate( (X0,X1), axis=3 )
        # Bit-negate the nibbles selected by `rev`, permute, then negate again.
        X[:,:,:,rev] = ( 15 - X[:,:,:,rev].astype(np.int32) ).astype(np.uint8)
        # print(ord)
        X = X[:,:,:,ord]
        X[:,:,:,rev] = ( 15 - X[:,:,:,rev].astype(np.int32) ).astype(np.uint8)
        # Reassemble bytes from the two nibble halves.
        X0 = X[:,:,:,:d]
        X1 = X[:,:,:,d:]
        X = ( X1 << 4 ) + X0
        X = np.reshape( X, ( s[0], numBlock[0], numBlock[1], self.blockSize[0], self.blockSize[1], numCh ) )
        X = np.transpose( X, ( 0, 1, 3, 2, 4, 5) )
        X = np.reshape( X, ( s[0], numBlock[0] * self.blockSize[0], numBlock[1] * self.blockSize[1], numCh ) );
        return X
if( __name__ == '__main__' ):
    # Demo: scramble lena.png, save intermediate images, then descramble.
    from PIL import Image
    import os
    from matplotlib import cm

    os.environ["CUDA_VISIBLE_DEVICES"] = '1' #use GPU with ID=0
    # The TensorFlow session setup referenced `config`/`sess` although the
    # `tf` import above is commented out, which raised NameError at runtime.
    # It is disabled entirely because nothing in this demo uses TensorFlow.
    # config = tf.ConfigProto()
    # config.gpu_options.per_process_gpu_memory_fraction = 0.5 # maximun alloc gpu50% of MEM
    # config.gpu_options.allow_growth = True #allocate dynamically
    # sess = tf.Session(config = config)
    im = Image.open('lena.png')
    data = np.asarray(im, dtype=np.uint8)
    data = np.reshape( data, (1,)+data.shape )
    print(data.shape)
    key_file = 'key16/keys1.pkl'
    if( os.path.exists(key_file) ):
        bs = BlockScramble( key_file )
    else:
        bs = BlockScramble( [16,16,3] )
        bs.save(key_file)
    data = bs.padding( data )
    print(data.shape)
    im = Image.fromarray( data[0,:,:,:] )
    im.save('test_bs1.png')
    print(data.shape)
    data = bs.Scramble( data )
    print(data.shape)
    #array_resized_image = data[0,:,:,:]
    #scipy.misc.imsave("test_bs2.png", array_resized_image)
    #im = Image.fromarray( data[0,:,:,:] ,mode='F')
    im = Image.fromarray(np.uint8(cm.gist_earth(data[0,:,:,:],bytes=True))*255)
    im.save('test_bs2.png')
    data = bs.Decramble( data )
    print(data.shape)
    #array_resized_image = data[0,:,:,:]
    #scipy.misc.imsave("test_bs3.png", array_resized_image)
    im = Image.fromarray(np.uint8(cm.gist_earth(data[0,:,:,:],bytes=True))*255)
    im.save('test_bs3.png')
| StarcoderdataPython |
60975 | <filename>application.py
# Dependencies
import argparse
import requests
from datetime import datetime
from bs4 import BeautifulSoup
# Argparse description
parser = argparse.ArgumentParser(description="Enter Ticker To Receive Prices")
parser.add_argument('ticker', type=str)
args = parser.parse_args()
ticker = args.ticker.lower()
# Current time
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
# URL for coindesk
url = 'https://www.coindesk.com/price/{}'.format(ticker)
r = requests.get(url)
# Beautified content
soup = BeautifulSoup(r.content, 'html.parser')
# Variables which needs to be extracted
price = soup.find('div', {'class': 'price-large'})
mediumChange = soup.find('div', {'class': 'percent-change-medium'})
mktCap = soup.find('div', {'class': 'price-medium'})
# Results to be displayed
print(current_time)
print('${} = {}, [24 Hour % Change] = {}, [MarketCap] = {}'.format(ticker.upper(),
price.text, mediumChange.text, mktCap.text))
| StarcoderdataPython |
3349499 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from pathlib import Path
class TrainPaths:
    """Resolves and creates the SageMaker training container layout under *base*."""

    def __init__(self, base="/opt/ml") -> None:
        root = Path(base).expanduser().resolve()
        self.base: Path = root
        self.config: Path = root / "input" / "config"
        self.data: Path = root / "input" / "data"
        self.model: Path = root / "model"
        self.output: Path = root / "output"
        # Well-known SageMaker configuration files inside input/config.
        self.hyperparameters: Path = self.config / "hyperparameters.json"
        self.inputdataconfig: Path = self.config / "inputdataconfig.json"
        self.resourceconfig: Path = self.config / "resourceconfig.json"
        # Ensure every directory exists (idempotent).
        for directory in (self.config, self.data, self.model, self.output):
            directory.mkdir(parents=True, exist_ok=True)
class ServePaths:
    """Resolves and creates the SageMaker serving container layout under *base*."""

    def __init__(self, base="/opt/ml") -> None:
        root = Path(base).expanduser().resolve()
        self.base: Path = root
        self.model: Path = root / "model"
        self.output: Path = root / "output"
        # Ensure both directories exist (idempotent).
        for directory in (self.model, self.output):
            directory.mkdir(parents=True, exist_ok=True)
| StarcoderdataPython |
def metade(preco=0):
    """Return half of *preco*."""
    valor = preco / 2
    return valor
def dobro(preco=0):
    """Return double *preco*."""
    valor = preco * 2
    return valor
def aumentar(preco=0, r=0):
    """Return *preco* increased by *r* percent."""
    acrescido = preco * (100 + r)
    return acrescido / 100
def diminuir(preco=0, r=0):
    """Return *preco* decreased by *r* percent."""
    reduzido = preco * (100 - r)
    return reduzido / 100
def moeda(preco=0, moeda='R$'):
    """Format *preco* as Brazilian currency text, e.g. ``'R$3,50'``."""
    texto = f'{moeda}{preco:.2f}'
    # Brazilian convention uses a comma as the decimal separator.
    return texto.replace('.', ',')
| StarcoderdataPython |
175344 | <filename>practice_problems/prog1_vi.py
#!/usr/bin/env python3
#
# Author: <NAME>
# Email: <EMAIL>
#
# Script to Program to find Sum, Diff, Prod, Avg, Div
num1 = input("Enter the first Number: ")
num2 = input("Enter the second Number: ")

# Validate: both inputs must parse as numbers. float() handles integers and
# floating point alike; the two duplicate handlers were merged into a single
# tuple-based except clause.
try:
    num1 = float(num1)
    num2 = float(num2)
except (ValueError, TypeError):
    print("The entered input is not a number")
    exit(0)

# Perform operation on numbers
# Sum
print("Sum of the two Numbers is: %.2f" % (num1 + num2))
# Difference - always positive
print("Difference of two numbers is: %.2f" % (abs(num1 - num2)))
# average
print("Average of two numbers is: %.2f" % ((num1 + num2) / 2))
# product
print("Product of the two numbers is %.2f" % (num1 * num2))
# division, guarded against divide-by-zero
if num2 == 0:
    print("Cannot perform division as second number is zero")
else:
    print("Division of the two numbers is %.2f" % (num1 / num2))
| StarcoderdataPython |
1908402 | # coding: utf-8
import re
import datetime
def money2float(money):
    """Parse a money string into a float, honoring '万' (x10,000) suffixes.

    Commas are stripped first. If the string does not start with a number,
    it is returned unchanged.
    """
    # Remove comma(,)
    money = money.replace(',', '')
    matched = re.match('([0-9\.]+)', money)
    if not matched:
        return money
    value = float(matched.group(1))
    # Each occurrence of '万' multiplies the value by 10,000. Since the
    # string starts with a digit here, '万' can only appear after index 0.
    value *= 1e4 ** money.count('万')
    return value
def period2timedelta(period):
    """Convert a Chinese period string 'N个月M天' into a timedelta.

    A month counts as 30 days. If neither field is present the input is
    returned unchanged.
    """
    parsed = re.match('(?:([0-9]+)个月)?(?:([0-9]+)天)?', period)
    months, days = parsed.group(1), parsed.group(2)
    if months is None and days is None:
        return period
    total_days = (int(days) if days else 0) + 30 * (int(months) if months else 0)
    return datetime.timedelta(days=total_days)
| StarcoderdataPython |
6559131 |
import os, fnmatch, string
from . import iterutils, model as flmd
from .source import Source, SourceFile
import anyconfig
from collections import OrderedDict
# Directory names / glob patterns skipped while scanning for config files.
DEFAULT_EXCLUDE_PATTERNS = ['*.tar','*.jar','*.zip','*.gz','*.swp','node_modules','target','.idea','*.hide','*save']
# Glob patterns that identify candidate config files.
DEFAULT_FILE_PATTERNS = ['*.yml','*cfg','*settings','*config','*properties','*props']
# Characters considered valid in config keys: printable minus underscore.
VALID_KEY_CHARS = [c for c in string.printable if c not in ['_'] ]
# NOTE(review): the final all(...) iterates over VALID_KEY_CHARS, not over k,
# so it is constant-true and the per-character key check never runs. The
# intended test was likely all(c in VALID_KEY_CHARS for c in k) — confirm the
# intent (keys containing '_' would then be rejected) before changing.
DEFAULT_KEY_FILTER = lambda p, k, v: k == None or isinstance(k, int) or (isinstance(k, str) and len(k)<50 and all(c in string.printable for c in VALID_KEY_CHARS))
# Separator used to unflatten keys like 'a__b' into nested dicts.
DEFAULT_UNFLATTEN_SEPARATOR = '__'
def from_home_dir(root_path=None, include_os_env=False):
    """Build a Cfg that scans the user's home directory for config files."""
    return Cfg(root_path=root_path, include_os_env=include_os_env, base_dir='~')
def from_file(filename, root_path=None, include_os_env=False):
    """Build a Cfg from a single config file."""
    full = os.path.realpath(os.path.expanduser(filename))
    directory, basename = os.path.split(full)
    return Cfg(root_path=root_path, include_os_env=include_os_env,
               base_dir=directory, file_patterns=[basename])
def from_dict(d, root_path=None, include_os_env=False):
    """Build a Cfg from an in-memory dict (no file scanning)."""
    return Cfg(root_path=root_path, include_os_env=include_os_env,
               data=d, file_patterns=None)
def from_os_env(root_path=None):
    """Build a Cfg sourced from os.environ only."""
    return Cfg(root_path=root_path, file_patterns=[], include_os_env=True)
#
# Provide a Global object for convenience. Shadow the main class methods at the module level.
#
__GLOBAL_CFG = None

def __GET_GLOBAL_FLANGE():
    """Return the shared module-level Cfg, constructing it on first use."""
    global __GLOBAL_CFG
    if __GLOBAL_CFG:
        return __GLOBAL_CFG
    __GLOBAL_CFG = Cfg()
    return __GLOBAL_CFG
# Module-level convenience wrappers: each delegates to the lazily-created
# global Cfg instance so callers can use the module without constructing one.
def obj(*args, **kwargs):
    return __GET_GLOBAL_FLANGE().obj(*args, **kwargs)
def objs(*args, **kwargs):
    return __GET_GLOBAL_FLANGE().objs(*args, **kwargs)
def path(*args, **kwargs):
    return __GET_GLOBAL_FLANGE().path(*args, **kwargs)
def paths(*args, **kwargs):
    return __GET_GLOBAL_FLANGE().paths(*args, **kwargs)
def src(*args, **kwargs):
    return __GET_GLOBAL_FLANGE().src(*args, **kwargs)
def srcs(*args, **kwargs):
    return __GET_GLOBAL_FLANGE().srcs(*args, **kwargs)
def uri(*args, **kwargs):
    return __GET_GLOBAL_FLANGE().uri(*args, **kwargs)
def uris(*args, **kwargs):
    return __GET_GLOBAL_FLANGE().uris(*args, **kwargs)
def value(*args, **kwargs):
    return __GET_GLOBAL_FLANGE().value(*args, **kwargs)
def values(*args, **kwargs):
    return __GET_GLOBAL_FLANGE().values(*args, **kwargs)
def fobj(*args, **kwargs):
    return __GET_GLOBAL_FLANGE().fobj(*args, **kwargs)
def fobjs(*args, **kwargs):
    return __GET_GLOBAL_FLANGE().fobjs(*args, **kwargs)
def search(*args, **kwargs):
    return __GET_GLOBAL_FLANGE().search(*args, **kwargs)
def refresh(gather=False, load=True, merge=True, research=True):
    # Re-run the global instance's gather/load/merge/research pipeline.
    return __GET_GLOBAL_FLANGE().refresh(gather, load, merge, research)
def info(path=None):
    return __GET_GLOBAL_FLANGE().info(path=path)
def register_model(name, model, research=True):
    return __GET_GLOBAL_FLANGE().register_model(name, model, research)
def register_default_model(name, model):
    # Register into the defaults used by future Cfg instances.
    flmd.DEFAULT_MODELS[name] = model
    # If the global instance is already constructed, then explicitly register and research now
    if __GLOBAL_CFG:
        return register_model(name, model)
class PathCacheObject(object):
    """Index entry for a config path: its value, sources and model instances.

    `mregs` maps model name -> registration (lazy instance factory); `srcs`
    is the set of Source objects that contributed the value at `path`.
    """

    def __init__(self, val=None, mregs=None, srcs=None, path=None, wrap=False):
        if wrap:
            # basic wrapper. untested. keep this around for experimentation. This could be used to
            # keep the metadata this class holds in-place in the original data via substitution of the
            # wrapped object. Then an index of paths wouldn't be necessary.
            #
            # copied from:
            # https://stackoverflow.com/questions/1443129/completely-wrap-an-object-in-python/1445289#1445289
            self.__class__ = type(val.__class__.__name__,
                                  (self.__class__, val.__class__),
                                  {})
            self.__dict__ = val.__dict__
        self.val = val
        self.mregs = mregs if mregs else {}
        self.srcs = srcs if srcs else set()
        self.path = path

    @staticmethod
    def from_path_and_value(pv):
        # pv is a (path, value) pair as produced by iterutils.search.
        return PathCacheObject(path=pv[0], val=pv[1])

    def __repr__(self):
        return "<PathCacheObject {}, #srcs={}, #instances={}>".format(self.path, len(self.srcs), len(self.mregs))

    def add_src(self, src):
        self.srcs.add(src)

    def add_model(self, model, v):
        # print 'add model called on ', model
        # First registration for a model name wins; later ones are ignored.
        if model.name not in self.mregs:
            self.mregs[model.name] = model.registration(v)

    def val_equals(self, val):
        # print 'assert_val_equals: ', self.val, val
        return self.val == val

    def instance(self, model=None, reraise=True):
        """Return the model instance for this path.

        With an explicit `model`, its registration is used; without one, a
        sole registration is used implicitly. Multiple registrations and no
        model name raise ValueError. Returns None when nothing matches.
        """
        if model:
            if model in self.mregs:
                # try:
                return self.mregs[model].instance(reraise=reraise)
                # except Exception as e:
                #     print e
        elif len(self.mregs) == 1:
            return list(self.mregs.values())[0].instance(reraise=reraise)
        if len(self.mregs) > 1:
            raise ValueError('multiple registrations. Must specify model name from :{}'.format(self.mregs))
class Cfg(object):
    def __init__(self,
                 data=None,
                 include_os_env=True,
                 root_path=None,
                 base_dir='.',
                 file_patterns=DEFAULT_FILE_PATTERNS,
                 file_exclude_patterns=DEFAULT_EXCLUDE_PATTERNS,
                 file_search_depth=0,
                 unflatten_separator=DEFAULT_UNFLATTEN_SEPARATOR,
                 key_filter=DEFAULT_KEY_FILTER,
                 src_post_proc=None,
                 gather=True,
                 load=True,
                 merge=True,
                 research=True,):
        """
        :param data: initial data. This is merged as is without regard to root_path
        :param include_os_env:
        :param research_models:
        :param root_path: the namespace/key under which to add all loaded config/data. If model instances are defined at top level this will be needed
        :param base_dir: directory or list of directories to search for config/data. ** order matters! later entries override earlier.
        :param file_patterns:
        :param file_exclude_patterns:
        :param file_search_depth:
        :param unflatten_separator:
        """
        # save params
        self.unflatten_separator = unflatten_separator
        # Normalize a single pattern string into a one-element list.
        self.file_patterns = [file_patterns] if isinstance(file_patterns, str) else file_patterns
        self.file_exclude_patterns = file_exclude_patterns
        self.file_search_depth = file_search_depth
        self.include_os_env = include_os_env
        self.root_path = root_path
        self.init_data = data
        # self.gather = gather
        # self.merge = merge
        # self.research = research
        self.key_filter = key_filter
        self.src_post_proc = src_post_proc
        self.base_dir = base_dir
        if isinstance(self.base_dir, str):
            self.base_dir = [self.base_dir]
        # init data containers; models start as a copy of the defaults.
        self.data = {}
        self.sources = []
        self.path_index = {}
        self.models = flmd.DEFAULT_MODELS.copy()
        # function to give to Source objects so register themselves with the path_index
        # self.source_indexer = lambda src, p, k, v: self.__visit_index_path(self.path_index, src, p, k, v)
        # conditionally do initial gather, merge and research
        self.refresh(gather, load, merge, research)
#
#
# Flange Cfg() init methods
#
#
def clear_data(self):
if self.data:
del self.data
self.data = {}
if self.path_index:
del self.path_index
self.path_index = {}
if self.models:
del self.models
self.models = flmd.DEFAULT_MODELS.copy()
def refresh(self, gather=False, load=True, merge=True, research=True):
clear = False
if gather:
self.gather_sources()
clear = True
if load:
self.load_sources()
clear = True
if clear:
self.clear_data()
if merge:
self.merge_sources()
if research:
self.research_models()
def gather_sources(self):
if self.sources:
del self.sources
self.visited_uris = set()
if self.file_patterns:
for dir in self.base_dir:
self.sources.extend(self.__get_file_sources(dir))
if self.include_os_env:
# Dont use root path
self.sources.append(Source('os_env', '', os.environ.copy()))
if self.init_data:
# Dont use root path
self.sources.append(Source('init_data', '', self.init_data))
def load_sources(self):
for s in self.sources:
s.load()
    def merge_sources(self):
        """Filter, index and merge loaded source contents into self.data."""
        # process sources with caller provided function. This gives the caller a chance to
        # shape things up or set the src path prior to the filter, index and merge
        dlist = []
        for s in self.sources:
            # Skip sources that failed to load.
            if s.error:
                continue
            if self.src_post_proc:
                self.src_post_proc(s)
            # Nest contents under the source's root path when one is set.
            d = {s.root_path: s.contents} if s.root_path else s.contents
            # print d.keys()
            e = iterutils.unflatten(d, self.unflatten_separator)
            # print 'after unflatten', e
            # remap both filters keys and registers every path in the index.
            dlist.append(iterutils.remap(e, reraise_visit=True, enter=lambda p, k, v: self.__filter_and_index(s, p, k, v)))
            # e = {'test': {'exclude': [['192.168.0.0/16'], ['172.16.0.0/12', '10.0.0.0/8'], '10.1.3.0/24']}}
            # dlist.append(iterutils.remap(e, enter=lambda p, k, v: self.__filter_and_index(s, p, k, v)))
        # then merge, putting the content under the root path for each source
        self.data = {}
        failed = []
        for d in dlist:
            try:
                # print 'merging ', d
                anyconfig.merge(self.data, d)
            except:
                # NOTE(review): bare except silently drops unmergeable
                # sources and `failed` is never inspected afterwards —
                # consider logging; confirm intended best-effort behavior.
                failed.append(d)
    def research_models(self):
        """Discover plugin model definitions in the data, then index instances."""
        # Find every value that validates as a plugin model definition.
        plugins = iterutils.research(
            self.data,
            query=lambda p, k, v: flmd.PLUGIN_MODEL.validator(v))
        for p in plugins:
            # p is a (path, value) pair; register a model built from the value.
            m = self.register_model(
                p[1]['name'],
                flmd.PLUGIN_MODEL.factory(p[1]),
                False)
            # Give self to the plugin model instances so, in turn, the models can provide to their instances
            if m.inject == 'flange':
                m.fobj = self
        # Second pass: attach registered models to matching data paths.
        iterutils.research(
            self.data,
            query=lambda p, k, v: self.__visit_index_model_instance(self.models.values(), p, k, v))
#
#
# Primary access methods
#
#
    def search(self, path=None, values=None, unique=False, raise_absent=False, vfunc=lambda x: x):
        """
        Return single model object instance matching given criteria
        :param path: tuple or dpath expression representing the hierarchy/chain of parent keys
        :param values: single value or list of values to match. If exact is False then .contains method is used as filter
        :param raise_absent: if True then raise exception if no match is found
        :return: list matching ojects directly from data/config in the form of ((k1, k2, .., kn), value)
        """
        path_and_value_list = iterutils.search(
            self.data,
            path=path,
            required_values=values)
        # print 'search found ', [x[0] for x in path_and_value_list]
        # __return_value applies vfunc, enforces uniqueness and raise_absent.
        return self.__return_value(path_and_value_list, unique, raise_absent, vfunc)
    def __return_value(self, l, unique=True, raise_absent=False, vfunc=lambda x: x):
        # Shared result shaping for all accessors: map through vfunc, drop
        # falsy results, enforce uniqueness/absence policies.
        if l:
            if vfunc:
                # Falsy vfunc results are filtered out entirely.
                l = [y for y in [vfunc(x) for x in l] if y]
            if len(l) > 1 and unique:
                raise ValueError('multiple matches found')
        if not l:
            if raise_absent:
                raise ValueError('no match found')
            # NOTE(review): when empty and raise_absent is False this falls
            # through and implicitly returns None (even for unique=False,
            # where a caller might expect []) — confirm intended.
        else:
            return l[0] if unique else l
    # Thin accessors over search(): each fixes `unique` and a vfunc that
    # projects the (path, value) pairs to paths, sources, uris or values.
    def path(self, path=None, values=None, raise_absent=False):
        # Single matching key path (tuple of keys).
        return self.search(path=path, unique=True, values=values, raise_absent=raise_absent, vfunc=lambda x: x[0])
    def paths(self, path=None, values=None, raise_absent=False):
        # All matching key paths.
        return self.search(path=path, unique=False, values=values, raise_absent=raise_absent, vfunc=lambda x: x[0])
    def src(self, path=None, values=None, raise_absent=False):
        # One Source object contributing the single matching path.
        sources = self.search(path=path, values=values, unique=True, raise_absent=raise_absent, vfunc=lambda x: self.path_index[x[0]].srcs)
        return next(iter(sources))
    def srcs(self, path=None, values=None, raise_absent=False):
        # De-duplicated Source objects across all matching paths.
        sources = self.search(path=path, values=values, unique=False, raise_absent=raise_absent, vfunc=lambda x: self.path_index[x[0]].srcs)
        return list(set([s for l in sources for s in l])) if sources else sources
    def uri(self, path=None, values=None, raise_absent=False):
        # URI of one source for the single matching path.
        sources = self.search(path=path, unique=True, values=values, raise_absent=raise_absent, vfunc=lambda x: self.path_index[x[0]].srcs)
        return next(iter(sources)).uri
    def uris(self, path=None, values=None, raise_absent=False):
        # URIs of all sources across all matching paths (may contain dups).
        sources = self.search(path=path, unique=False, values=values, raise_absent=raise_absent, vfunc=lambda x: self.path_index[x[0]].srcs)
        return [src.uri for l in sources for src in l] if sources else sources
    def obj(self, path=None, model=None, values=None, raise_absent=False):
        """
        Return single model object instance matching given criteria
        :param path: tuple or dpath expression representing the hierarchy/chain of parent keys
        :param values: single value or list of values to match. If exact is False then .contains method is used as filter
        :param raise_absent: if True then raise exception if no match is found
        :return: matching object from cache if already created or new if not
        """
        return self.search(path=path, unique=True, raise_absent=raise_absent, values=values,
                           vfunc=lambda x: self.path_index[x[0]].instance(model=model) if x[0] in self.path_index else None)
    def objs(self, path=None, model=None, values=None, raise_absent=False):
        """
        Return list of model object instances matching given criteria
        :param path: tuple or dpath expression representing the hierarchy/chain of parent keys
        :param values: single value or list of values to match. If exact is False then .contains method is used as filter
        :param raise_absent: if True then raise exception if no match is found
        :return: list of matching objects
        """
        return self.search(path=path, unique=False, raise_absent=raise_absent, values=values,
                           vfunc=lambda x: self.path_index[x[0]].instance(model=model, reraise=False) if x[0] in self.path_index else None)
    def value(self, path=None, values=None, raise_absent=False):
        """
        Return single data/config value matching given criteria
        :param path: tuple or dpath expression representing the hierarchy/chain of parent keys
        :param values: single value or list of values to match. If exact is False then .contains method is used as filter
        :param raise_absent: if True then raise exception if no match is found
        :return: matching value
        """
        return self.search(path=path, unique=True, values=values, raise_absent=raise_absent, vfunc=lambda x: x[1])
def values(self, path=None, values=None, raise_absent=False):
"""
Return all data/config values matching given criteria
:param path: tuple or dpath expression representing the hierarchy/chain of parent keys
:param values: single value or list of values to match. If exact is False then .contains method is used as filter
:param raise_absent: if True then raise exception if no match is found
:return: list of matching values
"""
return self.search(path=path, unique=False, values=values, raise_absent=raise_absent, vfunc=lambda x: x[1])
def fobj(self, path=None, values=None, unique=True, raise_absent=False):
"""
Return model instance/registration object matching given criteria
:param path: tuple or dpath expression representing the hierarchy/chain of parent keys
:param values: single value or list of values to match. If exact is False then .contains method is used as filter
:param raise_absent: if True then raise exception if no match is found
:return: single model instance/registration object
"""
return self.path_index[self.search(path=path, unique=unique, values=values, raise_absent=raise_absent)[0]]
def fobjs(self, path=None, values=None, raise_absent=False):
"""
Return all model instance/registration objects matching given criteria
:param path: tuple or dpath expression representing the hierarchy/chain of parent keys
:param values: single value or list of values to match. If exact is False then .contains method is used as filter
:param raise_absent: if True then raise exception if no match is found
:return: list of model instance/registration objects
"""
return self.search(path=path, unique=False, values=values, raise_absent=raise_absent,
vfunc=lambda x: self.path_index[x[0]] if x[0] in self.path_index else None)
#
#
# Init helper methods and remap callbacks
#
#
# def register_model_schema(self, name, schema, factory, research=True):
# self.register_model(
# name,
# flmd.Model(name, flmd.Model.get_schema_validator(schema), factory),
# research)
    def register_model(self, name, model, research=True):
        """Register `model` under `name` and optionally re-scan the data.

        :param name: key under which the model is stored in self.models
        :param model: model object; its validator is applied to every value
            visited during research
        :param research: if True, re-walk self.data to index matching values
        :return: the registered model (for chaining)
        """
        self.models[name] = model
        if research:
            iterutils.research(
                self.data,
                query=lambda p, k, v: self.__visit_index_model_instance([model], p, k, v))
        return model
    def __filter_and_index(self, src, p, k, v):
        """Remap visit callback: filter keys and index each value by path.

        :param src: source object the value originated from
        :param p: tuple path of the parent container
        :param k: key being visited
        :param v: value being visited
        :return: (v, False) to drop a filtered-out key, otherwise the
            result of iterutils.default_enter
        """
        # now the root_path/ns has already been accounted for in the data.
        # no prefixing logic
        # np = (src.root_path,) + p if src.root_path else p
        np = p
        # filter. func may be provided or be class default
        if self.key_filter and not self.key_filter(np, k, v):
            return v, False
        # index. internal to Cfg class
        full_path = np + (k,)
        if full_path in self.path_index:
            # NOTE(review): the source is recorded only when the new value
            # DIFFERS from the indexed one; equal values leave srcs
            # untouched. This looks inverted -- confirm intended semantics.
            if not self.path_index[full_path].val_equals(v):
                # raise ValueError('unexpected value change at path_index[{}]'.format(full_path))
                self.path_index[full_path].add_src(src)
        else:
            self.path_index[full_path] = PathCacheObject(val=v, path=full_path, srcs=set([src]))
        return iterutils.default_enter(p, k, v)
def __visit_index_model_instance(self, models, p, k, v):
"""
Called during model research on merged data
"""
# print 'model visit {} on {}'.format(model, v)
cp = p + (k,)
for model in models:
try:
if model.validator(v):
if cp in self.path_index:
# if self.path_index[cp].val != v:
# raise ValueError('unexpected value change at path_index[{}]'.format(cp))
self.path_index[cp].add_model(model, v)
else:
# The object should already be in the index but don't complain for now.
self.path_index[cp] = PathCacheObject(val=v, path=cp, regs=[model])
except:
pass
    def __get_file_sources(self, topdir):
        """Walk `topdir` and build SourceFile objects for matching files.

        Honors self.file_search_depth, self.file_exclude_patterns and
        self.file_patterns; skips files whose URI was already visited.

        :param topdir: directory to search (~ and symlinks expanded)
        :return: list of newly created SourceFile objects
        """
        sources = []
        topdir = os.path.realpath(os.path.expanduser(topdir))
        start_depth = topdir.count(os.sep)
        for root, dirnames, filenames in os.walk(os.path.realpath(os.path.expanduser(topdir)), topdown=True):
            # NOTE(review): depth subtracts both self.base_dir's depth and
            # start_depth (derived from topdir) -- confirm this is intended
            # when topdir != base_dir.
            depth = root.count(os.sep) - self.base_dir.count(os.sep) - start_depth
            if depth >= self.file_search_depth:
                # Pruning dirnames in-place stops os.walk from descending.
                del dirnames[:]
            # remove excluded directories and files
            for e in self.file_exclude_patterns:
                for d in [dirname for dirname in fnmatch.filter(dirnames, e)]:
                    dirnames.remove(d)
                for f in [filename for filename in fnmatch.filter(filenames, e)]:
                    filenames.remove(f)
            to_include = set()
            for p in self.file_patterns:
                for filename in fnmatch.filter(filenames, p):
                    to_include.add(os.path.join(root, filename))
            for filename in to_include:
                # Don't add a uri twice
                if filename not in self.visited_uris:
                    src = SourceFile(filename, self.root_path)
                    if src:
                        sources.append(src)
                        self.visited_uris.add(src.uri)
        return sources
#
#
#
#
#
def info(self, path=None):
def iprint(s, level=0):
s = str(s)
print(s.rjust(len(s)+3*level))
def psrcs(srcs, level=0):
# print('\n')
iprint('sources:', level)
for src in srcs:
iprint("{0:15.10} {1:60.65} {2}".format(str(src.parser), str(src.uri), 'error: ' + str(src.error) if src.error else ''), level+1)
def pmodels(path_objects, omit_empty=False, level=0):
# print('\n')
iprint('models:', level)
for model in self.models:
paths_with_matching_model = [x for x in path_objects if x.mregs.get(model)]
if not omit_empty or paths_with_matching_model:
iprint('{}'.format(model), level+1)
for path in paths_with_matching_model:
iprint("{0:50} {1}".format('/'.join(path.path), path.mregs.get(model)), level+2)
def pvalues(values, level=0):
# print('\n')
iprint('values:', level)
vs = [dict(x) if isinstance(x, OrderedDict) else x for x in values]
if vs:
for v in vs:
iprint(v, level+1)
s = ''
if not path:
print('\nconfig:\nbase dir: \t{}\nsearch depth: \t{}\nfile include patterns: \t{}\nfile exclude patterns: \t{}'.format(
self.base_dir , self.file_search_depth, self.file_patterns, self.file_exclude_patterns))
print('\n')
pmodels(self.path_index.values())
print('\n')
psrcs(self.sources)
else:
for k in search(path):
print('\n{}:'.format('/'.join(k[0])))
pmodels(fobjs(k[0]), omit_empty=True, level=1)
psrcs(srcs(path), level=1)
pvalues(values(path), level=1)
| StarcoderdataPython |
380324 | <filename>setup.py
from setuptools import setup
# Packaging metadata. Author name/email placeholders (<NAME>, <EMAIL>) were
# scrubbed by the dataset anonymizer and must be filled in before release.
setup(
    name='Trading-algorithm',
    version='1.0.0',
    packages=[''],
    # NOTE(review): url lacks a scheme (http/https); some tools reject this.
    url='www.linkedin.com/in/luca-pedersoli-820009202',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    description='A trading algorithm that helps the trader to predict and analyze past and future trends'
)
| StarcoderdataPython |
4825143 | <gh_stars>10-100
'''
Mixin for publishing messages to a topic's listeners. This will be
mixed into topicobj.Topic so that a user can use a Topic object to
send a message to the topic's listeners via a publish() method.
Note that it is important that the PublisherMixin NOT modify any
state data during message sending, because in principle it could
happen that a listener causes another message of same topic to be
sent (presumably, the listener has a way of preventing infinite
loop).
:copyright: Copyright 2006-2009 by <NAME>, all rights reserved.
:license: BSD, see LICENSE.txt for details.
'''
class PublisherMixin:
    """Adds message publishing to a topic object.

    Designed to be mixed into topicobj.Topic so a Topic instance exposes a
    publish() method that sends a message to the topic's listeners. Must not
    mutate state while sending: a listener may legitimately trigger another
    publish of the same topic.
    """

    def __init__(self):
        pass

    def publish(self, data=None):
        """Deliver `data` to all listeners of this topic."""
        self._publish(data)

    # -------------------- implementation hooks --------------------

    def _mix_prePublish(self, data, topicObj=None, iterState=None):
        """Hook invoked right before message sending.

        May perform argument checking and prepare iteration state; the
        default implementation does nothing and returns None.
        """
        return None

    def _mix_callListener(self, listener, data, iterState):
        """Invoke a single listener with this topic and the payload."""
        listener(self, data)
| StarcoderdataPython |
11273382 | # Enter your code for "Degree Distribution" here.
import csv
# Load both CSV files into lists of row dicts. BUG FIX: the files were
# previously opened inline and never closed (resource leak); `with` closes
# them deterministically, and newline='' is the csv-module-recommended mode.
degrees = []
students = []
with open("degrees.csv", newline="") as degrees_file:
    for row in csv.DictReader(degrees_file):
        degrees.append(row)
with open("students.csv", newline="") as students_file:
    for row in csv.DictReader(students_file):
        students.append(row)
# Order students by numeric score, highest first (ascending sort + reverse,
# preserving the original tie ordering).
students = sorted(students, key=lambda x: float(x["score"]))
students.reverse()
print(students)
| StarcoderdataPython |
286317 | <gh_stars>0
from os.path import join, abspath, relpath, isdir
from os import mkdir, chmod, pardir
import shutil
from glob import glob
import pickle
import numpy as np
from time import time
from datetime import datetime
from ..simulation.environment import ConditionSimulation
class Batch:
    """
    Class defines a collection of batch job submissions for Quest.

    Attributes:
        path (str) - path to batch directory
        script_name (str) - name of script for running batch
        parameters (iterable) - parameter sets
        simulation_paths (dict) - relative paths to simulation directories
        sim_kw (dict) - keyword arguments for simulation

    Properties:
        N (int) - number of samples in parameter space
    """

    def __init__(self, parameters):
        """
        Instantiate batch of jobs.

        Args:
            parameters (iterable) - each entry is a parameter set that defines a simulation. Parameter sets are passed to the build_model method.
        """
        self.simulation_paths = {}
        self.parameters = parameters
        self.script_name = 'run_batch.py'

    def __getitem__(self, index):
        """ Returns simulation instance. """
        return self.load_simulation(index)

    def __iter__(self):
        """ Iterate over serialized simulations.

        NOTE: a single cursor is stored on the instance, so nested or
        concurrent iteration over the same Batch is not supported.
        """
        self.count = 0
        return self

    def __next__(self):
        """ Returns next simulation instance. """
        if self.count < len(self.simulation_paths):
            simulation = self.load_simulation(self.count)
            self.count += 1
            return simulation
        else:
            raise StopIteration

    @property
    def N(self):
        """ Number of samples in parameter space. """
        return len(self.parameters)

    @staticmethod
    def load(path):
        """ Load batch from target <path>. """
        with open(join(path, 'batch.pkl'), 'rb') as fh:
            batch = pickle.load(fh)
        batch.path = path
        return batch

    @staticmethod
    def build_run_script(path,
                         script_name,
                         num_trajectories,
                         saveall,
                         deviations,
                         comparison):
        """
        Writes bash run script for local use.

        Args:
            path (str) - path to simulation top directory
            script_name (str) - name of run script
            num_trajectories (int) - number of simulation trajectories
            saveall (bool) - if True, save simulation trajectories
            deviations (bool) - if True, use deviation variables
            comparison (str) - type of comparison
        """
        # define paths
        path = abspath(path)
        job_script_path = join(path, 'scripts', 'run.sh')

        # copy run script to scripts directory
        run_script = abspath(__file__).rsplit('/', maxsplit=2)[0]
        run_script = join(run_script, 'scripts', script_name)
        shutil.copy(run_script, join(path, 'scripts'))

        # write an outer shell script that loops over the batch index file
        # (with ensures the file is closed even if a write fails)
        with open(job_script_path, 'w') as job_script:
            job_script.write('#!/bin/bash\n')
            # move to batch directory
            job_script.write('cd {:s} \n\n'.format(path))
            # run each batch
            job_script.write('echo "Starting all batches at `date`"\n')
            job_script.write('while read P; do\n')
            job_script.write('echo "Processing batch ${P}"\n')
            job_script.write('python ./scripts/{:s}'.format(script_name)+' ${P} ')
            args = (num_trajectories, saveall, deviations, comparison)
            job_script.write('-N {:d} -s {:d} -d {:d} -cm {:s} \n'.format(*args))
            job_script.write('done < ./batches/index.txt \n')
            job_script.write('echo "Completed all batches at `date`"\n')
            job_script.write('exit\n')

        # make the script executable
        chmod(job_script_path, 0o755)

    @staticmethod
    def build_submission_script(path,
                                script_name,
                                num_trajectories=5000,
                                saveall=False,
                                deviations=False,
                                comparison='empirical',
                                walltime=10,
                                allocation='p30653'):
        """
        Writes job submission script for QUEST.

        Args:
            path (str) - path to simulation top directory
            script_name (str) - name of run script
            num_trajectories (int) - number of simulation trajectories
            saveall (bool) - if True, save simulation trajectories
            deviations (bool) - if True, use deviation variables
            comparison (str) - type of comparison
            walltime (int) - estimated job run time
            allocation (str) - project allocation, e.g. p30653 (comp. bio)
        """
        # define paths
        path = abspath(path)
        job_script_path = join(path, 'scripts', 'submit.sh')

        # copy run script to scripts directory
        run_script = abspath(__file__).rsplit('/', maxsplit=2)[0]
        run_script = join(run_script, 'scripts', script_name)
        shutil.copy(run_script, join(path, 'scripts'))

        # determine queue from the requested walltime
        if walltime <= 4:
            queue = 'short'
        elif walltime <= 48:
            queue = 'normal'
        else:
            queue = 'long'

        # declare outer script that reads PATH from file
        with open(job_script_path, 'w') as job_script:
            job_script.write('#!/bin/bash\n')
            # move to batch directory
            job_script.write('cd {:s} \n\n'.format(path))
            # begin outer script for processing batch
            job_script.write('while IFS=$\'\\t\' read P\n')
            job_script.write('do\n')
            job_script.write('b_id=$(echo $(basename ${P}) | cut -f 1 -d \'.\')\n')
            job_script.write(' JOB=`msub - << EOJ\n\n')

            # =========== begin submission script for individual batch ======
            job_script.write('#! /bin/bash\n')
            job_script.write('#MSUB -A {:s} \n'.format(allocation))
            job_script.write('#MSUB -q {:s} \n'.format(queue))
            job_script.write('#MSUB -l walltime={0:02d}:00:00 \n'.format(walltime))
            job_script.write('#MSUB -m abe \n')
            #job_script.write('#MSUB -M <EMAIL> \n')
            job_script.write('#MSUB -o ./log/${b_id}/outlog \n')
            job_script.write('#MSUB -e ./log/${b_id}/errlog \n')
            job_script.write('#MSUB -N ${b_id} \n')
            job_script.write('#MSUB -l nodes=1:ppn=1 \n')
            job_script.write('#MSUB -l mem=1gb \n\n')

            # load python module and metabolism virtual environment
            job_script.write('module load python/anaconda3.6\n')
            job_script.write('source activate ~/pythonenvs/metabolism_env\n\n')

            # move to batch directory
            job_script.write('cd {:s} \n\n'.format(path))

            # run script
            job_script.write('python ./scripts/{:s}'.format(script_name)+' ${P} ')
            args = (num_trajectories, saveall, deviations, comparison)
            job_script.write('-N {:d} -s {:d} -d {:d} -cm {:s} \n'.format(*args))
            job_script.write('EOJ\n')
            job_script.write('`\n\n')
            # ============= end submission script for individual batch ======

            #job_script.write('echo "JobID = ${JOB} submitted on `date`"\n')
            job_script.write('done < ./batches/index.txt \n')
            job_script.write('echo "All batches submitted as of `date`"\n')
            job_script.write('exit\n')

        # make the script executable
        chmod(job_script_path, 0o755)

    def build_batches(self, batch_size=25):
        """
        Creates directory and writes simulation paths for each batch.

        Args:
            batch_size (int) - number of simulations per batch
        """
        # get directories for all batches and logs
        batches_dir = join(self.path, 'batches')
        logs_dir = join(self.path, 'log')

        index_path = join(batches_dir, 'index.txt')
        batch_file = None
        batch_path = None

        # create index file for batches and write one path file per batch
        with open(index_path, 'w') as index:
            for i, simulation_path in self.simulation_paths.items():
                # determine batch ID
                batch_id = i // batch_size
                # process new batch
                if i % batch_size == 0:
                    # finalize the previous batch file, if any
                    if batch_file is not None:
                        batch_file.close()
                        chmod(batch_path, 0o755)
                    # open batch file and append to index
                    batch_path = join(batches_dir, '{:d}.txt'.format(batch_id))
                    index.write('{:s}\n'.format(relpath(batch_path, self.path)))
                    batch_file = open(batch_path, 'w')
                    # create log directory for batch
                    mkdir(join(logs_dir, '{:d}'.format(batch_id)))
                # write paths to batch file
                batch_file.write('{:s}\n'.format(simulation_path))
            # BUG FIX: the last batch file was only closed when the total
            # number of simulations was an exact multiple of batch_size;
            # always close and chmod the final (possibly partial) batch.
            if batch_file is not None:
                batch_file.close()
                chmod(batch_path, 0o755)

        chmod(index_path, 0o755)

    def make_directory(self, directory='./'):
        """
        Create directory for batch of jobs.

        Args:
            directory (str) - destination path
        """
        # assign a timestamped name to the batch
        timestamp = datetime.fromtimestamp(time()).strftime('%y%m%d_%H%M%S')
        name = '{:s}_{:s}'.format(self.__class__.__name__, timestamp)

        # create directory (reused as-is if it already exists)
        path = join(directory, name)
        if not isdir(path):
            mkdir(path)
        self.path = path

        # make subdirectories for simulations and scripts
        mkdir(join(path, 'scripts'))
        mkdir(join(path, 'simulations'))
        mkdir(join(path, 'batches'))
        mkdir(join(path, 'log'))

    def build(self,
              directory='./',
              batch_size=25,
              num_trajectories=5000,
              saveall=False,
              deviations=False,
              comparison='empirical',
              walltime=10,
              allocation='p30653',
              **sim_kw):
        """
        Build directory tree for a batch of jobs. Instantiates and saves a simulation instance for each parameter set, then generates a single shell script to submit each simulation as a separate job.

        Args:
            directory (str) - destination path
            batch_size (int) - number of simulations per batch
            num_trajectories (int) - number of simulation trajectories
            saveall (bool) - if True, save simulation trajectories
            deviations (bool) - if True, use deviation variables
            comparison (str) - type of comparison
            walltime (int) - estimated job run time
            allocation (str) - project allocation
            sim_kw (dict) - keyword arguments for ConditionSimulation
        """
        # create batch directory
        self.make_directory(directory)

        # store parameters (e.g. pulse conditions)
        self.sim_kw = sim_kw
        self.batch_size = batch_size

        # build simulations
        for i, parameters in enumerate(self.parameters):
            simulation_path = join(self.path, 'simulations', '{:d}'.format(i))
            self.simulation_paths[i] = relpath(simulation_path, self.path)
            self.build_simulation(parameters, simulation_path, **sim_kw)

        # save serialized batch
        with open(join(self.path, 'batch.pkl'), 'wb') as fh:
            pickle.dump(self, fh, protocol=-1)

        # build parameter file for each batch
        self.build_batches(batch_size=batch_size)

        # build job run script
        self.build_run_script(self.path,
                              self.script_name,
                              num_trajectories,
                              saveall,
                              deviations,
                              comparison)

        # build job submission script
        self.build_submission_script(self.path,
                                     self.script_name,
                                     num_trajectories,
                                     saveall,
                                     deviations,
                                     comparison,
                                     walltime=walltime,
                                     allocation=allocation)

    @classmethod
    def build_simulation(cls, parameters, simulation_path, **kwargs):
        """
        Builds and saves a simulation instance for a set of parameters.

        Args:
            parameters (iterable) - parameter sets
            simulation_path (str) - simulation path
            kwargs: keyword arguments for ConditionSimulation
        """
        # build model (build_model must be supplied by a subclass)
        model = cls.build_model(parameters)

        # instantiate simulation
        simulation = ConditionSimulation(model, **kwargs)

        # create simulation directory
        if not isdir(simulation_path):
            mkdir(simulation_path)

        # save simulation
        simulation.save(simulation_path)

    def load_simulation(self, index):
        """
        Load simulation instance from file.

        Args:
            index (int) - simulation index

        Returns:
            simulation (ConditionSimulation)
        """
        simulation_path = join(self.path, self.simulation_paths[index])
        return ConditionSimulation.load(simulation_path)

    def apply(self, func):
        """
        Applies function to entire batch of simulations.

        Args:
            func (function) - function operating on a simulation instance

        Returns:
            output (dict) - {simulation_id: function output} pairs
        """
        # BUG FIX: simulation_paths stores paths relative to self.path, so
        # they must be joined with it (as load_simulation does) rather than
        # resolved against the current working directory.
        f = lambda rel: func(ConditionSimulation.load(join(self.path, rel)))
        return {i: f(p) for i, p in self.simulation_paths.items()}
| StarcoderdataPython |
6602684 | <reponame>glwhart/autoGR
import os
# Regenerate struct_enum.in for each supercell count and run the enumerator.
for cell_count in range(50, 1001, 50):
    content = "".join([
        "System \n",
        "bulk \n",
        "1 0 0 \n",
        "0 1 0 \n",
        "0 0 2 \n",
        "2 \n",
        "1 \n",
        "0.0 0.0 0.0 0/1 \n",
        "1 {}\n".format(cell_count),
        "0.00001 \n",
        "part list of labelings (including incomplete labelings) is used \n",
        "# Concentration ranges \n",
        "5 5 10 \n",
        "5 5 10",
    ])
    with open("struct_enum.in", "w+") as enum_input:
        enum_input.write(content)
    print("N cells", cell_count)
    os.system("./srHNF.x")
| StarcoderdataPython |
1777997 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural net layers for TensorFlow Fold.
Layers are a convenience rather than an integral part of Fold.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import inspect
import itertools
# import google3
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow_fold.blocks.blocks
import tensorflow_fold.blocks.result_types as tdt
class Layer(tdt.IOBase):
  """A callable that accepts and returns nests of batched of tensors."""

  def __init__(self, input_type=None, output_type=None, name_or_scope=None):
    """Creates the layer.

    Args:
      input_type: A type.
      output_type: A type.
      name_or_scope: A string or variable scope. If a string, a new variable
        scope will be created by calling
        [`create_variable_scope`](#create_variable_scope), with defaults
        inherited from the current variable scope. If no caching device is set,
        it will be set to `lambda op: op.device`. This is because `tf.while` can
        be very inefficient if the variables it uses are not cached locally.
    """
    if name_or_scope is None: name_or_scope = type(self).__name__
    if isinstance(name_or_scope, tf.VariableScope):
      self._vscope = name_or_scope
      name = str(self._vscope.name)
    elif isinstance(name_or_scope, six.string_types):
      self._vscope = create_variable_scope(name_or_scope)
      name = name_or_scope
    else:
      raise TypeError('name_or_scope must be a tf.VariableScope or a string: '
                      '%s' % (name_or_scope,))
    if self._vscope.caching_device is None:
      # Cache variables on the device of the op that uses them; see the
      # docstring above for why this matters inside tf.while loops.
      self._vscope.set_caching_device(lambda op: op.device)
    super(Layer, self).__init__(input_type, output_type, name)
    # Default pretty-printing metadata; subclasses may override these via
    # set_constructor_args() before or after calling this constructor.
    if not hasattr(self, '_constructor_name'):
      self._constructor_name = '__.%s' % self.__class__.__name__
    if not hasattr(self, '_constructor_args'):
      self._constructor_args = None
    if not hasattr(self, '_constructor_kwargs'):
      self._constructor_kwargs = None

  def set_constructor_args(self, name, args, kwargs):
    """Sets the constructor args used to pretty-print this layer.

    Should be called by derived classes in __init__.

    Args:
      name: the fully qualified name of the constructor
      args: a list of constructor arguments
      kwargs: a list of (key,value,default) triples for keyword arguments
    """
    self._constructor_name = name
    self._constructor_args = args
    self._constructor_kwargs = kwargs if kwargs is not None else []

  @property
  def constructor_name(self):
    """Fully qualified constructor name used for pretty-printing."""
    return self._constructor_name

  @property
  def constructor_args(self):
    """Positional constructor arguments used for pretty-printing."""
    return self._constructor_args

  @property
  def constructor_kwargs(self):
    """Keyword constructor arguments used for pretty-printing."""
    return self._constructor_kwargs

  def __rshift__(self, rhs):
    # `layer >> rhs` pipes this layer's output into rhs.
    return tensorflow_fold.blocks.blocks.Pipe(
        self, rhs)

  def __rrshift__(self, lhs):
    # `lhs >> layer` pipes lhs's output into this layer.
    return tensorflow_fold.blocks.blocks.Pipe(
        lhs, self)
def create_variable_scope(name):
  """Creates a new variable scope based on `name`, nested in the current scope.

  If `name` ends with a `/` then the new scope will be created exactly as if
  you called `tf.variable_scope(name)`. Otherwise, `name` will be
  made globally unique, in the context of the current graph (e.g.
  `foo` will become `foo_1` if a `foo` variable scope already exists).

  Args:
    name: A non-empty string.

  Returns:
    A variable scope.

  Raises:
    TypeError: if `name` is not a string.
    ValueError: if `name` is empty.
  """
  if not isinstance(name, six.string_types):
    raise TypeError('name must be a string: %s' % (name,))
  if not name: raise ValueError('name must be non-empty')
  if name.endswith('/'):
    # Trailing slash: reuse/enter the scope verbatim, no uniquification.
    with tf.variable_scope(name) as scope:
      return scope
  current_scope_name = tf.get_variable_scope().name
  if current_scope_name:
    full_name = '%s/%s' % (current_scope_name, name)
  else:
    full_name = name
  # We rely on the fact that every variable scope has a name scope
  # with the exact same name, so a unique name scope is by
  # implication also a unique name for a variable scope.
  with tf.name_scope(None):  # enter the root name scope
    with tf.name_scope(full_name) as unique_name:
      pass
  # Strip the current scope prefix so the scope is re-created relative to it.
  if current_scope_name: unique_name = unique_name[len(current_scope_name)+1:]
  with tf.variable_scope(unique_name[:-1]) as scope:  # drop trailing '/'
    return scope
@six.add_metaclass(abc.ABCMeta)
class TensorToTensorLayer(Layer):
  """A set of TF variables and an associated Tensor -> Tensor function."""

  def __init__(self, *args, **kwargs):
    # Tracks lazy one-time variable creation; see create_variables().
    self._created_variables = False
    super(TensorToTensorLayer, self).__init__(*args, **kwargs)

  @abc.abstractmethod
  def _create_variables(self):
    """Creates the variables associated with this layer.

    Guaranteed to be called at most once, either when the layer's call operator
    is invoked for the first time, in which case the input type will have been
    set, or when the public method create_variables is called for the first
    time. Scope will be set to this layer's vscope.

    Raises:
      TypeError: If `input_type` is invalid for this layer or isn't set.
    """
    pass

  @abc.abstractmethod
  def _process_batch(self, batch):
    """Processes a batch of inputs using this layer; called in its vscope.

    Args:
      batch: A batch tensor for this layer's input type.

    Returns:
      A tensor of this layer's output type.
    """
    pass

  def __call__(self, batch):
    """Calls the function associated with this layer on a batch of inputs.

    Creates the variables for this layer if they don't already exist.

    Args:
      batch: A batch tensor.

    Returns:
      A tensor of this layer's output type.

    Raises:
      ValueError: If the layer was previously called with a batch of a different
        dtype or shape (not considering the leading dimension).
    """
    # Infer the per-example type from the batch (dropping the batch dim);
    # set_input_type raises if it conflicts with a previously set type.
    self.set_input_type(
        tdt.TensorType(batch.get_shape().as_list()[1:], batch.dtype))
    self.create_variables()
    with tf.variable_scope(self._vscope):
      return self._process_batch(batch)

  def create_variables(self):
    """Creates the variables for this layer if they don't already exist.

    If the variables are created by this method rather than by calling the
    layer, the input type may need to be set manually.

    Raises:
      TypeError: If the input type is invalid or unset.
    """
    self._check_input_type()
    with tf.variable_scope(self._vscope):
      if not self._created_variables:
        self._create_variables()
        self._created_variables = True
class FC(TensorToTensorLayer):
"""A fully connected network layer.
Fully connected layers require a `float32` vector (i.e. 1D tensor) as input,
and build `float32` vector outputs. Layers can be applied to multiple inputs,
provided they all have the same shape.
For example, to apply the same hidden layer to two different input fields:
```python
layer = FC(100)
in = {'a': Vector(10), 'b': Vector(10)}
hidden = [in['a'] >> Call(layer), in['b'] >> Call(layer)] >> Concat()
out = hidden >> Call(FC(10, activation=None))
```
Attributes:
weights: The tensor for the weights of the FC layer.
bias: The tensor for the bias of the FC layer.
scales: The tensor for the scales of the FC layer if weight norm is enabled.
output_size: The size of the output as an integer.
"""
def __init__(self, num_units_out, activation=tf.nn.relu, initializer=None,
input_keep_prob=None, output_keep_prob=None,
normalization_fn=None, weight_norm=False, name=None):
"""Initializes the layer.
Args:
num_units_out: The number of output units in the layer.
activation: The activation function. Default is ReLU. Use `None` to get a
linear layer.
initializer: The initializer for the weights. Defaults to uniform unit
scaling with factor derived in <http://arxiv.org/pdf/1412.6558v3.pdf>
if activation is ReLU, ReLU6, tanh, or linear. Otherwise defaults to
truncated normal initialization with a standard deviation of 0.01.
input_keep_prob: Optional scalar float32 tensor for dropout on input.
Feed 1.0 at serving to disable dropout.
output_keep_prob: Optional scalar float32 tensor for dropout on output.
Feed 1.0 at serving to disable dropout.
normalization_fn: Optional normalization function that will be inserted
before nonlinearity.
weight_norm: A bool to control whether weight normalization is used. See
https://arxiv.org/abs/1602.07868 for how it works.
name: An optional string name. Defaults to `FC_%d % num_units_out`. Used
to name the variable scope where the variables for the layer live.
"""
self.set_constructor_args('td.FC', *get_local_arguments(FC.__init__, True))
if not initializer:
# TODO(SamEisenstat): This constant is calibrated for ReLU, something else
# might be better for ReLU6.
if activation in [tf.nn.relu, tf.nn.relu6]:
initializer = tf.uniform_unit_scaling_initializer(1.43)
elif activation == tf.tanh:
initializer = tf.uniform_unit_scaling_initializer(1.15)
elif not activation:
initializer = tf.uniform_unit_scaling_initializer(1.0)
else:
initializer = tf.truncated_normal_initializer(stddev=0.01)
self._activation = activation
self._initializer = initializer
self._input_keep_prob = input_keep_prob
self._output_keep_prob = output_keep_prob
self._normalization_fn = normalization_fn
self._weight_norm = weight_norm
if name is None: name = 'FC_%d' % num_units_out
super(FC, self).__init__(
output_type=tdt.TensorType([num_units_out]), name_or_scope=name)
@property
def bias(self):
if not self._created_variables:
raise RuntimeError('bias have not been created; call the layer first')
return self._bias
@property
def weights(self):
if not self._created_variables:
raise RuntimeError('weights have not been created; call the layer first')
return self._weights
@property
def scales(self):
if not self._created_variables:
raise RuntimeError('scales have not been created; call the layer first')
return self._scales
@property
def output_size(self):
return self.output_type.shape[0]
@property
def weight_norm(self):
return self._weight_norm
def _create_variables(self):
if self.input_type.dtype != 'float32':
raise TypeError('FC input dtype must be float32: %s' %
self.input_type.dtype)
if self.input_type.ndim != 1:
raise TypeError('FC input shape must be 1D: %s' %
str(self.input_type.shape))
self._bias = tf.get_variable(
'bias', self.output_type.shape, initializer=tf.constant_initializer(0))
self._weights = tf.get_variable(
'weights', [self.input_type.shape[0], self.output_type.shape[0]],
initializer=self._initializer)
if self._weight_norm:
self._scales = tf.get_variable(
'scales',
[self.output_type.shape[0]],
initializer=tf.constant_initializer(1.0))
  def _process_batch(self, batch):
    """Applies dropout -> affine (optionally weight-normed) -> norm/activation -> dropout."""
    if self._input_keep_prob is not None:
      batch = tf.nn.dropout(batch, self._input_keep_prob)
    if self._weight_norm:
      # Weight normalization: direction (l2-normalized columns) times
      # learned per-unit scales.
      y = tf.nn.xw_plus_b(batch,
                          tf.nn.l2_normalize(self._weights, 0) * self._scales,
                          self._bias)
    else:
      y = tf.nn.xw_plus_b(batch, self._weights, self._bias)
    # NOTE(review): normalization is applied only when an activation is also
    # set -- confirm this coupling is intentional rather than a missed case.
    if self._normalization_fn is not None and self._activation is not None:
      y = self._normalization_fn(y)
    if self._activation is not None: y = self._activation(y)
    if self._output_keep_prob is not None:
      y = tf.nn.dropout(y, self._output_keep_prob)
    return y
class Embedding(TensorToTensorLayer):
  """An embedding for integers.
  Embeddings require integer scalars as input, and build `float32` vector
  outputs. Embeddings can be applied to multiple inputs. `Embedding` doesn't
  do any hashing on its own, it just takes its inputs mod `num_buckets`
  to determine which embedding(s) to return.
  Implementation detail: `tf.gather` currently only supports `int32`
  and `int64`. If the input type is smaller than 32 bits it will be
  cast to `tf.int32`. Since all currently defined TF dtypes other than
  `int32` and `int64` have less than 32 bits, this means that we
  support all current integer dtypes.
  """
  def __init__(self, num_buckets, num_units_out, initializer=None, name=None,
               trainable=True, mod_inputs=True):
    """Initializes the layer.
    Args:
      num_buckets: How many buckets the embedding has.
      num_units_out: The number of output units in the layer.
      initializer: the initializer for the weights. Defaults to uniform unit
        scaling. The initializer can also be a Tensor or numpy array, in which
        case the weights are initialized to this value and shape. Note that in
        this case the weights will still be trainable unless you also pass
        `trainable=False`.
      name: An optional string name. Defaults to
        `Embedding_%d_%d % (num_buckets, num_units_out)`. Used to name the
        variable scope where the variables for the layer live.
      trainable: Whether or not to make the weights trainable.
      mod_inputs: Whether or not to mod the input by the number of buckets.
    Raises:
      ValueError: If the shape of `weights` is not
        `(num_buckets, num_units_out)`.
    """
    self.set_constructor_args('td.Embedding',
                              *get_local_arguments(Embedding.__init__, True))
    self._weights_shape = (num_buckets, num_units_out)
    if name is None: name = 'Embedding_%d_%d' % self._weights_shape
    if initializer is None:
      initializer = tf.uniform_unit_scaling_initializer(1.0)
    elif isinstance(initializer, np.ndarray):
      initializer = tf.convert_to_tensor(initializer)
    if isinstance(initializer, tf.Tensor):
      # A concrete tensor initializer fixes the shape itself, so the explicit
      # shape must not also be passed to get_variable.
      initializer.set_shape(self._weights_shape)
      self._weights_shape = None  # otherwise get_variable barfs
    self._initializer = initializer
    self._num_buckets = num_buckets
    self._num_units_out = num_units_out
    self._trainable = trainable
    self._mod_inputs = bool(mod_inputs)
    super(Embedding, self).__init__(
        output_type=tdt.TensorType([num_units_out]), name_or_scope=name)
  def _create_variables(self):
    """Validates the scalar-integer input type and creates `weights`.

    Raises:
      TypeError: If input is not an integer scalar, or is an integer type
        wider than 32 bits that tf.gather cannot consume.
    """
    if self.input_type.ndim != 0:
      raise TypeError('Embeddings take scalar inputs.')
    dtype = tf.as_dtype(self.input_type.dtype)
    if not dtype.is_integer: raise TypeError('Embeddings take integer inputs.')
    if dtype not in (tf.int32, tf.int64):  # only dtypes supported by tf.gather
      if np.iinfo(dtype.as_numpy_dtype).max > 2147483647:
        # pedantic future-proofing to handle hypothetical tf.uint64
        raise TypeError('cannot gather or upcast dtype %s' % dtype)
      self._cast = True
    else:
      self._cast = False
    self._weights = tf.get_variable(
        'weights', self._weights_shape, initializer=self._initializer,
        trainable=self._trainable)
  @property
  def weights(self):
    """The embedding table; valid only after the layer was called once."""
    if not self._created_variables:
      raise RuntimeError('weights have not been created; call the layer first')
    return self._weights
  @property
  def num_buckets(self):
    """Number of embedding rows (inputs are taken mod this when enabled)."""
    return self._num_buckets
  @property
  def num_units_out(self):
    """Dimensionality of each embedding vector."""
    return self._num_units_out
  def _process_batch(self, batch):
    # We have to call tf.abs before calling tf.mod, because tf.mod gives
    # negative outputs when given negative inputs.
    if self._cast: batch = tf.cast(batch, tf.int32)
    if self._mod_inputs: batch = tf.mod(tf.abs(batch), self._num_buckets)
    return tf.gather(self._weights, batch)
def _binary_sequences_of_at_most(n):
return itertools.chain.from_iterable(
itertools.product((0, 1), repeat=i) for i in xrange(n+1))
class FractalNet(TensorToTensorLayer):
  """An implementation of FractalNet.
  See https://arxiv.org/abs/1605.07648 for details.
  """
  # Choices for drop-path (names describe which paths are kept.)
  _BOTH = 0
  _JUST_BASE = 1
  _JUST_RECURSE = 2
  def __init__(self, num_fractal_blocks, fractal_block_depth,
               base_layer_builder, mixer=None, drop_path=False,
               p_local_drop_path=0.5, p_drop_base_case=0.25,
               p_drop_recursive_case=0.25, name=None):
    """Initializes the FractalNet.
    Args:
      num_fractal_blocks: The number of fractal blocks the net is made from.
        This variable is named `B` in the FractalNet paper. This argument uses
        the word `block` in the sense that the FractalNet paper uses it.
      fractal_block_depth: How deeply nested the blocks are. This variable is
        `C-1` in the paper.
      base_layer_builder: A callable that takes a name and returns a `Layer`
        object. We would pass in a convolutional layer to reproduce the results
        in the paper.
      mixer: The join operation in the paper. Assumed to have two arguments.
        Defaults to element-wise averaging. Mixing doesn't occur if either path
        gets dropped.
      drop_path: A boolean, whether or not to do drop-path. Defaults to False.
        If selected, we do drop path as described in the paper (unless drop-path
        choices is provided, in which case how drop path is done can be further
        customized by the user).
      p_local_drop_path: A probability between 0.0 and 1.0. 0.0 means always do
        global drop path. 1.0 means always do local drop path. Default: 0.5,
        as in the paper.
      p_drop_base_case: The probability, when doing local drop path, to drop the
        base case.
      p_drop_recursive_case: The probability, when doing local drop path, to
        drop the recursive case. (Requires: `p_drop_base_case +
        p_drop_recursive_case < 1`)
      name: An optional string name.
    """
    self.set_constructor_args('td.FractalNet',
                              *get_local_arguments(FractalNet.__init__, True))
    if mixer is None:
      mixer = lambda a, b: tf.add(a, b)/2.0
    self._num_fractal_blocks = num_fractal_blocks
    self._fractal_block_depth = fractal_block_depth
    self._mixer = mixer
    self._drop_path = drop_path
    self._p_local_drop_path = p_local_drop_path
    self._p_drop_base_case = p_drop_base_case
    self._p_drop_recursive_case = p_drop_recursive_case
    self._drop_path_choices = None
    super(FractalNet, self).__init__(name_or_scope=name)
    # One child layer per (block, binary path prefix); _choice_id maps the
    # non-leaf prefixes to indices into the drop-path choice vector.
    self._children = {}
    self._choice_id = {}
    self._choices = []
    with tf.variable_scope(self._vscope):
      for block_idx in xrange(num_fractal_blocks):
        for binary_seq in _binary_sequences_of_at_most(fractal_block_depth):
          child_name = 'block_' + '_'.join(
              [str(block_idx)] + [str(b) for b in binary_seq])
          self._children[block_idx, binary_seq] = base_layer_builder(
              name=child_name)
          if len(binary_seq) < fractal_block_depth:
            self._choice_id[(block_idx, binary_seq)] = len(self._choices)
            self._choices.append((block_idx, binary_seq))
    self._propagate_types()
  def _create_drop_path_choices(self):
    """Samples a per-choice drop-path decision vector (int32 ndarray)."""
    if not self._drop_path:  # Drop path was turned off.
      return np.zeros(shape=[len(self._choices)], dtype='int32')
    elif np.random.uniform() < self._p_local_drop_path:
      # Local drop-path (make each choice independently at random.)
      choices = np.random.uniform(size=[len(self._choices)])
      drop_base = choices < self._p_drop_base_case
      drop_recursive = np.logical_and(
          choices < (self._p_drop_base_case + self._p_drop_recursive_case),
          np.logical_not(drop_base))
      return (np.int32(drop_base)*self._JUST_RECURSE +
              np.int32(drop_recursive)*self._JUST_BASE)
    else:
      # Global (pick a single column.)
      column = np.random.randint(self._fractal_block_depth)
      return np.array(
          [self._JUST_RECURSE if len(binary_seq) < column else self._JUST_BASE
           for _, binary_seq in self._choices],
          dtype='int32')
  @property
  def drop_path(self):
    """Whether drop-path is enabled for subsequent drop-path choice draws."""
    return self._drop_path
  @drop_path.setter
  def drop_path(self, value):
    self._drop_path = bool(value)
  def _propagate_types(self):
    # Two passes so type information can flow both up and down the tree.
    for _ in xrange(2):
      for child in six.itervalues(self._children):
        self.set_io_types(child)
  def _create_variables(self):
    if self._drop_path_choices is None:
      # py_func so a fresh random decision vector is drawn every session run.
      self._drop_path_choices, = tf.py_func(
          self._create_drop_path_choices, [], [tf.int32],
          stateful=True, name='calculate_drop_path')
  def _instantiate_subnet(self, batch, block_idx, seq_prefix):
    """Recursively builds the fractal column pair rooted at `seq_prefix`."""
    def zeros_fn():
      return tf.zeros_like(batch)
    def base_case_fn():
      return self._children[block_idx, seq_prefix](batch)
    def recursive_case_fn():
      first_subnet = self._instantiate_subnet(
          batch, block_idx, seq_prefix + (0,))
      return self._instantiate_subnet(
          first_subnet, block_idx, seq_prefix + (1,))
    if len(seq_prefix) == self._fractal_block_depth:
      return base_case_fn()
    else:
      choice = self._drop_path_choices[self._choice_id[(block_idx, seq_prefix)]]
      # Each side is computed only when its path is kept; dropped paths
      # contribute zeros (but are never mixed in, see `cases` below).
      base_case = tf.cond(
          tf.not_equal(choice, self._JUST_RECURSE), base_case_fn, zeros_fn)
      base_case.set_shape(batch.get_shape())
      recursive_case = tf.cond(
          tf.not_equal(choice, self._JUST_BASE), recursive_case_fn, zeros_fn)
      recursive_case.set_shape(batch.get_shape())
      cases = [
          (tf.equal(choice, self._BOTH),
           lambda: self._mixer(base_case, recursive_case)),
          (tf.equal(choice, self._JUST_BASE), lambda: base_case),
          (tf.equal(choice, self._JUST_RECURSE), lambda: recursive_case)]
      result = tf.case(cases, lambda: base_case)
      result.set_shape(batch.get_shape())
      return result
  def _process_batch(self, batch):
    # Fractal blocks are applied sequentially, each feeding the next.
    for block_idx in xrange(self._num_fractal_blocks):
      batch = self._instantiate_subnet(batch, block_idx, ())
    return batch
class ScopedLayer(Layer):
  """Create a Fold Layer that wraps a TensorFlow layer or RNN cell.
  The default TensorFlow mechanism for weight sharing is to use
  tf.variable_scope, but this requires that a scope parameter be passed
  whenever the layer is invoked. ScopedLayer stores a TensorFlow layer,
  along with its variable scope, and passes the scope appropriately.
  For example:
  ```
  gru_cell1 = td.ScopedLayer(tf.contrib.rnn.GRUCell(num_units=16), 'gru1')
  ... td.RNN(gru_cell1) ...
  ```
  """
  def __init__(self, layer_fn, name_or_scope=None):
    """Wrap a TensorFlow layer.
    Args:
      layer_fn: A callable that accepts and returns nests of batched tensors. A
        nest of tensors is either a tensor or a sequence of nests of tensors.
        Must also accept a `scope` keyword argument. For example, may be an
        instance of `tf.contrib.rnn.RNNCell`.
      name_or_scope: A variable scope or a string to use as the scope name.
    """
    self.set_constructor_args('td.ScopedLayer',
                              *get_local_arguments(ScopedLayer.__init__, True))
    if name_or_scope is None:
      # Derive a scope name from the wrapped callable when none is given.
      if hasattr(layer_fn, '__name__'):
        name_or_scope = layer_fn.__name__
      elif hasattr(layer_fn, 'func') and hasattr(layer_fn.func, '__name__'):
        # If layer_fn is e.g. a functools.partial.
        name_or_scope = layer_fn.func.__name__
    super(ScopedLayer, self).__init__(name_or_scope=name_or_scope)
    self._layer_fn = layer_fn
    if isinstance(layer_fn, tf.contrib.rnn.RNNCell):
      # RNN cells expose their output/state sizes up front, so the Fold
      # output type can be set immediately.
      self.set_output_type((layer_fn.output_size, layer_fn.state_size))
  @property
  def state_size(self):
    """State size of the wrapped RNN cell (delegates to `layer_fn`)."""
    return self._layer_fn.state_size
  @property
  def output_size(self):
    """Output size of the wrapped RNN cell (delegates to `layer_fn`)."""
    return self._layer_fn.output_size
  def __call__(self, *args):
    """Invokes the wrapped callable inside this layer's variable scope."""
    result = self._layer_fn(*args, scope=self._vscope)
    self._vscope.reuse_variables()  # Reuse scope on subsequent calls
    return result
def get_local_arguments(fun, is_method=False):
  """Return the callers arguments and non-default keyword arguments.

  Args:
    fun: The function or method that is calling get_local_arguments.
    is_method: True if this is a method with a self argument.

  Returns:
    A tuple of (list of arguments, list of non default keyword arguments)
  """
  frame = inspect.currentframe().f_back
  argvals = inspect.getargvalues(frame)
  # inspect.getargspec was removed in Python 3.11; prefer getfullargspec
  # when available.  Both results expose the `args`/`defaults` fields used
  # below.  (The `or` keeps getargspec unevaluated on modern Pythons.)
  spec_fn = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
  argspec = spec_fn(fun)
  lvals = argvals.locals
  # argspec.defaults is None (not an empty tuple) when `fun` declares no
  # keyword defaults; normalize so the len() call below cannot crash.
  defaults = argspec.defaults or ()
  num_args = len(argspec.args) - len(defaults)
  arg_names = argspec.args[0:num_args]
  kwarg_names = argspec.args[num_args:]
  args = [lvals[k] for k in arg_names]
  kwargs_a = [(k, lvals[k], d) for (k, d) in zip(kwarg_names, defaults)]
  kwargs = [(k, v) for (k, v, d) in kwargs_a if v != d]
  if is_method: args = args[1:]  # strip off the self argument
  return (args, kwargs)
| StarcoderdataPython |
3357915 | <gh_stars>0
from IRCParser import parseIRC
import json
import pprint
# Shared pretty-printer for the expected/actual dumps below.
pp = pprint.PrettyPrinter(indent=4, compact=False, depth=100000)

with open("src/parseTests.json", "r") as f:
    tests = json.load(f)

# Order in which the parsed fields are printed for each test case.
PRINT_ORDER = ("command", "param", "params", "prefix", "raw", "tags", "trailing")
# Order in which fields are compared (kept identical to the original script
# so a failing run raises on the same field it always did).
CHECK_ORDER = ("command", "prefix", "tags", "params", "raw", "param", "trailing")

for raw_message, expected in tests.items():
    # Show the expectation before parsing so a crash or assertion failure
    # is attributable to a specific test case.
    expected["raw"] = raw_message
    print(f"test: `{raw_message}`")
    print("expected: ")
    pp.pprint(expected)
    actual = parseIRC(raw_message)
    print("actual: ")
    print("{")
    for field in PRINT_ORDER:
        print(f' "{field}": ', end="")
        pp.pprint(getattr(actual, field))
    print('}')
    for field in CHECK_ORDER:
        assert getattr(actual, field) == expected[field]
print("Ur all good!")
| StarcoderdataPython |
3481969 | #!/usr/bin/env python
import codecs
import os
import re
import xml
import xml.sax.saxutils as saxutils
from lib.core.common import getUnicode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.exception import SqlmapFilePathException
from lib.core.settings import UNICODE_ENCODING
from thirdparty.prettyprint import prettyprint
from xml.dom.minidom import Document
from xml.parsers.expat import ExpatError
# XML element/attribute names used by XMLDump when serializing results
# (the schema is described in xml/sqlmap.xsd).
TECHNIC_ELEM_NAME = "Technic"
TECHNICS_ELEM_NAME = "Technics"
BANNER_ELEM_NAME = "Banner"
COLUMNS_ELEM_NAME = "DatabaseColumns"
COLUMN_ELEM_NAME = "Column"
CELL_ELEM_NAME = "Cell"
COLUMN_ATTR = "column"
ROW_ELEM_NAME = "Row"
TABLES_ELEM_NAME = "tables"
DATABASE_COLUMNS_ELEM = "DB"
DB_TABLES_ELEM_NAME = "DBTables"
DB_TABLE_ELEM_NAME = "DBTable"
IS_DBA_ELEM_NAME = "isDBA"
FILE_CONTENT_ELEM_NAME = "FileContent"
DB_ATTR = "db"
UNKNOWN_COLUMN_TYPE = "unknown"
USER_SETTINGS_ELEM_NAME = "UserSettings"
USER_SETTING_ELEM_NAME = "UserSetting"
USERS_ELEM_NAME = "Users"
USER_ELEM_NAME = "User"
DB_USER_ELEM_NAME = "DBUser"
SETTINGS_ELEM_NAME = "Settings"
DBS_ELEM_NAME = "DBs"
DB_NAME_ELEM_NAME = "DBName"
DATABASE_ELEM_NAME = "Database"
TABLE_ELEM_NAME = "Table"
DB_TABLE_VALUES_ELEM_NAME = "DBTableValues"
DB_VALUES_ELEM = "DBValues"
QUERIES_ELEM_NAME = "Queries"
QUERY_ELEM_NAME = "Query"
REGISTERY_ENTRIES_ELEM_NAME = "RegistryEntries"
REGISTER_DATA_ELEM_NAME = "RegisterData"
DEFAULT_DB = "All"
MESSAGE_ELEM = "Message"
MESSAGES_ELEM_NAME = "Messages"
ERROR_ELEM_NAME = "Error"
LST_ELEM_NAME = "List"
LSTS_ELEM_NAME = "Lists"
CURRENT_USER_ELEM_NAME = "CurrentUser"
CURRENT_DB_ELEM_NAME = "CurrentDB"
MEMBER_ELEM = "Member"
ADMIN_USER = "Admin"
REGULAR_USER = "User"
STATUS_ELEM_NAME = "Status"
RESULTS_ELEM_NAME = "Results"
UNHANDLED_PROBLEM_TYPE = "Unhandled"
NAME_ATTR = "name"
TYPE_ATTR = "type"
VALUE_ATTR = "value"
SUCESS_ATTR = "success"
NAME_SPACE_ATTR = 'http://www.w3.org/2001/XMLSchema-instance'
XMLNS_ATTR = "xmlns:xsi"
SCHEME_NAME = "sqlmap.xsd"
SCHEME_NAME_ATTR = "xsi:noNamespaceSchemaLocation"
# Control characters (0-31) plus non-ASCII bytes (127-255).
# list(range(...)) rather than py2-only `range(32) + range(127, 256)`: the
# original concatenation raises TypeError at import time under Python 3,
# while this form yields an identical list on both Python 2 and 3.
CHARACTERS_TO_ENCODE = list(range(32)) + list(range(127, 256))
# Extra entities passed to saxutils.escape beyond the built-in &amp;/&lt;/&gt;.
ENTITIES = {'"': '&quot;', "'": '&apos;'}
class XMLDump(object):
    '''
    This class purpose is to dump the data into an xml Format.
    The format of the xml file is described in the scheme file xml/sqlmap.xsd

    NOTE(review): this module targets Python 2 (`basestring`,
    `dict.keys().sort()`); verify before running under Python 3.
    '''
    def __init__(self):
        # Output path, file handle, DOM document and its root element are
        # populated lazily by setOutputFile().
        self._outputFile = None
        self._outputFP = None
        self.__root = None
        self.__doc = Document()
    def _addToRoot(self, element):
        '''
        Adds element to the root element
        '''
        self.__root.appendChild(element)
    def __write(self, data, n=True):
        '''
        Writes the data into the file (newline-terminated when n is True,
        space-separated otherwise) and flags that output was produced.
        '''
        if n:
            self._outputFP.write("%s\n" % data)
        else:
            self._outputFP.write("%s " % data)
        self._outputFP.flush()
        kb.dataOutputFlag = True
    def _getRootChild(self, elemName):
        '''
        Returns the child of the root with the described name
        (or the empty NodeList when no such child exists).
        '''
        elements = self.__root.getElementsByTagName(elemName)
        if elements:
            return elements[0]
        return elements
    def _createTextNode(self, data):
        '''
        Creates a text node with utf8 data inside.
        The text is escaped to fit the xml text format.
        '''
        if data is None:
            return self.__doc.createTextNode(u'')
        else:
            escaped_data = saxutils.escape(data, ENTITIES)
            return self.__doc.createTextNode(escaped_data)
    def _createAttribute(self, attrName, attrValue):
        '''
        Creates an attribute node with utf8 data inside.
        The text is escaped to fit the xml text format.
        '''
        attr = self.__doc.createAttribute(attrName)
        if attrValue is None:
            attr.nodeValue = u''
        else:
            attr.nodeValue = getUnicode(attrValue)
        return attr
    def string(self, header, data, sort=True):
        '''
        Adds string element to the xml.
        '''
        # Sequences are delegated to lister() so they are serialized as a
        # <List> element rather than a single <Message>.
        if isinstance(data, (list, tuple, set)):
            self.lister(header, data, sort)
            return
        messagesElem = self._getRootChild(MESSAGES_ELEM_NAME)
        if (not(messagesElem)):
            messagesElem = self.__doc.createElement(MESSAGES_ELEM_NAME)
            self._addToRoot(messagesElem)
        if data:
            # NOTE(review): _formatString is not defined in this class --
            # presumably provided by a base class or monkey-patching; verify.
            data = self._formatString(data)
        else:
            data = ""
        elem = self.__doc.createElement(MESSAGE_ELEM)
        elem.setAttributeNode(self._createAttribute(TYPE_ATTR, header))
        elem.appendChild(self._createTextNode(data))
        messagesElem.appendChild(elem)
    def lister(self, header, elements, sort=True):
        '''
        Adds information formatted as list element
        '''
        lstElem = self.__doc.createElement(LST_ELEM_NAME)
        lstElem.setAttributeNode(self._createAttribute(TYPE_ATTR, header))
        if elements:
            if sort:
                # Best-effort: de-duplicate and case-insensitively sort;
                # unsortable/unhashable members leave the order untouched.
                try:
                    elements = set(elements)
                    elements = list(elements)
                    elements.sort(key=lambda x: x.lower())
                except:
                    pass
            for element in elements:
                memberElem = self.__doc.createElement(MEMBER_ELEM)
                lstElem.appendChild(memberElem)
                if isinstance(element, basestring):
                    memberElem.setAttributeNode(self._createAttribute(TYPE_ATTR, "string"))
                    memberElem.appendChild(self._createTextNode(element))
                elif isinstance(element, (list, tuple, set)):
                    # Nested sequences become a <Member type="list"> with one
                    # string <Member> per item.
                    memberElem.setAttributeNode(self._createAttribute(TYPE_ATTR, "list"))
                    for e in element:
                        memberElemStr = self.__doc.createElement(MEMBER_ELEM)
                        memberElemStr.setAttributeNode(self._createAttribute(TYPE_ATTR, "string"))
                        memberElemStr.appendChild(self._createTextNode(getUnicode(e)))
                        memberElem.appendChild(memberElemStr)
        listsElem = self._getRootChild(LSTS_ELEM_NAME)
        if not(listsElem):
            listsElem = self.__doc.createElement(LSTS_ELEM_NAME)
            self._addToRoot(listsElem)
        listsElem.appendChild(lstElem)
    def technic(self, technicType, data):
        '''
        Adds information about the technic used to extract data from the db
        '''
        technicElem = self.__doc.createElement(TECHNIC_ELEM_NAME)
        technicElem.setAttributeNode(self._createAttribute(TYPE_ATTR, technicType))
        textNode = self._createTextNode(data)
        technicElem.appendChild(textNode)
        technicsElem = self._getRootChild(TECHNICS_ELEM_NAME)
        if not(technicsElem):
            technicsElem = self.__doc.createElement(TECHNICS_ELEM_NAME)
            self._addToRoot(technicsElem)
        technicsElem.appendChild(technicElem)
    def banner(self, data):
        '''
        Adds information about the database banner to the xml.
        The banner contains information about the type and the version of the database.
        '''
        bannerElem = self.__doc.createElement(BANNER_ELEM_NAME)
        bannerElem.appendChild(self._createTextNode(data))
        self._addToRoot(bannerElem)
    def currentUser(self, data):
        '''
        Adds information about the current database user to the xml
        '''
        currentUserElem = self.__doc.createElement(CURRENT_USER_ELEM_NAME)
        textNode = self._createTextNode(data)
        currentUserElem.appendChild(textNode)
        self._addToRoot(currentUserElem)
    def currentDb(self, data):
        '''
        Adds information about the current database is use to the xml
        '''
        currentDBElem = self.__doc.createElement(CURRENT_DB_ELEM_NAME)
        textNode = self._createTextNode(data)
        currentDBElem.appendChild(textNode)
        self._addToRoot(currentDBElem)
    def dba(self, isDBA):
        '''
        Adds information to the xml that indicates whether the user has DBA privileges
        '''
        isDBAElem = self.__doc.createElement(IS_DBA_ELEM_NAME)
        isDBAElem.setAttributeNode(self._createAttribute(VALUE_ATTR, getUnicode(isDBA)))
        self._addToRoot(isDBAElem)
    def users(self, users):
        '''
        Adds a list of the existing users to the xml
        '''
        usersElem = self.__doc.createElement(USERS_ELEM_NAME)
        # A single user name is accepted and wrapped into a list.
        if isinstance(users, basestring):
            users = [users]
        if users:
            for user in users:
                userElem = self.__doc.createElement(DB_USER_ELEM_NAME)
                usersElem.appendChild(userElem)
                userElem.appendChild(self._createTextNode(user))
        self._addToRoot(usersElem)
    def dbs(self, dbs):
        '''
        Adds a list of the existing databases to the xml
        '''
        dbsElem = self.__doc.createElement(DBS_ELEM_NAME)
        if dbs:
            for db in dbs:
                dbElem = self.__doc.createElement(DB_NAME_ELEM_NAME)
                dbsElem.appendChild(dbElem)
                dbElem.appendChild(self._createTextNode(db))
        self._addToRoot(dbsElem)
    def userSettings(self, header, userSettings, subHeader):
        '''
        Adds information about the user's settings to the xml.
        The information can be user's passwords, privileges and etc..
        '''
        self._areAdmins = set()
        userSettingsElem = self._getRootChild(USER_SETTINGS_ELEM_NAME)
        if (not(userSettingsElem)):
            userSettingsElem = self.__doc.createElement(USER_SETTINGS_ELEM_NAME)
            self._addToRoot(userSettingsElem)
        userSettingElem = self.__doc.createElement(USER_SETTING_ELEM_NAME)
        userSettingElem.setAttributeNode(self._createAttribute(TYPE_ATTR, header))
        # A (settings_dict, admins_set) pair marks which users are admins.
        if isinstance(userSettings, (tuple, list, set)):
            self._areAdmins = userSettings[1]
            userSettings = userSettings[0]
        users = userSettings.keys()
        users.sort(key=lambda x: x.lower())
        for user in users:
            userElem = self.__doc.createElement(USER_ELEM_NAME)
            userSettingElem.appendChild(userElem)
            if user in self._areAdmins:
                userElem.setAttributeNode(self._createAttribute(TYPE_ATTR, ADMIN_USER))
            else:
                userElem.setAttributeNode(self._createAttribute(TYPE_ATTR, REGULAR_USER))
            settings = userSettings[user]
            settings.sort()
            for setting in settings:
                settingsElem = self.__doc.createElement(SETTINGS_ELEM_NAME)
                settingsElem.setAttributeNode(self._createAttribute(TYPE_ATTR, subHeader))
                settingTextNode = self._createTextNode(setting)
                settingsElem.appendChild(settingTextNode)
                userElem.appendChild(settingsElem)
        userSettingsElem.appendChild(userSettingElem)
    def dbTables(self, dbTables):
        '''
        Adds information of the existing db tables to the xml
        '''
        # A non-dict payload is recorded as a plain message instead.
        if not isinstance(dbTables, dict):
            self.string(TABLES_ELEM_NAME, dbTables)
            return
        dbTablesElem = self.__doc.createElement(DB_TABLES_ELEM_NAME)
        for db, tables in dbTables.items():
            tables.sort(key=lambda x: x.lower())
            dbElem = self.__doc.createElement(DATABASE_ELEM_NAME)
            dbElem.setAttributeNode(self._createAttribute(NAME_ATTR, db))
            dbTablesElem.appendChild(dbElem)
            for table in tables:
                tableElem = self.__doc.createElement(DB_TABLE_ELEM_NAME)
                tableElem.appendChild(self._createTextNode(table))
                dbElem.appendChild(tableElem)
        self._addToRoot(dbTablesElem)
    def dbTableColumns(self, tableColumns):
        '''
        Adds information about the columns of the existing tables to the xml
        '''
        columnsElem = self._getRootChild(COLUMNS_ELEM_NAME)
        if not(columnsElem):
            columnsElem = self.__doc.createElement(COLUMNS_ELEM_NAME)
        for db, tables in tableColumns.items():
            if not db:
                db = DEFAULT_DB
            dbElem = self.__doc.createElement(DATABASE_COLUMNS_ELEM)
            dbElem.setAttributeNode(self._createAttribute(NAME_ATTR, db))
            columnsElem.appendChild(dbElem)
            for table, columns in tables.items():
                tableElem = self.__doc.createElement(TABLE_ELEM_NAME)
                tableElem.setAttributeNode(self._createAttribute(NAME_ATTR, table))
                colList = columns.keys()
                colList.sort(key=lambda x: x.lower())
                for column in colList:
                    colType = columns[column]
                    colElem = self.__doc.createElement(COLUMN_ELEM_NAME)
                    if colType is not None:
                        colElem.setAttributeNode(self._createAttribute(TYPE_ATTR, colType))
                    else:
                        colElem.setAttributeNode(self._createAttribute(TYPE_ATTR, UNKNOWN_COLUMN_TYPE))
                    colElem.appendChild(self._createTextNode(column))
                    tableElem.appendChild(colElem)
        self._addToRoot(columnsElem)
    def dbTableValues(self, tableValues):
        '''
        Adds the values of specific table to the xml.
        The values are organized according to the relevant row and column.
        '''
        tableElem = self.__doc.createElement(DB_TABLE_VALUES_ELEM_NAME)
        if (tableValues is not None):
            # "__infos__" is a sentinel key carrying db/table/count metadata,
            # not an actual column.
            db = tableValues["__infos__"]["db"]
            if not db:
                db = "All"
            table = tableValues["__infos__"]["table"]
            count = int(tableValues["__infos__"]["count"])
            columns = tableValues.keys()
            columns.sort(key=lambda x: x.lower())
            tableElem.setAttributeNode(self._createAttribute(DB_ATTR, db))
            tableElem.setAttributeNode(self._createAttribute(NAME_ATTR, table))
            for i in range(count):
                rowElem = self.__doc.createElement(ROW_ELEM_NAME)
                tableElem.appendChild(rowElem)
                for column in columns:
                    if column != "__infos__":
                        info = tableValues[column]
                        value = info["values"][i]
                        # Whitespace/asterisk-only cells are treated as NULL.
                        if re.search("^[\ *]*$", value):
                            value = "NULL"
                        cellElem = self.__doc.createElement(CELL_ELEM_NAME)
                        cellElem.setAttributeNode(self._createAttribute(COLUMN_ATTR, column))
                        cellElem.appendChild(self._createTextNode(value))
                        rowElem.appendChild(cellElem)
        dbValuesElem = self._getRootChild(DB_VALUES_ELEM)
        if (not(dbValuesElem)):
            dbValuesElem = self.__doc.createElement(DB_VALUES_ELEM)
            self._addToRoot(dbValuesElem)
        dbValuesElem.appendChild(tableElem)
        logger.info("Table '%s.%s' dumped to XML file" % (db, table))
    def dbColumns(self, dbColumns, colConsider, dbs):
        '''
        Adds information about the columns.
        For every column name searched, collects the matching (db, table,
        column, type) entries from `dbs` and serializes them via
        dbTableColumns().
        '''
        for column in dbColumns.keys():
            printDbs = {}
            for db, tblData in dbs.items():
                for tbl, colData in tblData.items():
                    for col, dataType in colData.items():
                        if column in col:
                            if db in printDbs:
                                if tbl in printDbs[db]:
                                    printDbs[db][tbl][col] = dataType
                                else:
                                    printDbs[db][tbl] = {col: dataType}
                            else:
                                printDbs[db] = {}
                                printDbs[db][tbl] = {col: dataType}
                            continue
            self.dbTableColumns(printDbs)
    def query(self, query, queryRes):
        '''
        Adds details of an executed query to the xml.
        The query details are the query itself and its results.
        '''
        queryElem = self.__doc.createElement(QUERY_ELEM_NAME)
        queryElem.setAttributeNode(self._createAttribute(VALUE_ATTR, query))
        queryElem.appendChild(self._createTextNode(queryRes))
        queriesElem = self._getRootChild(QUERIES_ELEM_NAME)
        if (not(queriesElem)):
            queriesElem = self.__doc.createElement(QUERIES_ELEM_NAME)
            self._addToRoot(queriesElem)
        queriesElem.appendChild(queryElem)
    def registerValue(self, registerData):
        '''
        Adds information about an extracted registry key to the xml
        '''
        registerElem = self.__doc.createElement(REGISTER_DATA_ELEM_NAME)
        registerElem.appendChild(self._createTextNode(registerData))
        registriesElem = self._getRootChild(REGISTERY_ENTRIES_ELEM_NAME)
        if (not(registriesElem)):
            registriesElem = self.__doc.createElement(REGISTERY_ENTRIES_ELEM_NAME)
            self._addToRoot(registriesElem)
        registriesElem.appendChild(registerElem)
    def rFile(self, filePath, data):
        '''
        Adds an extracted file's content to the xml
        '''
        fileContentElem = self.__doc.createElement(FILE_CONTENT_ELEM_NAME)
        fileContentElem.setAttributeNode(self._createAttribute(NAME_ATTR, filePath))
        fileContentElem.appendChild(self._createTextNode(data))
        self._addToRoot(fileContentElem)
    def setOutputFile(self):
        '''
        Initiates the xml file from the configuration.
        If the file already contains parseable XML, its root is reused so new
        results are appended; otherwise a fresh <Results> root is created.
        '''
        if (conf.xmlFile):
            try:
                self._outputFile = conf.xmlFile
                self.__root = None
                if os.path.exists(self._outputFile):
                    try:
                        self.__doc = xml.dom.minidom.parse(self._outputFile)
                        self.__root = self.__doc.childNodes[0]
                    except ExpatError:
                        # Existing file is not valid XML; start over.
                        self.__doc = Document()
                self._outputFP = codecs.open(self._outputFile, "w+", UNICODE_ENCODING)
                if self.__root is None:
                    self.__root = self.__doc.createElementNS(NAME_SPACE_ATTR, RESULTS_ELEM_NAME)
                    self.__root.setAttributeNode(self._createAttribute(XMLNS_ATTR, NAME_SPACE_ATTR))
                    self.__root.setAttributeNode(self._createAttribute(SCHEME_NAME_ATTR, SCHEME_NAME))
                    self.__doc.appendChild(self.__root)
            except IOError:
                raise SqlmapFilePathException("Wrong filename provided for saving the xml file: %s" % conf.xmlFile)
    def getOutputFile(self):
        """Returns the path of the xml output file (None until configured)."""
        return self._outputFile
    def finish(self, resultStatus, resultMsg=""):
        '''
        Finishes the dumper operation:
        1. Adds the session status to the xml
        2. Writes the xml to the file
        3. Closes the xml file
        '''
        if ((self._outputFP is not None) and not(self._outputFP.closed)):
            statusElem = self.__doc.createElement(STATUS_ELEM_NAME)
            statusElem.setAttributeNode(self._createAttribute(SUCESS_ATTR, getUnicode(resultStatus)))
            if not resultStatus:
                # On failure, record the error type (exception class name or
                # a generic "Unhandled") and its message.
                errorElem = self.__doc.createElement(ERROR_ELEM_NAME)
                if isinstance(resultMsg, Exception):
                    errorElem.setAttributeNode(self._createAttribute(TYPE_ATTR, type(resultMsg).__name__))
                else:
                    errorElem.setAttributeNode(self._createAttribute(TYPE_ATTR, UNHANDLED_PROBLEM_TYPE))
                errorElem.appendChild(self._createTextNode(getUnicode(resultMsg)))
                statusElem.appendChild(errorElem)
            self._addToRoot(statusElem)
            # The whole document is serialized in one shot at shutdown.
            self.__write(prettyprint.formatXML(self.__doc, encoding=UNICODE_ENCODING))
            self._outputFP.close()
def closeDumper(status, msg=""):
    """
    Finishes and closes the session's dumper, when one is configured.
    """
    session_dumper = getattr(conf, "dumper", None)
    if hasattr(session_dumper, "finish"):
        session_dumper.finish(status, msg)
# Module-level singleton dumper instance used by the rest of the engine.
dumper = XMLDump()
| StarcoderdataPython |
11336688 | from django.contrib import admin
# from .models import related models
from .models import *
# Register your models here.
# CarModelInline class
class CarModelInline(admin.StackedInline):
    """Inline admin: edit CarModel rows directly on the CarMake admin page."""
    model = CarModel
    extra = 5  # number of extra blank CarModel forms displayed
# CarMakeAdmin class with CarModelInline
class CarMakeAdmin(admin.ModelAdmin):
    """Admin for CarMake with its CarModels editable inline."""
    inlines = [CarModelInline]
# Register models here
# Expose both models in the Django admin; CarMake uses the customized admin
# with inline CarModel editing.
admin.site.register(CarMake, CarMakeAdmin)
admin.site.register(CarModel)
6508961 | <reponame>Mstoned/Data-Structures
# python3
class HeapBuilder:
    """Converts an array into a min-heap, recording the swaps performed.

    The recorded (i, j) pairs, applied in order to the original array,
    reproduce the final heap -- this is the required program output.
    """
    def __init__(self):
        self._swaps = []  # list of (i, j) index pairs, in the order applied
        self._data = []
    def ReadData(self):
        """Reads the array size and elements from standard input."""
        n = int(input())
        self._data = [int(s) for s in input().split()]
        assert n == len(self._data)
    def WriteResponse(self):
        """Prints the number of swaps, then one 'i j' pair per line."""
        print(len(self._swaps))
        for swap in self._swaps:
            print(swap[0], swap[1])
    def swapdown(self, i):
        """Sifts the element at index i down until the min-heap property
        holds below it, appending every swap to self._swaps.

        Iterative rather than recursive: same swap sequence, no risk of
        hitting the recursion limit on deep heaps.
        """
        n = len(self._data)
        while True:
            smallest = i
            left, right = 2 * i + 1, 2 * i + 2
            if left < n and self._data[left] < self._data[smallest]:
                smallest = left
            if right < n and self._data[right] < self._data[smallest]:
                smallest = right
            if smallest == i:
                break
            self._swaps.append((i, smallest))
            self._data[i], self._data[smallest] = \
                self._data[smallest], self._data[i]
            i = smallest
    def GenerateSwaps(self):
        """Builds a min-heap in O(n) by sifting down every internal node,
        starting from the last one (selection-sort dead code removed)."""
        for i in range(len(self._data) // 2, -1, -1):
            self.swapdown(i)
    def Solve(self):
        """Reads input, heapifies, and writes the swap sequence."""
        self.ReadData()
        self.GenerateSwaps()
        self.WriteResponse()
if __name__ == '__main__':
    # Read the array from stdin, heapify it, and print the swap sequence.
    heap_builder = HeapBuilder()
    heap_builder.Solve()
| StarcoderdataPython |
267463 | """ ImageNet Validation Script
Hacked together by / Copyright 2020 <NAME> (https://github.com/rwightman)
"""
import time
import argparse
import fnmatch
import jax
from absl import logging
import objax
import jeffnet.data.tf_imagenet_data as imagenet_data
from jeffnet.common import correct_topk, AverageMeter, get_model_cfg, list_models
from jeffnet.objax import create_model
# Command-line interface for the validation script.
# NOTE(review): the description says "PyTorch" but this script drives an
# Objax/JAX model -- confirm intended wording before changing user-facing text.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('--model', '-m', metavar='MODEL', default='tf_efficientnet_b0',
                    help='model architecture (default: tf_efficientnet_b0)')
# Fixed: help text previously claimed "default: 256" while the actual
# default is 250.
parser.add_argument('-b', '--batch-size', default=250, type=int,
                    metavar='N', help='mini-batch size (default: 250)')
def validate(args):
    """Run ImageNet evaluation for one model and return top-1/top-5 accuracy.

    Fixed: the original had its summary string stranded after `eval_step`
    (a no-op bare string expression); it is now a proper docstring.

    Args:
        args: parsed CLI namespace with `model`, `data` and `batch_size`.

    Returns:
        dict with float keys `top1` and `top5` (accuracy percentages).
    """
    model = create_model(args.model, pretrained=True)
    print(f'Created {args.model} model. Validating...')

    # JIT-compile the evaluation step against the model's variable collection.
    eval_step = objax.Jit(
        lambda images, labels: eval_forward(model, images, labels),
        model.vars())

    # Input pipeline: image size and normalization come from the model's
    # default config; mean/std are scaled to the 0-255 pixel range.
    image_size = model.default_cfg['input_size'][-1]
    test_ds, num_batches = imagenet_data.load(
        imagenet_data.Split.TEST,
        is_training=False,
        image_size=image_size,
        batch_dims=[args.batch_size],
        chw=True,
        mean=tuple([x * 255 for x in model.default_cfg['mean']]),
        std=tuple([x * 255 for x in model.default_cfg['std']]),
        tfds_data_dir=args.data)

    batch_time = AverageMeter()
    correct_top1, correct_top5 = 0, 0
    total_examples = 0
    start_time = prev_time = time.time()
    for batch_index, batch in enumerate(test_ds):
        images, labels = batch['images'], batch['labels']
        top1_count, top5_count = eval_step(images, labels)
        correct_top1 += int(top1_count)
        correct_top5 += int(top5_count)
        total_examples += images.shape[0]
        batch_time.update(time.time() - prev_time)
        # Periodic progress report (skip batch 0 so rates exclude warm-up).
        if batch_index % 20 == 0 and batch_index > 0:
            print(
                f'Test: [{batch_index:>4d}/{num_batches}] '
                f'Rate: {images.shape[0] / batch_time.val:>5.2f}/s ({images.shape[0] / batch_time.avg:>5.2f}/s) '
                f'Acc@1: {100 * correct_top1 / total_examples:>7.3f} '
                f'Acc@5: {100 * correct_top5 / total_examples:>7.3f}')
        prev_time = time.time()

    acc_1 = 100 * correct_top1 / total_examples
    acc_5 = 100 * correct_top5 / total_examples
    print(f'Validation complete. {total_examples / (prev_time - start_time):>5.2f} img/s. '
          f'Acc@1 {acc_1:>7.3f}, Acc@5 {acc_5:>7.3f}')
    return dict(top1=float(acc_1), top5=float(acc_5))
def eval_forward(model, images, labels):
    """Forward one batch in eval mode; return (top-1, top-5) correct counts."""
    predictions = model(images, training=False)
    return correct_topk(predictions, labels, topk=(1, 5))
def main():
    """CLI entry point: validate a single model, or every model matching a pattern.

    If `--model` names a known architecture it is validated directly.
    Otherwise the value is treated as an fnmatch pattern ('all' means every
    pretrained model) and each match is validated in turn.
    """
    args = parser.parse_args()
    logging.set_verbosity(logging.ERROR)
    print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
    print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)

    if get_model_cfg(args.model) is not None:
        validate(args)
    else:
        models = list_models(pretrained=True)
        if args.model != 'all':
            models = fnmatch.filter(models, args.model)
        if not models:
            print(f'ERROR: No models found to validate with pattern ({args.model}).')
            # Fixed: use SystemExit directly instead of the site-injected
            # exit() helper, which is not guaranteed to exist at runtime.
            raise SystemExit(1)
        print('Validating:', ', '.join(models))
        results = []
        for m in models:
            args.model = m
            res = validate(args)
            res.update(dict(model=m))
            results.append(res)
        print('Results:')
        for r in results:
            print(f"Model: {r['model']}, Top1: {r['top1']}, Top5: {r['top5']}")
if __name__ == '__main__':
    # Allow running this module directly as a CLI script.
    main()
| StarcoderdataPython |
1997751 | <reponame>maeotaku/leaf_recognition_sdk<filename>src/Features/Texture.py
import cv2
from mahotas.features import lbp
from ImageCoreManipulation import *
class Texture(object):
    """Local Binary Pattern (LBP) texture features for an image.

    On construction, computes LBP histograms at three (radius, points)
    scales — (1, 8), (2, 16), (3, 16) — plus their pairwise concatenations.
    Each single-scale histogram is passed through the project's `normalize`
    helper and stored as float32.
    """

    def __init__(self, img):
        # img: image array to analyse. Thresholding was disabled upstream
        # (see the commented-out threadsHolding call).
        self.img = img#threadsHolding(img)
        #self.pixels = pixels
        print("Extracting LBP...")
        # Single-scale LBP histograms, normalized.
        self.lbpR1P8 = normalize(np.float32(lbp(self.img, 1, 8, ignore_zeros=False)))
        self.lbpR2P16 = normalize(np.float32(lbp(self.img, 2, 16, ignore_zeros=False)))
        self.lbpR3P16 = normalize(np.float32(lbp(self.img, 3, 16, ignore_zeros=False)))
        #self.lbpR3P24 = normalize(np.float32(lbp.lbp(self.img, 3, 24, ignore_zeros=False)))
        # Multi-scale descriptors: pairwise concatenations of the above.
        self.lbpR1P8_R2P16 = np.float32(np.concatenate([self.lbpR1P8, self.lbpR2P16]))
        self.lbpR1P8_R3P16 = np.float32(np.concatenate([self.lbpR1P8, self.lbpR3P16]))
        self.lbpR2P16_R3P16 = np.float32(np.concatenate([self.lbpR2P16, self.lbpR3P16]))
        #self.lbpR1P8pic = np.float32(lbp.lbp_transform(self.img, 1, 8, ignore_zeros=False))
        #self.lbpR2P16pic = np.float32(lbp.lbp_transform(self.img, 2, 16, ignore_zeros=False))
        #self.lbpR3P16pic = np.float32(lbp.lbp_transform(self.img, 3, 16, ignore_zeros=False))
        #self.lbpR3P24pic = np.float32(lbp.lbp_transform(self.img, 3, 24, ignore_zeros=False))
        # Disabled keypoint descriptors (SURF/BRIEF/ORB), kept for reference:
        '''
        self.SURFFimg, self.SURFdescs = SURF(self.img)
        self.BRIEFimg, self.BRIEFdescs = BRIEF(self.img)
        self.ORBimg, self.ORBdescs = ORB(self.img)
        '''
38191 | <reponame>rmomizo/Ubiqu-Ity
import copy
from unittest import TestCase
import TestResults
__author__ = 'zthomae'
class TestArgumentParser(TestCase):
    """Checks that TestResults' CLI parser requires --type/--input/--results
    and enforces their arities: exactly one type, one or more input files,
    one or more results files."""

    def setUp(self):
        self.parser = TestResults.make_parser()

    def test_type_should_not_be_optional(self):
        try:
            TestResults.parse_args(self.parser, '--input foo --results bar'.split())
        except ValueError:
            return
        self.fail('Parser does not fail when a test type is not present')

    def test_inputs_should_not_be_optional(self):
        try:
            TestResults.parse_args(self.parser, '--type foo --results bar'.split())
        except ValueError:
            return
        self.fail('Parser does not fail when input files are not present')

    def test_results_should_not_be_optional(self):
        try:
            TestResults.parse_args(self.parser, '--type foo --input bar'.split())
        except ValueError:
            return
        self.fail('Parser does not fail when results files are not present')

    def test_only_one_type_allowed(self):
        try:
            TestResults.parse_args(self.parser, '--type two words --input foo --results bar'.split())
        except SystemExit:  # TODO: Wrap argparse behavior
            return
        self.fail('Parser accepts more than one test type')

    def test_should_expect_at_least_one_input_file(self):
        try:
            TestResults.parse_args(self.parser, '--type foo --input --results bar'.split())
        except SystemExit:
            return
        self.fail('Parser accepts zero input files')

    def test_should_expect_at_least_one_results_file(self):
        try:
            TestResults.parse_args(self.parser, '--type foo --input bar --results'.split())
        except SystemExit:
            return
        self.fail('Parser accepts zero results files')

    def test_should_allow_more_than_one_input_file(self):
        try:
            TestResults.parse_args(self.parser, '--type foo --input these files --results bar'.split())
        except SystemExit:
            self.fail("Parser doesn't accept multiple input files")

    def test_should_allow_more_than_one_results_file(self):
        try:
            TestResults.parse_args(self.parser, '--type foo --input file --results these files'.split())
        except SystemExit:
            self.fail("Parser doesn't accept multiple results files")
class TestInputFileParser(TestCase):
    """Checks parse_input_files: keys are file basenames, values carry the
    original full path under 'fullpath'."""

    def setUp(self):
        self.input_files = ['/foo/bar/Text1.txt', 'bar/baz/Text2.txt', 'Text3.txt', '../Text4.txt']
        self.results = TestResults.parse_input_files(self.input_files)

    def test_should_use_basename(self):
        if sorted(self.results.keys()) != sorted(['Text1.txt', 'Text2.txt', 'Text3.txt', 'Text4.txt']):
            self.fail('parse_input_files should return a dictionary with input file basenames as keys')

    def test_should_return_fullpaths(self):
        if any(map(lambda x: 'fullpath' not in x, self.results.values())):
            self.fail('parse_input_files should return fullpaths to input files')
class TestDocuscopeResultsParser(TestCase):
    """Checks parse_docuscope_results against synthetic XML fragments:
    files are collected only from AnnotatedText elements with a File
    attribute, across one or many results files, and every entry gets
    'present' and 'text' keys."""

    def setUp(self):
        # Well-formed fragment: three AnnotatedText entries.
        self.ds_results_file = ''.join([
            '<AnnotatedText File="Text1.txt" Group="foo" />',
            '<AnnotatedText File="Text2.txt" Group="foo" />',
            '<AnnotatedText File="Text3.txt" Group="bar" />'
        ])
        # Second well-formed fragment for the multi-file case.
        self.ds_results_file_2 = ''.join([
            '<AnnotatedText File="Text4.txt" Group="foo" />',
            '<AnnotatedText File="Text5.txt" Group="bar" />'
        ])
        # Wrong element name for the first two entries.
        self.ds_wrong_tag_results_file = ''.join([
            '<Text File="Text1.txt" Group="foo" />',
            '<Text File="Text2.txt" Group="foo" />',
            '<AnnotatedText File="Text3.txt" Group="foo" />'
        ])
        # Misspelled File attribute on the first entry.
        self.ds_wrong_attr_results_file = ''.join([
            '<AnnotatedText Fil="Text1.txt" Group="foo" />',
            '<AnnotatedText File="Text2.txt" Group="foo" />',
        ])

    def test_should_handle_one_file(self):
        results = TestResults.parse_docuscope_results([self.ds_results_file])
        keys = results.keys()
        if any([
            'Text1.txt' not in keys,
            'Text2.txt' not in keys,
            'Text3.txt' not in keys
        ]):
            self.fail("parse_docuscope_results didn't add expected files for one input file")

    def test_should_handle_multiples_files(self):
        results = TestResults.parse_docuscope_results([self.ds_results_file, self.ds_results_file_2])
        keys = results.keys()
        if any([
            'Text1.txt' not in keys,
            'Text2.txt' not in keys,
            'Text3.txt' not in keys,
            'Text4.txt' not in keys,
            'Text5.txt' not in keys
        ]):
            self.fail("parse_docuscope_results didn't add expected files for multiple input files")

    def test_should_not_add_files_in_wrong_element(self):
        results = TestResults.parse_docuscope_results([self.ds_wrong_tag_results_file])
        if len(results.keys()) > 1:
            self.fail('parse_docuscope_results added files not in AnnotatedText elements')

    def test_should_do_nothing_if_missing_file_attribute(self):
        results = TestResults.parse_docuscope_results([self.ds_wrong_attr_results_file])
        # TODO: Bad test
        if len(results.keys()) != 1:
            self.fail("parse_docuscope_results didn't add files correctly")

    def test_should_add_present_status(self):
        results = TestResults.parse_docuscope_results([self.ds_results_file])
        if any(map(lambda x: 'present' not in x, results.values())):
            self.fail('parse_docuscope_results should add "present" key')

    def test_should_add_text(self):
        results = TestResults.parse_docuscope_results([self.ds_results_file])
        # TODO: This test doesn't check as much as it should
        if any(map(lambda x: 'text' not in x, results.values())):
            self.fail('parse_docuscope_results should add "text" key')
class TestMatchFiles(TestCase):
    """Checks match_files: marks results entries 'present' when named in the
    input list, leaves others untouched, and mutates neither argument."""

    def setUp(self):
        self.results_files = {
            'Text1.txt': {'text': '', 'present': False},
            'Text2.txt': {'text': '', 'present': False},
            'Text3.txt': {'text': '', 'present': False}
        }

    def test_should_copy_results(self):
        if self.results_files != TestResults.match_files([], self.results_files):
            self.fail('match_files should return results_files if input_files empty')

    def test_should_set_file_true_if_in_inputs(self):
        files = TestResults.match_files(['Text1.txt'], self.results_files)
        if files['Text1.txt']['present'] is not True:
            self.fail('match_files should set entries to True if present in input_files')

    def test_should_keep_file_false_if_not_in_inputs(self):
        files = TestResults.match_files(['Text1.txt'], self.results_files)
        if any([
            files['Text2.txt']['present'] is not False,
            files['Text3.txt']['present'] is not False
        ]):
            self.fail('match_files should keep entries set to False if not present in input_files')

    def test_should_not_change_input_files(self):
        input_files = ['Text1.txt']
        old_input = copy.copy(input_files)
        TestResults.match_files(input_files, self.results_files)
        if old_input != input_files:
            self.fail('match_files should not change input_files')

    def test_should_not_change_results_files(self):
        old_results = copy.copy(self.results_files)
        TestResults.match_files(['Text1.txt'], self.results_files)
        if old_results != self.results_files:
            self.fail('match_files should not change results_files')
class TestComputeTestPairs(TestCase):
    """Checks compute_test_pairs: only 'present' job entries are paired,
    missing present files raise ValueError, and each pair carries a name,
    the ground-truth text, and a formatted test input."""

    def setUp(self):
        self.job = {
            'Text1.txt': {'text': 'first', 'present': True},
            'Text2.txt': {'text': 'second', 'present': False},
            'Text3.txt': {'text': 'third', 'present': True}
        }
        self.input_files = {
            'Text1.txt': {'fullpath': '/Text1.txt', 'text': ''},
            'Text2.txt': {'fullpath': '/Text2.txt', 'text': ''},
            'Text3.txt': {'fullpath': '/Text3.txt', 'text': ''},
        }
        self.results = TestResults.compute_test_pairs(self.job, self.input_files, self.format)

    @staticmethod
    def format(text):
        # Identity formatter: pass the text through unchanged.
        return text

    def test_should_throw_valueerror_if_too_few_input_files(self):
        input_files = copy.copy(self.input_files)
        del input_files['Text3.txt']
        try:
            TestResults.compute_test_pairs(self.job, input_files, self.format)
        except ValueError:
            return
        self.fail('compute_test_pairs should throw ValueError if an input file is not in input_files')

    def test_should_not_include_not_present_job_files(self):
        if 'Text2.txt' in self.results:
            self.fail('compute_test_pairs should not include texts if they are not "present" in the job')

    def test_should_not_check_if_non_present_input_files_are_missing(self):
        input_files = copy.copy(self.input_files)
        del input_files['Text2.txt']
        try:
            TestResults.compute_test_pairs(self.job, input_files, self.format)
        except ValueError:
            self.fail("compute_test_pairs shouldn't throw ValueError if non-present job file is not in input_files")

    def test_should_return_names(self):
        for v in self.results.values():
            if 'name' not in v:
                self.fail('compute_test_pairs should return text names')

    def test_should_return_ground_truths(self):
        for text in self.results:
            if self.results[text]['ground_truth'] != self.job[text]['text']:
                self.fail('compute_test_pairs should return ground_truth text')

    def test_should_return_formatted_input_file(self):
        for v in self.results.values():
            if 'test_input' not in v:
                self.fail('compute_test_pairs should return test_input')
class TestCompareTestPairs(TestCase):
    """Checks compare_test_pairs: the supplied comparison function's output
    is attached under 'results' for every test pair."""

    def setUp(self):
        self.test_pairs = {
            'Text1.txt': {
                'name': 'Text1.txt',
                'ground_truth': 'foo',
                'test_input': 'foo'
            },
            'Text2.txt': {
                'name': 'Text2.txt',
                'ground_truth': 'foo',
                'test_input': 'bar'
            }
        }
        self.results = TestResults.compare_test_pairs(self.test_pairs, self.compare)

    @staticmethod
    def compare(t1, t2):
        # Trivial comparator: always reports an empty result dict.
        return {}

    def test_should_return_results_for_each_pair(self):
        if 'results' not in self.results['Text1.txt'] or 'results' not in self.results['Text2.txt']:
            self.fail('compare_test_pairs should return results for each of the test pairs')
    # TODO: Test more thoroughly
3518375 | #!/usr/bin/python
""" Autor: <NAME>
Universidad Icesi 2017
Proyecto de grado: Sistema open source para la deteccion paginas web maliciosas
Este script tiene como funcion la abstracion de caracteristicas
de un trafico de red obtenido del conjunto de urls.
Toda las caracteristicas obtenidas para el trafico de red son basadas
en el articulo Cross-Layer Detection of Malicious Websites"""
import pyshark
import pyshark.packet.packet
import datetime
import traceback
import os
from fnmatch import fnmatch
from cachecontrol import CacheControl
def longitud_url(url):
""" Determina la longitud de la url
url: es la direccion de la pagina web """
print "--------------- Obteniendo longitud URL -------------------"
return len(str(url))
def numero_caracteres_especiales(url):
""" Cuenta la cantidad de caracteres especiales que hay en la url
url: es la direccion de la pagina web """
print "--------------- Obteniendo Numero de caracteres esp -------------------"
try:
# cuantas letras hay por linea
contador_alfabeto = sum(1 for c in url if c.isalpha())
# cuantos numero hay por linea
contador_digitos = sum(1 for c in url if c.isdigit())
caracteres_alfanumericos = contador_digitos + contador_alfabeto
longitud = len(str(url)) # longitud de la linea
caracteres_especiales = longitud - caracteres_alfanumericos
except Exception:
caracteres_especiales = -1
return caracteres_especiales
def presencia_direccion_ip(url):
    """Intended to determine whether the URL lacks a domain name (i.e. uses
    a raw IP address).

    NOTE(review): unimplemented stub — it simply returns the URL unchanged;
    no IP-address detection is performed. Confirm intended behavior before
    relying on this feature.
    """
    return url
# https://media.readthedocs.org/pdf/requests-docs-es/latest/requests-docs-es.pdf
def chartset(request):
""" Conjunto de caracteres que determian la pagina
request: respuesta de la url"""
print "--------------- Obteniendo charset -------------------"
try:
charset = request.encoding
except AttributeError as error_atributo:
charset = "NA"
print "charset: " + str(error_atributo)
return charset
def nombre_servidor_web(request):
""" Nombre del servidor web donde esta alojada la pagina"""
print "--------------- Obteniendo servidor -------------------"
try:
cabecera = request.headers
server = cabecera.get("server")
except AttributeError as error_atributo:
server = "NA"
print "nombre_servidor_web: " + str(error_atributo)
return server
def http_header_control_cache(request):
""" Tipo de control de cache
url: direccion de la pagina web"""
print "--------------- Obteniendo cache control -------------------"
try:
cabecera = request.headers
cache_control = cabecera.get("cache-control")
except Exception:
cache_control = "NA"
print "Error inesperado en la %s no se encontro cache_control" % (url)
return cache_control
def http_header_content_length(request):
""" longitud del contenido de la cabecera http de la pagina web
url: direccion de la pagina web"""
print "--------------- Obteniendo content length -------------------"
try:
cabecera = request.headers
content_length = cabecera.get("content-length")
except Exception:
content_length = "NA"
print "Error inesperado en la %s no se encontro content_length en la cabecera" % (url)
return content_length
def whois_reg_date(whois):
""" Fecha en la que fue registrado el sitio"""
print "--------------- Obteniendo reg date -------------------"
reg_date = []
try:
reg_date = whois.creation_date
if reg_date != None:
if isinstance(reg_date, datetime.date):
reg_date = reg_date
elif len(reg_date) > 1:
reg_date = reg_date[0]
except AttributeError as error_atributo:
reg_date = "NA"
print "whois_reg_date: " + str(error_atributo)
return reg_date
def whois_update_date(whois):
""" Fecha en la que fue actualizado el sitio
url: direccion de la pagina web"""
print "--------------- Obteniendo update date -------------------"
try:
update = whois.updated_date
if update != None:
if isinstance(update, datetime.date):
update = update
elif len(update) > 1:
update = update[0]
except AttributeError as error_atributo:
update = "NA"
print "whois_update_date: " + str(error_atributo)
return update
def whois_country(whois):
""" nombre del pais donde proviene el servicio web
url: direccion de la pagina web"""
print "--------------- Obteniendo country -------------------"
try:
country = whois.country
except AttributeError as error_atributo:
country = "NA"
print 'whois_country: ' + str(error_atributo)
return country
def whois_state_prov(whois):
""" continente donde proviene el sitio web
url: direccion del sitio web"""
print "--------------- Obteniendo state prov -------------------"
try:
state_prov = whois.state
except AttributeError as error_atributo:
state_prov = "NA"
print "whois_state_prov: " + str(error_atributo)
return state_prov
def within_domain(whois):
""" Nombre del dominio de la url
url: direccion de la pagina web"""
print "--------------- Obteniendo whitin domain -------------------"
try:
domain = whois.domain
except AttributeError as error_atributo:
domain = "NA"
print "within_domain: " + str(error_atributo)
return domain
def read_file(ruta_archivo):
    """Open a tcpdump/pcap capture at the given path as a pyshark FileCapture."""
    return pyshark.FileCapture(ruta_archivo)
def tcp_conversation_exchange(captura, IP_HONEYPOT): # N1
    """Feature N1: list of TCP packets whose source IP is the honeypot.

    captura: pyshark capture; IP_HONEYPOT: local IP address to filter on.
    """
    print "--------------- Obteniendo conversacion tcp -------------------"
    pkts = []
    for pkt in captura:
        try:
            if pkt.transport_layer == 'TCP' and pkt.ip.src == IP_HONEYPOT:
                pkts.append(pkt)
        except AttributeError:
            # Packets without an IP/TCP layer (e.g. ARP) are skipped.
            pass
        except Exception:
            print "Error en tcp_conversartion_exchange"
    return pkts
def dist_remote_tcp_port(captura,IP_HONEYPOT): # N2
    """Feature N2: count of outgoing TCP packets with destination port != 80.

    NOTE(review): the original docstring spoke of *distinct* ports, but the
    code counts packets, not unique ports — confirm the intended semantics.
    """
    print "--------------- Obteniendo dist remote tcp port -------------------"
    numero_puertos = 0
    for pkt in captura:
        try:
            if pkt.transport_layer == 'TCP' and pkt.ip.src == IP_HONEYPOT:
                if pkt['TCP'].dstport != '80':
                    numero_puertos = numero_puertos + 1
        except AttributeError:
            pass
        except Exception:
            print 'Error en dist_remote_tcp_port'
    return numero_puertos
def remote_ips(pkts,IP_HONEYPOT): # N3
    """Feature N3: distinct destination IPs the honeypot communicated with."""
    print "--------------- Obteniendo remote ips -------------------"
    numero_ips = []
    for pkt in pkts:
        try:
            if pkt.ip.src == IP_HONEYPOT:
                dst_addr = pkt.ip.dst
                if dst_addr != IP_HONEYPOT:
                    if dst_addr not in numero_ips:
                        numero_ips.append(dst_addr)
        except AttributeError:
            pass
        except Exception:
            print 'Error en remote_ips'
    return numero_ips
def pkt_without_dns(captura,IP_HONEYPOT):
    """Outgoing honeypot packets that carry no DNS layer.

    NOTE(review): `lyr.layer_name in 'dns'` is a substring test, not an
    equality test — layers named 'd', 'n', 's' or 'ns' would also match.
    The intent was presumably `== 'dns'`; confirm before changing.
    """
    print "--------------- Obteniendo pkt sin dns -------------------"
    pkts_temp = []
    pkts_dns = []
    for pkt in captura:
        try:
            if pkt.ip.src == IP_HONEYPOT:
                for lyr in pkt.layers:
                    if lyr.layer_name in 'dns':
                        if pkt not in pkts_dns:
                            pkts_dns.append(pkt)
                if pkt not in pkts_dns and pkt not in pkts_temp:
                    pkts_temp.append(pkt)
        except AttributeError:
            pass
        except Exception:
            print 'Error en pkt_without_dns'
    return pkts_temp
def app_bytes(pkts,IP_HONEYPOT): # N4
    """Feature N4: total captured bytes of the given packets.

    Expected input is the output of pkt_without_dns() (DNS traffic excluded).
    IP_HONEYPOT is unused here; filtering happened upstream.
    """
    print "--------------- Obteniendo app bytes -------------------"
    tamanio_pkt = 0
    for pkt in pkts:
        try:
            tamanio_pkt = tamanio_pkt + int(pkt.captured_length)
        except AttributeError:
            pass
        except Exception:
            print 'Error en app_bytes'
    return tamanio_pkt
def udp_packets(pkts,IP_HONEYPOT): # N5
    """Feature N5: outgoing UDP packets (expects DNS-free packet list).

    NOTE(review): `lyr.layer_name in 'udp'` is a substring test, not an
    equality test; presumably `== 'udp'` was intended.
    """
    print "--------------- Obteniendo udp pkt -------------------"
    pkts_temp = []
    for pkt in pkts:
        try:
            if pkt.ip.src == IP_HONEYPOT:
                for lyr in pkt.layers:
                    if lyr.layer_name in 'udp':
                        if pkt not in pkts_temp:
                            pkts_temp.append(pkt)
        except AttributeError:
            pass
        except Exception:
            print 'Error en udp_packets'
    return pkts_temp
def tcp_urg_packet(captura,IP_HONEYPOT): # N6
    """Feature N6: outgoing TCP packets with the URG flag set.

    NOTE(review): `flag_urg not in 'None'` / `not in '0'` are substring
    tests; they behave as intended only because the values compared are
    single flags — confirm if this code is extended.
    """
    print "--------------- Obteniendo tcp urg -------------------"
    pkts_temp = []
    for pkt in captura:
        try:
            if pkt.transport_layer == 'TCP' and pkt.ip.src == IP_HONEYPOT:
                for lyr in pkt.layers:
                    flag_urg = str(lyr.get_field_value('tcp.flags.urg'))
                    if flag_urg not in 'None' and flag_urg not in '0' and pkt not in pkts_temp:
                        pkts_temp.append(pkt)
        except AttributeError:
            pass
        except Exception:
            print 'Error en tcp_urg_packet'
    return pkts_temp
def source_app_packets(captura,IP_HONEYPOT): # N7
    """Feature N7: packets sent by the honeypot (source IP == honeypot)."""
    print "--------------- Obteniendo source app pkts -------------------"
    pkts = []
    for pkt in captura:
        try:
            if pkt.ip.src == IP_HONEYPOT:
                pkts.append(pkt)
        except AttributeError:
            pass
        except Exception:
            print 'Error en source_app_packets'
    return pkts
def remote_app_packets(captura, IP_HONEYPOT): # N8
    """Feature N8: packets sent by remote servers to the honeypot (dest == honeypot)."""
    print "--------------- Obteniendo remote pkts -------------------"
    pkts = []
    for pkt in captura:
        try:
            if pkt.ip.dst == IP_HONEYPOT:
                pkts.append(pkt)
        except AttributeError:
            pass
        except Exception:
            print 'Error en remote_app_packets'
    return pkts
def source_app_bytes(captura, IP_HONEYPOT): # N9
    """Feature N9: total bytes of packets whose *destination* is the honeypot.

    NOTE(review): the name says "source" but the filter is `ip.dst ==
    IP_HONEYPOT` (incoming traffic) — the opposite of source_app_packets.
    The filters here and in remote_app_bytes look swapped; confirm against
    the feature definitions before changing behavior.
    """
    print "--------------- Obteniendo src app bytes -------------------"
    tamanio_pkt = 0
    for pkt in captura:
        try:
            if pkt.ip.dst == IP_HONEYPOT:
                tamanio_pkt = tamanio_pkt + int(pkt.captured_length)
        except AttributeError:
            pass
        except Exception:
            print 'Error en source_app_bytes'
    return tamanio_pkt
def remote_app_bytes(captura,IP_HONEYPOT): # N10
    """Feature N10: total bytes of packets whose *source* is the honeypot.

    NOTE(review): the name says "remote" but the filter is `ip.src ==
    IP_HONEYPOT` (outgoing traffic) — see the matching note on
    source_app_bytes; the two filters appear swapped.
    """
    print "--------------- Obteniendo remote app bytes -------------------"
    tamanio_pkt = 0
    for pkt in captura:
        try:
            if pkt.ip.src == IP_HONEYPOT:
                tamanio_pkt = tamanio_pkt + int(pkt.captured_length)
        except AttributeError:
            pass
        except Exception:
            print 'Error en remote_app_bytes'
    return tamanio_pkt
def paginas_visitadas(captura, IP_HONEYPOT):
    """Outgoing honeypot packets that carry an HTTP layer.

    NOTE(review): `lyr.layer_name in 'http'` is a substring test; layer
    names like 'h' or 'tt' would also match. Presumably `== 'http'`.
    """
    print "--------------- Obteniendo paginas visitadas -------------------"
    pkts_temp = []
    for pkt in captura:
        try:
            if pkt.ip.src == IP_HONEYPOT:
                for lyr in pkt.layers:
                    if lyr.layer_name in 'http':
                        if pkt not in pkts_temp:
                            pkts_temp.append(pkt)
        except AttributeError:
            pass
        except Exception:
            print 'Error en paginas_visitadas'
    return pkts_temp
def duration(captura, IP_HONEYPOT): # N11
    """Feature N11: TCP timestamp option (TSval) of the first HTTP packet.

    Returns 'NA' when there are no HTTP packets or the first one carries no
    TCP timestamp option.
    NOTE(review): the returned value is a raw TSval, not an actual duration
    — confirm against the feature definition.
    """
    print "--------------- Obteniendo duraccion -------------------"
    time = 'NA'
    try:
        pkts_http = paginas_visitadas(captura ,IP_HONEYPOT)[0]
        for lyr in pkts_http.layers:
            timestamp = lyr.get_field_value('tcp.options.timestamp.tsval')
            if str(timestamp) not in 'None':
                time = str(timestamp)
    except IndexError:
        # No HTTP packets found; keep the 'NA' sentinel.
        pass
    return time
def avg_local_pkt_rate(captura,IP_HONEYPOT): # N12
    """Feature N12: outgoing IP packet count divided by duration() (N7/N11).

    Returns 'NA' when duration() is unavailable.
    NOTE(review): `time not in 'NA'` is a substring test; a timestamp equal
    to 'N' or 'A' would wrongly be treated as unavailable.
    """
    print "--------------- Obteniendo avg local -------------------"
    time = duration(captura,IP_HONEYPOT)
    if time not in 'NA':
        resultado = len(source_app_packets(captura,IP_HONEYPOT)) / float(time)
    else:
        resultado = 'NA'
    return resultado
def avg_remote_pkt_rate(captura,IP_HONEYPOT): # N13
    """Feature N13: incoming IP packet count divided by duration() (N8/N11).

    Returns 'NA' when duration() is unavailable (see avg_local_pkt_rate's
    note about the substring comparison).
    """
    print "--------------- Obteniendo avg remote -------------------"
    time = duration(captura,IP_HONEYPOT)
    if time not in 'NA':
        resultado = len(remote_app_packets(captura,IP_HONEYPOT)) / float(time)
    else:
        resultado = 'NA'
    return resultado
def app_packets(captura,IP_HONEYPOT): # N14
    """Feature N14: outgoing IP packets, DNS traffic included.

    NOTE(review): `lyr.layer_name in 'ip'` is a substring test; a layer
    named 'i' or 'p' would also match. Presumably `== 'ip'`.
    """
    print "--------------- Obteniendo app pkts -------------------"
    pkts_temp = []
    for pkt in captura:
        try:
            if pkt.ip.src == IP_HONEYPOT:
                for lyr in pkt.layers:
                    if lyr.layer_name in 'ip':
                        if pkt not in pkts_temp:
                            pkts_temp.append(pkt)
        except AttributeError:
            pass
        except Exception:
            print 'Error en app_packets'
    return pkts_temp
def dns_query_times(captura,IP_HONEYPOT): # N15
    """Feature N15: list of DNS layers found in outgoing honeypot packets.

    NOTE(review): `lyr.layer_name in 'dns'` is a substring test — see the
    matching note on pkt_without_dns.
    """
    print "--------------- Obteniendo dns query -------------------"
    layers_dns = []
    for pkt in captura:
        try:
            if pkt.ip.src == IP_HONEYPOT:
                for lyr in pkt.layers:
                    if lyr.layer_name in 'dns':
                        layers_dns.append(lyr)
        except AttributeError:
            pass
        except Exception:
            print 'Error en dns_query_times'
    return layers_dns
def dns_response_time(): # N16
    """Feature N16: DNS server response time.

    NOTE(review): unimplemented stub — always returns 0.
    """
    return 0
def print_conversation_header(pkt): # Example
    """Print protocol plus source/destination ip:port of the given packet.

    Packets without a transport layer are silently skipped.
    """
    try:
        protocol = pkt.transport_layer
        src_addr = pkt.ip.src
        src_port = pkt[pkt.transport_layer].srcport
        dst_addr = pkt.ip.dst
        dst_port = pkt[pkt.transport_layer].dstport
        print '%s %s:%s --> %s:%s' % (protocol, src_addr, src_port, dst_addr, dst_port)
    except AttributeError:
        pass
    except Exception:
        print 'Error en print_conversation_header'
#Este metodo es con el proposito de extraer el nombre del subdirectorio benign or malign
def tipo(str):
    """Return the third path component (the benign/malign subdirectory)."""
    componentes = str.split('/')
    return componentes[2]
def nom(str):
    """Return the fourth path component (the pcap file name)."""
    componentes = str.split('/')
    return componentes[3]
def crear_matriz(ruta_datos, ruta_mtx_trans):
    """Build a CSV matrix of transport-layer features for each pcap listed
    in ../Malware/rutas.txt, writing one row per capture to ruta_mtx_trans.

    @param ruta_datos path of the dataset to analyse (currently unused; the
           pcap list is read from the hard-coded rutas.txt instead)
    @param ruta_mtx_trans path of the output CSV feature matrix
    """
    with open(name=ruta_mtx_trans, mode='w') as matriz:
        try:
            #id_url = linea.split(';')[0]
            #url = linea.split(';')[1]
            # cambiar a ip_honeypot a ip_android
            #IP_HONEYPOT = linea.split(';')[2]
            # NOTE(review): file_pcap, pattern, count and p are assigned but
            # never used; `captura = pyshark.FileCapture` is immediately
            # overwritten inside the loop.
            file_pcap='../samples/'
            pattern="*.pcap"
            route=''
            captura =pyshark.FileCapture
            count=0
            p=0
            #primero en el filecapture el pcap
            #luego ver como pyshark lee cada paquete y obtener el primero
            # con el primer paquete ver la ip src entonces esta va a ser el ip_honeypot
            #captura[0].ip.src
            print 'Extrayendo'
            # NOTE(review): this file handle is never closed.
            f = open("../Malware/rutas.txt", "r")
            route = f.read().split("\n")
            for x in route:
                captura = pyshark.FileCapture(x)
                for pkt in captura:
                    # The honeypot IP is taken from the first packet's
                    # source address; the `break` below means exactly one
                    # row is written per pcap.
                    IP_HONEYPOT= pkt.ip.src
                    print "################### ESCRIBIENDO DATOS #############################"
                    print x
                    print "###################################################################"
                    matriz.writelines(nom(x)+';' + str(len(tcp_conversation_exchange(captura, IP_HONEYPOT))) + ';'
                                      + str(dist_remote_tcp_port(captura, IP_HONEYPOT)) + ';'
                                      + str(len(remote_ips(pkt_without_dns(captura, IP_HONEYPOT), IP_HONEYPOT))) + ';'
                                      + str(app_bytes(pkt_without_dns(captura, IP_HONEYPOT), IP_HONEYPOT)) + ';'
                                      + str(len(udp_packets(pkt_without_dns(captura, IP_HONEYPOT), IP_HONEYPOT))) + ';'
                                      + str(len(tcp_urg_packet(captura, IP_HONEYPOT))) + ';'
                                      + str(len(source_app_packets(captura, IP_HONEYPOT))) + ';'
                                      + str(len(remote_app_packets(captura, IP_HONEYPOT))) + ';'
                                      + str(source_app_bytes(captura, IP_HONEYPOT)) + ';'
                                      + str(remote_app_bytes(captura, IP_HONEYPOT)) + ';'
                                      + str(duration(captura, IP_HONEYPOT)) + ';'
                                      + str(avg_local_pkt_rate(captura, IP_HONEYPOT)) + ';'
                                      + str(avg_remote_pkt_rate(captura, IP_HONEYPOT)) + ';'
                                      + str(len(app_packets(captura, IP_HONEYPOT))) + ';'
                                      + str(len(dns_query_times(captura, IP_HONEYPOT)))+';'+tipo(x)+'\n')
                    break
        except Exception,e:
            traceback.print_exc()
# Input/output paths and top-level execution of the feature extractor.
ruta_dataset = '../Malware/malware.csv'
ruta_matriz = '../Malware/resutados_malware.csv'
crear_matriz(ruta_dataset, ruta_matriz)
1806152 | <filename>tests/test_choices.py
import valleydeight as vd
import pytest
def test_choice_static():
    """A Choice of literal values accepts exactly those values and rejects others."""
    choice_t = vd.Choice("one", 4, True)
    assert choice_t("one") == "one"
    assert choice_t(True) is True
    assert choice_t(4) == 4
    with pytest.raises(vd.ValidatorException):
        choice_t(8)
    with pytest.raises(vd.ValidatorException):
        choice_t(dict(one=1, two="2"))
    with pytest.raises(vd.ValidatorException):
        choice_t("two")
def test_choice_validators():
    """A Choice of validators accepts any value passing one of them."""
    choice_t = vd.Choice(vd.Str(), vd.Bool())
    assert choice_t(True) is True
    assert choice_t("True") == "True"
    with pytest.raises(vd.ValidatorException):
        choice_t(8)
    with pytest.raises(vd.ValidatorException):
        choice_t(dict(one=1, two="2"))
def test_choice_mixed():
    """A Choice mixing literal values and validators accepts both kinds."""
    choice_t = vd.Choice(3, 2.22, vd.Str(), vd.Bool())
    assert choice_t(True) is True
    assert choice_t("True") == "True"
    assert choice_t(3) == 3
    assert choice_t(2.22) == 2.22
    with pytest.raises(vd.ValidatorException):
        choice_t(8)
    with pytest.raises(vd.ValidatorException):
        choice_t(dict(one=1, two="2"))
    with pytest.raises(vd.ValidatorException):
        choice_t(7.92342)
| StarcoderdataPython |
5066393 | from copy import deepcopy
import random
from abc import abstractmethod
class abstract_cascade:
    """Base class for cascade (information-spread) simulations on a graph.

    Subclasses implement next() to advance the cascade by one step; the
    iterator protocol (__iter__/next) makes the cascade itself iterable.
    The input graph is deep-copied so the caller's graph is never mutated.
    """

    def __init__(self, G, itterations=10000):
        # Work on a private copy of the graph.
        self.G = deepcopy(G)
        self.cascase_id = 1
        self.step = 1
        self.d = {}
        self.activated = ""
        self.numberOfNodes = len(self.G.nodes())
        self.numberactivated = 0
        # Node partitions by the 'activated' node attribute:
        # n = not-yet-activated nodes, a = already-activated nodes.
        self.n = set([n for n, attrdict in self.G.node.items() if attrdict['activated'] == 0])
        self.a = set([n for n, attrdict in self.G.node.items() if attrdict['activated'] > 0])
        self.iterations = itterations
        self.step_time = None
        self.tag = "a"

    def __iter__(self):
        return self

    def decision(self, probability):
        '''
        Returns a True/False decision based on a uniform random draw against
        a probability threshold.
        :param probability: probability threshold in [0, 1]
        :type probability: float
        :return: True/False
        :rtype: bool
        '''
        return random.random() < probability

    @abstractmethod
    def next(self):
        # Advance the cascade by one step; must be provided by subclasses.
        pass

    def getInfectedNode(self):
        """
        :return: the most recently activated node (initially the empty string)
        :rtype: object
        """
        return self.activated

    def getDeepGraph(self):
        # Return an independent deep copy of the internal graph.
        return deepcopy(self.G)

    def getGraph(self):
        # Return the internal graph itself (mutable by the caller).
        return self.G

    def getStep(self):
        '''
        Returns the current iteration the cascade is in
        :return: step number
        :rtype: int
        '''
        return self.step

    def getStepTime(self):
        # Duration of the last step, or None if never measured.
        return self.step_time

    def getTag(self):
        # Label identifying this cascade (defaults to "a").
        return self.tag
| StarcoderdataPython |
1791107 | """Data Provider implementation module for constructing data based on standard stock indicators
The data provider in this module is not indented to be instantiated outside of this module. Instead, upon the importing
of this module, the provider will create an instance of itself and register itself with the global DataProviderRegistry.
After this, data consumers can register themselves as recipients of data from this provider using the id located at
data_provider_static_names.INDICATOR_BLOCK_PROVIDER_ID.
Detailed argument list that can be provided to this provider can be found in the generate_data method.
TODO[<NAME>] Extract repeated code from generate_prediction_data and generate_data after global style rewrite
"""
import configparser
import datetime
import numpy
from data_providing_module import configurable_registry
from data_providing_module import data_provider_registry
from data_providing_module.data_providers import data_provider_static_names
from general_utils.config import config_util
from general_utils.logging import logger
from general_utils.mysql_management.mysql_tables import stock_data_table
from stock_data_analysis_module.data_processing_module.data_retrieval_module import ranged_data_retriever
from stock_data_analysis_module.indicators import moving_average
from stock_data_analysis_module.indicators import bollinger_band
from stock_data_analysis_module.indicators import stochastic_oscillator
_ENABLED_CONFIG_ID = "enabled"
def _standardize_price_data(price_data):
ret_data = numpy.copy(price_data)
ret_data = ret_data.flatten()
max_price = numpy.max(ret_data)
min_price = numpy.min(ret_data)
for i in range(len(ret_data)):
ret_data[i] = (ret_data[i]-min_price)/max_price
return ret_data.reshape(price_data.shape)
class IndicatorBlockProvider(data_provider_registry.DataProviderBase):
    """Data Provider that will provide data constructed using stock indicators normally used by stock traders

    Details on these indicators can be found in the modules of the indicators package.
    Additionally, this provider provides support for configurable parameters through the configuration file. These
    parameters are listed in the Configurable Parameters section.

    Configurable Parameters:
        enable: Whether this provider is enabled for consumers to receive data from.
    """

    def __init__(self):
        """Initializes IndicatorBlockProvider and registers the instance with the global DataProviderRegistry"""
        super(IndicatorBlockProvider, self).__init__()
        configurable_registry.config_registry.register_configurable(self)
        # Fallback values used for any keyword argument the consumer does not supply.
        self.default_kwargs = {
            "sma_period": 50,
            "bollinger_band_stdev": 2,
            "bollinger_band_period": 20,
            "oscillator_period": 17
        }

    def _apply_default_kwargs(self, kwargs):
        """Fill missing entries of kwargs from default_kwargs (mutates kwargs in place).

        Returns:
            int. The largest default '*period' value; callers pad the requested block
            length by this amount so indicators have enough warm-up history.
        """
        max_additional_period = 0
        for key, value in self.default_kwargs.items():
            if key not in kwargs:
                kwargs[key] = value
            # NOTE: padding intentionally considers the *default* period values only,
            # matching the original behavior even when a caller overrides a period.
            if key.endswith("period") and value > max_additional_period:
                max_additional_period = value
        return max_additional_period

    @staticmethod
    def _create_data_retriever(padded_data_block_length):
        """Build a RangedDataRetriever for high/low/close/volume covering the padded block length."""
        # ~360 extra days of margin; // 5 converts trading days to calendar weeks.
        start_date = datetime.datetime.now() - datetime.timedelta(weeks=(padded_data_block_length + 360) // 5)
        start_date = start_date.isoformat()[:10].replace('-', '/')
        end_date = datetime.datetime.now().isoformat()[:10].replace('-', '/')
        return ranged_data_retriever.RangedDataRetriever(
            [
                stock_data_table.HIGH_PRICE_COLUMN_NAME,
                stock_data_table.LOW_PRICE_COLUMN_NAME,
                stock_data_table.CLOSING_PRICE_COLUMN_NAME,
                stock_data_table.VOLUME_COLUMN_NAME
            ],
            start_date,
            end_date)

    @staticmethod
    def _warn_insufficient_data(ticker, needed, received):
        """Log a warning for a ticker that lacks enough history to fill a block."""
        len_warning = (
            "Could not process %s into an indicator block, "
            "needed %d days of trading data but received %d" %
            (ticker, needed, received)
        )
        logger.logger.log(logger.WARNING, len_warning)

    @staticmethod
    def _assemble_data_block(std_high, std_low, std_close, std_volume,
                             high, low, close, data_block_length, kwargs):
        """Compute indicators and pack everything into an 8 x data_block_length float32 block.

        Rows: 0 high, 1 low, 2 close, 3 volume, 4 SMA, 5 upper bollinger band,
        6 lower bollinger band, 7 stochastic oscillator (rescaled from percent to [0, 1]).
        """
        sma = moving_average.SMA(std_close, kwargs['sma_period'])
        sma = sma[-data_block_length:]
        boll_band = bollinger_band.bollinger_band(std_high, std_low, std_close,
                                                  smoothing_period=kwargs["bollinger_band_period"],
                                                  standard_deviations=kwargs["bollinger_band_stdev"]
                                                  )
        # The oscillator is computed from the raw (unstandardized) prices.
        oscillator = stochastic_oscillator.stochastic_oscillator(close, high,
                                                                 low, kwargs['oscillator_period'])
        oscillator = oscillator[-data_block_length:]
        oscillator /= 100
        data_block = numpy.zeros((8, data_block_length), dtype=numpy.float32)
        data_block[0] = std_high[-data_block_length:]
        data_block[1] = std_low[-data_block_length:]
        data_block[2] = std_close[-data_block_length:]
        data_block[3] = std_volume[-data_block_length:]
        data_block[4] = sma
        data_block[5] = boll_band[0][-data_block_length:]
        data_block[6] = boll_band[1][-data_block_length:]
        data_block[7] = oscillator
        return data_block

    def generate_prediction_data(self, *args, **kwargs):
        """Generates data for a Consumer wanting to make predictions about the next day's state.

        This method is identical to generate_data for all but the standardization scheme
        (prices and volumes are centered on their averages here) and the return values.
        For arguments and further details, see generate_data.

        Returns:
            List[Tuple[str, numpy.ndarray, float, float]]. Broken down, for every stock, there is a tuple
            containing the ticker, the data block generated, the average price, and the average volume.
            The average price and volume is to allow for the original magnitudes of the prices and volumes to
            be reconstructed should the predictions require it.
            For a breakdown of the rows in the data block, see generate_data's documentation.
        """
        if len(args) < 1:
            raise ValueError("Expected %d positional argument but received %d" % (1, len(args)))
        data_block_length = args[0]
        padded_data_block_length = self._apply_default_kwargs(kwargs) + data_block_length
        data_retriever = self._create_data_retriever(padded_data_block_length)
        ret_blocks = []
        for ticker, sources in data_retriever.data_sources.items():
            ticker_data = data_retriever.retrieveData(ticker, sources[0])
            ticker_data = numpy.array(ticker_data, dtype=numpy.float32)
            high = ticker_data[:, 0]
            low = ticker_data[:, 1]
            close = ticker_data[:, 2]
            volume = ticker_data[:, 3]
            avg_high = numpy.average(high)
            avg_low = numpy.average(low)
            avg_close = numpy.average(close)
            # Weighted form kept verbatim to preserve float rounding; equals the mean of the three averages.
            avg_price = ((avg_high * len(high)) + (avg_low * len(high)) + (avg_close * len(high))) / (len(high) * 3)
            avg_vol = numpy.average(volume)
            # Center each series on its average so consumers see relative change, not magnitude.
            std_high = [(high[i] - avg_price) / avg_price
                        for i in range(len(high))]
            std_low = [(low[i] - avg_price) / avg_price
                       for i in range(len(high))]
            std_close = [(close[i] - avg_price) / avg_price
                         for i in range(len(high))]
            volume = [(volume[i] - avg_vol) / avg_vol
                      for i in range(len(volume))]
            if len(std_high) < padded_data_block_length:
                self._warn_insufficient_data(ticker, padded_data_block_length, len(std_high))
                continue
            data_block = self._assemble_data_block(std_high, std_low, std_close, volume,
                                                   high, low, close, data_block_length, kwargs)
            ret_blocks.append((ticker, data_block, avg_price, avg_vol))
        return ret_blocks

    def generate_data(self, *args, **kwargs):
        """Generates data using stock indicators over a set period of time

        Generates blocks (numpy arrays) of data using indicators that are used by normal stock traders:
        bollinger bands, simple moving average and the stochastic oscillator. The high, low, closing, and
        volume columns of the database tables feed these algorithms, after being standardized (see
        _standardize_price_data) so consumers can reason about relative change rather than magnitude.

        Arguments:
            *args:
                data_block_length: int. Controls how many columns will be present in the returned data
                    blocks. As a note, each data block always has 8 rows.
            **kwargs:
                sma_period: int. Days considered in the simple moving average; for a given day x the
                    previous x-sma_period days are used.
                bollinger_band_stdev: int. Standard deviations used for the bollinger bands.
                bollinger_band_period: int. Days used in the calculation of the bollinger bands.
                oscillator_period: int. Days used in the calculation of the stochastic oscillator.

        Returns:
            Numpy.ndarray with three dimensions; effectively a 3D matrix of data blocks, each with 8 rows
            and data_block_length columns. Rows:
                0: high price
                1: low price
                2: closing price
                3: volume
                4: simple moving average (SMA)
                5: upper bollinger band
                6: lower bollinger band
                7: stochastic oscillator
        """
        if len(args) < 1:
            raise ValueError("Expected %d positional argument but received %d" % (1, len(args)))
        data_block_length = args[0]
        padded_data_block_length = self._apply_default_kwargs(kwargs) + data_block_length
        data_retriever = self._create_data_retriever(padded_data_block_length)
        ret_blocks = []
        for ticker, sources in data_retriever.data_sources.items():
            ticker_data = data_retriever.retrieveData(ticker, sources[0])
            ticker_data = numpy.array(ticker_data, dtype=numpy.float32)
            high = ticker_data[:, 0]
            low = ticker_data[:, 1]
            close = ticker_data[:, 2]
            volume = ticker_data[:, 3]
            # Min/max style standardization (see _standardize_price_data).
            std_high = _standardize_price_data(high)
            std_close = _standardize_price_data(close)
            std_low = _standardize_price_data(low)
            volume = _standardize_price_data(volume)
            if len(std_high) < padded_data_block_length:
                self._warn_insufficient_data(ticker, padded_data_block_length, len(std_high))
                continue
            data_block = self._assemble_data_block(std_high, std_low, std_close, volume,
                                                   high, low, close, data_block_length, kwargs)
            ret_blocks.append(data_block)
        return numpy.array(ret_blocks, dtype=numpy.float32)

    def write_default_configuration(self, section: "configparser.SectionProxy"):
        """Writes default configuration values into the SectionProxy provided.

        For more details see abstract class documentation.
        """
        section[_ENABLED_CONFIG_ID] = "True"

    def load_configuration(self, parser: "configparser.ConfigParser"):
        """Attempts to load the configurable parameters for this provider from the provided parser.

        Registers this provider with the global registry when enabled.
        For more details see abstract class documentation.
        """
        section = config_util.create_type_section(parser, self)
        if not parser.has_option(section.name, _ENABLED_CONFIG_ID):
            self.write_default_configuration(section)
        enabled = parser.getboolean(section.name, _ENABLED_CONFIG_ID)
        if enabled:
            data_provider_registry.registry.register_provider(
                data_provider_static_names.INDICATOR_BLOCK_PROVIDER_ID, self)
# Module-level singleton: constructing the provider registers it with the
# configurable registry so it can later be enabled via the configuration file.
provider = IndicatorBlockProvider()
| StarcoderdataPython |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# NOTE(review): Python 2 syntax (print statements) throughout; not Python 3 compatible.
import re
# re.match only matches at the *beginning* of the string.
print re.match('www', 'www.runoob.com').span()
# 'com' is not at the start, so this prints None.
print re.match('com', 'www.runoob.com')
line = "Cats are smarter than dogs"
# Groups: greedy (.*) up to ' are ', then non-greedy (.*?) for the next word.
matchObj = re.match( r'(.*) are (.*?) .*', line, re.M | re.I)
if matchObj:
    print "matchObj.group(): ", matchObj.group()
    print "matchObj.group(1): ", matchObj.group(1)
    print "matchObj.group(2): ", matchObj.group(2)
else:
    print "no match"
# re.search scans the whole string, so both of these find a match.
print re.search('www', 'www.runoob.com').span()
print re.search('com', 'www.runoob.com').span()
phone = "2004-959-559 # this is international phone number"
# First strip the trailing comment, then remove every non-digit character.
num = re.sub(r'#.*$', "", phone)
print "phone number is", num
num = re.sub(r'\D', "", phone)
print "phone number is", num
# re.sub also accepts a replacement *function* receiving the match object.
def double(matched):
    value = int(matched.group('value'))
    return str(value * 2)
s = 'A23G4HFD567'
print re.sub('(?P<value>\d+)', double, s)
print re.sub('(?P<value>\d+)', "", s)
# Pre-compiled pattern: match within [pos, endpos), findall, finditer, split.
pattern = re.compile(r'\d+')
print pattern.match('one12twothree34four', 3, 10).group()
print pattern.findall('runoob 123 google 456')
print pattern.findall('run88oob123google456', 0, 10)
it = re.finditer(r"\d+", "12a32bc43jf3")
for match in it:
    print match.group()
print re.split('\W+', 'runoob, runoob, runoob.')
3237354 | # -*- coding: utf-8 -*-
"""Main module."""
def LargeIfFactual(s):
    """Return "big" if the input string contains the word "true", else "small".

    Args:
        s: string to inspect.

    Returns:
        "big" when the substring "true" occurs anywhere in s, otherwise "small".
    """
    # Original used ``` fences instead of a triple-quoted docstring, which is a
    # SyntaxError in Python; fixed to a proper docstring.
    if "true" in s:
        return "big"
    else:
        return "small"
a="true"
b="false"
c="anything else"
# "true" is a substring of a only, so x == "big" while y and z == "small".
x=LargeIfFactual(a)
y=LargeIfFactual(b)
z=LargeIfFactual(c)
5003027 | import sys
import pyaudio
from struct import unpack
import numpy as np
# from Adafruit_LED_Backpack import BicolorMatrix8x8
# Create BicolorMatrix display instance with default settings
# display = BicolorMatrix8x8.BicolorMatrix8x8()
# display.begin()
# display.clear()
# display.set_brightness(7)
# Color codes per column for the (commented-out) LED matrix output.
spectrum = [1,1,1,3,3,3,2,2]
# Per-band levels, rewritten by calculate_levels on every audio chunk.
matrix = [0,0,0,0,0,0,0,0]
power = []
# Per-band gain applied before clipping; higher frequency bands get more boost.
weighting = [2,8,8,16,16,32,32,64]
def list_devices():
    """Print the index and name of every audio device that can record input."""
    audio = pyaudio.PyAudio()
    for index in range(audio.get_device_count()):
        info = audio.get_device_info_by_index(index)
        if info['maxInputChannels'] > 0:
            print(str(index)+'. '+info['name'])
# Audio setup
no_channels = 1
sample_rate = 44100
# Chunk must be a multiple of 8
# NOTE: If chunk size is too small the program will crash
# with error message: [Errno Input overflowed]
chunk = 3072
list_devices()
# Use results from list_devices() to determine your microphone index
# NOTE(review): device index 2 is machine-specific — verify per host.
device = 2
p = pyaudio.PyAudio()
# Open a 16-bit mono input stream on the chosen device.
stream = p.open(format = pyaudio.paInt16,
                channels = no_channels,
                rate = sample_rate,
                input = True,
                frames_per_buffer = chunk,
                input_device_index = device)
# Return power array index corresponding to a particular frequency
def piff(val):
    """Map a frequency `val` (Hz) to an index into the rFFT power array."""
    return int(2*chunk*val/sample_rate)
def calculate_levels(data, chunk,sample_rate):
    """Compute 8 frequency-band levels (clipped to 0..8) from one raw audio chunk.

    `data` is the raw byte buffer returned by stream.read(); the result is
    stored in (and returned via) the module-level `matrix`.
    """
    global matrix
    # Convert raw data (ASCII string) to numpy array
    # (signed 16-bit samples -> 2 bytes per sample, hence len/2)
    data = unpack("%dh"%(len(data)/2),data)
    data = np.array(data, dtype='h')
    # Apply FFT - real data
    fourier=np.fft.rfft(data)
    # Remove last element in array to make it the same size as chunk
    fourier=np.delete(fourier,len(fourier)-1)
    # Find average 'amplitude' for specific frequency ranges in Hz
    power = np.abs(fourier)
    # Bands roughly double in width: 0-156 Hz up to 10k-20k Hz.
    matrix[0]= int(np.mean(power[piff(0) :piff(156):1]))
    matrix[1]= int(np.mean(power[piff(156) :piff(313):1]))
    matrix[2]= int(np.mean(power[piff(313) :piff(625):1]))
    matrix[3]= int(np.mean(power[piff(625) :piff(1250):1]))
    matrix[4]= int(np.mean(power[piff(1250) :piff(2500):1]))
    matrix[5]= int(np.mean(power[piff(2500) :piff(5000):1]))
    matrix[6]= int(np.mean(power[piff(5000) :piff(10000):1]))
    matrix[7]= int(np.mean(power[piff(10000):piff(20000):1]))
    # Tidy up column values for the LED matrix
    matrix=np.divide(np.multiply(matrix,weighting),1000000)
    # Set floor at 0 and ceiling at 8 for LED matrix
    matrix=matrix.clip(0,8)
    return matrix
# Main loop
while 1:
    try:
        # Get microphone data
        data = stream.read(chunk)
        matrix=calculate_levels(data, chunk,sample_rate)
        # display.clear()
        # Render the 8 band levels as a single text line.
        output = ""
        for y in range (0,8):
            output = "%s%.02f "%(output, matrix[y])
            # for x in range(0, matrix[y]):
            # display.set_pixel(x, y, spectrum[x])
        print(output)
    except KeyboardInterrupt:
        print("Ctrl-C Terminating...")
        stream.stop_stream()
        stream.close()
        p.terminate()
        sys.exit(1)
    # NOTE(review): this branch duplicates the cleanup above; a shared
    # finally/teardown helper would remove the duplication.
    except Exception as e:
        print(e)
        print("ERROR Terminating...")
        stream.stop_stream()
        stream.close()
        p.terminate()
        sys.exit(1)
3478673 | from NewsSummaryDataset import NewsSummaryDataset
import pandas as pd
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from transformers import (
T5TokenizerFast as T5Tokenizer
)
# News Summary Dataset Module containing the main
# train, test and validation dataloaders to be used
# in model fine-tuning
class NewsSummaryDatasetModule(pl.LightningDataModule):
    """Lightning data module exposing train/val/test loaders over NewsSummaryDataset.

    The validation and test loaders both serve the test split; only the
    training loader shuffles.
    """

    def __init__(
        self,
        train_df: pd.DataFrame,
        test_df: pd.DataFrame,
        tokenizer: T5Tokenizer,
        batch_size: int = 8,
        test_max_token_len: int = 512,
        summary_max_token_len: int = 128
    ):
        super().__init__()
        self.train_df = train_df
        self.test_df = test_df
        self.batch_size = batch_size
        self.tokenizer = tokenizer
        # Token-length limits forwarded to every NewsSummaryDataset built in setup().
        self.test_max_token_len = test_max_token_len
        self.summary_max_token_len = summary_max_token_len

    def _build_dataset(self, frame):
        """Wrap a dataframe in a NewsSummaryDataset using the configured tokenizer/limits."""
        return NewsSummaryDataset(
            frame,
            self.tokenizer,
            self.test_max_token_len,
            self.summary_max_token_len
        )

    def _build_loader(self, dataset, shuffle):
        """Create a DataLoader with the module-wide batch size and two workers."""
        return DataLoader(
            dataset,
            batch_size=self.batch_size,
            shuffle=shuffle,
            num_workers=2
        )

    def setup(self, stage=None):
        # Materialize both splits; `stage` is accepted for the Lightning API but unused.
        self.train_dataset = self._build_dataset(self.train_df)
        self.test_dataset = self._build_dataset(self.test_df)

    def train_dataloader(self):
        return self._build_loader(self.train_dataset, shuffle=True)

    def val_dataloader(self):
        # Validation reuses the test split, unshuffled.
        return self._build_loader(self.test_dataset, shuffle=False)

    def test_dataloader(self):
        return self._build_loader(self.test_dataset, shuffle=False)
3359234 | from nbox import plug, PRETRAINED_MODELS
import nbox
import torch
import numpy as np
class DoubleInSingleOut(torch.nn.Module):
    """Toy module: two linear maps over separate inputs fused into one square output.

    forward(x, y) computes h = f1(x) + f2(y) and returns exp(logit_scale) - h @ h.T,
    with the learnable scalar temperature initialised to log(1/0.07).
    """

    def __init__(self):
        super().__init__()
        self.f1 = torch.nn.Linear(2, 4)
        self.f2 = torch.nn.Linear(2, 4)
        # Learnable scalar temperature.
        self.logit_scale = torch.nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

    def forward(self, x, y):
        fused = self.f1(x) + self.f2(y)
        scale = self.logit_scale.exp()
        return scale - fused @ fused.t()
def my_model_builder_fn(**kwargs):
    """Builder hook for nbox's model registry: returns (model, extra_args_dict)."""
    # let it accept **kwargs, you use what you need
    # each method must return two things: the model itself, and some extra args
    return DoubleInSingleOut(), {}
num_sources = len(PRETRAINED_MODELS)
print(" number of models in the registry:", num_sources)
# plug the model — registers the builder under a new key in the registry
plug(
    "my_model_method", # what should be the name / key
    my_model_builder_fn, # method that will be called to build the model
    {"x": "image", "y": "image"}, # what are the categories of inputs and outputs
)
new_num_sources = len(PRETRAINED_MODELS)
print("number of models in the registry after plug:", new_num_sources)
# loading my new model by the key registered above
model = nbox.load("my_model_method")
# nbox models take a dict of named numpy inputs matching the plug() categories
out = model({"x": torch.randn(4, 2).numpy(), "y": torch.randn(4, 2).numpy()})
print(out.shape)
| StarcoderdataPython |
1960675 | <reponame>lucaparisi91/qmc4<gh_stars>0
import pandas as pd
import matplotlib.pylab as plt
import seaborn as sns
from scipy.optimize import curve_fit
import numpy as np
if __name__ == "__main__":
    # Space-separated timing data with at least "method", "N" and "time" columns.
    datas=pd.read_csv("2bDistancesTimings_2.dat",sep=" ",index_col=False)
    print(datas)
    # Only points with N > cutOff are used in the fit.
    cutOff=0;
    for hue,data in datas.groupby("method"):
        plt.plot(data["N"] , data["time"], "o",label=hue )
        # Power-law fit: fitting a + b*x in log-log space, so time ~ e^a * N^b.
        f=lambda x,a,b : a + b*x
        x=data["N"]
        y=data["time"]
        y=y[x>cutOff]
        x=x[x>cutOff]
        # `sigma` is the covariance matrix returned by curve_fit.
        params,sigma=curve_fit(f,np.log(x),np.log(y))
        # NOTE(review): `errors` is computed but never used.
        errors=np.sqrt(np.diag(sigma))
        x=np.linspace( np.min(x),np.max(x) , num=1000 )
        # Overlay the fitted curve (back-transformed from log space).
        plt.plot(x,np.exp(f(np.log(x),*params) ) , "--")
        # Prints the fitted [a, b] per method; b is the empirical scaling exponent.
        print("{} {}".format(hue,params))
    plt.xscale("log")
    plt.yscale("log")
    plt.legend()
    plt.show()
| StarcoderdataPython |
1738509 | <reponame>czyczyyzc/Compressed_Self-Attention
from __future__ import absolute_import
from .duke import Duke
from .market import Market
from .cuhk import CUHK
__factory = {
'market': Market,
'duke': Duke,
"cuhk": CUHK,
}
def names():
    """Return the registered dataset names in sorted order."""
    return sorted(__factory)
def create(name, root, *args, **kwargs):
    """
    Create a dataset instance.

    Parameters
    ----------
    name : str
        The dataset name. Can be one of 'market', 'duke', 'cuhk'.
    root : str
        The path to the dataset directory.
    """
    dataset_cls = __factory.get(name)
    if dataset_cls is None:
        raise KeyError("Unknown dataset:", name)
    return dataset_cls(root, *args, **kwargs)
| StarcoderdataPython |
8169362 | """Test Node Child Base model."""
import pytest
from openzwavemqtt.models.node import OZWNode
from openzwavemqtt.models.node_child_base import OZWNodeChildBase
class MockDescendant(OZWNodeChildBase):
    """Mock a descendant."""
    # Event name this mock reports for change notifications.
    EVENT_CHANGED = "mock-changed"
def test_node():
    """Test that .node resolves through parent chains and fails without one."""
    node = OZWNode(None, None, "mock-node-id", 1)
    child = MockDescendant(None, node, "mock-child-id", 12)
    assert child.node is node
    # node lookup walks up through intermediate children to the owning node
    grandchild = MockDescendant(None, child, "mock-grandchild-id", 123)
    assert grandchild.node is node
    assert str(grandchild) == "<MockDescendant 123 (node: 1)>"
    no_node_parent = MockDescendant(None, None, "", "")
    with pytest.raises(RuntimeError):
        # test access node property without valid parent (node)
        no_node_parent.node # pylint: disable=pointless-statement
    assert str(no_node_parent) == "<MockDescendant (node: <missing> (bad!))>"
| StarcoderdataPython |
8036081 | import io
from typing import Optional
import pytest
from helpers import unhex
from unblob.file_utils import round_up
from unblob.handlers.filesystem.squashfs import (
SquashFSv3Handler,
SquashFSv4BEHandler,
SquashFSv4LEHandler,
)
SQUASHFS_V4_LE_NO_PAD_CONTENTS = unhex(
"""\
00000000 68 73 71 73 05 00 00 00 06 b6 03 62 00 00 02 00 |hsqs.......b....|
00000010 01 00 00 00 01 00 11 00 c0 00 01 00 04 00 00 00 |................|
00000020 80 00 00 00 00 00 00 00 34 01 00 00 00 00 00 00 |........4.......|
00000030 2c 01 00 00 00 00 00 00 ff ff ff ff ff ff ff ff |,...............|
00000040 79 00 00 00 00 00 00 00 b9 00 00 00 00 00 00 00 |y...............|
00000050 fe 00 00 00 00 00 00 00 1e 01 00 00 00 00 00 00 |................|
00000060 78 da 4b 2c 28 c8 49 35 e4 4a 04 51 46 10 ca 18 |x.K,(.I5.J.QF...|
00000070 42 99 70 01 00 8b ee 09 3b 3e 00 78 da 63 62 58 |B.p.....;>.x.cbX|
00000080 c2 c8 00 04 17 37 33 27 81 19 48 80 1d 88 99 90 |.....73'..H.....|
00000090 e4 99 d0 e4 d0 e5 99 91 e4 f9 b0 c8 b3 20 c9 8b |............. ..|
000000a0 42 e5 19 19 fe 82 e5 af 03 e5 59 a1 72 20 7b c2 |B.........Y.r {.|
000000b0 81 98 0d 88 01 97 54 0d e3 33 00 78 da 63 66 80 |......T..3.x.cf.|
000000c0 00 46 28 cd c4 c0 c9 90 58 50 90 93 6a a8 57 52 |.F(.....XP..j.WR|
000000d0 51 a2 00 14 87 8b 18 81 44 1c 80 7c b8 88 31 48 |Q.......D..|..1H|
000000e0 24 81 81 19 21 62 02 12 01 00 10 1c 10 41 0e 00 |$...!b.......A..|
000000f0 78 da 4b 60 80 00 49 28 0d 00 06 d8 00 7a ee 00 |x.K`..I(.....z..|
00000100 00 00 00 00 00 00 16 00 78 da 63 60 80 00 05 28 |........x.c`...(|
00000110 ed 00 a5 13 a0 74 03 94 06 00 14 28 01 41 06 01 |.....t.....(.A..|
00000120 00 00 00 00 00 00 04 80 00 00 00 00 26 01 00 00 |............&...|
00000130 00 00 00 00 |....|
00000134
"""
)
SQUASHFS_V4_BE_NO_PAD_CONTENTS = unhex(
"""\
00000000 73 71 73 68 00 00 00 05 62 1f 9f 26 00 02 00 00 |................|
00000010 00 00 00 01 00 01 00 11 00 c0 00 01 00 04 00 00 |................|
00000020 00 00 00 00 00 00 00 80 00 00 00 00 00 00 01 33 |................|
00000030 00 00 00 00 00 00 01 2b ff ff ff ff ff ff ff ff |................|
00000040 00 00 00 00 00 00 00 79 00 00 00 00 00 00 00 b5 |................|
00000050 00 00 00 00 00 00 00 ff 00 00 00 00 00 00 01 1d |................|
00000060 78 da 4b 2c 28 c8 49 35 e4 4a 04 51 46 10 ca 18 |................|
00000070 42 99 70 01 00 8b ee 09 3b 00 3a 78 da 63 60 62 |................|
00000080 5c c2 00 04 49 f2 b1 c1 40 8a 91 01 15 b0 33 a0 |................|
00000090 ca 33 a1 c8 61 ca 33 23 c9 f3 61 91 67 41 92 17 |................|
000000a0 05 cb 33 32 fe 45 92 67 85 ca 31 31 84 83 69 36 |................|
000000b0 00 55 b7 0a 45 00 36 78 da 63 60 60 60 66 80 00 |................|
000000c0 46 30 c9 c4 c0 99 58 50 90 93 6a a8 57 52 51 c2 |................|
000000d0 a0 00 14 85 89 18 81 45 1c 80 7c 98 88 31 58 24 |................|
000000e0 01 68 02 4c c4 04 24 02 00 0f 1e 10 41 80 10 00 |................|
000000f0 00 00 00 00 00 00 60 00 00 00 19 00 00 00 00 00 |................|
00000100 00 00 00 00 00 00 ed 00 14 78 da 63 60 40 01 0a |................|
00000110 50 da 01 4a 27 40 e9 06 00 0b 68 01 41 00 00 00 |................|
00000120 00 00 00 01 07 80 04 00 00 03 e8 00 00 00 00 00 |................|
00000130 00 01 25 |...|
00000133
"""
)
SQUASHFS_V3_LE_NO_PAD_CONTENTS = unhex(
"""\
00000000 68 73 71 73 05 00 00 00 00 00 00 00 00 00 00 00 |hsqs............|
00000010 00 00 00 00 00 00 00 00 00 00 00 00 03 00 01 00 |................|
00000020 00 00 11 00 c0 01 00 c9 cc 03 62 80 00 00 00 00 |..........b.....|
00000030 00 00 00 00 00 02 00 01 00 00 00 00 00 00 00 40 |...............@|
00000040 01 00 00 00 00 00 00 3c 01 00 00 00 00 00 00 00 |.......<........|
00000050 00 00 00 00 00 00 00 90 00 00 00 00 00 00 00 cd |................|
00000060 00 00 00 00 00 00 00 16 01 00 00 00 00 00 00 34 |...............4|
00000070 01 00 00 00 00 00 00 78 da 4b 2c 28 c8 49 35 e4 |.......x.K,(.I5.|
00000080 4a 04 51 46 10 ca 18 42 99 70 01 00 8b ee 09 3b |J.QF...B.p.....;|
00000090 3b 00 78 da 73 92 62 f8 7f 71 33 73 12 33 03 26 |;.x.s.b..q3s.3.&|
000000a0 60 07 62 27 a8 3c 0b 16 39 64 79 56 34 79 3e 34 |`.b'.<..9dyV4y>4|
000000b0 79 26 34 79 51 a8 fc 45 79 86 ff d7 81 f2 8c 40 |y&4yQ..Ey......@|
000000c0 36 48 8d 07 54 9e 0d 88 01 cc 3c 11 94 37 00 78 |6H..T.....<..7.x|
000000d0 da 63 66 00 02 66 10 e1 c0 c9 c0 90 58 50 90 93 |.cf..f......XP..|
000000e0 6a a8 57 52 51 a2 e0 c0 c9 08 e1 1a 81 b8 0e 0e |j.WRQ...........|
000000f0 9c 4c 10 ae 31 88 9b e0 c0 f9 ff 3f 98 6b 02 e2 |.L..1......?.k..|
00000100 02 00 01 31 13 36 0e 00 78 da 2b 67 80 00 49 28 |...1.6..x.+g..I(|
00000110 0d 00 08 48 00 91 06 01 00 00 00 00 00 00 14 00 |...H............|
00000120 78 da 6b 60 80 80 04 06 54 a0 00 a5 1d a0 34 00 |x.k`....T.....4.|
00000130 24 28 01 41 1e 01 00 00 00 00 00 00 00 00 00 00 |$(.A............|
00000140
"""
)
SQUASHFS_V3_BE_NO_PAD_CONTENTS = unhex(
"""\
00000000 73 71 73 68 00 00 00 05 00 00 00 00 00 00 00 00 |sqsh............|
00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 03 00 01 |................|
00000020 00 00 00 11 c0 01 00 62 03 cc c3 00 00 00 00 00 |.......b........|
00000030 00 00 80 00 02 00 00 00 00 00 01 00 00 00 00 00 |................|
00000040 00 00 00 00 00 01 40 00 00 00 00 00 00 01 3c 00 |......@.......<.|
00000050 00 00 00 00 00 00 00 00 00 00 00 00 00 00 90 00 |................|
00000060 00 00 00 00 00 00 cb 00 00 00 00 00 00 01 14 00 |................|
00000070 00 00 00 00 00 01 34 78 da 4b 2c 28 c8 49 35 e4 |......4x.K,(.I5.|
00000080 4a 04 51 46 10 ca 18 42 99 70 01 00 8b ee 09 3b |J.QF...B.p.....;|
00000090 00 39 78 da 53 5c c2 f0 3f 89 79 f3 45 06 06 06 |.9x.S\\..?.y.E...|
000000a0 66 06 4c c0 ae 88 90 67 41 97 43 93 67 45 93 e7 |f.L....gA.C.gE..|
000000b0 43 93 67 42 93 17 05 c9 0b fe 05 cb 5f 07 b2 19 |C.gB........_...|
000000c0 c1 6a 38 e1 f2 6c 00 6f a6 13 17 00 35 78 da 63 |.j8..l.o....5x.c|
000000d0 66 80 00 66 06 26 4e 06 86 c4 82 82 9c 54 43 bd |f..f.&N......TC.|
000000e0 92 8a 12 46 20 97 11 cc 35 02 71 99 80 5c 26 30 |...F ...5.q..\\&0|
000000f0 d7 18 c4 65 66 e2 fc ff 1f cc 35 01 71 01 cb 90 |...ef.....5.q...|
00000100 11 84 80 10 00 00 00 00 00 00 00 77 00 00 00 19 |...........w....|
00000110 00 00 00 00 00 00 00 00 00 00 01 02 00 16 78 da |..............x.|
00000120 63 60 00 83 06 08 c5 90 c0 80 0a 14 a0 b4 03 00 |c`..............|
00000130 1b 68 01 41 00 00 00 00 00 00 01 1c 00 00 00 00 |.h.A............|
00000140
"""
)
def pad_contents(contents: bytes, alignment: int):
    """Zero-pad *contents* up to the next multiple of *alignment* bytes."""
    target_size = round_up(len(contents), alignment)
    return contents.ljust(target_size, b"\0")
@pytest.mark.parametrize(
    "contents, handler_class",
    (
        pytest.param(SQUASHFS_V4_LE_NO_PAD_CONTENTS, SquashFSv4LEHandler, id="v4_le"),
        pytest.param(SQUASHFS_V4_BE_NO_PAD_CONTENTS, SquashFSv4BEHandler, id="v4_be"),
        pytest.param(SQUASHFS_V3_LE_NO_PAD_CONTENTS, SquashFSv3Handler, id="v3_le"),
        pytest.param(SQUASHFS_V3_BE_NO_PAD_CONTENTS, SquashFSv3Handler, id="v3_be"),
    ),
)
@pytest.mark.parametrize(
    "start",
    (
        pytest.param(b"", id="zero_start"),
        pytest.param(b"A" * 128, id="non_zero_start"),
    ),
)
@pytest.mark.parametrize(
    "pad_align",
    (
        pytest.param(None, id="no_pad"),
        pytest.param(1024, id="1k_pad"),
        pytest.param(4096, id="4k_pad"),
    ),
)
@pytest.mark.parametrize(
    "extra",
    (
        pytest.param(
            b"",
            id="no_extra_end",
        ),
        pytest.param(
            b"A" * 128,
            id="extra_end",
        ),
        pytest.param(
            b"A" * 4096,
            id="extra_long_end",
        ),
        pytest.param(
            b"\0" * 128,
            id="extra_null_end",
        ),
        pytest.param(
            b"\0" * 128 + b"A" + b"\0" * 4096,
            id="extra-non-null-pad",
        ),
    ),
)
def test_squashfs_chunk_is_detected(
    contents: bytes, handler_class, start: bytes, pad_align: Optional[int], extra: bytes
):
    """The handler must find the image at any offset, padded or not, ignoring trailing data."""
    start_offset = len(start)
    # pad_align=None means "no padding": align to the image's own size.
    if pad_align is None:
        pad_align = len(contents)
    complete_content = pad_contents(contents, pad_align)
    chunk = handler_class().calculate_chunk(
        io.BytesIO(start + complete_content + extra), start_offset
    )
    # The reported chunk covers the image plus its padding, nothing more.
    assert chunk.start_offset == start_offset
    assert chunk.end_offset == start_offset + pad_align
@pytest.mark.parametrize(
    "contents, handler_class",
    (
        pytest.param(SQUASHFS_V4_LE_NO_PAD_CONTENTS, SquashFSv4LEHandler, id="v4_le"),
        pytest.param(SQUASHFS_V4_BE_NO_PAD_CONTENTS, SquashFSv4BEHandler, id="v4_be"),
        pytest.param(SQUASHFS_V3_LE_NO_PAD_CONTENTS, SquashFSv3Handler, id="v3_le"),
        pytest.param(SQUASHFS_V3_BE_NO_PAD_CONTENTS, SquashFSv3Handler, id="v3_be"),
    ),
)
def test_squashfs_incomplete_header(contents: bytes, handler_class):
    """A truncated superblock (only the first 10 bytes) must raise EOFError."""
    with pytest.raises(EOFError):
        handler_class().calculate_chunk(io.BytesIO(contents[:10]), 0)
@pytest.mark.parametrize(
    "contents, handler_class",
    (
        pytest.param(SQUASHFS_V4_LE_NO_PAD_CONTENTS, SquashFSv4LEHandler, id="v4_le"),
        pytest.param(SQUASHFS_V4_BE_NO_PAD_CONTENTS, SquashFSv4BEHandler, id="v4_be"),
        pytest.param(SQUASHFS_V3_LE_NO_PAD_CONTENTS, SquashFSv3Handler, id="v3_le"),
        pytest.param(SQUASHFS_V3_BE_NO_PAD_CONTENTS, SquashFSv3Handler, id="v3_be"),
    ),
)
def test_squashfs_incomplete_file(contents: bytes, handler_class):
    """A file cut short still yields a chunk spanning the declared image size."""
    chunk = handler_class().calculate_chunk(io.BytesIO(contents[:-10]), 0)
    # It is ok to return the whole chunk, incomplete files are taken care by the framework
    # the handlers does not need to manage that
    assert chunk.start_offset == 0
    assert chunk.end_offset == len(contents)
4886861 | <filename>examples/blender/eventTiming.py
#!/usr/bin/env python3
"""
eventTiming helps when timing events
"""
# standard library modules
import datetime
class eventTiming(object):
    """Records the name, beginning, and end of an arbitrary "event"."""

    def __init__(self, eventName, startDateTime=None, endDateTime=None):
        self.eventName = eventName
        # Default the start to "now" in UTC when no start is supplied.
        if startDateTime:
            self.startDateTime = startDateTime
        else:
            self.startDateTime = datetime.datetime.now(datetime.timezone.utc)
        self.endDateTime = endDateTime

    def __repr__(self):
        return str(self.toStrList())

    def finish(self):
        """Mark the event as ended now (UTC)."""
        self.endDateTime = datetime.datetime.now(datetime.timezone.utc)

    def duration(self):
        """Return end minus start, or a zero timedelta while unfinished."""
        if not self.endDateTime:
            return datetime.timedelta(0)
        return self.endDateTime - self.startDateTime

    def toStrList(self):
        """Return [name, ISO start, ISO end or None]."""
        end_iso = self.endDateTime.isoformat() if self.endDateTime else None
        return [self.eventName, self.startDateTime.isoformat(), end_iso]
| StarcoderdataPython |
6634237 | from .child import Child
from typing import Dict, Optional
from config import Config
from os.path import join, abspath, exists
from os import remove
class Action(Child):
    """Background task that onboards or deletes VIM images, NSDs and VNFD packages.

    `type` names the handler method to run (e.g. 'onboardVim', 'deleteNsd');
    Run() dispatches to it by name. Progress is exposed through `message`,
    the outcome through `result` and the inherited hasFailed flag.
    """
    def __init__(self, service, type: str, vnfd, token):
        super().__init__(f"{service.id}_{type}")
        self.service = service  # type: "NetworkService"
        self.type = type  # name of the handler method Run() will invoke
        self.vnfd = vnfd  # type: "VnfdPackage"
        self.token = token  # auth token forwarded to the Dispatcher API
        self.message = "Init"  # human-readable progress/status message
        self.result = None  # handler outcome (e.g. id of the onboarded entity)
    def Run(self):
        """Dispatch to the handler named by self.type; record any failure."""
        try:
            handler = getattr(self, self.type)
            handler()
        except Exception as e:
            # Broad on purpose: any handler error marks the whole action as failed.
            self.hasFailed = True
            self.message = f"Error: {e}"
    def onboardVim(self):
        """Upload the service's VIM image via the Dispatcher API."""
        from REST import DispatcherApi
        filePath = abspath(join(Config.UPLOAD_FOLDER, *self.service.VimLocalPath, self.service.vim_image))
        vimName = self.service.vim_name
        self.message = f"VIM Image onboarding in progress"
        # OnboardVim returns None on success, otherwise an error description.
        maybeError = DispatcherApi().OnboardVim(filePath, vimName, self.token, self.service.is_public)
        if maybeError is None:
            self.result = "<onboarded>"  # Not a known ID but a value to signal it's been onboarded
            self.message = f"VIM Image successfully onboarded"
        else:
            raise RuntimeError(f"Exception during onboarding process: {maybeError}")
    def onboardNsd(self):
        """Upload the service's NSD file; store the returned id in self.result."""
        from REST import DispatcherApi
        filePath = abspath(join(Config.UPLOAD_FOLDER, *self.service.NsdLocalPath, self.service.nsd_file))
        self.message = f"NSD file onboarding in progress"
        # maybeId holds the new id on success, otherwise the error description.
        maybeId, success = DispatcherApi().OnboardNsd(filePath, self.token, self.service.is_public)
        if success:
            self.result = maybeId
            self.message = f"NSD file successfully onboarded"
        else:
            raise RuntimeError(f"Exception during onboarding process: {maybeId}")
    def onboardVnf(self):
        """Upload the VNFD package; store the returned id in self.result."""
        from REST import DispatcherApi
        filePath = abspath(join(Config.UPLOAD_FOLDER, *self.vnfd.VnfdLocalPath, self.vnfd.vnfd_file))
        self.message = f"VNFD package onboarding in progress"
        maybeId, success = DispatcherApi().OnboardVnfd(filePath, self.token, self.service.is_public)
        if success:
            self.result = maybeId
            self.message = f"Onboarded VNFD with id: {maybeId}"
        else:
            raise RuntimeError(f"Exception during onboarding process: {maybeId}")
    def deleteVim(self):
        """Remove a not-yet-onboarded VIM image from temporary storage."""
        if self.service.vim_id is not None:
            # Remote deletion of already-onboarded images is not available.
            self.message = "Deletion of onboarded VIM images is not supported"
        else:
            self.message = "Deleting VIM image"
            self._deleteLocalFile(self.service.VimLocalPath, self.service.vim_image)
            self.message = "Deleted VIM image from temporal storage"
    def deleteNsd(self):
        """Remove a not-yet-onboarded NSD file from temporary storage."""
        if self.service.nsd_id is not None:
            self.message = "Deletion of onboarded NSD is not supported"
        else:
            self.message = "Deleting NSD file"
            self._deleteLocalFile(self.service.NsdLocalPath, self.service.nsd_file)
            self.message = "Deleted NSD file from temporal storage"
    def deleteVnf(self):
        """Remove a not-yet-onboarded VNFD package from temporary storage."""
        if self.vnfd.vnfd_id is not None:
            self.message = "Deletion of onboarded VNFDs is not supported"
        else:
            self.message = "Deleting VNFD package"
            self._deleteLocalFile(self.vnfd.VnfdLocalPath, self.vnfd.vnfd_file)
            self.message = "Deleted VNFD package file from temporal storage"
    def _deleteLocalFile(self, path, file):
        """Delete <UPLOAD_FOLDER>/<path...>/<file> if it exists (no-op otherwise)."""
        filePath = abspath(join(Config.UPLOAD_FOLDER, *path, file))
        if exists(filePath):
            remove(filePath)
    def __str__(self):
        return f"Action: {self.name} (St:{self.hasStarted}, Ed:{self.hasFinished}, Fail:{self.hasFailed})"
class ActionHandler:
    """In-memory registry of active Action instances, keyed by integer id."""

    collection: Dict[int, Action] = {}

    @classmethod
    def Get(cls, id: int) -> Optional[Action]:
        """Return the Action stored under `id`, or None when unknown."""
        return cls.collection.get(id)

    @classmethod
    def Set(cls, id: int, action: Action) -> None:
        """Store `action` under `id`, warning when an entry gets overwritten."""
        if id in cls.collection:
            from .log import Log  # deferred import — presumably avoids a circular import
            Log.W(f"Adding duplicated key to active Actions ({id}), overwriting existing: {cls.collection[id]}")
        cls.collection[id] = action

    @classmethod
    def Delete(cls, id: int) -> None:
        """Remove the Action stored under `id` (raises KeyError when absent)."""
        cls.collection.pop(id)
| StarcoderdataPython |
3249720 | from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    """Initial migration: creates the per-user Contact model."""

    initial = True

    dependencies = [
        # Depends only on the configured (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=30)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('role', models.IntegerField(choices=[(0, 'Producer'), (1, 'Executive Producer'), (2, 'Production Manager'), (3, 'Production Designer'), (4, 'Actor'), (5, 'Director'), (6, 'Medic'), (7, 'Wardrobe'), (8, 'Writer'), (9, 'Client'), (10, 'Other')])),
                ('city', models.CharField(max_length=30)),
                ('country', models.CharField(max_length=30)),
                ('phone_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),
                ('email', models.EmailField(max_length=254)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contacts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Contact',
                'verbose_name_plural': 'Contacts',
                'ordering': ('created_at',),
                'get_latest_by': 'updated_at',
                # A user may not have two contacts sharing an email or phone.
                'unique_together': {('user', 'email'), ('user', 'phone_number')},
            },
        ),
    ]
| StarcoderdataPython |
12820972 | <filename>NRUtil/constants.py<gh_stars>0
""" Declaring constants used by the archive script. """
import os
import dotenv
import sys
# Path to an optional .env file that sits next to this module.
envPath = os.path.join(os.path.dirname(__file__), '.env')
if os.path.exists(envPath):
    print("loading dot env...")
    # Bug fix: load the module-local .env explicitly. The original no-arg
    # call searched from the current working directory instead of using the
    # envPath computed (and checked) above.
    dotenv.load_dotenv(envPath)

# Required object-store settings: fail fast with KeyError when missing.
OBJ_STORE_BUCKET = os.environ['OBJ_STORE_BUCKET']
OBJ_STORE_SECRET = os.environ['OBJ_STORE_SECRET']
OBJ_STORE_USER = os.environ['OBJ_STORE_USER']
OBJ_STORE_HOST = os.environ['OBJ_STORE_HOST']

# Optional settings are attached as module attributes only when present.
optionals = ['TEST_OBJ_NAME']
module = sys.modules[__name__]
for optional in optionals:
    if optional in os.environ:
        setattr(module, optional, os.environ[optional])
| StarcoderdataPython |
6685736 | <filename>server/app/request.py
from flask import jsonify, abort
def api_error(code, message=""):
    """Abort the request with a JSON body {"message": ...} and HTTP status `code`."""
    resp = jsonify({"message": message})
    resp.status_code = code
    # flask.abort raises an HTTPException, so this never actually returns;
    # the `return` lets callers write `return api_error(...)` for readability.
    return abort(resp)
def bad_credentials():
    """Abort with 403 for a failed username/password check."""
    return api_error(403, "Wrong username or password")
def get_json_key(data, key):
    """Return data[key] from a parsed JSON body, aborting with 400 when absent.

    `data` is expected to be the request's parsed JSON payload (a dict).
    """
    if not isinstance(data, dict):
        return api_error(400, "You have to send a dictionary of parameters")
    value = data.get(key, None)
    # NOTE(review): an explicit JSON null is indistinguishable from a missing
    # key here — presumably acceptable for this API, but worth confirming.
    if value is None:
        return api_error(400, "Key {} is missing".format(key))
    return value
| StarcoderdataPython |
1926233 | # Copyright 2022 The Symanto Research Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A meta-dataset for evaluating few-shot learning models for text classification."""
from typing import List, Mapping, Optional, Tuple, no_type_check
import datasets
from symanto_fsb.datasets import (
SemEval2016TaskA,
cola,
deisear,
sab,
sb10k,
subj,
zsb,
)
_modules = {
"SemEval2016TaskA": SemEval2016TaskA,
"cola": cola,
"deisear": deisear,
"sab": sab,
"sb10k": sb10k,
"subj": subj,
"zsb": zsb,
}
@no_type_check
def load_dataset(
    name: str,
    config: Optional[str] = None,
) -> Mapping[str, datasets.Dataset]:
    """Load a benchmark dataset and normalize it to `text` / `label` columns.

    Every returned split is guaranteed to expose a string feature named
    "text" and a ClassLabel feature named "label"; otherwise ValueError is
    raised. Some dataset names are aliases ("yahoo", "unified") and some
    ("head_qa", "trec", "amazon_reviews_multi") need column renames/remaps.
    """
    # Aliases: both map onto the local "zsb" builder with a fixed config.
    if name == "yahoo":
        name = "zsb"
        config = "topic"
    if name == "unified":
        name = "zsb"
        config = "emotion"
    # Prefer the bundled builder script; fall back to the Hub dataset when
    # the name is not one of the local modules.
    try:
        ds: Mapping[str, datasets.Dataset] = datasets.load_dataset(
            _modules[name].__file__,
            config,
            ignore_verifications=True,
        )
    except KeyError:
        ds: Mapping[str, datasets.Dataset] = datasets.load_dataset(
            name,
            config,
            ignore_verifications=True,
        )
    for split_name in ds:
        data = ds[split_name]
        if name == "head_qa":
            # Keep only the question text and its (string) category, then
            # convert the category into a ClassLabel.
            data = data.remove_columns(
                ["name", "year", "qid", "ra", "image", "answers"]
            )
            data = data.rename_column("category", "label")
            data = data.rename_column("qtext", "text")
            new_features = data.features.copy()
            new_features["label"] = datasets.ClassLabel(
                names=[
                    "medicine",
                    "nursery",
                    "chemistry",
                    "biology",
                    "psychology",
                    "pharmacology",
                ]
            )
            def to_label(d):
                d["label"] = new_features["label"].str2int(d["label"])
                return d
            data = data.map(
                to_label,
            )
            data = data.cast(new_features)
        elif name == "trec":
            # Use the coarse label set only.
            data = data.remove_columns(["label-fine"])
            data = data.rename_column("label-coarse", "label")
        elif name == "amazon_reviews_multi":
            # Keep review body + star rating; shift stars 1..5 to ids 0..4.
            data = data.remove_columns(
                [
                    "review_id",
                    "product_id",
                    "reviewer_id",
                    "review_title",
                    "language",
                    "product_category",
                ]
            )
            data = data.rename_column("review_body", "text")
            data = data.rename_column("stars", "label")
            new_features = data.features.copy()
            new_features["label"] = datasets.ClassLabel(
                names=[
                    "1",
                    "2",
                    "3",
                    "4",
                    "5",
                ]
            )
            def to_label(d):
                d["label"] = d["label"] - 1
                return d
            data = data.map(
                to_label,
            )
            data = data.cast(new_features)
        # Sanity checks: every split must expose a ClassLabel "label" and a
        # string "text" feature.
        if "label" not in data.features:
            raise ValueError(name, data.features.keys())
        if not isinstance(data.features["label"], datasets.ClassLabel):
            raise ValueError(name, data.features.keys())
        if "text" not in data.features:
            raise ValueError(name, data.features.keys())
        if (
            not isinstance(data.features["text"], datasets.Value)
            or data.features["text"].dtype != "string"  # noqa: W503
        ):
            raise ValueError(name, data.features.keys())
        ds[split_name] = data
    return ds
def get_english_datasets() -> List[Tuple[str, Optional[str]]]:
    """Return the (dataset name, config) pairs used for English evaluation."""
    corpora = [
        "ag_news",
        "yahoo",
        "imdb",
        "yelp_review_full",
        "yelp_polarity",
        "SemEval2016TaskA",
        "unified",
        "cola",
        "subj",
        "trec",
    ]
    # Config-free corpora first, then the multilingual corpus pinned to "en".
    pairs: List[Tuple[str, Optional[str]]] = [(corpus, None) for corpus in corpora]
    pairs.append(("amazon_reviews_multi", "en"))
    return pairs
def get_german_datasets() -> List[Tuple[str, Optional[str]]]:
    """Return the (dataset name, config) pairs used for German evaluation."""
    pairs: List[Tuple[str, Optional[str]]] = []
    for corpus in ("gnad10", "sb10k", "deisear"):
        pairs.append((corpus, None))
    # The multilingual corpus is pinned to its German config.
    pairs.append(("amazon_reviews_multi", "de"))
    return pairs
def get_spanish_datasets() -> List[Tuple[str, Optional[str]]]:
    """Return the (dataset name, config) pairs used for Spanish evaluation."""
    pairs = [(corpus, None) for corpus in ("head_qa", "sab")]
    # The multilingual corpus is pinned to its Spanish config.
    return pairs + [("amazon_reviews_multi", "es")]
| StarcoderdataPython |
312883 | #Calcula o volume de uma fossa sépica para estabelecimentos residenciais em conformidade com a NBR 7229/1993
'''
Entre com os dados da Fossa Séptica
'''
# Interactive sizing of a residential septic tank per NBR 7229/1993.
# V = 1000 + N * (C * T + k * Lf), with N contributors, C the daily
# contribution per person (litres), T the detention time (days), k the sludge
# accumulation coefficient and Lf the fresh-sludge contribution.
print(' Dimensionamento de uma Fossa Séptica pra estabelecimentos residenciais em conformidade com a NBR 7229/1993:')
N = 1
while N != 0:
    N = int(input('Digite o número de contribuintes ou 0 (zero) para encerrar:\n '))
    if N == 0:
        print('Você finalizou o programa!')
        break
    elif N < 0:
        print('ERROR! Você digitou um valor inválido!\n Digite um valor maior que zero ou 0 para encerrar')
        continue
    else:
        temperatura = float(input('Digite a temperatura média do local em Graus Celsius:\n '))
        limpeza = float(input('Digite o intervalo de limpeza em anos:\n '))
        # NOTE(review): non-integer intervals (e.g. 1.5) pass this check but
        # match none of the k-table branches below, leaving k undefined —
        # confirm whether only whole years are intended.
        if limpeza < 0 or limpeza > 5:
            print('ERROR!O intervalo de limpeza deve está entre 1 ano e 5 anos!\n')
            continue
        else:
            padrao = input('Digite o padrão resiencial (a para alto, m para médio ou b para baixo):\n ')
            # Daily contribution C (litres/person/day) by residential standard.
            if padrao == 'a' or padrao == 'A':
                C = 160
                padrao = 'alto'
            elif padrao == 'm' or padrao == 'M':
                C = 130
                padrao = 'médio'
            elif padrao == 'b' or padrao == 'B':
                C = 100
                padrao = 'baixo'
            else:
                print('ERROR!Verifique a opção desejada digite apenas a, b ou m!\n')
                continue
            # Sludge accumulation coefficient k by cleaning interval and
            # ambient temperature band (NBR 7229/1993 table).
            if limpeza == 1:
                if temperatura <= 10:
                    k = 94
                elif temperatura > 10 and temperatura <= 20:
                    k = 65
                else:
                    k = 57
            elif limpeza == 2:
                if temperatura <= 10:
                    k = 134
                elif temperatura > 10 and temperatura <= 20:
                    k = 105
                else:
                    k = 97
            elif limpeza == 3:
                if temperatura <= 10:
                    k = 174
                elif temperatura > 10 and temperatura <= 20:
                    k = 145
                else:
                    k = 137
            elif limpeza == 4:
                if temperatura <= 10:
                    k = 214
                elif temperatura > 10 and temperatura <= 20:
                    k = 185
                else:
                    k = 177
            elif limpeza == 5:
                if temperatura <= 10:
                    k = 254
                elif temperatura > 10 and temperatura <= 20:
                    k = 225
                else:
                    k = 217
            contribuicao_diaria = N * C
            # Detention time T (days) by total daily contribution
            # (NBR 7229/1993 detention-period table).
            if contribuicao_diaria <= 0:
                print('ERRROR! Não é possível dimensionar o volume da fossa - N e C devem se maior que zero!')
            elif contribuicao_diaria > 0 and contribuicao_diaria <= 1500:
                # Bug fix: the original line read `T = T`, which raised a
                # NameError. NBR 7229 assigns Td = 1.00 day for daily
                # contributions of up to 1500 L.
                T = 1.0
            elif contribuicao_diaria > 1500 and contribuicao_diaria <= 3000:
                T = 0.92
            elif contribuicao_diaria > 3000 and contribuicao_diaria <= 4500:
                T = 0.83
            elif contribuicao_diaria > 4500 and contribuicao_diaria <= 6000:
                T = 0.75
            elif contribuicao_diaria >= 6000 and contribuicao_diaria <= 7500:
                T = 0.67
            elif contribuicao_diaria > 7500 and contribuicao_diaria <= 9000:
                T = 0.58
            else:
                T = 0.50
            Lf = 1
            print('Tratando apenas de empreendimentos residenciais, conforme NBR 7229/1993 Lf = 1!')
            # Useful volume in litres (NBR 7229/1993 sizing formula).
            V = 1000 + N*(C*T + k*Lf)
            print('\nMEMÓRIA DE CÁLCULO\n')
            print(f'Polulação: {N} contribuintes;\nPadrão Residencial: {padrao};\nTemperatura Média: {temperatura}C;\nContribuição Diária (C): {C} Litros;\nTemmpo de Detenção (Td): {T};')
            print(f'Contribuição de Lodo Fresco (Lf): {Lf};\nIntervalo de Limmpeza {limpeza} anos;\nVolume Útil: {V} Litros\n')
| StarcoderdataPython |
3440843 | """Weather plugin for the VO simulations."""
# ----------------------------------------------------------------------------!
import openclsim.model as model
# ----------------------------------------------------------------------------!
class HasDelayPlugin:
    """Mixin for Activity to initialize a DelayPlugin on the activity."""

    def __init__(self, delay_percentage=None, *args, **kwargs):
        """Class constructor.

        When a delay percentage is supplied and this mixin is combined with
        an OpenCLSim PluginActivity, a DelayPlugin is registered so every
        run of the activity gets extended by that percentage.
        """
        super().__init__(*args, **kwargs)
        if delay_percentage is not None and isinstance(self, model.PluginActivity):
            delay_plugin = DelayPlugin(delay_percentage=delay_percentage)
            # NOTE(review): priority=3 presumably orders this after other
            # plugins (e.g. weather) — confirm against OpenCLSim conventions.
            self.register_plugin(plugin=delay_plugin, priority=3)
# ----------------------------------------------------------------------------!
class DelayPlugin(model.AbstractPluginClass):
    """
    Mixin for all activities to add delay and downtime.
    The DelayPlugin allows the user to extend the activity length by a
    certain `delay percentage`. The user may define the delay
    percentage as either discrete valued or stochastic. If the user
    wishes to define the variable as a random variable, make sure to
    use the `scipy` package.

    Parameters
    ----------
    delay_percentage: float or scipy.stats.rv_continuous
        Either deterministic or statistically defined delay in
        percentage of the total duration. When using scipy.stats,
        make sure to define the distribution. For example,
        scipy.stats.norm(loc=0, scale=1).
    """

    def __init__(self, delay_percentage=None, *args, **kwargs):
        """Class constructor."""
        super().__init__(*args, **kwargs)
        # The original wrapped this chain in `try/except TypeError: raise`,
        # which only re-raised the TypeError raised below — a no-op that has
        # been removed.
        if isinstance(delay_percentage, (float, int)):
            # Deterministic delay, stored as a fraction of the duration.
            self.delay_factor = delay_percentage / 100
            self.delay_is_dist = False
        elif hasattr(delay_percentage, "rvs"):
            # Frozen scipy distribution: sampled per activity in post_process.
            self.delay_factor = delay_percentage
            self.delay_is_dist = True
        elif delay_percentage is None:
            self.delay_factor = None
            self.delay_is_dist = False
        else:
            raise TypeError(
                'delay_percentage accepts only a "float", '
                + '"int" or "scipy.stats.rv_continuous"'
            )

    def post_process(
        self, env, activity_log, activity, start_activity, *args, **kwargs
    ):
        """Post processes the activity by appending the configured delay."""
        # Check if delay has been defined. If not no delay is added.
        if self.delay_factor is None:
            return {}
        # Check if given delay factor is a random variate.
        elif self.delay_is_dist:
            dt = env.now - start_activity
            activity_delay = dt * self.delay_factor.rvs() / 100
        # If delay is discrete valued.
        else:
            activity_delay = (env.now - start_activity) * self.delay_factor
        activity_label = {"type": "plugin", "ref": "delay"}
        return activity.delay_processing(
            env, activity_label, activity_log, activity_delay
        )
| StarcoderdataPython |
381624 | <filename>cells/TC_mdPul.py
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 22 08:38:28 2019
@author: <NAME>
"""
from brian2 import *
# Integration step and codegen backend for the Brian2 simulation.
defaultclock.dt = 0.01*ms
prefs.codegen.target = 'numpy'

# Brian2 equation string for a thalamocortical (TC) cell of mdPul:
# membrane equation plus Na, K, leak, K-leak, H and low-threshold T-type
# Ca currents, synaptic inputs and two external drive conductances.
eq_TC_mdPul='''
dV/dt=(-INa-IK-IL-IKL-IH-ITLT-J-Isyn-Iapp-Itheta)/Cm_TC : volt
J : amp * meter ** -2
Itheta = amptheta * int(sin(2*pi*time_total*ftheta)<0) : amp * meter ** -2
amptheta : amp * meter ** -2
ftheta : Hz
dtime_total/dt=1 : second
Isyn=IsynHTC+IsynTC+IsynI+IsynREGABAA+IsynREGABAB+IsynREA+IsynREB+Isyn_FEF+Isyn_LIP : amp * meter ** -2
IsynHTC : amp * meter ** -2
IsynTC : amp * meter ** -2
IsynI : amp * meter ** -2
IsynREGABAA : amp * meter ** -2
IsynREGABAB : amp * meter ** -2
IsynREA : amp * meter ** -2
IsynREB : amp * meter ** -2
Isyn_FEF : amp * meter ** -2
Isyn_LIP : amp * meter ** -2
Vt=V+25*mV : volt
INa=gNa_TC*mNa**3*hNa*(V-ENa_TC)*int(mNa>0)*int(hNa>0) : amp * meter ** -2
dmNa/dt = (mNainf-mNa)/taumNa : 1
dhNa/dt = (hNainf-hNa)/tauhNa : 1
mNainf = alphamNa/(alphamNa+betamNa) : 1
taumNa=1*msecond/(alphamNa+betamNa) : second
hNainf = alphahNa/(alphahNa+betahNa) : 1
tauhNa=1*msecond/(alphahNa+betahNa) : second
alphamNa=(0.32*(13*mV-Vt))/(exp((13*mV-Vt)/4/mV)-1)/mV : 1
betamNa = (0.28*(Vt-40*mV))/(exp((Vt-40*mV)/5/mV)-1)/mV : 1
alphahNa=(0.128*exp((17*mV-Vt)/18/mV)) : 1
betahNa = 4/(1+exp((40*mV-Vt)/5/mV)) : 1
IK=gK_TC*mK**4*(V-EK_TC) : amp * meter ** -2
dmK/dt=(mKinf-mK)/taumK : 1
mKinf = alphamK/(alphamK+betamK) : 1
taumK=1/(alphamK+betamK) : second
alphamK = (0.032*(15*mV-Vt)/mV)/(exp((15*mV-Vt)/5/mV)-1)/ms : hertz
betamK=0.5 * exp((10*mV-Vt)/40/mV)/ms : hertz
IL=gL_TC*(V-EL_TC) : amp * meter ** -2
IKL=gKL_TC*(V-EKL_TC) : amp * meter ** -2
ITLT = gTLT_TC * mTLT**2 * hTLT * (V-ETLT) : amp * meter ** -2
dmTLT/dt = (mTLTinf-mTLT)/taumTLT : 1
dhTLT/dt = (hTLTinf-hTLT)/tauhTLT : 1
mTLTinf = 1/(1+exp(-(V+59*mV)/6.2/mV)) : 1
taumTLT= 0.1*ms: second
hTLTinf = 1/(1+exp((V+83*mV)/4/mV)) : 1
tauhTLT= 0.1*msecond*(30.8+ (211.4 + exp((V+115.2*mV)/5/mV))/(1+exp((V+86*mV)/3.2/mV)))/3.7372 : second
dCa_i/dt = (-10000000*ITLT)/(2*96485.3* coulomb * mole ** -1 * meter)*int(ITLT<0*amp * meter ** -2)- (Ca_i-0.00024*mmolar)/5/ms : mole * meter**-3
ETLT=120*mV : volt
IH = gH_TC * r * (V-EH_TC) : amp * meter ** -2
dr/dt=(rinf-r)/tausr : 1
tausr = 20*ms + 1000*ms / (exp((V+71.5*mV)/14.2/mV)+exp(-(V+89*mV)/11.6/mV)) : second
rinf = 1/(1+exp((V+75*mV)/5.5/mV)) : 1
Iapp=sinp*ginp_TC*(V-Vrev_inp) : amp * meter ** -2
dsinp/dt=-sinp/taudinp + (1-sinp)/taurinp*0.5*(1+tanh(Vinp/10/mV)) : 1
dVinp/dt=1/tauinp*(Vlow-Vinp) : volt
ginp_TC : siemens * metre **-2
Iapp2=sinp2*ginp_TC2*(V-Vrev_inp) : amp * meter ** -2
dsinp2/dt=-sinp2/taudinp2 + (1-sinp2)/taurinp2*0.5*(1+tanh(Vinp/10/mV)) : 1
dVinp2/dt=1/tauinp2*(Vlow2-Vinp2) : volt
ginp_TC2 : siemens * metre **-2
'''
#ETLT=R*T/(z*F)*log(2*mmolar/Ca_i)/log(2) : volt

# Biophysical parameters of the TC cell (conductances, reversal potentials,
# physical constants). Commented alternatives are kept as in the original.
Cm_TC = 2.5* ufarad * cm ** -2
gNa_TC=90e-3 * siemens * cm **-2
ENa_TC=50*mV
gK_TC=10e-3 * siemens * cm **-2 #10
EK_TC=-100*mV
gL_TC=0.01e-3 * siemens * cm **-2 #0.01
#gL_TC=0.015e-3 * siemens * cm **-2
EL_TC=-70*mV
#gKL_TC=0.006e-3 * siemens * cm **-2 #0.006 0.0028
gKL_TC=0.0028e-3 * siemens * cm **-2
EKL_TC=-100*mV
#gTLT_TC= 10e-3 * siemens * cm **-2
gTLT_TC= 30e-3 * siemens * cm **-2
R = 8.314 * joule * kelvin**-1 * mole **-1
T = (273.15+37.2)*kelvin
z = 2
F = 96485.3* coulomb * mole ** -1
gH_TC = 0.02e-3 * siemens * cm **-2
EH_TC = -43 * mV
a=1
N_TC=1
# Stand-alone demo: simulate one TC cell for 1 s and plot V, Ca and currents.
if __name__=='__main__':
    close('all')
    start_scope()
    TC=NeuronGroup(N_TC,eq_TC_mdPul,threshold='V>0*mvolt',refractory=3*ms,method='rk4')
    # Initial conditions.
    TC.V = '-80*mvolt'
    TC.Ca_i = '0.00024*mmolar'
#    TC.o1 = '0.5'
#    TC.c1 = '0.5'
#    TC.p0 = '0.5'
    TC.J = '0 * nA * cmeter ** -2'
    TC.mTLT = '1'
    # State monitors on neuron 0.
    V1=StateMonitor(TC,'V',record=[0])
#    V2=StateMonitor(TC,'IH',record=[0])
    V3=StateMonitor(TC,'Ca_i',record=[0])
    #R1=SpikeMonitor(SI)
    V6=StateMonitor(TC,'INa',record=[0])
    V7=StateMonitor(TC,'IK',record=[0])
    V8=StateMonitor(TC,'IL',record=[0])
    V9=StateMonitor(TC,'IKL',record=[0])
#    V10=StateMonitor(TC,'cSK',record=[0])
    V11=StateMonitor(TC,'ITLT',record=[0])
    V12=StateMonitor(TC,'hTLT',record=[0])
    V13=StateMonitor(TC,'mTLT',record=[0])
    V14=StateMonitor(TC,'ETLT',record=[0])
    V15=StateMonitor(TC,'hNa',record=[0])
    V16=StateMonitor(TC,'mNa',record=[0])
    H1=StateMonitor(TC,'IH',record=[0])
#    H2=StateMonitor(TC,'o1',record=[0])
#    H3=StateMonitor(TC,'p0',record=[0])
#    H4=StateMonitor(TC,'c1',record=[0])
#    H5=StateMonitor(TC,'tausH',record=[0])
#    H6=StateMonitor(TC,'hinfH',record=[0])
    run(1*second)
    # Membrane potential trace.
    figure()
    plot(V1.t/second,V1.V[0]/volt)
    xlabel('Time (s)')
    ylabel('Membrane potential (V)')
    title('TC cell')
#    figure()
#    plot(V10.t/second,V10.cSK[0]/volt)
    figure()
    plot(V3.t/second,V3.Ca_i[0])
    xlabel('Time (s)')
    title('Ca concentration')
    # T-type Ca current, its gates and reversal potential.
    figure()
    subplot(131)
    plot(V11.t/second,V11.ITLT[0])
    xlabel('Time (s)')
    title('I_TLT')
    subplot(132)
    plot(V12.t/second,V12.hTLT[0],label='h')
    plot(V13.t/second,V13.mTLT[0],label='m')
    xlabel('Time (s)')
    legend()
    subplot(133)
    plot(V14.t/second,V14.ETLT[0])
    xlabel('Time (s)')
    title('E_TLT')
    figure()
    subplot(131)
    plot(V6.t/second,V6.INa[0])
    xlabel('Time (s)')
    title('I_Na')
    subplot(132)
    plot(V15.t/second,V15.hNa[0])
    xlabel('Time (s)')
    title('h_Na')
    subplot(133)
    plot(V16.t/second,V16.mNa[0])
    xlabel('Time (s)')
    title('mNa')
    # Overlay of all membrane currents.
    figure()
    plot(V6.t/second,V6.INa[0],label='INa')
    plot(V7.t/second,V7.IK[0],label='IK')
    plot(V8.t/second,V8.IL[0],label='IL')
    plot(V9.t/second,V9.IKL[0],label='IKL')
    plot(V11.t/second,V11.ITLT[0],label='ITLT')
    plot(H1.t/second,H1.IH[0],label='IH')
#    plot(V10.t/second,V10.Iapp[0],label='Iapp')
#    plot(V11.t/second,V11.IappGABA[0],label='IappGABA')
#    plot(V11.t/second,V6.INa[0]+V7.IK[0]+V8.IL[0]+V9.IKL[0]+V4.ITLT[0]+V2.IH[0]+V10.Iapp[0]+V11.IappGABA[0],label='sum of all I')
    legend()
#    figure()
#    plot(H1.t/second,H1.IH[0],label='IH')
#    plot(H1.t/second,H2.o1[0],label='o1')
#    plot(H1.t/second,H3.p0[0],label='p0')
#    plot(H1.t/second,H4.c1[0],label='c1')
#    plot(H1.t/second,H5.tausH[0],label='tauh')
#    plot(H1.t/second,H6.hinfH[0],label='hinf')
#    legend()
#
#figure()
#plot(V2.t/second,-V2.Iapp[0])
#xlabel('Time (s)')
#ylabel('Input current')
# IH = gH_TC * (o1 + a*(1-c1-o1)) * (V-EH_TC) : amp * meter ** -2
# do1/dt = 0.0001/ms * (1-c1-o1) - 0.001/ms * ((1-p0)/0.01) : 1
# dp0/dt = 0.0004/ms * (1-p0) - 0.004/ms * (Ca_i/(0.0002*mmolar))**2 : 1
# dc1/dt = betaH * o1 - alphaH * c1 : 1
# betaH = (1-hinfH)/tausH : hertz
# alphaH = hinfH / tausH : hertz
# tausH = 20 * ms + 1000* ms/(exp((V+71.5*mV)/14.2/mV)+exp(-(V+89*mV)/11.6/mV)) : second
# hinfH = 1 / (1 + exp((V+75*mV)/5.5/mV)) : 1
#*int(ITLT<0*amp * meter ** -2)
#mTLTinf = 1/(1+exp(-(V+59*mV)/6.2/mV)) : 1
#taumTLT= 1*ms: second
#hTLTinf = 1/(1+exp((V+83*mV)/4/mV)) : 1
#tauhTLT= (30.8+ (211.4 + exp((V+115.2*mV)/5/mV))/(1+exp((V+86*mV)/3.2/mV)))/3.7372*ms : second
# ISK = gSK_HTC * cSK * (V-EK_TC) : amp * meter ** -2
# dcSK/dt = (cSKinf-cSK)/taucSK : 1
# cSKinf = 0.81 / (1 + exp(-(log(Ca_i/mmolar)+0.3)/0.46)): 1
# taucSK = 6.1*msecond : second | StarcoderdataPython |
4965202 | <gh_stars>0
import matplotlib.pyplot as plt
import numpy as np
def exp_moving_average(values, window):
    """Numpy implementation of an exponential moving average.

    Args:
        values: 1-D sequence of samples.
        window: smoothing window length, in samples.

    Returns:
        numpy.ndarray with one smoothed value per input sample. When
        ``window >= len(values)`` every entry is the plain mean of
        ``values`` (there are not enough samples to smooth).
    """
    values = np.asarray(values, dtype=float)
    n = len(values)
    if window >= n:
        # Consistency fix: the original returned a Python list in this
        # branch but an ndarray in the other; always return an ndarray.
        return np.full(n, values.mean())
    weights = np.exp(np.linspace(-1., 0., window))
    weights /= weights.sum()
    a = np.convolve(values, weights, mode='full')[:n]
    # The first `window` outputs come from a partial window; replace them
    # with the first fully-windowed value to avoid a start-up ramp.
    a[:window] = a[window]
    return a
# Demo: plot two workers' raw losses against their EMA-smoothed curves.
fig = plt.figure()
ax = []
# Hard-coded sample loss histories, one per worker.
losses = {}
losses[0] = [1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9]
losses[1] = [2.0, 2.1, 1.2, 1.3, 1.4, 2.5, 1.6, 1.7, 1.8, 1.9]
for worker_idx in range(2):
    ax.append(fig.add_subplot(2, 1, worker_idx + 1))
fig.subplots_adjust(hspace=0.25)
# NOTE(review): mean_loss is computed but never used below — confirm intent.
mean_loss = np.mean(losses[0][-10:])
ax[0].plot(range(len(losses[0])), losses[0], 'b')
ax[0].plot(range(len(losses[0])),
           exp_moving_average(losses[0], 10), 'r')
ax[0].legend(["Loss", "Loss_EMA"])
ax[1].plot(range(len(losses[1])), losses[1], 'b')
ax[1].plot(range(len(losses[1])),
           exp_moving_average(losses[1], 10), 'r')
ax[1].legend(["Loss", "Loss_EMA"])
fig.savefig("loss_worker.png")
| StarcoderdataPython |
# Interactive demo of a figure sequence that repeats with period 6;
# index % 6 selects the figure.
print('''Existe uma sequência que se repete indefinidamente com as figuras
de um triângulo, árvore, losângulo, espada, coração e um quadrado, nesse padrão.
Ou seja, há um padrão de repetição das 6 primeiras figuras.\n''')
# Typo fix in the prompt: "elmentos" -> "elementos".
n = int(input("Digite o número de elementos que a sequência vai ter.\n"))
# Bug fix: the number of complete 6-figure patterns is a count, so use
# integer division (the original `n / 6` printed a float such as 2.5).
padroes = n // 6
resto = n % 6
# Typo fix in the output: "fuguras" -> "figuras".
print(f'''Num cenário em que essa sequência tenha {n} figuras,
haverá {padroes} padrões e restará {resto} elementos.''')
n1 = int(input("Digite o número inicial para saber a sequência de figuras:"))
n2 = int(input("Digite o número final:"))
# Period-6 figure names, indexed by i % 6 (replaces the original elif chain).
figuras = ("triângulo", "árvore", "losângulo", "espada", "coração", "quadrado")
# NOTE(review): range(n1, n2) excludes n2 itself; if the "número final"
# should be included, this would need range(n1, n2 + 1) — confirm intent.
for i in range(n1, n2):
    print(f"{i} {figuras[i % 6]} ", end=' ')
| StarcoderdataPython |
6583277 | # This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Most of this work is copyright (C) 2013-2020 <NAME>
# (<EMAIL>), but it contains contributions by others. See
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
#
# END HEADER
from hypothesis.internal.conjecture.dfa import ConcreteDFA
SHRINKING_DFAS = {}
# Note: Everything below the following line is auto generated.
# Any code added after this point will be deleted by an automated
# process. Don't write code below this point.
#
# AUTOGENERATED BEGINS
# fmt: off
SHRINKING_DFAS['datetimes()-d66625c3b7'] = ConcreteDFA([[(0, 1), (1, 255, 2)], [(0, 3), (1, 255, 4)], [(0, 255, 4)], [(0, 5), (1, 255, 6)], [(0, 255, 6)], [(5, 255, 7)], [(0, 255, 7)], []], {7}) # noqa: E501
SHRINKING_DFAS['emails()-fde8f71142'] = ConcreteDFA([[(0, 1), (1, 255, 2)], [(0, 255, 2)], []], {2}) # noqa: E501
SHRINKING_DFAS['floats()-58ab5aefc9'] = ConcreteDFA([[(1, 1), (2, 255, 2)], [(1, 3)], [(0, 1, 3)], []], {3}) # noqa: E501
SHRINKING_DFAS['floats()-6b86629f89'] = ConcreteDFA([[(3, 1), (4, 255, 2)], [(1, 3)], [(0, 1, 3)], []], {3}) # noqa: E501
SHRINKING_DFAS['floats()-aa8aef1e72'] = ConcreteDFA([[(2, 1), (3, 255, 2)], [(1, 3)], [(0, 1, 3)], []], {3}) # noqa: E501
SHRINKING_DFAS['floats()-bf71ffe70f'] = ConcreteDFA([[(4, 1), (5, 255, 2)], [(1, 3)], [(0, 1, 3)], []], {3}) # noqa: E501
SHRINKING_DFAS['text()-05c917b389'] = ConcreteDFA([[(0, 1), (1, 8, 2)], [(9, 255, 3)], [(0, 255, 4)], [], [(0, 255, 5)], [(0, 255, 3)]], {3}) # noqa: E501
SHRINKING_DFAS['text()-807e5f9650'] = ConcreteDFA([[(0, 8, 1), (9, 255, 2)], [(1, 8, 3)], [(1, 8, 3)], [(0, 4)], [(0, 255, 5)], []], {2, 5}) # noqa: E501
# fmt: on
| StarcoderdataPython |
11284234 | import os
import argparse
import random
from statistics import mean
import xml.etree.ElementTree as ET
# Compute mean recall@50 of a BM25 ranking against the noticed (relevant)
# cases of each COLIEE task-1 query directory.
random.seed(42)
#
# config
#
parser = argparse.ArgumentParser()
parser.add_argument('--train-dir', action='store', dest='train_dir',
                    help='train directory location', required=True)
parser.add_argument('--test-gold-labels', action='store', dest='test_gold_labels',
                    help='test gold labels xml file', required=False)
args = parser.parse_args()
#train_dir = '/mnt/c/Users/sophi/Documents/phd/data/coliee2019/task1/task1_train'
#test_gold_labels = '/mnt/c/Users/sophi/Documents/phd/data/coliee2019/task1/task1_test_golden-labels.xml'
#
# load directory structure
#
list_dir = [x for x in os.walk(args.train_dir)]
if args.test_gold_labels:
    #
    # load gold labels as dictionary (query id -> list of noticed case ids)
    #
    tree = ET.parse(args.test_gold_labels)
    root = tree.getroot()
    gold_labels = {}
    for child in root:
        rank = child.find('cases_noticed').text
        rank = rank.split(',')
        gold_labels.update({child.attrib['id']: rank})
recall = []
# list_dir[0][1] holds the immediate subdirectories of train-dir, one per query.
for sub_dir in list_dir[0][1]:
    if args.test_gold_labels:
        # NOTE(review): .get returns None for an unknown sub_dir, which would
        # crash in set() below — presumably every directory has gold labels.
        doc_rel_id = gold_labels.get(sub_dir)
    else:
        # read in relevant document ids
        with open(os.path.join(args.train_dir, sub_dir, 'noticed_cases.txt'), 'r') as entailing_paragraphs:
            doc_rel_id = entailing_paragraphs.read().splitlines()
    with open(os.path.join(args.train_dir, sub_dir, 'bm25_top50.txt'), 'r') as top50:
        doc_bm25 = top50.read().splitlines()
    # Each BM25 line looks like "<prefix>_<case id> ..."; keep the case id.
    doc_bm25 = [doc.split('_')[1].strip() for doc in doc_bm25]
    recall.append(len(set(doc_rel_id) & set(doc_bm25))/len(doc_rel_id))
print(mean(recall))
# also add code for precision? and then F1-score?
8075570 | <gh_stars>10-100
# Schema Version 3
# Canvas course and student IDs are now cached
# JSON Schema describing the version-3 on-disk course configuration.
V3 = {
    "$schema": "http://json-schema.org/schema#",
    "type": "object",
    "properties": {
        # Config version
        "version": {
            "type": "integer",
        },
        # Backend type (gitlab / mock)
        "backend": {
            "type": "object",
            "oneOf" : [
                { # Gitlab
                    "properties" : {
                        "name": {
                            "type": "string",
                            "enum" : ["gitlab"],
                        },
                        # GitLab private token
                        "token": {
                            "type": "string",
                        },
                        # GitLab domain (https://git.gitlab.com)
                        "host": {
                            "type": "string",
                        },
                    },
                    "required" : ["name", "token", "host"],
                    "additionalProperties": False,
                },
                { # Mock
                    "properties" : {
                        "name": {
                            "type": "string",
                            "enum" : ["mock"],
                        },
                    },
                    "required" : ["name"],
                    "additionalProperties": False,
                }
            ]
        },
        # GitLab Namespace name
        "namespace": {
            "type": "string",
        },
        # GitLab Namespace ID (we'd have to retrieve that)
        "namespace-id": {
            "type": "integer",
        },
        # Verbose name of the course (might be unnecessary)
        "course-name": {
            "type": "string",
        },
        # Current semester
        "semester": {
            "type": "string",
            "pattern": r"^\d{4}-(SP|FS|SS)$"
        },
        # Roster of students
        "roster": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    # Their full name
                    "name": {
                        "type": "string"
                    },
                    # Section
                    "section": {
                        "type": "string"
                    },
                    # Their GitLab username (single sign on)
                    "username": {
                        "type": "string",
                        # Raw string fixes the W605 invalid escape sequences
                        # of the original plain literal; the value is unchanged.
                        "pattern": r"^[\w\.\-]+$",
                    },
                    # Their GitLab id (might be handy, but we'd have
                    # to fetch it and save it). Should save time in
                    # the long run instead of constantly querying
                    "id": {
                        "type": "integer",
                    },
                    "canvas-id": {
                        "type": "integer",
                    },
                },
                "required": ["name", "username", "section"],
                "additionalProperties": False,
            },
        },
        # Canvas API token
        "canvas-token": {
            "type": "string",
        },
        # Canvas domain
        "canvas-host": {
            "type": "string",
        },
        # Canvas course IDs by section
        "canvas-courses": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    # Section
                    "section": {
                        "type": "string"
                    },
                    # Canvas course ID
                    "id": {
                        "type": "integer",
                    },
                },
                "required": ["section"],
                "additionalProperties": False,
            },
        },
    },
    "required": ["version", "backend", "namespace", "semester"],
    "additionalProperties": False,
}
| StarcoderdataPython |
4920661 | <filename>entities/health.py
import pygame
from random import randint
# Cross-mark drawing constants (pixels): `gros` is the bar thickness and
# `long` the margin subtracted from the bar length — see Health.render.
gros = 4
long = 8
class Health():
    """Health pickup entity: a white box with a red cross, placed randomly."""

    def __init__(self, w, h, min_life, max_life, screen):
        # Random position fully inside the screen, keeping a w/h margin.
        self.x = randint(0 + w, screen.get_width() - w)
        self.y = randint(0 + h, screen.get_height() - h)
        self.w = w
        self.h = h
        # Amount of life this pickup restores.
        self.life = randint(min_life, max_life)
    def tick(self, screen):
        """Per-frame update (currently a no-op placeholder)."""
        # NOTE(review): self.cont is set but never read elsewhere in this
        # class — confirm whether it is used by callers.
        self.cont = 0
    def render(self, screen):
        """Draw the white background box and the red cross on *screen*."""
        pygame.draw.rect(screen, (255, 255, 255), (self.getX(), self.getY(), self.getWidth(), self.getHeight()))
        pygame.draw.rect(screen, (255, 0, 0), (self.getX() + (long / 2), self.getY() + ((self.getHeight() / 2 - (gros / 2))), self.getWidth() - long, gros)) #Horizontal
        pygame.draw.rect(screen, (255, 0, 0), (self.getX() + ((self.getWidth() / 2 - (gros / 2))), self.getY() + (long / 2), gros, self.getHeight() - long)) #Vertical
    #Get
    def getX(self):
        return self.x
    def getY(self):
        return self.y
    def getWidth(self):
        return self.w
    def getHeight(self):
        return self.h
    def getLife(self):
        return self.life
    #
    #Set
    def setX(self, newX):
        self.x = newX
    def setY(self, newY):
        self.y = newY
    def setWidth(self, newWidth):
        self.w = newWidth
    def setHeight(self, newHeight):
        self.h = newHeight
    def setLife(self, newLife):
        self.life = newLife
    #
#
def setup(w, h, min_life, max_life, screen):
    """Factory helper: create a Health pickup randomly placed on *screen*."""
    return Health(w, h, min_life, max_life, screen)
6457917 | <gh_stars>10-100
"""Installation file for ansys-dpf-post module"""
import os
from io import open as io_open
from setuptools import setup
install_requires = ["ansys.dpf.core>=0.3.0", "scooby"]

# Get version from version info
filepath = os.path.dirname(__file__)
__version__ = None
version_file = os.path.join(filepath, "ansys", "dpf", "post", "_version.py")
with io_open(version_file, mode="r") as fd:
    # NOTE(review): exec'ing the file populates __version__; acceptable here
    # because _version.py is a trusted, package-local file.
    exec(fd.read())  # execute file from raw string

readme_file = os.path.join(filepath, "README.md")

setup(
    name="ansys-dpf-post",
    packages=["ansys.dpf.post", "ansys.dpf.post.examples"],
    version=__version__,
    author='ANSYS',
    author_email='<EMAIL>',
    maintainer="ANSYS",
    maintainer_email="<EMAIL>",
    description="DPF-Post Python gRPC client",
    url="https://github.com/pyansys/pydpf-post",
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Information Analysis",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    python_requires=">=3.6.*",
    extras_require={
        "plotting": ["vtk<9.1.0", "pyvista>=0.24.0", "matplotlib"],
    },
    install_requires=install_requires,
    license='MIT',
)
| StarcoderdataPython |
9707675 | <reponame>wudinaonao/FlaskMark12306Captcha<filename>constants/ImagePositionCoordinates.py<gh_stars>1-10
# Pixel bounding boxes [(left, top), (right, bottom)] of the eight candidate
# sub-images of a 12306 captcha (four columns by two rows, listed column by
# column); the "+ 30" offsets skip the prompt banner at the top of the image.
IMAGE_POSITION_COORDINATES = [
    [(5, 12 + 30), (72, 79 + 30)],
    [(5, 84 + 30), (72, 151 + 30)],
    [(77, 12 + 30), (142, 79 + 30)],
    [(77, 84 + 30), (142, 151 + 30)],
    [(147, 12 + 30), (214, 79 + 30)],
    [(147, 84 + 30), (214, 151 + 30)],
    [(221, 12 + 30), (286, 79 + 30)],
    [(221, 84 + 30), (286, 151 + 30)]
]
11354365 | <filename>setup.py
import os.path
import re
import setuptools
def find_version(filename):
    """Return the version assigned to ``_version_str`` inside *filename*.

    Raises RuntimeError when no such assignment is found.
    """
    with open(filename) as f:
        contents = f.read()
    found = re.search(r"^_version_str = '(.*)'$", contents, re.MULTILINE)
    if found is None:
        raise RuntimeError('cannot find version')
    return found.group(1)
# Resolve paths relative to this setup.py so the build works from any CWD.
tld = os.path.abspath(os.path.dirname(__file__))
# Single source of truth for the version: parsed out of the package itself.
version = find_version(os.path.join(tld, 'bitcoinx', '__init__.py'))
setuptools.setup(
    name='bitcoinX',
    version=version,
    python_requires='>=3.6',
    install_requires=['attrs', 'pyaes', 'electrumsv-secp256k1'],
    packages=['bitcoinx'],
    description='Library of Bitcoin functions',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT Licence',
    url='https://github.com/kyuupichan/bitcoinX',
    # Tarball URL tracks the tagged release matching the parsed version.
    download_url=('https://github.com/kyuupichan/bitcoinX/archive/'
                  f'{version}.tar.gz'),
    long_description=(
        'Library of Bitcoin functions covering network protocol, consensus, '
        'transactions, scripting and signing.'
    ),
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        "Programming Language :: Python :: 3.6",
        'Topic :: Internet',
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
| StarcoderdataPython |
11305839 | <filename>bot/service/network_service.py
import requests
def is_syncing(node_ip):
    """Return True when the node at *node_ip* reports that it is syncing.

    Raises ConnectionError on a non-OK HTTP response from the node.
    """
    response = requests.get(f'{node_ip}/syncing')
    if not response.ok:
        raise ConnectionError
    syncing = response.json().get('syncing', False)
    # BUG FIX: the original tested ``syncing is bool``, which compares the
    # value against the *type object* and is therefore always False -- the
    # function could never return True.  The intended type check is
    # isinstance(); only a genuine boolean True counts as "syncing".
    return isinstance(syncing, bool) and syncing
| StarcoderdataPython |
6684149 | from django.shortcuts import redirect, render
from django.views.generic import TemplateView, ListView
from django.db.models import Q
from jobs.models import Job
from events.models import Event
from meetups.models import Meetup
from communities.models import Community
class HomePageView(TemplateView):
    """Render the empty search form; results are produced by SearchResultsView."""
    template_name = 'search/search.html'
def SearchResultsView(request):
    """Full-text search across jobs, events, meetups and communities.

    On POST, matches the ``q`` parameter case-insensitively against the
    text fields of each model and adds per-model result sets plus a total
    count to the context; on GET it renders the results template with
    only the title in the context (matching the original behaviour).
    """
    context = {
        "title": "result",
    }
    if request.method == "POST":
        query = request.POST.get('q', None)
        context['query'] = query
        # NOTE: the original OR-chain repeated Q(seniority__icontains=query)
        # twice; the redundant duplicate term has been dropped.
        job_result = Job.objects.filter(
            Q(title__icontains=query) | Q(description__icontains=query) |
            Q(address__icontains=query) | Q(seniority__icontains=query) |
            Q(employment__icontains=query) | Q(role__icontains=query))
        event_result = Event.objects.filter(
            Q(title__icontains=query) | Q(description__icontains=query) |
            Q(venue__icontains=query))
        meetup_result = Meetup.objects.filter(
            Q(title__icontains=query) | Q(description__icontains=query) |
            Q(address__icontains=query) | Q(location__icontains=query))
        community_result = Community.objects.filter(
            Q(title__icontains=query) | Q(description__icontains=query))
        context['total'] = (job_result.count() + event_result.count() +
                            meetup_result.count() + community_result.count())
        context['jobs'] = job_result
        context['events'] = event_result
        context['meetups'] = meetup_result
        context['communities'] = community_result
    return render(request, "search/results.html", context)
4834954 | from prettytable import PrettyTable
def is_symmetric(A, n):
    """Return True when the n x n matrix A equals its transpose."""
    for i in range(n):
        # BUG FIX: the original looped over range(i - 1), which never
        # compared A[i][i-1] with A[i-1][i]; e.g. [[1, 2], [3, 4]] was
        # wrongly reported as symmetric.  range(i) checks every pair in
        # the strict lower triangle exactly once.
        for j in range(i):
            if A[i][j] != A[j][i]:
                return False
    return True
def LU(A, n):
    """In-place elimination used for the LDL^T factorization.

    On return the strict lower triangle of A holds the multipliers of the
    unit lower-triangular factor, and the remaining entries hold the
    eliminated upper part (whose diagonal supplies D for symmetric input).
    Exits the whole program on a zero pivot, as the original did.
    """
    # Only columns with rows below them need a pivot; range(n) with an
    # empty inner loop for k == n-1 is equivalent to range(n - 1).
    for k in range(n - 1):
        # Hoisted out of the inner loop: row k is never modified while
        # eliminating below it, so one check per column suffices.
        if A[k][k] == 0:
            # Reworded: a zero pivot means a leading principal submatrix
            # IS singular (the original message said the opposite).
            print("ERROR! One of the leading principal submatrices is singular.")
            exit()
        for i in range(k + 1, n):
            A[i][k] /= A[k][k]
            for j in range(k + 1, n):
                A[i][j] -= (A[i][k] * A[k][j])
def print_LDLT(A, n):
    """Print the L and D factors of the LDL^T decomposition of A.

    A must be symmetric.  The matrix is factorized in place via LU() and
    the unit lower-triangular factor L and the diagonal factor D are
    printed as borderless tables.  Exits the program on a non-symmetric
    input (or, inside LU, on a zero pivot).
    """
    if not is_symmetric(A, n):
        print("ERROR! The matrix is not symmetric.")
        exit()
    LU(A, n)

    def fmt(x):
        # Render whole numbers without a trailing ".0", as the original did.
        return int(x) if x % 1 == 0 else x

    def show(label, entry):
        # Build and print one borderless, headerless n x n table whose
        # (i, j) cell is entry(i, j).  This replaces the formerly
        # copy-pasted L and D table-building loops.
        table = PrettyTable()
        for i in range(n):
            table.add_row([entry(i, j) for j in range(n)])
        print("\n" + label + ":")
        table.border, table.header = False, False
        print(table)

    # L: unit diagonal, elimination multipliers below, zeros above.
    show("L", lambda i, j: 1 if j == i else (0 if j > i else fmt(A[i][j])))
    # D: the pivots on the diagonal, zeros elsewhere.
    show("D", lambda i, j: fmt(A[i][j]) if i == j else 0)
# ---- Interactive entry point: read an n x n matrix and print its LDL^T ----
n = int(input("Enter the number of columns and rows:"))
print("Enter the matrix (separate columns with tab or space and separate rows with enter):")
A = []
for i in range(n):
    A.append([])
    inp = input().split()
    for j in range(n):
        if len(inp) == 0:
            print("ERROR! The number of columns is less than expected.")
            exit()
        A[i].append(float(inp.pop(0)))
    if len(inp) > 0:
        print("ERROR! The number of columns is greater than expected.")
        exit()
# The dataset artefact "| StarcoderdataPython" that was fused onto this call
# has been removed: it referenced an undefined name and raised NameError.
print_LDLT(A, n)
1658511 | """
Migration script to add the post_job_action_association table.
"""
import logging
from sqlalchemy import (
Column,
ForeignKey,
Integer,
MetaData,
Table,
)
from galaxy.model.migrate.versions.util import (
create_table,
drop_table,
)
# Module-level logger and the SQLAlchemy MetaData the table below attaches to;
# the metadata is bound to the migration engine inside upgrade()/downgrade().
log = logging.getLogger(__name__)
metadata = MetaData()
# Association table linking a post-job action to the job it ran for.
PostJobActionAssociation_table = Table(
    "post_job_action_association",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("post_job_action_id", Integer, ForeignKey("post_job_action.id"), index=True, nullable=False),
    Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=False),
)
def upgrade(migrate_engine):
    """Create the post_job_action_association table on *migrate_engine*."""
    print(__doc__)
    # Bind and reflect the live schema before creating the new table.
    metadata.bind = migrate_engine
    metadata.reflect()
    create_table(PostJobActionAssociation_table)
def downgrade(migrate_engine):
    """Drop the post_job_action_association table (reverse of upgrade)."""
    metadata.bind = migrate_engine
    metadata.reflect()
    drop_table(PostJobActionAssociation_table)
| StarcoderdataPython |
6620186 | <filename>Linux-Operation0605/app/core/templatetags/tags.py<gh_stars>0
# coding=utf-8
import datetime
from django import template
from app.core.models import Domain
from app.utils.domain_session import get_domainid_bysession
# Registry through which the filters/tags below are exposed to templates.
register = template.Library()
@register.filter
def int2datetime(t):
    """Template filter: format a Unix timestamp as 'YYYY-MM-DD HH:MM:SS'.

    Falsy values render as '-'; values that cannot be parsed are returned
    unchanged so the template still shows something.
    """
    try:
        return datetime.datetime.fromtimestamp(float(t)).strftime("%Y-%m-%d %H:%M:%S") if t else '-'
    except (TypeError, ValueError, OverflowError, OSError):
        # Narrowed from a bare ``except:`` so that programming errors and
        # KeyboardInterrupt are no longer silently swallowed; these four
        # cover everything float() and fromtimestamp() can raise here.
        return t
@register.filter
def float2percent(t):
    """Template filter: render a float with two decimals, '-' for anything else."""
    if isinstance(t, float):
        return '%.2f' % t
    return '-'
@register.filter
def list_sum(list, key):
    """Template filter: sum ``key`` across a sequence of dicts (missing -> 0)."""
    return sum(entry.get(key, 0) for entry in list)
@register.filter
def preview_check(filname):
    """Template filter: True when the file name has a previewable image suffix.

    The suffix comparison is case-insensitive.  (A commented-out block that
    once whitelisted office-document suffixes has been removed as dead code.)
    """
    allow_suffix = ('jpg', 'jpeg', 'png', 'gif', 'bmp')
    suffix = filname.split('.')[-1].lower()
    return suffix in allow_suffix
@register.filter
def smooth_timedelta(timedeltaobj):
    """Convert a datetime.timedelta object into Days, Hours, Minutes, Seconds."""
    secs = timedeltaobj.total_seconds()
    pieces = []
    if secs > 86400:  # 60sec * 60min * 24hrs
        days, secs = divmod(secs, 86400)
        pieces.append("{} 天".format(int(days)))
    if secs > 3600:
        hrs, secs = divmod(secs, 3600)
        pieces.append(" {} 小时".format(int(hrs)))
    if secs > 60:
        mins, secs = divmod(secs, 60)
        pieces.append(" {} 分钟".format(int(mins)))
    if secs > 0:
        pieces.append(" {} 秒".format(int(secs)))
    # Each piece already carries its separating space, matching the
    # original's string concatenation exactly.
    return "".join(pieces)
@register.inclusion_tag('switch_domain.html')
def switch_domain(request):
    """Inclusion tag: render the domain switcher with every enabled domain
    and the domain id currently stored in the session."""
    return {
        'domain_list': Domain.objects.filter(disabled='-1'),
        'domain_id': get_domainid_bysession(request),
    }
| StarcoderdataPython |
3465285 | <reponame>MarvinT/ephys-analysis
from __future__ import absolute_import
import h5py
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from . import core
def kwik2rigid_pandas(block_path):
    '''
    loads data in a manner that is very useful for acute experiments
    Parameters
    ------
    block_path : str
        the path to the block
    Returns
    ------
    spikes : Pandas.DataFrame
        columns: cluster, recording, stim_name, stim_presentation,
            stim_duration, stim_aligned_time
    stims : Pandas.DataFrame
        columns: stim_start, stim_end, stim_name, stim_presentation,
            stim_duration
    '''
    spikes = core.load_spikes(block_path)
    stims = load_acute_stims(block_path)
    # Number each presentation of every stimulus (0-based), in place.
    count_events(stims)
    fs = core.load_fs(block_path)
    stims['stim_duration'] = stims['stim_end'] - stims['stim_start']
    # Convert stim_duration from samples to seconds, in place.
    timestamp2time(stims, fs, 'stim_duration')
    # Attach each spike's owning stimulus-window columns.
    spikes = spikes.join(align_events(spikes, stims))
    # Spike time relative to stimulus onset -- still in samples here...
    spikes['stim_aligned_time'] = (spikes['time_samples'].values.astype('int') -
                                   spikes['stim_start'].values)
    del spikes['time_samples']
    del spikes['stim_start']
    # ...and now converted to seconds.
    timestamp2time(spikes, fs, 'stim_aligned_time')
    return spikes, stims
def load_acute_stims(block_path):
    '''
    Fast code to load up stimuli information for an acute recording
    Makes fewer checks and more assumptions than core.load_trials()
    Doesn't include behavior only columns.
    ~8000x speedup on an example acute dataset.
    Parameters
    -------
    block_path : str
        the path to the block
    Returns
    ------
    trials : pandas dataframe
        Columns
        ------
        stim_start : int
            Time in samples of the start of a stimulus (trial)
        stim_name : str
            Name of the stimulus
        stim_end : int
            Time in samples of the end of the stimulus
    '''
    stims = core.load_events(block_path, 'DigMark')
    # assumes one start and one end for each trial
    # b'<' rows mark stimulus starts and b'>' rows mark ends; this pairs
    # each start with the end timestamp in order (strict alternation assumed).
    stims.loc[stims['codes'] == b'<', 'stim_end'] = stims[
        stims['codes'] == b'>']['time_samples'].values
    stims = stims[stims['codes'] == b'<']
    # on some recs there are random date entries in the stim text field at the
    # start... removing them here
    stimdat = core.load_events(block_path, 'Stimulus')
    # [1::2] presumably keeps only the stimulus-name entries of start/stop
    # text pairs -- TODO confirm against the Stimulus event layout.
    stims['stim_name'] = stimdat['text'][stimdat['time_samples'] > stims['time_samples'].min()][
        1::2].values
    stims.reset_index(drop=True, inplace=True)
    del stims['codes']
    stims.rename(columns={'time_samples': 'stim_start'}, inplace=True)
    return stims
def count_events(events, index='stim_name', target='stim_presentation'):
    '''
    Adds a column with the 0-based occurrence index of each event
    Parameters
    -------
    events : pandas dataframe containing events to count,
        as from load_acute_stims()
    index : str
        Column to use for counter keys. Default: 'stim_name'
    target : str
        Column to drop event counts into. Default: 'stim_presentation'
    '''
    seen = {}

    def _occurrence(key):
        # How many times has this key appeared before the current row?
        n = seen.get(key, 0)
        seen[key] = n + 1
        return n

    events[target] = events[index].map(_occurrence)
def timestamp2time(df, sample_rate, time_stamp_label,
                   time_label=None, inplace=True):
    '''
    Converts a column from time samples to time in seconds
    Parameters
    ------
    df : Pandas.DataFrame
        DataFrame containing the column named by time_stamp_label;
        df is modified and not returned
    sample_rate : int
        sample rate, from core.load_fs()
    time_stamp_label : str
        label of the column holding sample counts
    time_label : str
        label of the target column; when None (default, inplace only)
        the converted values stay under time_stamp_label
    inplace : boolean
        whether to overwrite df[time_stamp_label]
    '''
    if not inplace:
        assert time_label, 'must provide time_label if not inplace'
        df[time_label] = df[time_stamp_label].values / sample_rate
        return
    df[time_stamp_label] = df[time_stamp_label].values / sample_rate
    if time_label:
        df.rename(columns={time_stamp_label: time_label}, inplace=True)
def raster_by_unit(spikes, cluster, sample_rate, window_size=1, plot_by='stim_name', col_wrap=None):
    """Plot a facet grid of spike rasters for one cluster, one panel per
    value of *plot_by* (drawn by _raster).

    ``sample_rate`` is accepted for interface compatibility but is not
    used by the plotting itself.  The original also computed an unused
    ``num_repeats`` local, which has been removed (the per-panel repeat
    count is derived inside _raster).
    """
    sns.set_context("notebook", font_scale=1.5,
                    rc={'lines.markeredgewidth': .1, 'patch.linewidth': 1})
    sns.set_style("white")
    num_stims = len(np.unique(spikes[plot_by]))
    if col_wrap is None:
        # Default to a roughly square grid of panels.
        col_wrap = int(np.sqrt(num_stims))
    g = sns.FacetGrid(spikes[spikes['cluster'] == cluster],
                      col=plot_by, col_wrap=col_wrap)
    g.map(_raster, "stim_aligned_time", "stim_presentation", "stim_duration",
          window_size=window_size)
    g = g.set_titles("cluster %d, stim: {col_name}" % (cluster))
def _raster(stim_aligned_time, stim_presentation, stim_duration, window_size=1, **kwargs):
    """Draw one raster panel: spike ticks plus stimulus on/offset lines."""
    plt.scatter(stim_aligned_time, stim_presentation, marker='|', **kwargs)
    repeats = np.max(stim_presentation)
    onset = 0
    offset = stim_duration.iloc[0]
    # Vertical guide lines at stimulus onset and offset.
    for edge in (onset, offset):
        plt.plot((edge, edge), (0, repeats), c=".2", alpha=.5)
    plt.xlim((-window_size, offset + window_size))
    plt.ylim((0, repeats))
from collections import Counter


class _EventCounter(Counter):
    """Counter that hands out 0-based occurrence indices per key."""

    def count(self, key):
        # Return how many times *key* had been seen before this call,
        # then record the new sighting.
        seen = self[key]
        self[key] = seen + 1
        return seen
def align_events(spikes, events, columns2copy=['stim_name', 'stim_presentation',
                                               'stim_start', 'stim_duration'],
                 start_label='stim_start', end_label='stim_end'):
    '''
    Generates a dataframe that labels spikes as belonging to event windows
    Event windows must be non-overlapping
    Spikes that lie between event windows will be assigned to the closest window
    O(len(spikes) + len(events))
    Parameters
    -------
    spikes : Pandas.DataFrame
        from core.load_spikes
    events : Pandas.DataFrame
        such as from load_acute_stims or core.load_trials
        must have non-overlapping windows defined by start_label and end_label
    columns2copy : iterable of strings
        labels of columns of events that you want to populate spike_stim_info_df
        in the order you want them
    start_label : str
        label of the column of events corresponding to the start of the event
    end_label : str
        label of the column of events corresponding to the end of the event
    Returns
    ------
    spike_event_info_df : Pandas.DataFrame
        Spikes assigned to events
        This DataFrame is indexed by spikes.index
        Contains columns indicated by columns2copy
    '''
    # NOTE(review): the mutable default for columns2copy is never mutated
    # here, so it is harmless, but a tuple default would be safer.
    data = []
    grouped_spikes = spikes.groupby('recording')
    # One stateful _EventAligner per recording walks that recording's event
    # windows in order while the spike timestamps are mapped through it;
    # this relies on spikes being time-ordered within each recording.
    for recording, event_recording_group in events.groupby('recording'):
        data.extend(grouped_spikes.get_group(recording)["time_samples"].map(
            _EventAligner(event_recording_group, output_labels=columns2copy,
                          start_label=start_label, end_label=end_label,
                          event_index=event_recording_group.index[0]).event_checker))
    return pd.DataFrame(data=data, columns=columns2copy, index=spikes.index)
class _EventAligner(object):
    """Stateful walker over one recording's event windows.

    ``event_checker`` maps each spike timestamp to the (numpy array of)
    event columns selected by *output_labels*.  It assumes the events
    DataFrame has a contiguous integer index starting at *event_index*
    and that timestamps are fed in increasing order -- TODO confirm both
    at the call site.
    """
    # TODO: duplicate spikes that are <2 sec from 2 stimuli
    def __init__(self, events, output_labels, start_label='stim_start',
                 end_label='stim_end', event_index=0):
        self.event_index = event_index
        self.start_event_index = event_index
        self.events = events
        # NOTE(review): Index.get_values() was removed in modern pandas;
        # list(events.columns) is the current spelling.
        event_columns = list(events.keys().get_values())
        # Positions (not labels) of the columns to emit, and of the
        # window-boundary columns, for fast ndarray fancy indexing.
        self.output_indices = \
            [event_columns.index(lbl) for lbl in output_labels]
        self.start_index = event_columns.index(start_label)
        self.end_index = event_columns.index(end_label)
        self.prev_event = None
        self.cur_event = self.events.loc[self.event_index].values
        self.next_event = self.events.loc[self.event_index + 1].values
    def event_checker(self, time_stamp):
        # Before the current window: attribute the spike to whichever of
        # the previous/current windows is closer (current, if first).
        if time_stamp < self.cur_event[self.start_index]:
            if self.event_index == self.start_event_index or \
               self.cur_event[self.start_index] - time_stamp < \
               time_stamp - self.prev_event[self.end_index]:
                return self.cur_event[self.output_indices]
            else:
                return self.prev_event[self.output_indices]
        # Inside the current window.
        elif time_stamp < self.cur_event[self.end_index]:
            return self.cur_event[self.output_indices]
        else:
            # Past the current window: advance and re-check, or stick with
            # the last window when there are no more events.
            if self.event_index + 1 < len(self.events):
                self.event_index += 1
                self.prev_event = self.cur_event
                self.cur_event = self.next_event
                if self.event_index + 1 < len(self.events):
                    self.next_event = self.events.loc[self.event_index + 1].values
                else:
                    self.next_event = None
                return self.event_checker(time_stamp)
            else:
                return self.cur_event[self.output_indices]
| StarcoderdataPython |
1639364 |
from requests.exceptions import ConnectionError
from coreapi import exceptions, utils
from coreapi.transports.base import BaseTransport
from coreapi.document import Document, Error
from coreapi.transports.http import (
BlockAll,
_get_params,
_get_url,
_get_headers,
_decode_result,
_handle_inplace_replacements,
_coerce_to_error
)
from django.core.management.base import CommandError
from utility.terminal import TerminalMixin
from utility.encryption import Cipher
import logging
import requests
import itypes
import urllib3
import json
import yaml
# Module-level logger used below to trace request/response details.
logger = logging.getLogger(__name__)
class CommandHTTPSTransport(TerminalMixin, BaseTransport):
    """coreapi transport that talks to a Zimagi command server over HTTPS.

    GET links fetch documents/pages; every other link action is POSTed and
    the response body is consumed as a stream of encoded messages.  All
    query/body parameters are encrypted with the project Cipher first.
    """
    # coreapi routes https:// URLs to this transport via this attribute.
    schemes = ['https']
    def __init__(self, headers = None, auth = None, params_callback = None, message_callback = None):
        """Store auth, default headers and two optional hooks.

        params_callback: called with the outgoing POST data before sending.
        message_callback: called with each decoded message of a stream.
        """
        self._auth = auth
        if headers:
            # Lower-case header names so later merges are case-insensitive.
            headers = {key.lower(): value for key, value in headers.items()}
        self._headers = itypes.Dict(headers or {})
        self._params_callback = params_callback
        self._message_callback = message_callback
        # TLS verification is disabled below; silence urllib3's warnings.
        urllib3.disable_warnings()
    def init_session(self, require_auth = True):
        """Build a requests session, optionally attaching the client auth.

        Cookies are blocked unless the auth object opts in through its
        ``allow_cookies`` attribute.
        """
        session = requests.Session()
        if require_auth and self._auth is not None:
            session.auth = self._auth
        if not getattr(session.auth, 'allow_cookies', False):
            session.cookies.set_policy(BlockAll())
        return session
    def _encrypt_params(self, params):
        """Return a copy of *params* with every key and value encrypted."""
        cipher = Cipher.get('params')
        enc_params = {}
        for key, value in params.items():
            key = cipher.encrypt(key)
            value = cipher.encrypt(value)
            enc_params[key] = value
        return enc_params
    def _build_get_request(self, session, url, headers, params):
        """Prepare a GET request with encrypted query parameters."""
        opts = { "headers": headers or {} }
        if params.query:
            opts['params'] = self._encrypt_params(params.query)
        request = requests.Request('GET', url, **opts)
        return session.prepare_request(request)
    def _build_post_request(self, session, url, headers, params):
        """Prepare a POST request with an encrypted form body."""
        opts = { "headers": headers or {} }
        if params.data:
            opts['data'] = self._encrypt_params(params.data)
        request = requests.Request('POST', url, **opts)
        return session.prepare_request(request)
    def transition(self, link, decoders, params = None, link_ancestors = None, force_codec = None):
        """Follow a coreapi link.

        GET actions go through request_page() (document semantics); any
        other action streams through request_stream().  Connection failures
        are reported to the terminal and re-raised as CommandError.
        """
        encoding = link.encoding if link.encoding else 'application/x-www-form-urlencoded'
        params = _get_params(link.action.upper(), encoding, link.fields, params)
        url = _get_url(link.url, params.path)
        headers = _get_headers(url, decoders)
        headers.update(self._headers)
        connection_error_message = self.error_color("\n".join([
            '',
            'The Zimagi client failed to connect with the server.',
            '',
            'This could indicate the server is down or restarting.',
            'If restarting, retry in a few minutes...'
        ]))
        if link.action == 'get':
            try:
                result = self.request_page(url, headers, params, decoders)
                if isinstance(result, Document) and link_ancestors:
                    result = _handle_inplace_replacements(result, link, link_ancestors)
                if isinstance(result, Error):
                    raise exceptions.ErrorMessage(result)
                return result
            except ConnectionError as e:
                self.print(connection_error_message)
                raise CommandError()
        else:
            # Give the caller a chance to inspect/log the outgoing data.
            if self._params_callback and callable(self._params_callback):
                self._params_callback(params.data)
            try:
                return self.request_stream(url, headers, params, decoders)
            except ConnectionError as e:
                self.print(connection_error_message)
                raise CommandError()
    def request_page(self, url, headers, params, decoders):
        """Issue an unauthenticated GET and decode a single result.

        5xx responses are treated as the server being unavailable and
        surface as ConnectionError.
        """
        session = self.init_session(False) # GET
        request = self._build_get_request(session, url, headers, params)
        # Positional args are (url, proxies, stream, verify, cert):
        # certificate verification is disabled here -- TODO confirm intended.
        settings = session.merge_environment_settings(
            request.url, None, None, False, None
        )
        settings['timeout'] = 30
        response = session.send(request, **settings)
        if response.status_code >= 500:
            logger.debug("Request error: {}".format(response.text))
            raise ConnectionError()
        return _decode_result(response, decoders)
    def request_stream(self, url, headers, params, decoders):
        """Issue an authenticated POST and decode the response line stream.

        Returns the list of decoded messages; each is also forwarded to
        the optional message callback as it arrives.  4xx+ responses are
        printed and re-raised as CommandError.
        """
        session = self.init_session(True) # POST
        request = self._build_post_request(session, url, headers, params)
        settings = session.merge_environment_settings(
            request.url, None, True, False, None
        )
        logger.debug("Request headers: {}".format(request.headers))
        response = session.send(request, **settings)
        result = []
        if response.status_code >= 400:
            message = "Error {}: {}".format(response.status_code, response.reason)
            self.print(self.error_color(message))
            try:
                # Prefer the server's structured 'detail' message when the
                # body is JSON; otherwise dump the raw text.
                self.print(self.error_color(json.loads(response.text)['detail']))
            except Exception:
                self.print(self.error_color(response.text))
            raise CommandError()
        try:
            for line in response.iter_lines():
                data = self._decode_message(response, line, decoders)
                if self._message_callback and callable(self._message_callback):
                    self._message_callback(data)
                result.append(data)
        except Exception as e:
            logger.debug("Error response headers: {}".format(response.headers))
            self.print(self.error_color("Remote command failed for {}:\n\n{}".format(
                url,
                yaml.dump(params.data)
            )))
            raise e
        logger.debug("Success response headers: {}".format(response.headers))
        logger.debug("Status code: {}".format(response.status_code))
        return result
    def _decode_message(self, response, data, decoders):
        """Decode one line of a streamed response; empty lines yield None."""
        result = None
        if data:
            content_type = response.headers.get('content-type')
            codec = utils.negotiate_decoder(decoders, content_type)
            options = {
                'base_url': response.url
            }
            if 'content-type' in response.headers:
                options['content_type'] = response.headers['content-type']
            if 'content-disposition' in response.headers:
                options['content_disposition'] = response.headers['content-disposition']
            result = codec.load(data, **options)
        return result
| StarcoderdataPython |
5177126 | from unittest import TestCase, main
from project.folder_reader import KillerFolder, KillerFile, FolderStore, RootDoesNotExist
class KillerFolderTestCase(TestCase):
    """Tests for KillerFolder: parent validation, equality and statement()."""
    def setUp(self):
        # Tree: king -> {queen -> qu, prince}; each folder gets files of
        # size 45 so the statement() output is predictable.
        # NOTE(review): folder_2's path 'C:\\user\\prince' is not under its
        # parent 'C:\\user\\main' -- confirm whether that is intentional.
        self.folder = KillerFolder('king', 'C:\\user\\main')
        self.folder_1 = KillerFolder('queen', 'C:\\user\\main\\queen', parent_folder=self.folder)
        self.folder_2 = KillerFolder('prince', 'C:\\user\\prince', parent_folder=self.folder)
        self.folder_3 = KillerFolder('qu', 'C:\\user\\main\\queen\\qu', parent_folder=self.folder_1)
        KillerFile('one.txt', 45, self.folder)
        KillerFile('two.txt', 45, self.folder)
        KillerFile('three.txt', 45, self.folder)
        KillerFile('four.txt', 45, self.folder_1)
        KillerFile('five.txt', 45, self.folder_1)
        KillerFile('six.txt', 45, self.folder_1)
        KillerFile('seven.txt', 45, self.folder_2)
        KillerFile('eight.txt', 45, self.folder_2)
        KillerFile('nine.txt', 45, self.folder_2)
        KillerFile('ten.txt', 45, self.folder_3)
    def test_parent_folder(self):
        """the parent should be of type Folder"""
        self.assertRaises(ValueError, self.folder.parent_folder, "sd")
    def test__eq__(self):
        """Test comparison of values of KillerFolder"""
        # Equality is path-based: a folder equals another folder with the
        # same path, and equals the raw path string itself.
        self.assertRaises(TypeError, self.folder.__eq__, 8080)
        fold = KillerFolder("jim", 'C:\\user\\main')
        self.assertEqual(self.folder == fold, True)
        self.assertEqual(self.folder == "C:\\user\\main", True)
        fold.path += "\\jim"
        self.assertEqual(self.folder == fold, False)
    def test_statement(self):
        """Statement of a folder"""
        # Expected outputs are built bottom-up: a folder's statement embeds
        # the statements of its sub-folders after its own file listing.
        statement_folder_3 = "\nREADER_FOLDER_LABEL_QU\nten.txt - 45\n---- END OF QU ----"
        statement_folder_2 = "\nREADER_FOLDER_LABEL_PRINCE\nseven.txt - 45\neight.txt - 45\nnine.txt - 45\n---- END " \
                             "OF PRINCE ----"
        statement_folder_1 = "\nREADER_FOLDER_LABEL_QUEEN\nfour.txt - 45\nfive.txt - 45\nsix.txt - 45" + \
                             statement_folder_3 + "\n---- END OF QUEEN ----"
        statement_folder = "\nREADER_FOLDER_LABEL_KING\none.txt - 45\ntwo.txt - 45\nthree.txt - 45" + \
                           statement_folder_1 + statement_folder_2 + "\n---- END OF KING ----"
        self.assertEqual(self.folder.statement(), statement_folder)
        self.assertEqual(self.folder_1.statement(), statement_folder_1)
        self.assertEqual(self.folder_2.statement(), statement_folder_2)
        self.assertEqual(self.folder_3.statement(), statement_folder_3)
class KillerFileTestCase(TestCase):
    """Tests for KillerFile path handling."""

    def test_path(self):
        """The path of the file should be similar to the parent folder's"""
        parent = KillerFolder('Folder', 'C:\\users\\main')
        child = KillerFile('file', 45, parent)
        self.assertEqual(child.path, parent.path)
class FolderStoreTestCase(TestCase):
    """Tests for FolderStore: type checking, fetch-by-path and root_statement."""
    def setUp(self):
        # Same king/queen/prince/qu tree as KillerFolderTestCase, but with
        # prince correctly nested under C:\\user\\main, held in a FolderStore.
        self.folder = KillerFolder('king', 'C:\\user\\main')
        self.folder_1 = KillerFolder('queen', 'C:\\user\\main\\queen', parent_folder=self.folder)
        self.folder_2 = KillerFolder('prince', 'C:\\user\\main\\prince', parent_folder=self.folder)
        self.folder_3 = KillerFolder('qu', 'C:\\user\\main\\queen\\qu', parent_folder=self.folder_1)
        self.folder_store = FolderStore([
            self.folder, self.folder_1, self.folder_2, self.folder_3,
        ])
    def test_init_type_checking(self):
        """
        ensure that when a list is passed during initialization it fails
        if even one object is not a KillerFolder object
        """
        trial = [KillerFolder('name', 'path'), 23]
        self.assertRaises(TypeError, FolderStore, trial)
    def test_store_append(self):
        """the append method should only allow FolderStore objects"""
        # Neither scalars nor lists (even lists of KillerFolder) may be appended.
        self.assertRaises(TypeError, self.folder_store.append, 'string')
        self.assertRaises(TypeError, self.folder_store.append, 3434)
        self.assertRaises(TypeError, self.folder_store.append, [])
        self.assertRaises(TypeError, self.folder_store.append, [self.folder_3])
    def test_folder_fetch(self):
        """the FolderStore.fetch method should return the first instance"""
        self.assertEqual(
            self.folder_store.fetch('C:\\user\\main\\queen\\qu'),
            self.folder_3
        )
        new_folder = KillerFolder('jim', 'C:\\user\\main\\queen\\qu')
        # still the first folder instance
        self.folder_store.append(new_folder)
        self.assertEqual(
            self.folder_store.fetch('C:\\user\\main\\queen\\qu'),
            self.folder_3
        )
        # if new_folder is the first instance
        self.folder_store.insert(1, self.folder_store.pop(-1))
        self.assertEqual(
            self.folder_store.fetch('C:\\user\\main\\queen\\qu'),
            new_folder
        )
    def test_root_statement(self):
        """get statement from the root folder of the store"""
        """Statement of a folder"""
        # Expected output is identical in shape to KillerFolderTestCase's
        # statement test: built bottom-up from the leaf folders.
        statement_folder_3 = "\nREADER_FOLDER_LABEL_QU\nten.txt - 45\n---- END OF QU ----"
        statement_folder_2 = "\nREADER_FOLDER_LABEL_PRINCE\nseven.txt - 45\neight.txt - 45\nnine.txt - 45\n---- END " \
                             "OF PRINCE ----"
        statement_folder_1 = "\nREADER_FOLDER_LABEL_QUEEN\nfour.txt - 45\nfive.txt - 45\nsix.txt - 45" + \
                             statement_folder_3 + "\n---- END OF QUEEN ----"
        statement_folder = "\nREADER_FOLDER_LABEL_KING\none.txt - 45\ntwo.txt - 45\nthree.txt - 45" + \
                           statement_folder_1 + statement_folder_2 + "\n---- END OF KING ----"
        KillerFile('one.txt', 45, self.folder)
        KillerFile('two.txt', 45, self.folder)
        KillerFile('three.txt', 45, self.folder)
        KillerFile('four.txt', 45, self.folder_1)
        KillerFile('five.txt', 45, self.folder_1)
        KillerFile('six.txt', 45, self.folder_1)
        KillerFile('seven.txt', 45, self.folder_2)
        KillerFile('eight.txt', 45, self.folder_2)
        KillerFile('nine.txt', 45, self.folder_2)
        KillerFile('ten.txt', 45, self.folder_3)
        # Without a registered root the statement must fail...
        self.assertRaises(RootDoesNotExist, self.folder_store.root_statement)
        # ...and a root path that matches no stored folder must also fail.
        self.folder_store.__dict__['root'] = "string"
        self.assertRaises(AttributeError, self.folder_store.root_statement)
        # register a root folder
        self.folder_store.__dict__['root'] = self.folder.path
        self.assertEqual(self.folder_store.root_statement(), statement_folder)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    main()
| StarcoderdataPython |
8164356 | <gh_stars>0
"""Helper classes and functions to wrap the zeroconf-library"""
try:
import zeroconf as zc
zeroConfOK=True
except ImportError:
zeroConfOK=False
from PyFoam.Infrastructure.Logging import foamLogger
from PyFoam.Error import warning
from PyFoam.ThirdParty.six import print_,b
from PyFoam.Infrastructure.FoamMetaServer import ServerInfo
from os import path
from time import sleep
import socket
# mDNS/zeroconf service type under which FoamServer instances advertise.
foamServerDescriptorString="_foamserver._tcp.local."
class ZeroConfFoamServer:
    """Advertise a FoamServer via zeroconf/mDNS.

    All operations silently degrade to no-ops when the zeroconf module
    could not be imported (zeroConfOK is False).
    """
    def __init__(self):
        self.info=None
        if zeroConfOK:
            self.zero=zc.Zeroconf()
    def register(self,answerer,port,ssl):
        """Publish the service described by *answerer* on *port*.

        The service name is built from the solution directory's basename
        plus "@host:port" and is truncated so the whole name stays within
        63 characters (the DNS label limit -- TODO confirm that is the
        intended constraint).
        """
        if not zeroConfOK:
            return
        # TXT-record properties describing the advertised server.
        desc = { 'host' : answerer.hostname(),
                 'ip' : answerer.ip(),
                 'id' : answerer.id(),
                 'pid' : str(answerer.pid()),
                 'port' : str(port),
                 'ssl' : str(ssl),
                 'path' : answerer.pathToSolution(),}
        # Use only the short host name (up to the first dot).
        if desc["host"].find(".")>0:
            shorthost=desc["host"][0:desc["host"].index(".")]
        else:
            shorthost=desc["host"]
        desc["name"]= "@"+shorthost+":"+desc["port"]+"."+ \
                      foamServerDescriptorString
        # Prefix with the (dot-sanitized) case directory name, truncated to
        # whatever room remains within the 63-character budget.
        basename=path.basename(desc["path"]).replace(".","_")
        extraLen=63-len(desc["name"])
        if len(basename)>extraLen:
            desc["name"]=basename[0:extraLen].replace('.','')+desc["name"]
        else:
            desc["name"]=basename+desc["name"]
        self.info=zc.ServiceInfo(type_=foamServerDescriptorString,
                                 name=desc["name"],
                                 # name="Nix da."+foamServerDescriptorString,
                                 address=socket.inet_aton(answerer.ip()),
                                 port=port,
                                 weight=0,
                                 priority=0,
                                 properties=desc,
                                 server=desc["host"]+".")
        self.zero.register_service(self.info)
    def deregister(self):
        """Withdraw the previously registered service, if any."""
        if not zeroConfOK:
            return
        if self.info:
            self.zero.unregister_service(self.info)
            self.info=None
def getServerList(timeout=5,verbose=False,progress=False):
    """Browse zeroconf for FoamServer services for *timeout* seconds.

    Returns a dict mapping service name to a queried ServerInfo.  With
    verbose=True every state change is printed (and progress output is
    suppressed); with progress=True a one-character +/-/. ticker is shown.
    Returns {} immediately when the zeroconf module is unavailable.
    """
    if not zeroConfOK:
        warning("zeroconf-module not installed.")
        return {}
    if verbose:
        progress=False
    zero=zc.Zeroconf()
    servers={}
    # Callback invoked by the ServiceBrowser thread whenever a matching
    # service appears or disappears; it mutates the enclosing ``servers``.
    def on_service_state_change(zeroconf, service_type, name, state_change):
        if verbose:
            print_("Service %s of type %s state changed: %s" % (name, service_type, state_change))
        if state_change is zc.ServiceStateChange.Added:
            info = zero.get_service_info(service_type, name)
            if info:
                if verbose:
                    print_(" Address: %s:%d" % (socket.inet_ntoa(info.address), info.port))
                    print_(" Weight: %d, priority: %d" % (info.weight, info.priority))
                    print_(" Server: %s" % (info.server,))
                if info.properties:
                    if verbose:
                        print_(" Properties are:")
                        for key, value in info.properties.items():
                            print_(" %s: %s" % (key, value))
                    try:
                        # NOTE(review): eval() on a network-supplied TXT
                        # property is unsafe; the 'ssl' flag should be
                        # parsed explicitly instead.
                        new=ServerInfo(info.properties[b("ip")],
                                       int(info.properties[b("pid")]),
                                       int(info.properties[b("port")]),
                                       eval(info.properties[b("ssl")]) if b("ssl") in info.properties else False)
                        new.queryData()
                        servers[name]=new
                        if progress:
                            print_("+",flush=True,end="")
                    except socket.error:
                        # NOTE(review): if ServerInfo() itself raised,
                        # ``new`` is unbound here and this line would
                        # raise NameError -- verify.
                        warning("Connection refused by",new["ip"])
                else:
                    if verbose:
                        print_(" No properties")
            else:
                if verbose:
                    print_(" No info")
        elif state_change is zc.ServiceStateChange.Removed:
            if name in servers:
                if verbose:
                    print_("Remove",name)
                del servers[name]
                if progress:
                    print_("-",flush=True,end="")
    browser = zc.ServiceBrowser(zero,
                                foamServerDescriptorString,
                                handlers=[on_service_state_change])
    if progress:
        print_("Searching ",flush=True,end="")
    # The browser works in the background; just wait out the timeout.
    while timeout>0:
        if progress:
            print_(" . ",flush=True,end="")
        timeout-=1
        sleep(1)
    zero.close()
    if progress:
        print_(" Done\n")
    return servers
| StarcoderdataPython |
6507984 | #!/usr/bin/env python
# wfrun.pi.py - Sample script for WFRUN. It runs "pi" (calculates pi)
# many times. This is the master part WFRUN. It is interpreted by a
# Python interpreter invoked by "wfrun", and the interpreter marker at
# the first line (#!) is meaningless. For running this script, see
# the comment in "wfrun.c". Pass this script as an argument to
# "wfrun" ("mpiexec wfrun wfrun.pi.py").
from mpi4py import MPI
import sys
import kmr4py
kmr0 = kmr4py.KMR("world")
NPROCS = kmr0.nprocs
RANK = kmr0.rank
if (NPROCS < 5):
print "USAGE: mpiexec -n N ./wfrun wfrun.pi.py (with N>=5)"
sys.exit(0)
masterrank = NPROCS - 1
# Makes an array of strings of "I:N", where I for lane number and N
# for the number of ranks in the lane.
def make_lane_description():
print "NPROCS=" + str(NPROCS)
lanes = ["0.0:2", "0.1:2"]
return lanes
# Makes a KVS filled with key-value pairs (LANE,COMMAND), where LANE
# is 0 to (|lanes|-1), and COMMAND is "pi".
def make_works():
    """Build a KVS of (lane, command) work items.

    Two identical rounds: each round queues five 2-rank "pi" jobs on each
    sub-lane ("0.0", "0.1") followed by one 4-rank job on the whole
    super-lane ("0").  The formerly duplicated round bodies are folded
    into a loop, and the unused local ``nlanes`` has been removed.
    """
    k00 = kmr0.make_kvs(key="cstring", value="cstring")
    for _round in range(2):
        for i in range(5):
            k00.add("0.0", "pi with-2-ranks")
            k00.add("0.1", "pi with-2-ranks")
        k00.add("0", "pi with-4-ranks")
    k00.add_kv_done()
    return k00
## MAIN
# Flush so earlier output is not interleaved with the workers' output.
sys.stdout.flush()
sys.stderr.flush()
kmr0.set_option("trace_map_spawn", "true")
kmr0.set_option("swf_record_history", "true")
# Split the communicator into the described lanes, hand the sub-
# communicators to the workflow engine and detach the worker ranks.
lanes = make_lane_description()
splitcomms = kmr0.split_swf_lanes(masterrank, lanes, True)
kmr0.init_swf(splitcomms, masterrank)
kmr0.dump_swf_lanes()
kmr0.detach_swf_workers()
# Queue the work items and run them on the lanes.
k20 = make_works()
k20.map_swf(None, separator_space=True, output=False)
# Orderly shutdown of workers and the KMR context.
kmr0.stop_swf_workers()
kmr0.dismiss()
sys.stdout.flush()
sys.stderr.flush()
| StarcoderdataPython |
1621794 | <filename>setup.py<gh_stars>0
import setuptools
# Long description comes straight from the README shipped next to setup.py.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="ietfdata",
    version="0.5.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Access the IETF Data Tracker and RFC Index",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/glasgow-ipl/ietfdata",
    packages=setuptools.find_packages(),
    # Ship the PEP 561 marker so type checkers pick up the inline annotations.
    package_data = {
        'ietfdata': ['py.typed'],
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.9',
    # Runtime dependencies are derived from the Pipfile via setuptools-pipfile.
    setup_requires=["setuptools-pipfile"],
    use_pipfile=True
)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.