| content (string, lengths 0–1.05M) | origin (string, 2 classes) | type (string, 2 classes) |
|---|---|---|
import numpy as np
import pandas as pd
import os
def prm_to_df(prm):
"""Convert prm to a pandas DataFrame"""
values = list(prm.values())
columns = list(prm.keys())
df_prm = pd.DataFrame(columns=columns)
for value, column in zip(values, columns):
df_prm[column] = [value]
    return df_prm
def save_on_disk(prm):
"""Saving prm on a specific .csv file"""
df_prm = prm_to_df(prm)
info = '/training_results/' + prm['info']
df_prm.to_csv(prm['output_dir'] + info + '/prm.csv')
def add_columns_to_df(df, columns):
"""Create new columns in a dataframe"""
for column in columns:
df[column] = np.nan
    return df
def update_training_record(prm):
"""Update a csv file containing all previous info about training"""
# New prm
new_prm = prm_to_df(prm)
# Add new columns
columns = ['RMSE_UV_mean', 'RMSE_UV_std', 'RMSE_UV_min', 'RMSE_UV_max',
'RMSE_UVW_mean', 'RMSE_UVW_std', 'RMSE_UVW_min', 'RMSE_UVW_max',
'RMSE_U_mean', 'RMSE_U_std', 'RMSE_U_min', 'RMSE_U_max',
'RMSE_V_mean', 'RMSE_V_std', 'RMSE_V_min', 'RMSE_V_max',
'RMSE_W_mean', 'RMSE_W_std', 'RMSE_W_min', 'RMSE_W_max',
'corr_coeff_UV_mean', 'corr_coeff_UV_std', 'corr_coeff_UV_min', 'corr_coeff_UV_max',
'corr_coeff_UVW_mean', 'corr_coeff_UVW_std', 'corr_coeff_UVW_min', 'corr_coeff_UVW_max',
'corr_coeff_U_mean', 'corr_coeff_U_std', 'corr_coeff_U_min', 'corr_coeff_U_max',
'corr_coeff_V_mean', 'corr_coeff_V_std', 'corr_coeff_V_min', 'corr_coeff_V_max',
'corr_coeff_W_mean', 'corr_coeff_W_std', 'corr_coeff_W_min', 'corr_coeff_W_max',
'bias_UV_mean', 'bias_UV_std', 'bias_UV_min', 'bias_UV_max',
'bias_UVW_mean', 'bias_UVW_std', 'bias_UVW_min', 'bias_UVW_max',
'bias_U_mean', 'bias_U_std', 'bias_U_min', 'bias_U_max',
'bias_V_mean', 'bias_V_std', 'bias_V_min', 'bias_V_max',
'bias_W_mean', 'bias_W_std', 'bias_W_min', 'bias_W_max']
new_prm = add_columns_to_df(new_prm, columns)
# Path
out_dir = prm['output_dir'] + 'training_results/'
path_file = out_dir + 'training_prm_record.csv'
if os.path.isfile(path_file):
# Load all_prm
all_prm = pd.read_csv(path_file)
# Append new prm to all_prm
        all_prm = pd.concat([all_prm, new_prm], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
else:
all_prm = new_prm
# Save all_prm
all_prm.to_csv(path_file)
print('\nprm saved in training_prm_record.csv\n')
def create_name_simu_and_info(index, prm):
"""Create name_simu and info key"""
prm['name_simu'] = prm['name_simu'] + "_" + str(index)
prm['info'] = 'date_' + prm['date'] + '_name_simu_' + prm['name_simu'] + '_model_' + prm['model']
    return prm
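# Hypothetical usage of the helpers above; real prm dicts carry many more keys
# (hyperparameters, result statistics, ...), these are only the ones required here.
prm = {'output_dir': './', 'date': '2021_01_01', 'name_simu': 'run', 'model': 'UNet'}
prm = create_name_simu_and_info(0, prm)
os.makedirs(prm['output_dir'] + 'training_results/', exist_ok=True)
update_training_record(prm)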
|
nilq/baby-python
|
python
|
import time
import random
#import pygame
import threading
'''
Start with the person in an empty room with zombies coming at them. The maze is part of the "nice to have" section, but not pertinent.
'''
class Zombies:
def __init__(self, xcor, ycor):
self.xcor = xcor
self.ycor= ycor
self.image = ''
#the above empty string will eventually be part of a pygame module
def __str__(self):
string = ""
string += "X Coordinate: " + str(self.xcor) + "\n"
string += "Y Coordinate: " + str(self.ycor) + "\n"
return string
    def movement(self, player):
        # move one unit toward the player on each axis, as described below
        self.xcor += 1 if self.xcor < player.xcor else (-1 if self.xcor > player.xcor else 0)
        self.ycor += 1 if self.ycor < player.ycor else (-1 if self.ycor > player.ycor else 0)
'''
Pass an x,y unit as to where the player is. Then the zombie has to move a unit at a time to be constantly moving towards the player. Figure out the x coordinate (if < or >, add or subtract 1 accordingly)
'''
def death(self , bullet):
if self.xcor == bullet.xcor and self.ycor == bullet.ycor:
return("Zombie's Dead")
#when we start using pygame, there's a collision ability which will be used instead of this
else:
return "Alive!"
#At some point we're going to need to keep track of the zombies as images, since they're stored as x and y. This is something we'll learn after break but we should add a tentative part of the init that the self.image (to keep track of data items)
class Spawn:
#this class should be a function in the Controller class
def __init__(self,xcor,ycor):
self.xcor=xcor
self.ycor=ycor
    def zspawn(self, zombie, player):
        # Timer takes a callable, not a call result; keep the timer so it can be cancelled
        self.timer = threading.Timer(3, zombie.movement, args=(player,))
        self.timer.start()
class Player:
def __init__(self,xcor,ycor):
self.xcor=xcor
self.ycor=ycor
        self.image = ''  # this is like the zombie note above
def __str__(self):
string = ""
string += "X Coordinate: " + str(self.xcor) + "\n"
string += "Y Coordinate: " + str(self.ycor) + "\n"
return string
def movement(self, xcor, ycor):
self.xcor = xcor
self.ycor = ycor
        #send in a direction, not an x and y coordinate. Moving one or two pixels at a time.
    def death(self, zombie):
        if self.xcor == zombie.xcor and self.ycor == zombie.ycor:
            return "game_over"
#same collision note as above
class Score:
def __init__(self, xcor, ycor, time):
self.xcor=xcor
self.ycor=ycor
self.time=time
#writing to a file --> for data requirement
#Won't need an x and y coordinate, but would be handled by your view
#This should keep track of time and high scores of how long people could stay alive
class Bullet:
def __init__(self, xcor, ycor):
self.xcor=xcor
self.ycor=ycor
#self.bulletcount=bulletcount
def movement(self, xcor, ycor):
self.xcor = xcor
self.ycor = ycor
#same deal as above
        #pass in parameter direction so it moves in that direction forever until it hits something or leaves the screen. Don't need the x and y coor because you just need the initial direction
#loop
    def hit(self, bullet, zombie):
        if bullet.xcor == zombie.xcor and bullet.ycor == zombie.ycor:
            return "delete zombie"
        else:
            return "delete bullet"
#collision stuff from pygame but good for now
'''Each of these classes should be in a different file'''
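# A minimal, hypothetical game tick wiring the classes together; pygame will
# eventually provide real input, rendering and collision detection.
player = Player(5, 5)
zombie = Zombies(0, 0)
for _ in range(6):
    zombie.movement(player)
    print(zombie)
    if player.death(zombie) == "game_over":
        print("game_over")
        break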
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from .schema import dump, load
from .loaded import LoadedValue
from .schema import CallableFunc
__all__ = ['dump', 'load', 'LoadedValue', 'CallableFunc']
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Author: CloudNClock @ github
# Date 2020/01/26
# Usage: DDNS updating for namesilo
import requests
import time
import sys
import datetime
import configparser
import xml.etree.ElementTree as ET
# Update DNS in namesilo
def update(currentIP,recordID,targetDomain,targetIP):
# Initial DNS update request
updateDNSRecords_request="https://www.namesilo.com/api/dnsUpdateRecord?version=1&type=xml&key=" \
+ apiKey + "&domain=" + domain \
+"&rrid=" + recordID
if host != "":
updateDNSRecords_request += "&rrhost=" + host
updateDNSRecords_request += "&rrhost=" + host \
+"&rrvalue=" + currentIP \
+ "&rrttl=7207"
# Evaluate the response
response = requests.get(updateDNSRecords_request)
Element = ET.fromstring(response.content)
for reply in Element.iter('reply'):
detail = reply.find('detail').text
if detail != "success":
print("Error: " + detail)
print("Exiting ... ")
file = open("DDNS_Update.log", "a+")
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
file.writelines(timestamp + "\n")
file.writelines("Error: " + detail + "\n\n")
file.close()
sys.exit()
else:
file = open("DDNS_Update.log", "a+")
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
file.writelines(timestamp + "\n")
file.writelines(targetDomain + "\n")
file.writelines(targetIP + " Updated to -> " + currentIP + "\n\n")
file.close()
#Check if the IP is changed, then perform update
def check():
    # Initial request to namesilo
dnsListRecords_request = "https://www.namesilo.com/api/dnsListRecords?version=1&type=xml&key="\
+ apiKey + "&domain=" + domain
# Get response from namesilo
response = requests.get(dnsListRecords_request)
Element = ET.fromstring(response.content)
    # Determine if the request succeeded
for reply in Element.iter('reply'):
detail = reply.find('detail').text
if detail != "success":
print("Error: " + detail)
print("Exiting ... ")
file = open("DDNS_Update.log", "a+")
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
file.writelines(timestamp + "\n")
file.writelines("Error: " + detail + "\n\n")
file.close()
sys.exit()
    # Find the current public IP
for request in Element.iter('request'):
currentIP = request.find('ip').text
break
# Add host to target domain if found
if host != "":
targetDomain = host + "." + domain
else:
targetDomain = domain
# Find record ID for updating usage
found = 0
for resource_record in Element.iter('resource_record'):
temp_host = resource_record.find('host').text
if temp_host == targetDomain:
found = 1
targetIP = resource_record.find('value').text
recordID = resource_record.find('record_id').text
if found == 0:
print("Error:" + targetDomain + "not found.")
print("Existing ... ")
sys.exit()
#Update it if the public IP is changed
if currentIP != targetIP:
update(currentIP, recordID,targetDomain,targetIP )
else:
file = open("DDNS_Update.log", "a+")
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
file.writelines(timestamp + "\n")
file.writelines("Public IP have not changed.\n\n")
file.close()
# Read Config File
conf = configparser.ConfigParser()
conf.read('config.ini', encoding="utf-8")
domain = conf.get('DEFAULT', 'domain')
host = conf.get('DEFAULT', 'host')
apiKey = conf.get('DEFAULT', 'api_key')
check_interval = conf.getint('DEFAULT', 'check_interval')
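# A hypothetical config.ini matching the keys read above (values illustrative):
#   [DEFAULT]
#   domain = example.com
#   host = www
#   api_key = YOUR_NAMESILO_API_KEY
#   check_interval = 300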
# Begin checking
while True:
check()
time.sleep(check_interval)
|
nilq/baby-python
|
python
|
import torch
from abc import ABCMeta, abstractmethod
from torchtrainer.utils.mixins import CudaMixin
from .callbacks import Callback, CallbackContainer, History
from .meters import ZeroMeasurementsError
from enum import Enum
from itertools import chain
from torch.autograd import Variable
from .utils.defaults import parse_meters
class ValidationGranularity(Enum):
AT_LOG='log'
AT_EPOCH='epoch'
class _OnLogValidScheduler(Callback):
def on_log(self):
self.trainer._validate()
class _OnEpochValidScheduler(Callback):
def on_log(self):
if self.trainer.step == self.trainer.total_steps-1:
self.trainer._validate()
class BatchValidator(CudaMixin, metaclass=ABCMeta):
""" Abstract class for all validation classes that works with batched inputs.
All those validators should subclass this class
"""
METER_ALREADY_EXISTS_MESSAGE=('Meter {name} already exists as train meter')
def __init__(self, model, meters):
super(BatchValidator, self).__init__()
self.model = model
meters = parse_meters(meters)
self._meters = meters
self._metrics = {}
def _prepare_tensor(self, x):
if torch.is_tensor(x):
return Variable(self._tensor_to_cuda(x))
else:
return x
@abstractmethod
def validate_batch(self, *arg, **kwargs):
""" Abstract method for validate model per batch
Args:
*args (variable length arguments of :class:`torch.autograd.Variable`
of Tensors or cuda Tensors):
                Unnamed batch parameters
**kwargs (variable length keyword arguments of
:class:`torch.autograd.Variable` of
Tensors or cuda Tensors):
Named batch parameters
"""
pass
def meters_names(self):
return self._meters.keys()
@property
def meters(self):
return self._meters
def _compile_metrics(self):
for metric_name, meter in self._meters.items():
try:
value = meter.value()
self._metrics[metric_name] = value
except ZeroMeasurementsError:
continue
def _reset_meters(self):
self._metrics = {}
for meter in self._meters.values():
meter.reset()
def validate(self, valid_dataloader):
self._reset_meters()
if not valid_dataloader:
return self._metrics
self.model.train(mode=False)
with torch.no_grad():
for batch in valid_dataloader:
if isinstance(batch, torch.Tensor):
batch = (batch, )
batch = list(map(self._prepare_tensor, batch))
self.validate_batch(*batch)
self.model.train(mode=True)
self._compile_metrics()
return self._metrics
def add_named_meter(self, name, meter):
if name in self._meters:
raise Exception(self.METER_ALREADY_EXISTS_MESSAGE.format(name=name))
self._meters[name] = meter
class BatchTrainer(CudaMixin, metaclass=ABCMeta):
""" Abstract trainer for all trainer classes that works with batched inputs.
All those trainers should subclass this class
"""
    INVALID_EPOCH_MESSAGE=('Expected epochs to be a non-negative integer, '
                           'got: {epochs}')
    INVALID_LOGGING_FRECUENCY_MESSAGE=('Expected logging frequency to be a '
                                       'non-negative integer, '
                                       'got: {logging_frecuency}')
    INVALID_VALIDATION_GRANULARITY_MESSAGE=('Expected validation granularity '
                                            'to be one of '
                                            'ValidationGranularity.AT_LOG or '
                                            'ValidationGranularity.AT_EPOCH, '
                                            'got: {mode}')
METER_ALREADY_EXISTS_MESSAGE=('Meter {name} already exists as train meter')
SCHED_BY_GRANULARITY = {ValidationGranularity.AT_EPOCH : _OnEpochValidScheduler,
ValidationGranularity.AT_LOG: _OnLogValidScheduler}
@staticmethod
def prepend_name_dict(prefix, d):
return {prefix + name: value for name, value in d.items()}
@abstractmethod
def create_validator(self):
# return BatchValidator(self.model, self.val_meters)
pass
def __init__(self,
model,
callbacks=[],
                 train_meters={}, val_meters=None,
logging_frecuency=1,
prefixes=('', ''),
validation_granularity=ValidationGranularity.AT_EPOCH):
""" Constructor
Args:
model (:class:`torch.nn.Module`):
Module to train
callbacks (:class:`torchtrainer.callbacks.Callback`):
Pluggable callbacks for epoch/batch events.
            train_meters (list or dict of :class:`torchtrainer.meters.Meter`):
                Training meters
            val_meters (list or dict of :class:`torchtrainer.meters.Meter`):
                Validation meters (if None, clones of the train meters are used)
            logging_frecuency (int):
                Frequency of logging to monitor train/validation
prefixes (tuple, list):
Prefixes of train and val metrics
validation_granularity (ValidationGranularity):
Change validation criterion (after every log vs after every epoch)
"""
if logging_frecuency < 0:
raise Exception(self.INVALID_LOGGING_FRECUENCY_MESSAGE.format(logging_frecuency=logging_frecuency))
if not isinstance(validation_granularity,ValidationGranularity) or validation_granularity not in ValidationGranularity:
raise Exception(self.INVALID_VALIDATION_GRANULARITY_MESSAGE.format(mode=validation_granularity))
super(BatchTrainer, self).__init__()
valid_sched = self.SCHED_BY_GRANULARITY[validation_granularity]()
self.logging_frecuency = logging_frecuency
self.model = model
self._epochs_trained = 0
self._steps_trained = 0
self._train_metrics = {}
self._val_metrics = {}
self._prefixes = prefixes
train_meters = parse_meters(train_meters)
if val_meters is None:
val_meters = {name: meter.clone() for name, meter in train_meters.items()}
else:
val_meters = parse_meters(val_meters)
self.train_meters = self.prepend_name_dict(prefixes[0], train_meters)
self.val_meters = self.prepend_name_dict(prefixes[1], val_meters)
self._raised_stop_training = False
self._history_callback = History()
self.validator = self.create_validator()
self._callbacks = CallbackContainer()
self._callbacks.accept(self)
self._callbacks.add(valid_sched)
self._callbacks.add(self._history_callback)
for callback in callbacks:
self._callbacks.add(callback)
@property
def history(self):
return self._history_callback.registry
def cuda(self, device=None):
""" Turn model to cuda
"""
super(BatchTrainer, self).cuda(device=device)
self.model.cuda(device=device)
self.validator.cuda(device=device)
def cpu(self):
""" Turn model to cpu
"""
super(BatchTrainer, self).cpu()
self.model.cpu()
self.validator.cpu()
def meters_names(self):
""" Returns the meters names
"""
return sorted(chain(self.train_meters.keys(),
self.validator.meters_names()))
@property
def meters(self):
return {**self.train_meters, **self.validator.meters}
@property
def metrics(self):
""" Last statistic recopiled from meters
Returns
dict: Dictionary of metric name and value, one for each
`meters` that made at least one measure
"""
return {**self._train_metrics, **self._val_metrics}
def _compile_train_metrics(self):
self._train_metrics = {}
for metric_name, meter in self.train_meters.items():
try:
value = meter.value()
self._train_metrics[metric_name] = value
except ZeroMeasurementsError:
continue
@property
def epochs_trained(self):
""" Total number of epochs epochs_trained
Returns:
int: number of epochs
"""
return self._epochs_trained
@property
def steps_trained(self):
return self._steps_trained
    @epochs_trained.setter
    def epochs_trained(self, value):
        if value < 0:
            raise AttributeError('can\'t set epochs_trained '
                                 'to a value less than zero')
        self._epochs_trained = value
@abstractmethod
def update_batch(self, *args, **kwargs):
""" Abstract method for update model parameters given a batch
Args:
*args (variable length arguments of :class:`torch.autograd.Variable`
of Tensors or cuda Tensors):
                Unnamed batch parameters
**kwargs (variable length keyword arguments of
:class:`torch.autograd.Variable` of
Tensors or cuda Tensors):
Named batch parameters
"""
pass
def reset_meters(self):
self._train_metrics = {}
self._val_metrics = {}
for meter in self.train_meters.values():
meter.reset()
def _prepare_tensor(self, x):
if torch.is_tensor(x):
return Variable(self._tensor_to_cuda(x))
else:
return x
def log(self):
self._callbacks.on_log()
def log_started(self):
return self.logging_frecuency > 0 and self.step % self.logging_frecuency == 0
def _train_epoch(self, train_dataloader, valid_dataloader=None):
for self.step, batch in enumerate(train_dataloader):
if self.log_started():
self.reset_meters()
# convert to 1-d tuple if batch was a tensor instead of a tuple
if torch.is_tensor(batch):
batch = (batch, )
batch = map(self._prepare_tensor, batch)
self.update_batch(*batch)
self._steps_trained += 1
if self._is_time_to_log():
self._compile_train_metrics()
self.log()
self._epochs_trained += 1
def train(self, dataloader, valid_dataloader=None, epochs=1):
""" Train the model
Args:
            dataloader (:class:`torch.utils.data.DataLoader`):
                Train data loader
            valid_dataloader (:class:`torch.utils.data.DataLoader`):
                Validation data loader
epochs (int):
Number of epochs to train
"""
if epochs < 0:
raise Exception(self.INVALID_EPOCH_MESSAGE.format(epochs=epochs))
self._raised_stop_training = False
self.total_epochs = epochs
self.total_steps = len(dataloader)
self.valid_dataloader = valid_dataloader
self._callbacks.on_train_begin()
# Turn model to training mode
self.model.train(mode=True)
self.epoch = 0
while self.epoch < self.total_epochs and not self._raised_stop_training:
self._callbacks.on_epoch_begin()
self._train_epoch(dataloader, valid_dataloader)
self._callbacks.on_epoch_end()
self.epoch += 1
self._callbacks.on_train_end()
del self.valid_dataloader
# Turn model to evaluation mode
self.model.train(mode=False)
def _is_time_to_log(self):
log_frec = self.logging_frecuency
return log_frec > 0 and ((self.total_steps % log_frec != 0 and
self.step == self.total_steps - 1)
or self.step % log_frec == log_frec - 1)
def _validate(self):
self._val_metrics = self.validator.validate(self.valid_dataloader)
def stop_training(self):
self._raised_stop_training = True
def add_named_train_meter(self, name, meter):
name = self._prefixes[0] + name
if name in self.train_meters:
raise Exception(self.METER_ALREADY_EXISTS_MESSAGE.format(name=name))
self.train_meters[name] = meter
def add_named_val_meter(self, name, meter):
name = self._prefixes[1] + name
self.validator.add_named_meter(name, meter)
def add_callback(self, callback):
self._callbacks.add(callback)
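# A minimal sketch of concrete subclasses of the abstract classes above,
# assuming meters expose a measure(output, target) method; SupervisedValidator,
# SupervisedTrainer and that meter API are illustrative, not part of this module.
class SupervisedValidator(BatchValidator):
    def validate_batch(self, x, y):
        output = self.model(x)
        for meter in self.meters.values():
            meter.measure(output.data, y.data)  # assumed meter API
class SupervisedTrainer(BatchTrainer):
    def __init__(self, model, criterion, optimizer, **kwargs):
        self.criterion = criterion
        self.optimizer = optimizer
        super(SupervisedTrainer, self).__init__(model, **kwargs)
    def create_validator(self):
        return SupervisedValidator(self.model, self.val_meters)
    def update_batch(self, x, y):
        # one optimization step per batch, then feed the training meters
        self.optimizer.zero_grad()
        output = self.model(x)
        loss = self.criterion(output, y)
        loss.backward()
        self.optimizer.step()
        for meter in self.train_meters.values():
            meter.measure(output.data, y.data)  # assumed meter API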
|
nilq/baby-python
|
python
|
version = '0.66'
short_version = version
full_version = version
|
nilq/baby-python
|
python
|
from flask import url_for
from authentek.database.models import User
def test_get_user(client, db, user, admin_headers):
# test 404
user_url = url_for('api.user_by_id', user_id="100000")
rep = client.get(user_url, headers=admin_headers)
assert rep.status_code == 404
db.session.add(user)
db.session.commit()
# test get_user
user_url = url_for('api.user_by_id', user_id=user.id)
rep = client.get(user_url, headers=admin_headers)
assert rep.status_code == 200
data = rep.get_json()["user"]
assert data["username"] == user.username
assert data["email"] == user.email
assert data["active"] == user.active
def test_put_user(client, db, user, admin_headers):
# test 404
user_url = url_for('api.user_by_id', user_id="100000")
rep = client.put(user_url, headers=admin_headers)
assert rep.status_code == 404
db.session.add(user)
db.session.commit()
data = {"username": "updated"}
user_url = url_for('api.user_by_id', user_id=user.id)
# test update user
rep = client.put(user_url, json=data, headers=admin_headers)
assert rep.status_code == 200
data = rep.get_json()["user"]
assert data["username"] == "updated"
assert data["email"] == user.email
assert data["active"] == user.active
def test_delete_user(client, db, user, admin_headers):
# test 404
user_url = url_for('api.user_by_id', user_id="100000")
rep = client.delete(user_url, headers=admin_headers)
assert rep.status_code == 404
db.session.add(user)
db.session.commit()
    # test delete_user
user_url = url_for('api.user_by_id', user_id=user.id)
rep = client.delete(user_url, headers=admin_headers)
assert rep.status_code == 200
assert db.session.query(User).filter_by(id=user.id).first() is None
def test_create_user(client, db, admin_headers):
# test bad data
users_url = url_for('api.users')
data = {"username": "created"}
rep = client.post(users_url, json=data, headers=admin_headers)
assert rep.status_code == 400
data["password"] = "admin"
data["email"] = "create@mail.com"
rep = client.post(users_url, json=data, headers=admin_headers)
assert rep.status_code == 201
data = rep.get_json()
user = db.session.query(User).filter_by(id=data["user"]["id"]).first()
assert user.username == "created"
assert user.email == "create@mail.com"
def test_get_all_user(client, db, user_factory, admin_headers):
users_url = url_for('api.users')
users = user_factory.create_batch(30)
db.session.add_all(users)
db.session.commit()
rep = client.get(users_url, headers=admin_headers)
assert rep.status_code == 200
results = rep.get_json()
for user in users:
assert any(u["id"] == user.id for u in results["results"])
|
nilq/baby-python
|
python
|
''' setup
'''
import re
import io
from distutils.command.build_ext import build_ext as build_ext_orig
from setuptools import setup, find_packages, Extension
# source: https://stackoverflow.com/a/39671214
__version__ = re.search(
r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
io.open('nei_vcf/__init__.py', encoding='utf_8_sig').read()
).group(1)
# ==== ctypes extensions
class CTypesExtension(Extension):
'''CTypesExtension'''
class build_ext(build_ext_orig):
'''build_ext'''
def build_extension(self, ext):
self._ctypes = isinstance(ext, CTypesExtension)
return super().build_extension(ext)
def get_export_symbols(self, ext):
if self._ctypes:
return ext.export_symbols
return super().get_export_symbols(ext)
def get_ext_filename(self, ext_name):
if self._ctypes:
return ext_name + '.so'
return super().get_ext_filename(ext_name)
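# The overrides above keep a plain '.so' filename and the exported symbols so
# the built libraries can be opened directly with ctypes; a hypothetical load:
#   import ctypes
#   lib = ctypes.CDLL('nei_vcf/lib/nei.so')  # path is illustrative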
nei_module = CTypesExtension(
'nei_vcf.lib.nei',
sources=['nei_vcf/src/nei.cpp'],
language='c++',
)
vcf_module = CTypesExtension(
'nei_vcf.lib.vcf',
sources=['nei_vcf/src/vcf.cpp'],
language='c++',
)
ext_modules = [
nei_module,
vcf_module,
]
install_requires = [
'numpy',
]
# ====
description = 'Nei (SNP) distance calculation for VCF data.'
long_description = io.open('README.md', encoding='utf-8').read()
long_description_content_type = 'text/markdown'
# ====
setup(
name='nei_vcf',
version=__version__,
packages=find_packages(),
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
author='Tankred Ott',
platforms=['any'],
python_requires='>=3.6',
install_requires=install_requires,
cmdclass={'build_ext': build_ext},
ext_modules=ext_modules,
# url='',
    entry_points={
'console_scripts': [
'nei_vcf=nei_vcf.commandline:main'
],
},
)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""opencv module tests"""
import sys
import os
import inspect
import logging
import cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib import ticker
from matplotlib.colors import LinearSegmentedColormap
logger = logging.getLogger("OPENCV")
def opencv_module(args=None):
logger.info("modules_opencv")
if args:
print(vars(args))
logger.info(vars(args))
# vid_test(args)
NIRPlantVideoTracking(args)
# https://www.geeksforgeeks.org/face-detection-using-python-and-opencv-with-webcam/
# webcam_create_data(args)
# webcam_face_recognize(args)
    # naive approach to getting a list of webcam ids
# https://stackoverflow.com/a/62639343
# list_cams(args)
def webcam_create_data(args):
module_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
input_dir = module_dir + "/../../input"
output_dir = module_dir + "/../../output"
# Creating database
# It captures images and stores them in datasets
# folder under the folder name of sub_data
haar_file = input_dir + "/haarcascade_frontalface_default.xml"
# All the faces data will be
# present this folder
datasets = output_dir + "/datasets"
if not os.path.isdir(datasets):
os.mkdir(datasets)
# These are sub data sets of folder,
# for my faces I've used my name you can
# change the label here
sub_data = "me2"
path = os.path.join(datasets, sub_data)
if not os.path.isdir(path):
os.mkdir(path)
# defining the size of images
(width, height) = (130, 100)
    # '0' selects the default webcam; adjust the index passed to
    # VideoCapture (here 2) to match whichever camera is attached
    face_cascade = cv2.CascadeClassifier(haar_file)
    webcam = cv2.VideoCapture(2)
    # The program loops until it has 49 images of the face.
count = 1
while count < 50:
(_, im) = webcam.read()
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 4)
for (x, y, w, h) in faces:
cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
face = gray[y : y + h, x : x + w]
face_resize = cv2.resize(face, (width, height))
cv2.imwrite("% s/% s.png" % (path, count), face_resize)
count += 1
cv2.imshow("OpenCV", im)
key = cv2.waitKey(10)
if key == 27:
break
def webcam_face_recognize(args):
module_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
input_dir = module_dir + "/../../input"
output_dir = module_dir + "/../../output"
# It helps in identifying the faces
# size = 4
haar_file = input_dir + "/haarcascade_frontalface_default.xml"
datasets = output_dir + "/datasets"
if not os.path.isdir(datasets):
os.mkdir(datasets)
# Part 1: Create fisherRecognizer
print("Recognizing Face Please Be in sufficient Lights...")
# Create a list of images and a list of corresponding names
(images, labels, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(datasets):
for subdir in dirs:
names[id] = subdir
subjectpath = os.path.join(datasets, subdir)
for filename in os.listdir(subjectpath):
path = subjectpath + "/" + filename
label = id
images.append(cv2.imread(path, 0))
labels.append(int(label))
id += 1
(width, height) = (130, 100)
# Create a Numpy array from the two lists above
(images, labels) = [np.array(lis) for lis in [images, labels]]
# OpenCV trains a model from the images
# NOTE FOR OpenCV2: remove '.face'
model = cv2.face.LBPHFaceRecognizer_create()
model.train(images, labels)
# Part 2: Use fisherRecognizer on camera stream
face_cascade = cv2.CascadeClassifier(haar_file)
webcam = cv2.VideoCapture(2)
while True:
(_, im) = webcam.read()
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
face = gray[y : y + h, x : x + w]
face_resize = cv2.resize(face, (width, height))
# Try to recognize the face
prediction = model.predict(face_resize)
cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)
if prediction[1] < 500:
cv2.putText(
im,
"% s - %.0f" % (names[prediction[0]], prediction[1]),
(x - 10, y - 10),
cv2.FONT_HERSHEY_PLAIN,
1,
(0, 255, 0),
)
else:
cv2.putText(
im,
"not recognized",
(x - 10, y - 10),
cv2.FONT_HERSHEY_PLAIN,
1,
(0, 255, 0),
)
cv2.imshow("OpenCV", im)
key = cv2.waitKey(10)
if key == 27:
break
def vid_test(args=None):
cap = cv2.VideoCapture(0)
# Check if camera opened successfully
if not cap.isOpened():
print("Error opening video file")
# Read until video is completed
while cap.isOpened():
# Capture frame-by-frame
ret, frame = cap.read()
if ret:
# Display the resulting frame
cv2.imshow("Frame", frame)
# Press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord("q"):
break
# Break the loop
else:
break
# When everything done, release
# the video capture object
cap.release()
# Closes all the frames
cv2.destroyAllWindows()
def list_cams(args):
"""Test the ports and returns a tuple with the available ports and the ones that are working."""
# is_working = True
dev_port = 0
working_ports = []
available_ports = []
max_ports = 20
while dev_port < max_ports:
try:
camera = cv2.VideoCapture(dev_port)
if camera.isOpened():
is_reading, img = camera.read()
w = camera.get(3)
h = camera.get(4)
if is_reading:
print("Port %s is working and reads images (%s x %s)" % (dev_port, h, w))
working_ports.append(dev_port)
else:
print("Port %s for camera ( %s x %s) is present but does not reads." % (dev_port, h, w))
available_ports.append(dev_port)
except: # noqa: E722
a, b, c = sys.exc_info()
print(a)
print(b)
print(c)
dev_port += 1
return available_ports, working_ports
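# Hypothetical usage of list_cams (probing opens real devices, so keep it manual):
#   available, working = list_cams(None)
#   print("available:", available, "working:", working)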
# from: https://github.com/MuonRay/Image-VideoSegmentationinNIRforPlantDetection/blob/master/NIRPlantVideoTracking.py
def NIRPlantVideoTracking(args):
cap = cv2.VideoCapture(0)
# custom colormap for ndvi greyscale video
cols3 = ["gray", "blue", "green", "yellow", "red"]
def create_colormap(args):
return LinearSegmentedColormap.from_list(name="custom1", colors=cols3)
# colour bar to match grayscale units
def create_colorbar(fig, image):
position = fig.add_axes([0.125, 0.19, 0.2, 0.05])
norm = colors.Normalize(vmin=-1.0, vmax=1.0)
cbar = plt.colorbar(
image,
cax=position,
orientation="horizontal",
norm=norm,
)
cbar.ax.tick_params(labelsize=6)
tick_locator = ticker.MaxNLocator(nbins=3)
cbar.locator = tick_locator
cbar.update_ticks()
cbar.set_label("NDVI", fontsize=10, x=0.5, y=0.5, labelpad=-25)
    while True:
# Take each frame
_, frame = cap.read()
# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# define range of red NIR vegetation color in HSV
low_red = np.array([160, 105, 84])
high_red = np.array([179, 255, 255])
# Threshold the HSV image to get only red colors
mask = cv2.inRange(hsv, low_red, high_red)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(frame, frame, mask=mask)
        # NDVI Processing (guard against 0/0 in fully masked pixels)
        ir = (res[:, :, 0]).astype("float")
        r = (res[:, :, 2]).astype("float")
        denom = np.add(ir, r)
        ndvi = np.zeros_like(denom)
        np.divide(np.subtract(ir, r), denom, out=ndvi, where=denom != 0)
cols3 = ["gray", "blue", "green", "yellow", "red"]
# def create_colormap(args):
# return LinearSegmentedColormap.from_list(name="custom1", colors=cols3)
# colour bar to match grayscale units
# def create_colorbar(fig, image):
# position = fig.add_axes([0.125, 0.19, 0.2, 0.05])
# norm = colors.Normalize(vmin=-1.0, vmax=1.0)
# cbar = plt.colorbar(
# image,
# cax=position,
# orientation="horizontal",
# norm=norm,
# )
# cbar.ax.tick_params(labelsize=6)
# tick_locator = ticker.MaxNLocator(nbins=3)
# cbar.locator = tick_locator
# cbar.update_ticks()
# cbar.set_label(
# "NDVI",
# fontsize=10,
# x=0.5,
# y=0.5,
# labelpad=-25,
# )
image = plt.imshow(ndvi, cmap=create_colormap(colors))
# plt.axis('off')
# image = cv2.imshow(ndvi, cmap=create_colormap(colors))
cv2.imshow("frame", frame)
cv2.imshow("mask", mask)
cv2.imshow("res", res)
# this step adds considerable processing, be sure to use only 720p files at most a minute long
# cv2.imshow('ndvi',ndvi)
cv2.imshow("ndvi with color", ndvi)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.7 on 2021-09-03 10:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True, verbose_name='name')),
('address', models.TextField()),
('discription', models.TextField()),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='company', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='PermissionList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='permissionlist', to='api.company')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='permissionlist', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lastname', models.CharField(max_length=100, verbose_name='lastname')),
('firstname', models.CharField(max_length=100, verbose_name='firstname')),
('secondname', models.CharField(max_length=100, verbose_name='secondname')),
('position', models.TextField()),
('phone_number', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, region=None, unique=True)),
('office_number', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, region=None)),
('fax_number', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, region=None)),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='employee', to='api.company')),
],
options={
'unique_together': {('lastname', 'firstname', 'secondname', 'company')},
},
),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import pytest
from great_expectations.core import (
ExpectationConfiguration,
ExpectationValidationResult,
)
from great_expectations.render.renderer.renderer import Renderer
def test_render():
# ??? Should this really return the input object?
# Seems like raising NotImplementedError might be preferable.
assert Renderer().render({}) == {}
assert Renderer().render("wowza") == "wowza"
# TODO: Implement this test thoughtfully
# def test__id_from_configuration():
# Renderer()._id_from_configuration(expectation_type, expectation_kwargs, data_asset_name=None)
# TODO: Implement this test thoughtfully
# def test__get_expectation_type():
# Renderer()._get_expectation_type(ge_object)
# TODO: Implement this test thoughtfully
# def test__find_ge_object_type():
# Renderer()._find_ge_object_type(ge_object)
def test__find_evr_by_type(titanic_profiled_evrs_1):
    # TODO: _find_evr_by_type should accept a ValidationResultSuite, not ValidationResultSuite.results
found_evr = Renderer()._find_evr_by_type(
titanic_profiled_evrs_1.results, "expect_column_to_exist"
)
print(found_evr)
assert found_evr is None
    # TODO: _find_evr_by_type should accept a ValidationResultSuite, not ValidationResultSuite.results
found_evr = Renderer()._find_evr_by_type(
titanic_profiled_evrs_1.results, "expect_column_distinct_values_to_be_in_set"
)
print(found_evr)
assert found_evr == ExpectationValidationResult(
success=True,
result={
"observed_value": ["*", "1st", "2nd", "3rd"],
"element_count": 1313,
"missing_count": 0,
"missing_percent": 0.0,
"details": {
"value_counts": [
{"value": "*", "count": 1},
{"value": "1st", "count": 322},
{"value": "2nd", "count": 279},
{"value": "3rd", "count": 711},
]
},
},
exception_info={
"raised_exception": False,
"exception_message": None,
"exception_traceback": None,
},
expectation_config=ExpectationConfiguration(
expectation_type="expect_column_distinct_values_to_be_in_set",
kwargs={"column": "PClass", "value_set": None, "result_format": "SUMMARY"},
),
)
def test__find_all_evrs_by_type(titanic_profiled_evrs_1):
    # TODO: _find_all_evrs_by_type should accept a ValidationResultSuite, not ValidationResultSuite.results
found_evrs = Renderer()._find_all_evrs_by_type(
titanic_profiled_evrs_1.results, "expect_column_to_exist", column_=None
)
print(found_evrs)
assert found_evrs == []
    # TODO: _find_all_evrs_by_type should accept a ValidationResultSuite, not ValidationResultSuite.results
found_evrs = Renderer()._find_all_evrs_by_type(
titanic_profiled_evrs_1.results, "expect_column_to_exist", column_="SexCode"
)
print(found_evrs)
assert found_evrs == []
    # TODO: _find_all_evrs_by_type should accept a ValidationResultSuite, not ValidationResultSuite.results
found_evrs = Renderer()._find_all_evrs_by_type(
titanic_profiled_evrs_1.results,
"expect_column_distinct_values_to_be_in_set",
column_=None,
)
print(found_evrs)
assert len(found_evrs) == 4
    # TODO: _find_all_evrs_by_type should accept a ValidationResultSuite, not ValidationResultSuite.results
found_evrs = Renderer()._find_all_evrs_by_type(
titanic_profiled_evrs_1.results,
"expect_column_distinct_values_to_be_in_set",
column_="SexCode",
)
print(found_evrs)
assert len(found_evrs) == 1
def test__get_column_list_from_evrs(titanic_profiled_evrs_1):
column_list = Renderer()._get_column_list_from_evrs(titanic_profiled_evrs_1)
print(column_list)
assert column_list == [
"Unnamed: 0",
"Name",
"PClass",
"Age",
"Sex",
"Survived",
"SexCode",
]
|
nilq/baby-python
|
python
|
from MyCodes.personal import title, inputInt, inputFloat
from urllib.request import urlopen
title('Exercise 113', 50, 34)
a = inputInt('Enter an integer value: ')
b = inputFloat('Enter a real value: ')
print(f'The integer value is {a} and the real value is {b:.1f}.')
title('Exercise 114', 50, 34)
try:
    pag = urlopen('http://www.pudim.com.br/')
except Exception:
    print('\033[31mThe site is not accessible!\033[m')
else:
    print('\033[32mThe site is accessible!\033[m')
|
nilq/baby-python
|
python
|
# Copyright 2017 Gustavo Baratto. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class BaseStack(object):
""" Base class for different types of stacks.
"""
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)
def factory(**kwargs):
""" Factory for different types of stacks
Imports are being done here so SDKs for multiple providers don't need to
be installed if never used.
"""
# default type is Cloudformation
possible_stack_type_keys = ["StackType", "stack_type", "Type", "type"]
stack_keys = kwargs.keys()
for possible_stack_type in possible_stack_type_keys:
if possible_stack_type in stack_keys:
stack_type = kwargs.pop(possible_stack_type).lower()
break
else:
stack_type = "cloudformation"
if stack_type == "cloudformation":
import gpwm.stacks.aws
return gpwm.stacks.aws.CloudformationStack(**kwargs)
elif stack_type == "azure":
import gpwm.stacks.azure
return gpwm.stacks.azure.AzureStack(**kwargs)
elif stack_type == "shell":
import gpwm.stacks.shell
return gpwm.stacks.shell.ShellStack(**kwargs)
elif stack_type == "gcp":
import gpwm.stacks.gcp
return gpwm.stacks.gcp.GCPStack(**kwargs)
raise SystemExit("Stack type not supported: {}".format(stack_type))
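# Hypothetical usage: any accepted type key picks the provider, and the
# remaining keyword arguments pass through to the stack class unchanged:
#   stack = factory(StackType="cloudformation", StackName="demo-stack")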
|
nilq/baby-python
|
python
|
from .block import *
from .chain import *
__version__ = "0.0.1"
|
nilq/baby-python
|
python
|
import time
from multiprocessing.pool import ThreadPool
from core.events import EventHandler
from core.keystore import KeyStore as kb
from core.packetcap import pktcap
from core.actionModule import actionModule
from core.mymsf import myMsf
from core.utils import Utils
class msfActionModule(actionModule):
seentargets = dict()
def __init__(self, config, display, lock):
actionModule.__init__(self, config, display, lock)
# connect to msfrpc
        self.msf = myMsf(host=self.config['msfhost'], port=int(self.config['msfport']), user=self.config['msfuser'],
                         password=self.config['msfpass'])
def go(self, vector):
self.vector = vector
self.display.verbose("-> Running : " + self.getTitle())
self.display.debug("---> " + self.getDescription())
        if not self.msf.isAuthenticated():
            return
        ret = self.process()
        self.msf.cleanup()
return ret
def execMsf(self, target, cmds):
myMsf.lock.acquire()
        self.display.verbose(self.shortName + " - Connecting to " + target)
for line in cmds['config']:
if line == "SLEEP":
                self.msf.sleep(int(self.config['msfexploitdelay']))
else:
                self.msf.execute(line + "\n")
if cmds['payload'] == "none":
pass
elif cmds['payload'] == "win":
pass
elif cmds['payload'] == "linux":
msf.execute("set PAYLOAD linux/x86/meterpreter/reverse_tcp")
msf.execute("set LPORT 4445")
msf.execute("exploit -j\n")
msf.sleep(int(self.config['msfexploitdelay']))
        outfile = self.config["proofsDir"] + self.shortName + "_" + target + "_" + Utils.getRandStr(10)
        result = self.msf.getResult()
#while (re.search(".*execution completed.*", result) is None):
# result = result + msf.getResult()
myMsf.lock.release()
Utils.writeFile(result, outfile)
        return result, outfile
|
nilq/baby-python
|
python
|
# str.replace works on strings, not tuples; join the names into one string first
string = 'emir,zarina,baizak,nazira'
string = string.replace('emir', 'vanya')
print(string)
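# If a tuple of names was intended (as the original first line built), replace
# an element by constructing a new tuple instead:
names = ('emir', 'zarina', 'baizak', 'nazira')
names = tuple('vanya' if n == 'emir' else n for n in names)
print(names)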
|
nilq/baby-python
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm.contrib.ethosu.cascader as pl
from tvm.contrib.ethosu.cascader.parts import EthosuPart
def test_ethosu_part():
te_subgraph = pl.TESubgraph([], None)
output_quantum = [1, 2, 2, 8]
quantum_cycles = 32
propagator = pl.Propagator(
[[1, 0, 0, 0, 2], [0, 1, 0, 0, 2], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[0, 0, 0, 0],
)
stripe_config = pl.StripeConfig(
[1, 4, 4, 16], [1, 64, 72, 96], [1, 4, 4, 16], [1, 2, 3, 4], [1, 16, 13, 6], [0, 0, 0, 0]
)
part = EthosuPart(te_subgraph, [propagator], output_quantum, quantum_cycles)
assert part.get_stripe_align_hint() == output_quantum
# Check that the performance model runs, don't verify output
part.get_performance_info(stripe_config, False)
part.get_performance_info(stripe_config, True)
if __name__ == "__main__":
pytest.main([__file__])
|
nilq/baby-python
|
python
|
import os
import tempfile
from pathlib import Path
from test.splitgraph.commands.test_commit_diff import _alter_diff_splitting_dataset
from test.splitgraph.conftest import API_RESOURCES, OUTPUT
from unittest import mock
from unittest.mock import call, patch, sentinel
import httpretty
import pytest
from click import ClickException
from click.testing import CliRunner
from splitgraph.commandline import (
cli,
config_c,
dump_c,
eval_c,
import_c,
prune_c,
rm_c,
upstream_c,
)
from splitgraph.commandline.common import ImageType, RepositoryType
from splitgraph.commandline.example import alter_c, generate_c, splitfile_c
from splitgraph.commandline.misc import (
_get_binary_url_for,
_get_download_paths,
_get_system_id,
upgrade_c,
)
from splitgraph.config import PG_PWD, PG_USER
from splitgraph.core.engine import repository_exists
from splitgraph.core.fragment_manager import FragmentManager
from splitgraph.core.repository import Repository
from splitgraph.engine import ResultShape
from splitgraph.exceptions import (
ImageNotFoundError,
RepositoryNotFoundError,
TableNotFoundError,
)
def test_image_spec_parsing():
assert ImageType()("test/pg_mount") == (Repository("test", "pg_mount"), "latest")
assert ImageType(default="HEAD")("test/pg_mount") == (Repository("test", "pg_mount"), "HEAD")
assert ImageType()("test/pg_mount:some_tag") == (Repository("test", "pg_mount"), "some_tag")
assert ImageType()("pg_mount") == (Repository("", "pg_mount"), "latest")
assert ImageType()("pg_mount:some_tag") == (Repository("", "pg_mount"), "some_tag")
assert ImageType(default="HEAD")("pg_mount:some_tag") == (
Repository("", "pg_mount"),
"some_tag",
)
def test_image_repo_parsing_errors(pg_repo_local):
repo = Repository("test", "pg_mount")
assert ImageType(get_image=True, default="latest")("test/pg_mount")[1] == repo.images["latest"]
assert (
ImageType(get_image=True, default="latest")("test/pg_mount:00000000")[1]
== repo.images["00000000"]
)
with pytest.raises(ImageNotFoundError):
ImageType(get_image=True, default="latest")("test/pg_mount:doesnt_exist")
with pytest.raises(RepositoryNotFoundError):
ImageType(get_image=True, default="latest")("test/doesntexist:latest")
with pytest.raises(RepositoryNotFoundError):
RepositoryType(exists=True)("test/doesntexist")
def test_upstream_management(pg_repo_local):
runner = CliRunner()
# sgr upstream test/pg_mount
result = runner.invoke(upstream_c, ["test/pg_mount"])
assert result.exit_code == 0
assert "has no upstream" in result.output
# Set to nonexistent engine
result = runner.invoke(upstream_c, ["test/pg_mount", "--set", "dummy_engine", "test/pg_mount"])
assert result.exit_code == 1
assert "Remote engine 'dummy_engine' does not exist" in result.output
# Set to existing engine (should we check the repo actually exists?)
result = runner.invoke(upstream_c, ["test/pg_mount", "--set", "remote_engine", "test/pg_mount"])
assert result.exit_code == 0
assert "set to track remote_engine:test/pg_mount" in result.output
# Get upstream again
result = runner.invoke(upstream_c, ["test/pg_mount"])
assert result.exit_code == 0
assert "is tracking remote_engine:test/pg_mount" in result.output
# Reset it
result = runner.invoke(upstream_c, ["test/pg_mount", "--reset"])
assert result.exit_code == 0
assert "Deleted upstream for test/pg_mount" in result.output
assert pg_repo_local.upstream is None
# Reset it again
result = runner.invoke(upstream_c, ["test/pg_mount", "--reset"])
assert result.exit_code == 1
assert "has no upstream" in result.output
@pytest.mark.mounting
def test_import(pg_repo_local, mg_repo_local):
runner = CliRunner()
head = pg_repo_local.head
# sgr import mountpoint, table, target_mountpoint (3-arg)
result = runner.invoke(import_c, [str(mg_repo_local), "stuff", str(pg_repo_local)])
assert result.exit_code == 0
new_head = pg_repo_local.head
assert new_head.get_table("stuff")
with pytest.raises(TableNotFoundError):
head.get_table("stuff")
# sgr import with alias
result = runner.invoke(
import_c, [str(mg_repo_local), "stuff", str(pg_repo_local), "stuff_copy"]
)
assert result.exit_code == 0
new_new_head = pg_repo_local.head
assert new_new_head.get_table("stuff_copy")
with pytest.raises(TableNotFoundError):
new_head.get_table("stuff_copy")
# sgr import with alias and custom image hash
mg_repo_local.run_sql("DELETE FROM stuff")
new_mg_head = mg_repo_local.commit()
result = runner.invoke(
import_c,
[
str(mg_repo_local) + ":" + new_mg_head.image_hash,
"stuff",
str(pg_repo_local),
"stuff_empty",
],
)
assert result.exit_code == 0
new_new_new_head = pg_repo_local.head
assert new_new_new_head.get_table("stuff_empty")
with pytest.raises(TableNotFoundError):
new_new_head.get_table("stuff_empty")
assert pg_repo_local.run_sql("SELECT * FROM stuff_empty") == []
# sgr import with query, no alias
result = runner.invoke(
import_c,
[
str(mg_repo_local) + ":" + new_mg_head.image_hash,
"SELECT * FROM stuff",
str(pg_repo_local),
],
)
assert result.exit_code != 0
assert "TARGET_TABLE is required" in str(result.stdout)
def test_rm_repositories(pg_repo_local, pg_repo_remote):
runner = CliRunner()
# sgr rm test/pg_mount, say "no"
result = runner.invoke(rm_c, [str(pg_repo_local)], input="n\n")
assert result.exit_code == 1
assert "Repository test/pg_mount will be deleted" in result.output
assert repository_exists(pg_repo_local)
# sgr rm test/pg_mount, say "yes"
result = runner.invoke(rm_c, [str(pg_repo_local)], input="y\n")
assert result.exit_code == 0
assert not repository_exists(pg_repo_local)
# sgr rm test/pg_mount -r remote_engine
result = runner.invoke(rm_c, [str(pg_repo_remote), "-r", "remote_engine"], input="y\n")
assert result.exit_code == 0
assert not repository_exists(pg_repo_remote)
def test_rm_images(pg_repo_local_multitag, pg_repo_remote_multitag):
# Play around with both engines for simplicity -- both have 2 images with 2 tags
runner = CliRunner()
local_v1 = pg_repo_local_multitag.images["v1"].image_hash
local_v2 = pg_repo_local_multitag.images["v2"].image_hash
# Test deleting checked out image causes an error
result = runner.invoke(rm_c, [str(pg_repo_local_multitag) + ":v2"])
assert result.exit_code != 0
assert "do sgr checkout -u test/pg_mount" in str(result.exc_info)
pg_repo_local_multitag.uncheckout()
# sgr rm test/pg_mount:v2, say "no"
result = runner.invoke(rm_c, [str(pg_repo_local_multitag) + ":v2"], input="n\n")
assert result.exit_code == 1
# Specify most of the output verbatim here to make sure it's not proposing
# to delete more than needed (just the single image and the single v2 tag)
assert (
"Images to be deleted:\n" + local_v2 + "\nTotal: 1\n\nTags to be deleted:\nv2\nTotal: 1"
in result.output
)
# Since we cancelled the operation, 'v2' still remains.
assert pg_repo_local_multitag.images["v2"].image_hash == local_v2
assert pg_repo_local_multitag.images[local_v2] is not None
# Uncheckout the remote too (it's supposed to be bare anyway)
remote_v2 = pg_repo_remote_multitag.images["v2"].image_hash
pg_repo_remote_multitag.uncheckout()
# sgr rm test/pg_mount:v2 -r remote_engine, say "yes"
result = runner.invoke(
rm_c, [str(pg_repo_remote_multitag) + ":v2", "-r", "remote_engine"], input="y\n"
)
assert result.exit_code == 0
assert pg_repo_remote_multitag.images.by_tag("v2", raise_on_none=False) is None
with pytest.raises(ImageNotFoundError):
pg_repo_remote_multitag.images.by_hash(remote_v2)
# sgr rm test/pg_mount:v1 -y
# Should delete both images since v2 depends on v1
result = runner.invoke(rm_c, [str(pg_repo_local_multitag) + ":v1", "-y"])
assert result.exit_code == 0
assert local_v2 in result.output
assert local_v1 in result.output
assert "v1" in result.output
assert "v2" in result.output
# One image remaining (the 00000.. base image)
assert len(pg_repo_local_multitag.images()) == 1
def test_prune(pg_repo_local_multitag, pg_repo_remote_multitag):
runner = CliRunner()
# Two engines, two repos, two images in each (tagged v1 and v2, v1 is the parent of v2).
pg_repo_remote_multitag.uncheckout()
# sgr prune test/pg_mount -- all images are tagged, nothing to do.
result = runner.invoke(prune_c, [str(pg_repo_local_multitag)])
assert result.exit_code == 0
assert "Nothing to do" in result.output
# Delete tag v2 and run sgr prune -r remote_engine test/pg_mount, say "no": the image
# that used to be 'v2' now isn't tagged so it will be a candidate for removal (but not the v1 image).
remote_v2 = pg_repo_remote_multitag.images["v2"]
remote_v2.delete_tag("v2")
pg_repo_remote_multitag.commit_engines()
result = runner.invoke(
prune_c, [str(pg_repo_remote_multitag), "-r", "remote_engine"], input="n\n"
)
assert result.exit_code == 1 # Because "n" aborted the command
assert remote_v2.image_hash in result.output
assert "Total: 1" in result.output
# Make sure the image still exists
assert pg_repo_remote_multitag.images.by_hash(remote_v2.image_hash)
# Delete tag v1 and run sgr prune -r remote_engine -y test_pg_mount:
# now both images aren't tagged so will get removed.
remote_v1 = pg_repo_remote_multitag.images["v1"]
remote_v1.delete_tag("v1")
pg_repo_remote_multitag.commit_engines()
result = runner.invoke(prune_c, [str(pg_repo_remote_multitag), "-r", "remote_engine", "-y"])
assert result.exit_code == 0
assert remote_v2.image_hash in result.output
assert remote_v1.image_hash in result.output
# 2 images + the 000... image
assert "Total: 3" in result.output
assert not pg_repo_remote_multitag.images()
# Finally, delete both tags from the local engine and prune. Since there's still
# a HEAD tag pointing to the ex-v2, nothing will actually happen.
result = runner.invoke(prune_c, [str(pg_repo_local_multitag), "-y"])
assert "Nothing to do." in result.output
# 2 images + the 000.. image
assert len(pg_repo_local_multitag.images()) == 3
assert len(pg_repo_local_multitag.get_all_hashes_tags()) == 3
def test_config_dumping():
runner = CliRunner()
# sgr config (normal, with passwords shielded)
result = runner.invoke(config_c, catch_exceptions=False)
assert result.exit_code == 0
assert PG_PWD not in result.output
assert "remote_engine:" in result.output
assert ("SG_ENGINE_USER=%s" % PG_USER) in result.output
assert "DUMMY=test.splitgraph.splitfile" in result.output
assert "S3=splitgraph.hooks.s3" in result.output
# sgr config -s (no password shielding)
result = runner.invoke(config_c, ["-s"])
assert result.exit_code == 0
assert ("SG_ENGINE_USER=%s" % PG_USER) in result.output
assert ("SG_ENGINE_PWD=%s" % PG_PWD) in result.output
assert "remote_engine:" in result.output
# sgr config -sc (no password shielding, output in config format)
result = runner.invoke(config_c, ["-sc"])
assert result.exit_code == 0
assert ("SG_ENGINE_USER=%s" % PG_USER) in result.output
assert ("SG_ENGINE_PWD=%s" % PG_PWD) in result.output
assert "[remote: remote_engine]" in result.output
assert "[defaults]" in result.output
assert "[commands]" in result.output
assert "[external_handlers]" in result.output
assert "[data_sources]" in result.output
assert "S3=splitgraph.hooks.s3" in result.output
# sgr config -n (print connection string to engine)
result = runner.invoke(config_c, ["-n"])
assert result.output == "postgresql://sgr:supersecure@localhost:5432/splitgraph\n"
def test_examples(local_engine_empty):
# Test the example-generating commands used in the quickstart
runner = CliRunner()
result = runner.invoke(generate_c, ["example/repo_1"])
assert result.exit_code == 0
repo = Repository.from_schema("example/repo_1")
assert len(repo.images()) == 2
assert repo.run_sql("SELECT COUNT(*) FROM demo", return_shape=ResultShape.ONE_ONE) == 10
assert repo.diff("demo", repo.head, None, aggregate=True) == (0, 0, 0)
result = runner.invoke(alter_c, ["example/repo_1"])
assert result.exit_code == 0
assert len(repo.images()) == 2
assert repo.diff("demo", repo.head, None, aggregate=True) == (2, 2, 2)
result = runner.invoke(splitfile_c, ["example/repo_1", "example/repo_2"])
assert result.exit_code == 0
assert "FROM example/repo_1 IMPORT demo AS table_1" in result.stdout
assert "FROM example/repo_2:${IMAGE_2} IMPORT demo AS table_2" in result.stdout
def test_commandline_dump_load(pg_repo_local):
pg_repo_local.run_sql("ALTER TABLE fruits ADD PRIMARY KEY (fruit_id)")
pg_repo_local.commit()
pg_repo_local.run_sql("INSERT INTO fruits VALUES (3, 'mayonnaise')")
pg_repo_local.commit()
pg_repo_local.run_sql("UPDATE fruits SET name = 'banana' WHERE fruit_id = 1")
pg_repo_local.commit()
pg_repo_local.head.tag("test_tag")
runner = CliRunner()
result = runner.invoke(dump_c, [str(pg_repo_local)], catch_exceptions=False)
assert result.exit_code == 0
dump = result.stdout
# Now delete the repo and try loading the dump to test it actually works.
pg_repo_local.delete()
pg_repo_local.objects.cleanup()
pg_repo_local.engine.run_sql(dump)
pg_repo_local.images["test_tag"].checkout()
assert pg_repo_local.run_sql("SELECT * FROM fruits ORDER BY fruit_id") == [
(1, "banana"),
(2, "orange"),
(3, "mayonnaise"),
]
def test_commandline_eval():
runner = CliRunner()
result = runner.invoke(eval_c, ["print()"], input="n\n", catch_exceptions=False)
assert result.exit_code == 1
assert "Aborted!" in result.output
result = runner.invoke(
eval_c,
[
"assert Repository.from_schema('test/repo').namespace == 'test';"
"assert object_manager is not None; print('arg_1=%s' % arg_1)",
"--arg",
"arg_1",
"val_1",
"--i-know-what-im-doing",
],
catch_exceptions=False,
)
assert result.exit_code == 0
assert "arg_1=val_1" in result.output
_GH_TAG = "https://api.github.com/repos/splitgraph/splitgraph/releases/tags/v0.1.0"
_GH_LATEST = "https://api.github.com/repos/splitgraph/splitgraph/releases/latest"
_GH_NONEXISTENT = "https://api.github.com/repos/splitgraph/splitgraph/releases/tags/vnonexistent"
def _gh_response(request, uri, response_headers):
with open(os.path.join(API_RESOURCES, "github_releases.json")) as f:
return [200, response_headers, f.read()]
def _gh_404(request, uri, response_headers):
return [404, response_headers, ""]
@httpretty.activate(allow_net_connect=False)
@pytest.mark.parametrize(
("system", "release", "result"),
[
(
"linux",
"latest",
(
"0.1.0",
"https://github.com/splitgraph/splitgraph/releases/download/v0.1.0/sgr-linux-x86_64",
),
),
(
"linux",
"v0.1.0",
(
"0.1.0",
"https://github.com/splitgraph/splitgraph/releases/download/v0.1.0/sgr-linux-x86_64",
),
),
(
"osx",
"latest",
(
"0.1.0",
"https://github.com/splitgraph/splitgraph/releases/download/v0.1.0/sgr-osx-x86_64",
),
),
(
"windows",
"latest",
(
"0.1.0",
"https://github.com/splitgraph/splitgraph/releases/download/v0.1.0/sgr-windows-x86_64.exe",
),
),
("windows", "vnonexistent", ValueError),
("weirdplatform", "v0.1.0", ValueError),
],
)
def test_get_binary_url(system, release, result):
httpretty.register_uri(httpretty.HTTPretty.GET, _GH_TAG, body=_gh_response)
httpretty.register_uri(httpretty.HTTPretty.GET, _GH_LATEST, body=_gh_response)
httpretty.register_uri(httpretty.HTTPretty.GET, _GH_NONEXISTENT, body=_gh_404)
if result == ValueError:
with pytest.raises(result):
_get_binary_url_for(system, release)
else:
assert _get_binary_url_for(system, release) == result
def test_system_id_not_exists():
with mock.patch("splitgraph.commandline.misc.platform.system", return_value="TempleOS"):
with pytest.raises(ClickException):
_get_system_id()
@pytest.mark.parametrize(
("path", "final_path"),
[
("/home/user/", "/home/user/sgr"),
("/home/user/sgr_dest", "/home/user/sgr_dest"),
(None, "/usr/local/bin/sgr"),
],
)
def test_get_download_paths(fs_fast, path, final_path):
Path("/home/user/").mkdir(parents=True)
with mock.patch("splitgraph.commandline.misc.sys") as m_sys:
m_sys.executable = "/usr/local/bin/sgr"
temp_path_actual, final_path_actual = _get_download_paths(
path, "https://some.url.com/assets/sgr"
)
assert str(final_path_actual) == final_path
@httpretty.activate(allow_net_connect=False)
def test_upgrade_end_to_end():
_BODY = "new sgr client"
httpretty.register_uri(httpretty.HTTPretty.GET, _GH_TAG, body=_gh_response)
httpretty.register_uri(httpretty.HTTPretty.GET, _GH_LATEST, body=_gh_response)
httpretty.register_uri(httpretty.HTTPretty.GET, _GH_NONEXISTENT, body=_gh_404)
httpretty.register_uri(
httpretty.HTTPretty.GET,
"https://github.com/splitgraph/splitgraph/releases/download/v0.1.0/sgr-linux-x86_64",
body=_BODY,
adding_headers={"Content-Length": len(_BODY)},
)
runner = CliRunner()
# Patch a lot of things
with tempfile.TemporaryDirectory() as dir:
with open(os.path.join(dir, "sgr"), "w") as f:
f.write("old sgr client")
_module = "splitgraph.commandline.misc"
with mock.patch(_module + ".sys") as m_sys:
m_sys.executable = os.path.join(dir, "sgr")
m_sys.frozen = True
with mock.patch(_module + ".platform.system", return_value="Linux"):
with mock.patch(_module + ".subprocess.check_call") as subprocess:
with mock.patch(_module + ".list_engines", return_value=[sentinel.engine]):
with mock.patch("splitgraph.commandline.misc.atexit.register") as register:
result = runner.invoke(upgrade_c, ["--force"], catch_exceptions=False)
assert result.exit_code == 0
print(result.output)
assert subprocess.mock_calls == [
call([mock.ANY, "--version"]),
call([mock.ANY, "engine", "upgrade"]),
]
# Call the atexit callback that swaps the new sgr in and check it does that correctly.
# mock_calls is a list of tuples (name, args, kwargs), so grab the first arg
finalize_callback = register.mock_calls[-1][1][0]
assert finalize_callback.__name__ == "_finalize"
finalize_callback()
with open(os.path.join(dir, "sgr")) as f:
assert f.read() == "new sgr client"
with open(os.path.join(dir, "sgr.old")) as f:
assert f.read() == "old sgr client"
def test_rollback_on_error(local_engine_empty):
# For e.g. commit/checkout/other commands, we don't do commits/rollbacks
# in the library itself and expect the caller to manage transactions. In CLI,
# we need to make sure that erroneous transactions (e.g. interrupted SG commits)
# are rolled back correctly instead of being committed.
runner = CliRunner()
OUTPUT.init()
OUTPUT.run_sql("CREATE TABLE test (key INTEGER PRIMARY KEY, value_1 VARCHAR, value_2 INTEGER)")
for i in range(11):
OUTPUT.run_sql("INSERT INTO test VALUES (%s, %s, %s)", (i + 1, chr(ord("a") + i), i * 2))
OUTPUT.commit(chunk_size=5, in_fragment_order={"test": ["key", "value_1"]})
assert len(OUTPUT.images()) == 2
assert len(OUTPUT.objects.get_all_objects()) == 3
_alter_diff_splitting_dataset()
OUTPUT.commit_engines()
# Simulate the commit getting interrupted by the first object going through and being
# recorded, then a KeyboardInterrupt being raised.
called_once = False
def interrupted_register(*args, **kwargs):
nonlocal called_once
if called_once:
raise BaseException("something went wrong")
else:
called_once = True
return FragmentManager._register_object(*args, **kwargs)
with patch(
"splitgraph.core.fragment_manager.FragmentManager._register_object",
side_effect=interrupted_register,
) as ro:
with pytest.raises(BaseException):
runner.invoke(cli, ["commit", OUTPUT.to_schema()])
# Check that no image/object metadata was written
assert len(OUTPUT.images()) == 2
assert len(OUTPUT.objects.get_all_objects()) == 3
assert ro.call_count == 2
# Check that the data in the audit trigger wasn't deleted
assert len(OUTPUT.engine.get_pending_changes(OUTPUT.to_schema(), table="test")) == 6
|
nilq/baby-python
|
python
|
name = "CheeseBurger"
|
nilq/baby-python
|
python
|
import json
import requests
from requests.auth import HTTPBasicAuth as BasicAuth
from requests.packages import urllib3
from urllib3.exceptions import InsecureRequestWarning
urllib3.disable_warnings(InsecureRequestWarning)
url = "https://sbx-nxos-mgmt.cisco.com:443/ins"
username = "admin"
password = "Admin_1234!"
data = {
"ins_api": {
"version": "1.0",
# Possible values:
# 1- cli_show
# 2- cli_show_array (For multiple show commands at once)
# 3- cli_show_ascii
# 4- cli_conf
"type": "cli_show",
"chunk": "0",
"sid": "1",
"input": "show vlan brief", # any command
"output_format": "json", # or XML
}
}
# POST: Request
response = requests.post(
url=url,
auth=BasicAuth(username, password),
json=data,
verify=False,
)
vlan_brief = response.json()
# Export response to a JSON file
with open(file="vlan-brief-output.json", mode="w") as outfile:
json.dump(obj=vlan_brief, fp=outfile, indent=4)
print("Done")
|
nilq/baby-python
|
python
|
# Generated by Django 2.1 on 2019-05-15 22:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0006_deviceconfig_last_modify'),
]
operations = [
migrations.AlterField(
model_name='deviceconfig',
name='last_modify',
field=models.DateTimeField(null=True),
),
]
|
nilq/baby-python
|
python
|
class Diplomacy:
"""
    Represents every player's diplomacy stances with the other players
"""
def __init__(self, stances=None):
"""
Initialize Diplomacy
Args:
            stances (list(int)): stances with the other players
        Note:
            there are 8 stances with other players
"""
if stances is None:
stances = [3] * 17
self.stances = stances
self.gaia = [0] * 17
def __repr__(self):
name = "Diplomacy:\n"
stances = "\t"
for ind in range(len(self.stances)):
stances += "P{}: {}; ".format(ind, self.stances[ind])
return name + stances
def __setitem__(self, playerIndex, stance):
"""
        Set diplomacy stance with another player
        Args:
            playerIndex (int): index of another player
            stance (int): stance to set (0=allied, 1=neutral, 3=enemy)
"""
self.stances[playerIndex] = stance
def __getitem__(self, playerIndex):
"""
Get diplomacy stance with another player
Args:
playerIndex (int): index of another player
Return:
(int): player stance (0=allied, 1=neutral, 3=enemy)
"""
return self.stances[playerIndex]
def toJSON(self):
"""return JSON"""
data = dict()
for i in range(len(self.stances)):
data[i] = self.stances[i]
return data
def allStances(self, stance):
"""
Set diplomacy stance with all players
Args:
stance (int): diplomacy stance
"""
for i in range(len(self.stances)):
self.stances[i] = stance
def getPlayersByStance(self, stance):
"""
        Get players which have the selected stance
        Args:
            stance (int): 0=allied, 1=neutral, 3=enemy
        Return:
            (list(int)): player indexes which have the selected stance
"""
result = list()
for ind in range(len(self.stances)):
if self.stances[ind] == stance:
result.append(ind)
return result
def allies(self):
"""
        Get all player indexes with ally stance (0)
Return:
(list(int)): player indexes, which are allies
"""
return self.getPlayersByStance(0)
def neutrals(self):
"""
        Get all player indexes with neutral stance (1)
Return:
(list(int)): player indexes, which are neutrals
"""
return self.getPlayersByStance(1)
def enemies(self):
"""
        Get all player indexes with enemy stance (3)
Return:
(list(int)): player indexes, which are enemies
"""
return self.getPlayersByStance(3)
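# Illustrative usage (hypothetical values; not part of the original module):
#     diplo = Diplomacy()    # default stance with every player is enemy (3)
#     diplo[2] = 0           # ally with player 2
#     diplo[5] = 1           # neutral with player 5
#     diplo.allies()         # -> [2]
#     diplo.neutrals()       # -> [5]
#     diplo.enemies()        # -> all remaining player indexes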
|
nilq/baby-python
|
python
|
from portfolyo import dev, testing
from portfolyo.tools import frames
from numpy import nan
import portfolyo as pf
import numpy as np
import pandas as pd
import pytest
@pytest.mark.parametrize("series_or_df", ["series", "df"])
@pytest.mark.parametrize("bound", ["right", "left"])
@pytest.mark.parametrize(
("in_vals_num_specialconditions", "start"), # normal, WT->ST, ST->WT
[(96, "2020-03-01"), (92, "2020-03-29"), (100, "2020-10-25")],
)
@pytest.mark.parametrize("in_aware", [True, False])
@pytest.mark.parametrize("in_tz", ["Europe/Berlin", "Asia/Kolkata"])
@pytest.mark.parametrize("force", ["agnostic", "aware"])
@pytest.mark.parametrize("freq", ["15T", "D"])
def test_standardize(
in_vals_num_specialconditions: int,
start: str,
bound: str,
in_aware: bool,
in_tz: str,
series_or_df: str,
force: str,
freq: str,
):
"""Test if series and dataframes are correctly standardized to tz-aware, for
    quarter-hour timeseries with/without DST."""
if not in_aware and in_tz != "Europe/Berlin":
return # cannot convert tz-naive fr to different timezone
if freq == "D":
in_vals_num = 200
elif force == "agnostic" and in_tz != "Europe/Berlin":
in_vals_num = 96
else:
in_vals_num = in_vals_num_specialconditions
in_vals = np.random.random(in_vals_num)
# Prepare expected output frame.
out_tz = "Europe/Berlin" if force == "aware" else None
if force == "aware" or freq == "D":
out_vals = in_vals
else: # always return 96 values
a, b = (12, -84) if in_vals_num_specialconditions == 100 else (8, -88)
out_vals = [*in_vals[:a], *in_vals[b:]]
iout = pd.date_range(start, freq=freq, periods=len(out_vals), tz=out_tz)
expected = pd.Series(out_vals, iout.rename("ts_left"))
if series_or_df == "df":
expected = pd.DataFrame({"a": expected})
# Prepare input frame.
if force == "aware":
out_tz = "Europe/Berlin"
else:
out_tz = in_tz
iin = pd.date_range(start, freq=freq, periods=len(in_vals), tz=out_tz)
if out_tz != in_tz and freq == "D":
return # cannot test because not at day boundary.
iin = iin.tz_convert(in_tz).rename("the_time_stamp")
if not in_aware:
iin = iin.tz_localize(None)
if bound == "right":
td = pd.Timedelta(hours=24 if freq == "D" else 0.25)
iin = pd.DatetimeIndex([*iin[1:], iin[-1] + td])
kw = {"bound": bound, "floating": False, "tz": out_tz}
# Do actual tests.
if isinstance(expected, pd.Series):
# 1: Using expected frame: should stay the same.
result = frames.standardize(expected, force)
pd.testing.assert_series_equal(result, expected)
# 2: Series.
result = frames.standardize(pd.Series(in_vals, iin), force, **kw)
pd.testing.assert_series_equal(result, expected)
else:
# 1: Using expected frame: should stay the same.
result = frames.standardize(expected, force)
pd.testing.assert_frame_equal(result, expected)
# 2: Dataframe with index.
result = frames.standardize(pd.DataFrame({"a": in_vals}, iin), force, **kw)
pd.testing.assert_frame_equal(result, expected)
# 3: Dataframe with column that must become index.
result = frames.standardize(
pd.DataFrame({"a": in_vals, "t": iin}), force, index_col="t", **kw
)
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.parametrize("series_or_df", ["series", "df"])
@pytest.mark.parametrize("removesome", [0, 1, 2]) # 0=none, 1=from end, 2=from middle
@pytest.mark.parametrize("tz", [None, "Europe/Berlin", "Asia/Kolkata"])
@pytest.mark.parametrize("floating", [True, False])
@pytest.mark.parametrize("bound", ["left", "right"])
@pytest.mark.parametrize("freq", [*pf.FREQUENCIES, "Q", "30T", "M", "AS-FEB"])
def test_standardize_aware_error(freq, tz, removesome, floating, series_or_df, bound):
"""Test raising errors on incorrect frequencies or indices with gaps."""
must_raise = False
# Get index.
while True:
i = dev.get_index(freq, tz)
if len(i) > 10:
break
# If no timezone specified and below-daily values, the created index will have too few/many datapoints.
if not tz and pf.freq_up_or_down(freq, "D") > 1:
return # don't check this edge case
if tz == "Asia/Kolkata" and pf.freq_shortest(freq, "H") == "H" and not floating:
# Kolkata and Berlin timezone only share 15T-boundaries. Therefore, any other
# frequency should raise an error.
must_raise = True
# remove timestamp from index.
if removesome == 1: # remove one from end or start
i = i.delete(np.random.choice([0, len(i) - 1]))
elif removesome == 2: # remove max 3 from middle
i = i.delete(np.random.randint(2, len(i) - 2, 3))
must_raise = True
# Add values.
if series_or_df == "series":
fr = dev.get_series(i)
else:
fr = dev.get_dataframe(i)
# See if error is raised.
if freq not in pf.FREQUENCIES or must_raise:
with pytest.raises(ValueError):
_ = frames.standardize(fr, "aware", bound, floating=floating)
return
result = frames.standardize(fr, "aware", bound, floating=floating)
assert result.index.freq == freq
@pytest.mark.parametrize(
("values", "maxgap", "gapvalues"),
[
([1, 2, 3, 4, 25, 7, 8], 1, []),
([1, 2, 3, 4, nan, 7, 8], 1, [5.5]),
([1, 2, 3, 4, nan, 7, 8], 2, [5.5]),
([1, 2, 3, 4, nan, 7, 8], 3, [5.5]),
([3, 2, 1, nan, nan, 7, 8], 1, [nan, nan]),
([3, 2, 1, nan, nan, 7, 8], 2, [3, 5]),
([3, 2, 1, nan, nan, 7, 8], 3, [3, 5]),
([nan, 2, 1, nan, nan, 7, nan], 1, [nan, nan, nan, nan]),
([nan, 2, 1, nan, nan, 7, nan], 2, [nan, 3, 5, nan]),
],
)
@pytest.mark.parametrize(
("index", "tol"),
[
(range(7), 0),
(range(-3, 4), 0),
(pd.date_range("2020", periods=7, freq="D"), 0),
(pd.date_range("2020", periods=7, freq="M", tz="Europe/Berlin"), 0.04),
],
)
def test_fill_gaps(values, index, maxgap, gapvalues, tol):
"""Test if gaps are correctly interpolated."""
# Test as Series.
s = pd.Series(values, index)
s_new = frames.fill_gaps(s, maxgap)
s[s.isna()] = gapvalues
pd.testing.assert_series_equal(s_new, s, rtol=tol)
# Test as DataFrame.
df = pd.DataFrame({"a": values}, index)
df_new = frames.fill_gaps(df, maxgap)
df[df.isna()] = gapvalues
pd.testing.assert_frame_equal(df_new, df, rtol=tol)
@pytest.mark.parametrize(
("df_columns", "header", "expected_columns"),
[
(["A"], "B", [("B", "A")]),
(["A1", "A2"], "B", [("B", "A1"), ("B", "A2")]),
(pd.MultiIndex.from_tuples([("B", "A")]), "C", [("C", "B", "A")]),
(
pd.MultiIndex.from_product([["B"], ["A1", "A2"]]),
"C",
[("C", "B", "A1"), ("C", "B", "A2")],
),
(
pd.MultiIndex.from_tuples([("B1", "A1"), ("B2", "A2")]),
"C",
[("C", "B1", "A1"), ("C", "B2", "A2")],
),
],
)
def test_addheader_tocolumns(df_columns, header, expected_columns):
"""Test if header can be added to the columns of a dataframe."""
i = dev.get_index()
df_in = pd.DataFrame(np.random.rand(len(i), len(df_columns)), i, df_columns)
result_columns = frames.add_header(df_in, header).columns.to_list()
assert np.array_equal(result_columns, expected_columns)
# TODO: put in ... fixture (?)
test_index_D = dev.get_index("D")
test_index_D_deconstructed = test_index_D.map(lambda ts: (ts.year, ts.month, ts.day))
test_index_H = dev.get_index("H")
test_index_H_deconstructed = test_index_H.map(lambda ts: (ts.year, ts.month, ts.day))
@pytest.mark.parametrize(
("df_index", "header", "expected_index"),
[
(test_index_D, "test", [("test", i) for i in test_index_D]),
(
test_index_D_deconstructed,
"test",
[("test", *i) for i in test_index_D_deconstructed],
),
(test_index_H, "test", [("test", i) for i in test_index_H]),
(
test_index_H_deconstructed,
"test",
[("test", *i) for i in test_index_H_deconstructed],
),
],
)
def test_addheader_torows(df_index, header, expected_index):
"""Test if header can be added to the rows of a dataframe."""
df_in = pd.DataFrame(np.random.rand(len(df_index), 2), df_index, ["A", "B"])
result_index = frames.add_header(df_in, header, axis=0).index.to_list()
assert np.array_equal(result_index, expected_index)
# TODO: put in ... fixture (?)
test_values = np.random.rand(len(test_index_D), 10)
test_df_1 = pd.DataFrame(test_values[:, :2], test_index_D, ["A", "B"])
test_df_2 = pd.DataFrame(test_values[:, 2], test_index_D, ["C"])
expect_concat_12 = pd.DataFrame(test_values[:, :3], test_index_D, ["A", "B", "C"])
test_df_3 = pd.DataFrame(test_values[:, 2], test_index_D, pd.Index([("D", "C")]))
expect_concat_13 = pd.DataFrame(
test_values[:, :3], test_index_D, pd.Index([("A", ""), ("B", ""), ("D", "C")])
)
@pytest.mark.parametrize(
("dfs", "axis", "expected"),
[
([test_df_1, test_df_2], 1, expect_concat_12),
([test_df_1, test_df_3], 1, expect_concat_13),
],
)
def test_concat(dfs, axis, expected):
"""Test if concatenation works as expected."""
result = frames.concat(dfs, axis)
testing.assert_frame_equal(result, expected)
@pytest.mark.parametrize("weightsas", ["none", "list", "series"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasseries1(weightsas, axis):
"""Test if weighted average of a series is correctly calculated."""
values = pd.Series([100, 200, 300, -150])
weights = [10, 10, 10, 20]
if weightsas == "none":
weights = None
expected_result = 112.5
elif weightsas == "list":
expected_result = 60
elif weightsas == "series":
weights = pd.Series(weights, index=[3, 2, 1, 0]) # align by index
expected_result = 110
assert np.isclose(frames.wavg(values, weights, axis), expected_result)
@pytest.mark.parametrize("weightsas", ["list", "series"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasseries2(weightsas, axis):
"""Test if weighted average of a series is correctly calculated."""
values = pd.Series([100, 200, 300, -150])
weights = [10, 0, 10, 20]
if weightsas == "list":
expected_result = 25
elif weightsas == "series":
weights = pd.Series(weights, index=[3, 2, 1, 0]) # align by index
expected_result = 62.5
assert np.isclose(frames.wavg(values, weights, axis), expected_result)
@pytest.mark.parametrize("weightsas", ["list", "series"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasseries_na(weightsas, axis):
"""Test if weighted average of a series is correctly identified as error,
when all weights are 0 but not all values are equal."""
values = pd.Series([100, 200, 300, -150])
weights = [0, 0, 0, 0]
if weightsas == "series":
weights = pd.Series(weights, index=[3, 2, 1, 0]) # align by index
assert np.isnan(frames.wavg(values, weights, axis))
@pytest.mark.parametrize("weightsas", ["list", "series"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasseries_0weights(weightsas, axis):
"""Test if weighted average of a series is correctly calculated,
when all weights are 0 and all values are equal."""
values = pd.Series([100, 100, 100, 100])
weights = [0, 0, 0, 0]
if weightsas == "series":
weights = pd.Series(weights, index=[3, 2, 1, 0]) # align by index
assert frames.wavg(values, weights, axis) == 100
@pytest.mark.parametrize("weightsas", ["none", "list", "series", "dataframe"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasdataframe1(weightsas, axis):
"""Test if weighted average of a dataframe is correctly calculated."""
values = pd.DataFrame({"a": [100, 200, 300, -150], "b": [100, -200, 300, -150]})
if weightsas == "none":
weights = None
if axis == 0:
expected_result = pd.Series({"a": 112.5, "b": 12.5})
else:
expected_result = pd.Series([100, 0, 300, -150])
if weightsas == "list":
if axis == 0:
weights = [10, 10, 10, 20]
expected_result = pd.Series({"a": 60, "b": -20})
else:
weights = [10, 30]
expected_result = pd.Series([100, -100, 300, -150])
if weightsas == "series":
if axis == 0:
weights = pd.Series([10, 10, 10, 20], index=[3, 2, 1, 0])
expected_result = pd.Series({"a": 110, "b": 30})
else:
weights = pd.Series({"b": 30, "a": 10})
expected_result = pd.Series([100, -100, 300, -150])
if weightsas == "dataframe":
weights = pd.DataFrame({"a": [10, 10, 10, 20], "b": [10, 10, 30, 0]})
if axis == 0:
expected_result = pd.Series({"a": 60, "b": 160})
else:
expected_result = pd.Series([100, 0, 300, -150])
pd.testing.assert_series_equal(
frames.wavg(values, weights, axis), expected_result, check_dtype=False
)
@pytest.mark.parametrize("weightsas", ["list", "series", "dataframe"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasdataframe2(weightsas, axis):
"""Test if weighted average of a dataframe is correctly calculated."""
values = pd.DataFrame({"a": [100, 200, 200, -150], "b": [100, -200, 300, -150]})
if weightsas == "list":
if axis == 0:
weights = [10, 10, 0, 20]
expected_result = pd.Series({"a": 0, "b": -100})
else:
weights = [10, 0]
expected_result = pd.Series([100, 200, 200, -150])
if weightsas == "series":
if axis == 0:
weights = pd.Series([10, 10, 0, 20], index=[3, 2, 1, 0])
expected_result = pd.Series({"a": 62.5, "b": 87.5})
else:
weights = pd.Series({"b": 0, "a": 10})
expected_result = pd.Series([100, 200, 200, -150])
if weightsas == "dataframe":
weights = pd.DataFrame({"a": [10, 10, 0, 20], "b": [10, 10, 30, 0]})
if axis == 0:
expected_result = pd.Series({"a": 0, "b": 160})
else:
expected_result = pd.Series([100, 0, 300, -150])
pd.testing.assert_series_equal(
frames.wavg(values, weights, axis), expected_result, check_dtype=False
)
@pytest.mark.parametrize("weightsas", ["list", "series", "dataframe"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasdataframe_na(weightsas, axis):
"""Test if weighted average of a dataframe is correctly is correctly identified as error,
when all weights are 0 but not all values are equal."""
values = pd.DataFrame({"a": [130, 200, 200, -160], "b": [100, -200, 300, -150]})
if axis == 0:
weights = [0, 0, 0, 0]
expected_result = pd.Series({"a": np.nan, "b": np.nan})
else:
weights = [0, 0]
expected_result = pd.Series([np.nan, np.nan, np.nan, np.nan])
if weightsas == "series":
if axis == 0:
weights = pd.Series(weights, index=[3, 2, 1, 0])
else:
weights = pd.Series(weights, index=["a", "b"])
if weightsas == "dataframe":
weights = pd.DataFrame({"a": [0, 0, 0, 0], "b": [0, 0, 0, 0]})
pd.testing.assert_series_equal(
frames.wavg(values, weights, axis), expected_result, check_dtype=False
)
@pytest.mark.parametrize("weightsas", ["list", "series", "dataframe"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasdataframe_0weights(weightsas, axis):
"""Test if weighted average of a dataframe is correctly is correctly identified as error,
when all weights are 0. Some averages are calculated from identical values and should
result in that value."""
values = pd.DataFrame({"a": [100, 200, 200, -150], "b": [100, -200, 300, -150]})
if axis == 0:
weights = [0, 0, 0, 0]
expected_result = pd.Series({"a": np.nan, "b": np.nan})
else:
weights = [0, 0]
expected_result = pd.Series([100, np.nan, np.nan, -150])
if weightsas == "series":
if axis == 0:
weights = pd.Series(weights, index=[3, 2, 1, 0])
else:
weights = pd.Series(weights, index=["a", "b"])
if weightsas == "dataframe":
weights = pd.DataFrame({"a": [0, 0, 0, 0], "b": [0, 0, 0, 0]})
pd.testing.assert_series_equal(
frames.wavg(values, weights, axis), expected_result, check_dtype=False
)
vals1 = np.array([1, 2.0, -4.1234, 0])
vals2 = np.array([1, 2.0, -4.1234, 0.5])
@pytest.mark.parametrize(
("s1", "s2", "expected_result"),
[
(pd.Series(vals1), pd.Series(vals1), True),
(pd.Series(vals1), pd.Series(vals2), False),
(pd.Series(vals1), pd.Series(vals1, dtype="pint[MW]"), False),
(pd.Series(vals1).astype("pint[MW]"), pd.Series(vals1, dtype="pint[MW]"), True),
(
pd.Series(vals1 * 1000).astype("pint[kW]"),
pd.Series(vals1, dtype="pint[MW]"),
True,
),
(
pd.Series(vals1 * 1000).astype("pint[MW]"),
pd.Series(vals1, dtype="pint[MW]"),
False,
),
],
)
def test_series_allclose(s1, s2, expected_result):
"""Test if series can be correctly compared, even if they have a unit."""
assert frames.series_allclose(s1, s2) == expected_result
|
nilq/baby-python
|
python
|
import sublime
import traceback
from ui.read import settings as read_settings
from ui.write import write, highlight as write_highlight
from lookup import file_type as lookup_file_type
from ui.read import x as ui_read
from ui.read import spots as read_spots
from ui.read import regions as ui_regions
from core.read import read as core_read
from structs.general_thread import *
from structs.thread_handler import *
from structs.highlight_list import *
from structs.flag_region import *
from core.analyse import analyse
def flags():
return [
FlagRegion('bolt.incorrect', 'comment', 'light_x', 0),
FlagRegion('bolt.missing', 'string', 'arrow_right', 0),
FlagRegion('bolt.unused', 'comment', 'dot', sublime.DRAW_OUTLINED),
FlagRegion('bolt.wrong_module', 'comment', 'light_x', 0)
]
def highlight_setting():
return 'bolt.live.highlight'
def rate_setting():
return 'bolt.live.highlight.rate'
def is_enabled():
settings = read_settings.load_settings()
return settings.get(highlight_setting(), False)
def get_rate():
settings = read_settings.load_settings()
return settings.get(rate_setting(), 1000)
def set_enabled(state):
settings = read_settings.load_settings()
settings.set(highlight_setting(), state)
write.save_settings()
def toggle(view):
def noop(v):
return True
handler = ThreadHandler(noop, noop, noop)
prev = is_enabled()
current = not prev
if (current):
run(view, handler)
else:
clear(view)
set_enabled(current)
def run(view, handler):
valid = lookup_file_type.is_bolt_module(view)
if not valid:
        open_file = view.file_name() if view.file_name() is not None else '-- no view'
        print 'View is not a bolt module: ' + open_file
handler.cancel()
else:
read_view = ui_read.all(view)
spots = read_spots.spots(view)
plasmas = core_read.plasmas(read_view.ptext)
def update_ui(highlights, module_wrong):
def run():
regions = write_highlight.regions(view, highlights)
module_region = [ui_regions.module_name(view)] if module_wrong else []
flag_info = zip(flags(), [regions.incorrect, regions.missing, regions.unused, module_region])
def highlight_flag(x):
if len(x[1]) > 0:
                        write_highlight.highlight(view, x[1], x[0])
else:
write_highlight.remove_highlight(view, x[0])
map(highlight_flag, flag_info)
sublime.set_timeout(run, 0)
thread = GeneralThread(_highlighter(read_view, spots, plasmas, update_ui), handler.success, handler.failure)
sublime.set_timeout(thread.start, 0)
handler.init(thread)
def clear(view):
def run():
write_highlight.remove_highlights(view, flags())
sublime.set_timeout(run, 0)
def _highlighter(read_view, spots, plasmas, callback):
def r():
try:
highlights = analyse.all(read_view.base, read_view.nests, plasmas, spots, read_view.external)
module_wrong = analyse.module_wrong(read_view)
callback(highlights, module_wrong)
except Exception as exc:
print "Error during identifying highlighted regions: " + str(exc)
traceback.print_exc(limit=10)
callback(HighlightLists([], [], []), False)
return r
|
nilq/baby-python
|
python
|
import random, time, torch
import numpy as np
from nlplingo.oregon.event_models.uoregon.define_opt import opt
from nlplingo.oregon.event_models.uoregon.tools.utils import *
from nlplingo.oregon.event_models.uoregon.models.pipeline._01.readers import read_abstract_train_data
from nlplingo.oregon.event_models.uoregon.models.pipeline._01.trainers import *
opt['train_on_arb'] = 1
opt['train_strategy'] = 'retrain.add-all'
opt['initialize_with_pretrained'] = 1
opt['finetune_on_arb'] = 1
opt['observed_train'] = 'datasets/8d/update2/arabic-wa-corpus.bp.json'
opt['dev_file'] = 'datasets/8d/update2/arabic-abstract-sample.bp.json'
assert opt['co_train_lambda'] == 0
assert opt['input_lang'] == 'arabic'
""" opt:
ED_eval_epoch : 0
argument_eval_epoch : 0
bad_threshold : 0.4
batch_size : 16
biw2v_map_dir : resources/aligned_w2v
biw2v_size : 354186
biw2v_vecs : [[ 0.00000000e+00 0.00000000e+00 0.00000000e+00 ... 0.00000000e+00
0.00000000e+00 0.00000000e+00]
[ 8.26119033e-01 3.68800311e-01 8.69561242e-01 ... 2.70505650e-01
2.05427664e-01 2.01526267e-01]
[ 1.33400000e-03 1.47300000e-03 -1.27700000e-03 ... -4.37000000e-04
-5.52000000e-04 1.02400000e-03]
...
[-1.15833000e-01 -8.17270000e-02 -5.58370000e-02 ... -1.59482000e-01
-3.43660000e-02 6.65400000e-03]
[-3.82970000e-02 -5.19210000e-02 -7.23600000e-02 ... -1.40313000e-01
1.73640000e-02 1.28790000e-02]
[-1.11085000e-01 -4.86380000e-02 -8.37620000e-02 ... -1.55592000e-01
6.28500000e-03 3.66210000e-02]]
ckpt_dir : checkpoints
co_train_lambda : 0
context_layer : lstm
cross_valid :
data : abstract
data_map : None
datapoint_dir : datapoints
delete_nonbest_ckpts : 1
deprel_dim : 30
dev_file : datasets/8d/update2/arabic-abstract-sample.bp.json
device : cuda
dist_dim : 30
do_exp : default
docker_run : 0
dropout_xlmr : 0.1
edge_lambda : 0.1
ensemble_mode : 0
ensemble_seeds : ['seed-2021', 'seed-2022', 'seed-2023', 'seed-2024', 'seed-2025']
finetune_biw2v : 0
finetune_on_arb : 1
finetune_xlmr : 1
finetuned_xlmr_layers : ['xlmr_embedding.model.decoder.sentence_encoder.embed_tokens',
'xlmr_embedding.model.decoder.sentence_encoder.embed_positions', 'self_att.attention_layers', 'gcn_layer',
'biw2v_embedding', 'xlmr_embedding.model.decoder.sentence_encoder.layers.0.',
'xlmr_embedding.model.decoder.sentence_encoder.layers.1.',
'xlmr_embedding.model.decoder.sentence_encoder.layers.2.',
'xlmr_embedding.model.decoder.sentence_encoder.layers.3.']
gcn_dropout : 0.5
get_perf_of_separate_models : 0
grad_clip_xlmr : 0
hidden_dim : 200
hidden_eval : 0
inhouse_eval : 0
initialize_with_pretrained : 1
input_lang : arabic
lambda_mix : 0.8
log_dir : logs
log_name : train.log.arg.arabic-wa-corpus
lr : 2e-05
lstm_add_satt : 0
lstm_by_satt : 0
lstm_layers_entity : 1
lstm_layers_event : 1
lstm_layers_trigger : 4
max_grad_norm : 5.0
mode : None
model : pipeline-01
ner_dim : 30
num_epoch : 10
num_first_xlmr_layers : 5
num_last_layer_xlmr : 1
observed_train : datasets/8d/update2/arabic-wa-corpus.bp.json
optim : adam
output_file : None
output_format : json
output_offsets : 1
params : None
position_embed_for_satt : 1
prune_tree : 0
readers_mode : 1
remove_incomplete : 0
save_last_epoch : 1
seed : 2020
self_att_d_qkv : 200
self_att_dropout : 0.1
self_att_heads : 1
self_att_layers : 6
stanford_resource_dir : resources/stanford
test_file : None
test_is_dir : False
train_ED : 0
train_argument : 1
train_file : app/train_data.bp.json
train_is_dir : True
train_on_arb : 1
train_strategy : retrain.add-all
trainer : trigger
upos_dim : 30
use_biw2v : 0
use_cased_entity : 1
use_dep2sent : 0
use_dep_edge : 0
use_elmo : 0
use_ner : 0
xlmr_model_dir : models/xlmr.base
xlmr_version : xlmr.base
xpos_dim : 30
"""
data_map = read_abstract_train_data(opt['observed_train'], opt['dev_file'])
opt['data_map'] = data_map
# ************* ED model *****************
if opt['train_ED']:
torch.autograd.set_detect_anomaly(True)
random.seed(opt['seed'])
np.random.seed(opt['seed'])
torch.manual_seed(opt['seed'])
torch.cuda.manual_seed(opt['seed'])
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
ED_trainer = EDTrainer(opt)
if opt['get_perf_of_separate_models']:
ED_trainer.eval_with_saved_model()
else:
ED_trainer.train()
# ************* argument model *****************
if opt['train_argument']:
torch.autograd.set_detect_anomaly(True)
random.seed(opt['seed'])
np.random.seed(opt['seed'])
torch.manual_seed(opt['seed'])
torch.cuda.manual_seed(opt['seed'])
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
arg_trainer = ArgumentTrainer(opt)
if opt['get_perf_of_separate_models']:
arg_trainer.eval_with_saved_model()
else:
arg_trainer.train()
if not opt['get_perf_of_separate_models']:
print('Training done!')
|
nilq/baby-python
|
python
|
from dbnd._core.cli.click_utils import _help
from dbnd._core.cli.service_auto_completer import completer
from dbnd._core.task_build.task_registry import get_task_registry
from dbnd._vendor import click
@click.command()
@click.argument("search", default="", autocompletion=completer.config())
@click.option("--module", "-m", help="Used for dynamic loading of modules")
@click.pass_context
def show_configs(ctx, module, search):
"""Show and search configurations"""
_list_tasks(ctx, module, search, is_config=True)
@click.command()
@click.argument("search", default="", autocompletion=completer.task())
@click.option("--module", "-m", help="Used for dynamic loading of modules")
@click.pass_context
def show_tasks(ctx, module, search):
"""Show and search tasks"""
_list_tasks(ctx, module, search, is_config=False)
COMMON_PARAMS = {"task_version", "task_env", "task_target_date"}
def _list_tasks(ctx, module, search, is_config):
from dbnd import Config
from dbnd._core.parameter.parameter_definition import _ParameterKind
from dbnd._core.context.databand_context import new_dbnd_context
formatter = ctx.make_formatter()
with new_dbnd_context(module=module):
tasks = get_task_registry().list_dbnd_task_classes()
for task_cls in tasks:
td = task_cls.task_definition
full_task_family = td.full_task_family
task_family = td.task_family
if not (task_family.startswith(search) or full_task_family.startswith(search)):
continue
if issubclass(task_cls, Config) != is_config:
continue
dl = []
for param_name, param_obj in td.task_param_defs.items():
if param_obj.system or param_obj.kind == _ParameterKind.task_output:
continue
if not is_config and param_name in COMMON_PARAMS:
continue
param_help = _help(param_obj.description)
dl.append((param_name, param_help))
if dl:
with formatter.section(
"{task_family} ({full_task_family})".format(
full_task_family=full_task_family, task_family=task_family
)
):
formatter.write_dl(dl)
click.echo(formatter.getvalue().rstrip("\n"))
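# Example invocations (illustrative; assumes these click commands are wired up
# as "show-configs" and "show-tasks" in the dbnd CLI entry point):
#     dbnd show-tasks --module my_project.tasks my_namespace
#     dbnd show-configs core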
|
nilq/baby-python
|
python
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The page cycler v2.
For details, see design doc:
https://docs.google.com/document/d/1EZQX-x3eEphXupiX-Hq7T4Afju5_sIdxPWYetj7ynd0
"""
from core import perf_benchmark
import page_sets
from benchmarks import loading_metrics_category
from telemetry import benchmark
from telemetry.page import cache_temperature
from telemetry.web_perf import timeline_based_measurement
class _PageCyclerV2(perf_benchmark.PerfBenchmark):
options = {'pageset_repeat': 2}
def CreateCoreTimelineBasedMeasurementOptions(self):
tbm_options = timeline_based_measurement.Options()
loading_metrics_category.AugmentOptionsForLoadingMetrics(tbm_options)
return tbm_options
@classmethod
def ShouldDisable(cls, possible_browser):
# crbug.com/619254
if possible_browser.browser_type == 'reference':
return True
# crbug.com/616781
if (cls.IsSvelte(possible_browser) or
possible_browser.platform.GetDeviceTypeName() == 'Nexus 5X' or
possible_browser.platform.GetDeviceTypeName() == 'AOSP on BullHead'):
return True
return False
@benchmark.Disabled('win10')
@benchmark.Disabled('android') # crbug.com/654217
@benchmark.Owner(emails=['kouhei@chromium.org', 'ksakamoto@chromium.org'])
class PageCyclerV2Typical25(_PageCyclerV2):
"""Page load time benchmark for a 25 typical web pages.
Designed to represent typical, not highly optimized or highly popular web
sites. Runs against pages recorded in June, 2014.
"""
@classmethod
def Name(cls):
return 'page_cycler_v2.typical_25'
def CreateStorySet(self, options):
return page_sets.Typical25PageSet(run_no_page_interactions=True,
cache_temperatures=[
cache_temperature.PCV1_COLD, cache_temperature.PCV1_WARM])
def GetExpectations(self):
return page_sets.Typical25StoryExpectations()
@benchmark.Owner(emails=['kouhei@chromium.org', 'ksakamoto@chromium.org'])
class PageCyclerV2IntlArFaHe(_PageCyclerV2):
"""Page load time for a variety of pages in Arabic, Farsi and Hebrew.
Runs against pages recorded in April, 2013.
"""
page_set = page_sets.IntlArFaHePageSet
@classmethod
def Name(cls):
return 'page_cycler_v2.intl_ar_fa_he'
def CreateStorySet(self, options):
return page_sets.IntlArFaHePageSet(cache_temperatures=[
cache_temperature.PCV1_COLD, cache_temperature.PCV1_WARM])
def GetExpectations(self):
return page_sets.IntlArFaHeStoryExpectations()
@benchmark.Owner(emails=['kouhei@chromium.org', 'ksakamoto@chromium.org'])
class PageCyclerV2IntlEsFrPtBr(_PageCyclerV2):
"""Page load time for a pages in Spanish, French and Brazilian Portuguese.
Runs against pages recorded in April, 2013.
"""
page_set = page_sets.IntlEsFrPtBrPageSet
@classmethod
def Name(cls):
return 'page_cycler_v2.intl_es_fr_pt-BR'
def CreateStorySet(self, options):
return page_sets.IntlEsFrPtBrPageSet(cache_temperatures=[
cache_temperature.PCV1_COLD, cache_temperature.PCV1_WARM])
def GetExpectations(self):
return page_sets.IntlEsFrPtBrStoryExpectations()
@benchmark.Owner(emails=['kouhei@chromium.org', 'ksakamoto@chromium.org'])
class PageCyclerV2IntlHiRu(_PageCyclerV2):
"""Page load time benchmark for a variety of pages in Hindi and Russian.
Runs against pages recorded in April, 2013.
"""
page_set = page_sets.IntlHiRuPageSet
@classmethod
def Name(cls):
return 'page_cycler_v2.intl_hi_ru'
def CreateStorySet(self, options):
return page_sets.IntlHiRuPageSet(cache_temperatures=[
cache_temperature.PCV1_COLD, cache_temperature.PCV1_WARM])
def GetExpectations(self):
return page_sets.IntlHiRuStoryExpectations()
@benchmark.Disabled('android') # crbug.com/666898
@benchmark.Owner(emails=['kouhei@chromium.org', 'ksakamoto@chromium.org'])
class PageCyclerV2IntlJaZh(_PageCyclerV2):
"""Page load time benchmark for a variety of pages in Japanese and Chinese.
Runs against pages recorded in April, 2013.
"""
@classmethod
def Name(cls):
return 'page_cycler_v2.intl_ja_zh'
def CreateStorySet(self, options):
return page_sets.IntlJaZhPageSet(cache_temperatures=[
cache_temperature.PCV1_COLD, cache_temperature.PCV1_WARM])
def GetExpectations(self):
return page_sets.IntlJaZhStoryExpectations()
@benchmark.Owner(emails=['kouhei@chromium.org', 'ksakamoto@chromium.org'])
class PageCyclerV2IntlKoThVi(_PageCyclerV2):
"""Page load time for a variety of pages in Korean, Thai and Vietnamese.
Runs against pages recorded in April, 2013.
"""
page_set = page_sets.IntlKoThViPageSet
@classmethod
def Name(cls):
return 'page_cycler_v2.intl_ko_th_vi'
def CreateStorySet(self, options):
return page_sets.IntlKoThViPageSet(cache_temperatures=[
cache_temperature.PCV1_COLD, cache_temperature.PCV1_WARM])
def GetExpectations(self):
return page_sets.IntlKoThViStoryExpectations()
@benchmark.Enabled('android')
@benchmark.Owner(emails=['kouhei@chromium.org', 'ksakamoto@chromium.org'])
class PageCyclerV2Top10Mobile(_PageCyclerV2):
"""Page load time benchmark for the top 10 mobile web pages.
Runs against pages recorded in November, 2013.
"""
@classmethod
def Name(cls):
return 'page_cycler_v2.top_10_mobile'
def CreateStorySet(self, options):
return page_sets.Top10MobilePageSet(cache_temperatures=[
cache_temperature.PCV1_COLD, cache_temperature.PCV1_WARM])
def GetExpectations(self):
return page_sets.Top10MobileStoryExpectations()
|
nilq/baby-python
|
python
|
from typing import List
from machine.params import Parameters
from machine.plugin import Plugin
from machine.connection import Connection
from machine.plugin import PluginGenerator, PluginResult
class Sequence(Plugin):
def __init__(self, plugins: List[PluginGenerator]):
assert len(plugins) > 0, "Sequence cannot be empty!"
self._plugins = plugins
async def __call__(
self, conn: Connection, params: Parameters
) -> PluginResult:
applied_plugins: List[PluginResult] = []
try:
for plugin_gen in self._plugins:
plugin = plugin_gen()(conn, params)
conn, params = await plugin.__anext__()
applied_plugins.append(plugin)
yield conn, params
for plugin in reversed(applied_plugins):
try:
await plugin.__anext__()
except StopAsyncIteration:
continue
return
except Exception as exception:
error = exception
while len(applied_plugins) > 0:
try:
plugin = applied_plugins.pop(-1)
await plugin.athrow(error)
break
except Exception as e:
error = e
def sequence(plugins: List[PluginGenerator]) -> PluginGenerator:
return lambda: Sequence(plugins)
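# Minimal usage sketch (hypothetical plugin; not part of the original module).
# Each plugin is an async generator that yields (conn, params) once on the way
# in and resumes during unwinding, which is what Sequence relies on above:
#
#     class Logger(Plugin):
#         async def __call__(self, conn: Connection, params: Parameters) -> PluginResult:
#             print("entering")
#             yield conn, params   # hand control to the remaining plugins
#             print("unwinding")   # runs when Sequence resumes the generator
#
#     pipeline = sequence([lambda: Logger(), lambda: Logger()])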
|
nilq/baby-python
|
python
|
# You are given N pairs of numbers.
# In every pair, the first number is always smaller than the second number.
# A pair (c, d) can follow another pair (a, b) if b < c.
# Chain of pairs can be formed in this fashion.
# You have to find the longest chain which can be formed from the given set of pairs.
dp = [[0] * 100 for _ in range(100)]  # [[0]*100]*100 would alias a single row 100 times
def maxchain(Parr, n, prev, pos):
if pos >= n:
return 0
if dp[pos][prev]:
return dp[pos][prev]
    if Parr[pos][0] <= prev:  # pair (c, d) can follow (a, b) only if b < c
return maxchain(Parr, n, prev, pos+1)
else:
ans = max(maxchain(Parr, n, Parr[pos][1],
0)+1, maxchain(Parr, n, prev, pos+1))
dp[pos][prev] = ans
return ans
print(maxchain([[1, 2], [2, 3], [3, 4]], 3, 0, 0))
def findLongestChain(pairs):
pairs.sort()
dp = [1] * len(pairs)
for j in range(len(pairs)):
for i in range(j):
if pairs[i][1] < pairs[j][0]:
dp[j] = max(dp[j], dp[i] + 1)
return max(dp)
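# Illustrative check of the bottom-up version (example values chosen here, not
# from the original): the longest chain in [[1, 2], [2, 3], [3, 4]] is
# (1, 2) -> (3, 4), because a pair may only follow one whose second number is smaller.
print(findLongestChain([[1, 2], [2, 3], [3, 4]]))  # expected: 2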
|
nilq/baby-python
|
python
|
""" This is the geo_mean atom.
geo_mean(x,y) = sqrt(x * y)
If either x or y is a vector, the atom is applied elementwise.
    It is a CONCAVE atom. It is INCREASING in the first argument, and
    INCREASING in the second argument.
    It returns a SCALAR expression if both arguments are SCALAR.
    Otherwise, it returns a VECTOR expression (sized to match the largest
    argument).
In every module, you must have defined two functions:
attributes :: [arg] -> (sign, vexity, shape)
rewrite :: [arg] -> Program
"""
import atom
from utils import *
class QC_geo_mean(atom.Atom):
def __init__(self, x, y):
super(QC_geo_mean, self).__init__(x,y)
def _monotonicity(self):
return [monotonicity.increasing, monotonicity.increasing]
def _curvature(self):
return curvature.Concave()
def _sign(self):
return sign.Positive()
def _shape(self):
return self.args[0].shape + self.args[1].shape
def _canonicalize(self):
v = Variable('', self.shape)
x, y = self.args
constraints = [
SOCProd(x + y, [y - x, Number(2.0)*v]),
y >= Number(0),
x >= Number(0)
]
return (v, constraints)
# register with the atom library
atom.atoms['geo_mean'] = QC_geo_mean
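# Illustrative numeric check of the SOC rewrite above (a sketch, not part of
# the original atom library): geo_mean(x, y) >= t  iff  ||(y - x, 2t)||_2 <= x + y
# for x, y >= 0, with equality at t = sqrt(x * y).
if __name__ == '__main__':
    import math
    x_val, y_val = 1.0, 4.0                   # hypothetical sample point
    t = math.sqrt(x_val * y_val)              # geo_mean(1, 4) = 2
    lhs = math.hypot(y_val - x_val, 2.0 * t)  # ||(3, 4)||_2 = 5
    assert abs(lhs - (x_val + y_val)) < 1e-9  # x + y = 5: the cone is tight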
|
nilq/baby-python
|
python
|
import torch
__TESTS__ = { }
def get_tests():
for k, v in __TESTS__.items():
yield k, v
def register(test):
name = getattr(test, "__name__", test.__class__.__name__)
if name in __TESTS__:
raise RuntimeError(f'Encountered a test name collision "{name}"')
__TESTS__[name] = test
return test
@register
def test1():
unaligned = torch.Tensor([
[-0.9743, -0.2491],
[-0.5582, -0.1589],
[-0.2159, -0.1677],
[ 0.1593, -0.3002],
[-0.9743, 0.2714],
[-0.5582, 0.1491],
[-0.2159, 0.1377],
[ 0.1593, 0.2662]
])
return torch.chunk(unaligned.unsqueeze(0), 2, dim=-2)
@register
def test2():
unaligned = torch.Tensor([
[-0.9791, -0.3541],
[-0.5517, -0.1809],
[-0.2686, -0.1822],
[ 0.2237, -0.4052],
[-0.9791, 0.2998],
[-0.5517, 0.1574],
[-0.2686, 0.1590],
[ 0.2237, 0.3572]
])
return torch.chunk(unaligned.unsqueeze(0), 2, dim=-2)
@register
def test3():
unaligned = torch.Tensor([
[-0.9882, -0.0736],
[-0.7437, -0.0662],
[-0.2522, -0.0995],
[ 0.2172, -0.1664],
[-0.9882, 0.2589],
[-0.7437, 0.1070],
[-0.2522, 0.1835],
[ 0.2172, 0.2766]
])
return torch.chunk(unaligned.unsqueeze(0), 2, dim=-2)
@register
def test4():
unaligned = torch.Tensor([
[-0.9846, -0.5418],
[ 0.0205, -0.2194],
[ 0.2252, -0.2062],
[ 0.5488, -0.2653],
[-0.9846, 0.4780],
[ 0.0205, 0.2186],
[ 0.2252, 0.1963],
[ 0.5488, 0.1743]
])
return torch.chunk(unaligned.unsqueeze(0), 2, dim=-2)
@register
def test5():
unaligned = torch.Tensor([
[-0.9825, -0.1939],
[-0.7482, -0.1596],
[-0.1407, -0.2930],
[ 0.2218, -0.4294],
[-0.9825, 0.1199],
[-0.7482, 0.1063],
[-0.1407, 0.1077],
[ 0.2218, 0.1338]
])
return torch.chunk(unaligned.unsqueeze(0), 2, dim=-2)
@register
def test6():
unaligned = torch.Tensor([
[-0.9225, -0.2832],
[-0.7755, -0.2755],
[-0.1321, -0.4738],
[ 0.2905, -0.6044],
[-0.9225, 0.2201],
[-0.7755, 0.2172],
[-0.1321, 0.4091],
[ 0.2905, 0.5314]
])
return torch.chunk(unaligned.unsqueeze(0), 2, dim=-2)
@register
def test7():
unaligned = torch.Tensor([
[-0.9369, -0.4814],
[-0.4945, -0.1954],
[-0.3008, -0.2248],
[ 0.2783, -0.4565],
[-0.9369, 0.4577],
[-0.4945, 0.1532],
[-0.3008, 0.1621],
[ 0.2783, 0.3577]
])
return torch.chunk(unaligned.unsqueeze(0), 2, dim=-2)
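# Illustrative consumer of the registry above (hypothetical driver code):
#     for name, test in get_tests():
#         left, right = test()                 # two chunks of the unaligned points
#         print(name, left.shape, right.shape) # each chunk has shape (1, 4, 2)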
|
nilq/baby-python
|
python
|
from os import path
from glob import glob
from cStringIO import StringIO
import numpy as np
import h5py
from scipy.sparse.csgraph import connected_components
from scipy.sparse import csr_matrix
from util import sort_nicely, veclen, filter_reindex
def convert_sequence_to_hdf5(filename_pattern, loader_function, hdf_output_file):
verts_all = []
tris = None
files = glob(path.expanduser(filename_pattern))
sort_nicely(files)
for i, f in enumerate(files):
print "loading file %d/%d [%s]" % (i+1, len(files), f)
verts, new_tris = loader_function(f)
        if tris is not None and (new_tris.shape != tris.shape or np.any(new_tris != tris)):
raise ValueError, "inconsistent topology between meshes of different frames"
tris = new_tris
verts_all.append(verts)
verts_all = np.array(verts_all, np.float32)
verts_all, tris, _, verts_mean, verts_scale = preprocess_mesh_animation(verts_all, tris)
with h5py.File(hdf_output_file, 'w') as f:
f.create_dataset('verts', data=verts_all, compression='gzip')
f['tris'] = tris
f.attrs['mean'] = verts_mean
f.attrs['scale'] = verts_scale
print "saved as %s" % hdf_output_file
def preprocess_mesh_animation(verts, tris):
"""
Preprocess the mesh animation:
- removes zero-area triangles
- keep only the biggest connected component in the mesh
- normalize animation into -0.5 ... 0.5 cube
"""
print "Vertices: ", verts.shape
print "Triangles: ", verts.shape
assert verts.ndim == 3
assert tris.ndim == 2
# check for zero-area triangles and filter
e1 = verts[0, tris[:,1]] - verts[0, tris[:,0]]
e2 = verts[0, tris[:,2]] - verts[0, tris[:,0]]
n = np.cross(e1, e2)
tris = tris[veclen(n) > 1.e-8]
# remove unconnected vertices
ij = np.r_[np.c_[tris[:,0], tris[:,1]],
np.c_[tris[:,0], tris[:,2]],
np.c_[tris[:,1], tris[:,2]]]
G = csr_matrix((np.ones(len(ij)), ij.T), shape=(verts.shape[1], verts.shape[1]))
n_components, labels = connected_components(G, directed=False)
if n_components > 1:
size_components = np.bincount(labels)
if len(size_components) > 1:
print "[warning] found %d connected components in the mesh, keeping only the biggest one" % n_components
print "component sizes: "
print size_components
keep_vert = labels == size_components.argmax()
else:
keep_vert = np.ones(verts.shape[1], np.bool)
verts = verts[:, keep_vert, :]
tris = filter_reindex(keep_vert, tris[keep_vert[tris].all(axis=1)])
# normalize triangles to -0.5...0.5 cube
verts_mean = verts.mean(axis=0).mean(axis=0)
verts -= verts_mean
verts_scale = np.abs(verts.ptp(axis=1)).max()
verts /= verts_scale
print "after preprocessing:"
print "Vertices: ", verts.shape
print "Triangles: ", verts.shape
return verts, tris, ~keep_vert, verts_mean, verts_scale
def load_ply(filename):
try:
from enthought.tvtk.api import tvtk
except ImportError:
try:
from tvtk.api import tvtk
except ImportError:
print "Reading PLY files requires TVTK. The easiest way is to install mayavi2"
print "(e.g. on Ubuntu: apt-get install mayavi2)"
raise
reader = tvtk.PLYReader(file_name=filename)
reader.update()
polys = reader.output.polys.to_array().reshape((-1, 4))
assert np.all(polys[:,0] == 3)
return reader.output.points.to_array(), polys[:,1:]
def load_off(filename, no_colors=False):
lines = open(filename).readlines()
lines = [line for line in lines if line.strip() != '' and line[0] != '#']
assert lines[0].strip() in ['OFF', 'COFF'], 'OFF header missing'
has_colors = lines[0].strip() == 'COFF'
n_verts, n_faces, _ = map(int, lines[1].split())
vertex_data = np.loadtxt(
StringIO(''.join(lines[2:2 + n_verts])),
dtype=np.float)
if n_faces > 0:
faces = np.loadtxt(StringIO(''.join(lines[2+n_verts:])), dtype=np.int)[:,1:]
else:
faces = None
if has_colors:
colors = vertex_data[:,3:].astype(np.uint8)
vertex_data = vertex_data[:,:3]
else:
colors = None
if no_colors:
return vertex_data, faces
else:
return vertex_data, colors, faces
def save_off(filename, vertices=None, faces=None):
if vertices is None:
vertices = []
if faces is None:
faces = []
with open(filename, 'w') as f:
f.write("OFF\n%d %d 0\n" % (len(vertices), len(faces)))
        if len(vertices) > 0:
            np.savetxt(f, vertices, fmt="%f %f %f")
        if len(faces) > 0:
for face in faces:
fmt = " ".join(["%d"] * (len(face) + 1)) + "\n"
f.write(fmt % ((len(face),) + tuple(map(int, face))))
def load_splocs(component_hdf5_file):
with h5py.File(component_hdf5_file, 'r') as f:
tris = f['tris'].value
Xmean = f['default'].value
names = sorted(list(set(f.keys()) - set(['tris', 'default'])))
components = np.array([
f[name].value - Xmean
for name in names])
return Xmean, tris, components, names
|
nilq/baby-python
|
python
|
# This script created by Joseph Aaron Campbell - 10/2020
# this script references https://www.agisoft.com/forum/index.php?topic=10564.msg47949#msg47949
# Use this as a learning tool only.
# I am not responsible for any damage to data or hardware if the script is not properly utilized.
# Following Code tested and based on Metashape Pro 1.6.2 using Windows 10 Pro
""" With Help from:
https://www.agisoft.com/forum/index.php?topic=12027.msg53791#msg53791
"""
"""
# # # # # # # # # # # # # # #
SET UP THE WORKING ENVIRONMENT
# # # # # # # # # # # # # # #
"""
import Metashape
"""create a reference to the current project"""
doc = Metashape.app.document
# create reference for list of chunks in project
chunkList = Metashape.app.document.chunks
# set reference to the currently selected chunk -- this should be the duplicated chunk from part-01
activeChunk = Metashape.app.document.chunk
# must include this line between each attempt to build a model. or it overwrites last created model
activeChunk.model = None
# using optimized sparse cloud, create lower resolution model
activeChunk.buildModel\
(
surface_type=Metashape.Arbitrary,
interpolation=Metashape.EnabledInterpolation,
face_count=Metashape.FaceCount.LowFaceCount,
face_count_custom=200000,
source_data=Metashape.PointCloudData,
vertex_colors=True,
vertex_confidence=True,
volumetric_masks=False,
keep_depth=True,
trimming_radius=10,
subdivide_task=True,
workitem_size_cameras=20,
max_workgroup_size=100
)
# import masks function using lower resolution model as source for all cameras in chunk
activeChunk.importMasks\
(
path='{filename}_mask.png',
source=Metashape.MaskSourceModel,
operation=Metashape.MaskOperationReplacement,
tolerance=10
)
# get the current Chunks label ( name )
currentChunkLabel = activeChunk.label
# get the current (saved) project's parent folder URL via python3 pathLib
# this path variable is used when exporting the 3D model later in the script.
# 'parent' will return the parent folder the project lives in
# 'name' will return the saved project name and extension
# 'stem' will return just the project name without extension
from pathlib import Path
parentFolderPath = str(Path(Metashape.app.document.path).parent)
print("parent Folder is : " + parentFolderPath)
# set reference to the output folders as string
outputFolder = Path(str(parentFolderPath) + "\\" + "_Output")
outputChunkFolder = Path(str(outputFolder) + "\\" + "_" + str(currentChunkLabel))
outputMaskfolder = Path(str(outputChunkFolder) + "\\" + "_Masks")
print("output folder: " + str(outputFolder))
print("output chunk folder: " + str(outputChunkFolder))
print("model output folder is: " + str(outputMaskfolder))
# create an 'output' sub-folder for exported data from project
# also create sub-folder for model export within 'output' sub-folder
# this method will create the folder if doesnt exist, and also do nothing if it does exist
Path(outputFolder).mkdir(exist_ok=True)
Path(outputChunkFolder).mkdir(exist_ok=True)
Path(outputMaskfolder).mkdir(exist_ok=True)
# export masks to output mask folder
# this uses the Metashape Task class, otherwise loop through every camera in chunk and save mask as image file
# create a reference to the Tasks ExportMasks method
mask_task = Metashape.Tasks.ExportMasks()
# define which cameras to export masks for
mask_task.cameras = activeChunk.cameras
# define the output path for the exported mask files
mask_task.path = str(str(outputMaskfolder) + "\\" + "{filename}.png")
# activate the task for the active chunk to export the masks
mask_task.apply(object=activeChunk)
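# Hedged sketch of the per-camera loop alternative mentioned above (assumes
# camera.mask exposes image() with a save(path) method; adjust to the
# Metashape API version in use):
#     for camera in activeChunk.cameras:
#         if camera.mask:
#             camera.mask.image().save(str(outputMaskfolder) + "\\" + camera.label + ".png")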
# delete lower resolution model
activeChunk.remove(activeChunk.models[0])
# save document
doc.save()
|
nilq/baby-python
|
python
|
load("//tools/bzl:maven_jar.bzl", "maven_jar")
AWS_SDK_VER = "2.16.19"
AWS_KINESIS_VER = "2.3.4"
JACKSON_VER = "2.10.4"
def external_plugin_deps():
maven_jar(
name = "junit-platform",
artifact = "org.junit.platform:junit-platform-commons:1.4.0",
sha1 = "34d9983705c953b97abb01e1cd04647f47272fe5",
)
maven_jar(
name = "amazon-kinesis-client",
artifact = "software.amazon.kinesis:amazon-kinesis-client:" + AWS_KINESIS_VER,
sha1 = "6bb6fcbc5a0f6fd6085f3b1589e738485b0b7867",
)
maven_jar(
name = "amazon-kinesis",
artifact = "software.amazon.awssdk:kinesis:" + AWS_SDK_VER,
sha1 = "bec13fc5ef9225d1a10f13fbe1de8cb114448cf8",
)
maven_jar(
name = "amazon-dynamodb",
artifact = "software.amazon.awssdk:dynamodb:" + AWS_SDK_VER,
sha1 = "33ec7d291973658779b5777db2a0214a5c469e81",
)
maven_jar(
name = "amazon-cloudwatch",
artifact = "software.amazon.awssdk:cloudwatch:" + AWS_SDK_VER,
sha1 = "7585fbe349a92e0a9f040e4194ac89ca32e7983d",
)
maven_jar(
name = "amazon-regions",
artifact = "software.amazon.awssdk:regions:" + AWS_SDK_VER,
sha1 = "089f4f3d3ef20b2486f09e71da638c03100eab64",
)
maven_jar(
name = "amazon-netty-nio-client",
artifact = "software.amazon.awssdk:netty-nio-client:" + AWS_SDK_VER,
sha1 = "bb674feda8417513a647c7aa8cba9a537068d099",
)
maven_jar(
name = "amazon-utils",
artifact = "software.amazon.awssdk:utils:" + AWS_SDK_VER,
sha1 = "53edaa1f884682ac3091293eff3eb024ed0e36bb",
)
maven_jar(
name = "amazon-sdk-core",
artifact = "software.amazon.awssdk:sdk-core:" + AWS_SDK_VER,
sha1 = "02a60fd9c138048272ef8b6c80ae67491dd386a9",
)
maven_jar(
name = "amazon-aws-core",
artifact = "software.amazon.awssdk:aws-core:" + AWS_SDK_VER,
sha1 = "0f50f5cf2698a0de7d2d77322cbf3fb13f76187f",
)
maven_jar(
name = "amazon-http-client-spi",
artifact = "software.amazon.awssdk:http-client-spi:" + AWS_SDK_VER,
sha1 = "e4027e7e0cb064602100b34e19f131983f76f872",
)
maven_jar(
name = "amazon-auth",
artifact = "software.amazon.awssdk:auth:" + AWS_SDK_VER,
sha1 = "4163754b2a0eadcb569a35f0666fd5d859e43ef8",
)
maven_jar(
name = "reactive-streams",
artifact = "org.reactivestreams:reactive-streams:1.0.2",
sha1 = "323964c36556eb0e6209f65c1cef72b53b461ab8",
)
maven_jar(
name = "reactor-core",
artifact = "io.projectreactor:reactor-core:3.4.3",
sha1 = "df23dbdf95f892f7a04292d040fd8b308bd66602",
)
maven_jar(
name = "rxjava",
artifact = "io.reactivex.rxjava2:rxjava:2.1.14",
sha1 = "20dbf7496e417da474eda12717bf4653dbbd5a6b",
)
maven_jar(
name = "jackson-databind",
artifact = "com.fasterxml.jackson.core:jackson-databind:" + JACKSON_VER,
sha1 = "76e9152e93d4cf052f93a64596f633ba5b1c8ed9",
)
maven_jar(
name = "jackson-dataformat-cbor",
artifact = "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:" + JACKSON_VER,
sha1 = "c854bb2d46138198cb5d4aae86ef6c04b8bc1e70",
)
maven_jar(
name = "events-broker",
artifact = "com.gerritforge:events-broker:3.5.0-alpha-202108041529",
sha1 = "309fe8cc08c46593d9990d4e5c448cc85e5a62b0",
)
maven_jar(
name = "io-netty-all",
artifact = "io.netty:netty-all:4.1.51.Final",
sha1 = "5e5f741acc4c211ac4572c31c7e5277ec465e4e4",
)
maven_jar(
name = "awssdk-query-protocol",
artifact = "software.amazon.awssdk:aws-query-protocol:" + AWS_SDK_VER,
sha1 = "4c88c66daa5039813e879b324636d15fa2802787",
)
maven_jar(
name = "awssdk-protocol-core",
artifact = "software.amazon.awssdk:protocol-core:" + AWS_SDK_VER,
sha1 = "6200c1617f87eed0216c6afab35bab2403da140c",
)
maven_jar(
name = "awssdk-json-protocol",
artifact = "software.amazon.awssdk:aws-json-protocol:" + AWS_SDK_VER,
sha1 = "16449e555f61607b917dc7f242c1928298de9bdd",
)
maven_jar(
name = "awssdk-cbor-protocol",
artifact = "software.amazon.awssdk:aws-cbor-protocol:" + AWS_SDK_VER,
sha1 = "7353a868437576b9e4911779ae66a85ef6be0d9e",
)
maven_jar(
name = "awssdk-metrics-spi",
artifact = "software.amazon.awssdk:metrics-spi:" + AWS_SDK_VER,
sha1 = "d8669974b412766751b5eaf9c1edad908bfe5c38",
)
maven_jar(
name = "amazon-profiles",
artifact = "software.amazon.awssdk:profiles:" + AWS_SDK_VER,
sha1 = "5add2a843de43bd0acf45e1ab8c2b94c3638dd66",
)
maven_jar(
name = "apache-commons-lang3",
artifact = "org.apache.commons:commons-lang3:3.12.0",
sha1 = "c6842c86792ff03b9f1d1fe2aab8dc23aa6c6f0e",
)
maven_jar(
name = "testcontainer-localstack",
artifact = "org.testcontainers:localstack:1.15.2",
sha1 = "ae3c4717bc5f37410abbb490cb46d349a77990a0",
)
maven_jar(
name = "aws-java-sdk-core",
artifact = "com.amazonaws:aws-java-sdk-core:1.11.960",
sha1 = "18b6b2a5cb83a0e2e33a593302b5dbe0ca2ade64",
)
maven_jar(
name = "awssdk-url-connection-client",
artifact = "software.amazon.awssdk:url-connection-client:" + AWS_SDK_VER,
sha1 = "b84ac8bae45841bc65af3c4f55164d9a3399b653",
)
maven_jar(
name = "awssdk-kinesis-producer",
artifact = "com.amazonaws:amazon-kinesis-producer:0.14.6",
sha1 = "7f83582df816dccc5217f05ece309a5cd8c7a9a5",
)
maven_jar(
name = "aws-glue-schema-serde",
artifact = "software.amazon.glue:schema-registry-serde:1.0.0",
sha1 = "30815b670f89876465caa69b47e6df6fd6875d0f",
)
maven_jar(
name = "apache-commons-io",
artifact = "commons-io:commons-io:2.4",
sha1 = "b1b6ea3b7e4aa4f492509a4952029cd8e48019ad",
)
maven_jar(
name = "javax-xml-bind",
artifact = "javax.xml.bind:jaxb-api:2.3.1",
sha1 = "8531ad5ac454cc2deb9d4d32c40c4d7451939b5d",
)
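# Note: each maven_jar() rule above pins its artifact to an exact sha1 so the
# build stays reproducible; AWS_SDK_VER and JACKSON_VER are version variables
# assumed to be defined earlier in this WORKSPACE file.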
|
nilq/baby-python
|
python
|
import collections
from typing import List
from thinglang.lexer.values.identifier import Identifier, GenericIdentifier
from thinglang.symbols.merged_symbol import MergedSymbol
from thinglang.symbols.symbol import Symbol
from thinglang.utils import collection_utils
class SymbolMap(object):
"""
Describes a symbol map - the public fields (members and methods) of a ThingDefinition.
Each SymbolMap also has an index number, by which it is known to the runtime.
"""
def __init__(self, members: List[Symbol], methods: List[Symbol], name: Identifier, extends: Identifier, generics: List[Identifier], convention, member_offset: int=0, method_offset: int=0):
self.members, self.methods, self.name, self.extends, self.generics, self.convention, self.member_offset, self.method_offset = \
members, self.merge_method_symbols(methods), name, extends, generics or [], convention, member_offset, method_offset
self.lookup = {
symbol.name: symbol for symbol in self.members + self.methods
}
assert len(self.methods) + len(self.members) == len(self.lookup), 'Thing definition contains colliding elements'
assert {x.convention for x in self.lookup.values()} == {self.convention}, 'Inconsistent calling conventions identified'
def serialize(self) -> dict:
"""
Serialize this symbol map (and its symbols) into a dict
"""
return {
"name": self.name,
"extends": self.extends,
"generics": self.generics,
"offsets": {
"members": self.member_offset,
"methods": self.method_offset
},
"convention": Symbol.serialize_convention(self.convention),
"symbols": collection_utils.flatten([x.serialize() for x in self.lookup.values()])
}
@classmethod
def from_serialized(cls, data: dict) -> 'SymbolMap':
"""
Reads a serialized symbol map and returns a new SymbolMap object.
Additionally, deserializes its symbols into Symbol objects
"""
symbols = [Symbol.load(elem) for elem in data['symbols']]
members = [symbol for symbol in symbols if symbol.kind == Symbol.MEMBER]
methods = [symbol for symbol in symbols if symbol.kind == Symbol.METHOD]
extends = Symbol.load_identifier(data['extends']) if data['extends'] else None
return cls(members=members,
methods=methods,
name=Identifier(data['name']),
extends=extends,
generics=[Identifier(x) for x in data['generics']],
convention=Symbol.serialize_convention(data['convention']),
member_offset=data['offsets']['members'],
method_offset=data['offsets']['methods'])
@classmethod
def from_thing(cls, thing, extends: 'SymbolMap') -> 'SymbolMap':
"""
Creates a new SymbolMap from a ThingDefinition
:param thing: the source ThingDefinition
:param extends: optionally, the symbol map from which this thing inherits
"""
member_offset, method_offset = 0, 0
if extends is not None:
member_offset, method_offset = len(extends.members) + extends.member_offset, len(extends.methods) + extends.method_offset
members = [elem.symbol().update_index(member_offset + index) for index, elem in enumerate(thing.members)]
methods = [elem.symbol().update_index(method_offset + index) for index, elem in enumerate(thing.methods)]
return cls(members,
methods,
thing.name,
thing.extends,
thing.generics,
Symbol.BYTECODE,
member_offset=member_offset,
method_offset=method_offset)
def parameterize(self, parameters: dict) -> 'SymbolMap':
"""
Creates a new SymbolMap, replacing the generic parameters in this SymbolMap with determined values
:param parameters: a mapping of generic name -> resolved name
"""
assert set(parameters.keys()) == set(self.generics), 'Partial parameterization is not allowed'
return SymbolMap(
[x.parameterize(parameters) for x in self.members],
[x.parameterize(parameters) for x in self.methods],
GenericIdentifier(self.name, tuple([parameters[x] for x in self.generics])),
self.extends,
[],
self.convention,
self.member_offset,
self.method_offset)
def __getitem__(self, item: Identifier) -> Symbol:
"""
Returns a symbol from this map
"""
return self.lookup[item]
def __contains__(self, item: Identifier) -> bool:
"""
Checks if a symbol identified by `item` exists
"""
return item in self.lookup
def __iter__(self):
"""
Iterates over all the fields of this symbol map
"""
return iter(self.lookup.values())
def __repr__(self):
return f'SymbolMap({self.name})'
@staticmethod
@collection_utils.drain()
def merge_method_symbols(methods):
method_symbols = collections.defaultdict(list)
for method_symbol in methods:
method_symbols[method_symbol.name].append(method_symbol)
for symbol_name, symbols in method_symbols.items():
yield symbols.pop() if len(symbols) == 1 else MergedSymbol(symbols)
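# A minimal usage sketch (hypothetical identifiers; assumes the surrounding
# thinglang toolchain provides a parsed ThingDefinition `thing` whose single
# generic is T, plus the parent's SymbolMap `parent_map`):
#
#   base = SymbolMap.from_thing(thing, extends=parent_map)
#   concrete = base.parameterize({Identifier('T'): Identifier('number')})
#   method_symbol = concrete[Identifier('some_method')]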
|
nilq/baby-python
|
python
|
# encoding:utf-8
from numpy import *
import math
import copy
import pickle
class C4_5DTree(object):
def __init__(self):  # constructor
self.tree = {}  # the learned tree
self.dataSet = []  # training data set
self.labels = []  # feature label set
def loadDataSet(self, path, labels, split):
recordlist = []
with open(path, "r") as in_file:
for line in in_file:
recordlist.append(line.strip().split(split))
self.dataSet = recordlist
self.labels = labels
def train(self):
labels = copy.deepcopy(self.labels)
self.tree = self.buildTree(self.dataSet, labels)
def buildTree(self, dataSet, labels):
cateList = [data[-1] for data in dataSet]
if cateList.count(cateList[0]) == len(cateList):
return cateList[0]
if len(dataSet[0]) == 1:
return self.maxCate(cateList)
bestFeat, featValueList = self.getBestFeat(dataSet)
bestFeatLabel = labels[bestFeat]
tree = {bestFeatLabel: {}}
del (labels[bestFeat])
for value in featValueList:
subLabels = labels[:]
splitDataSet = self.splitDataSet(dataSet, bestFeat, value)
subTree = self.buildTree(splitDataSet, subLabels)
tree[bestFeatLabel][value] = subTree
return tree
# Return the class label that occurs most often (ties collapse, since the
# dict below is keyed by count)
def maxCate(self, catelist):
items = dict([(catelist.count(i), i) for i in catelist])
return items[max(items.keys())]
# Compute the information (Shannon) entropy of the data set
def computeEntropy(self, dataSet):
datalen = float(len(dataSet))
cateList = [data[-1] for data in dataSet]  # class labels from the data set
# Build a dict mapping each class label to its occurrence count
items = dict([(i, cateList.count(i)) for i in cateList])
infoEntropy = 0.0
for key in items:
prob = float(items[key]) / datalen
infoEntropy -= prob * math.log(prob, 2)
return infoEntropy
# Split the data set: keep rows where the feature on `axis` equals `value`,
# drop that feature column, and return the remaining data
# dataSet: data set; axis: feature column index; value: feature value to match
def splitDataSet(self, dataSet, axis, value):
rtnList = []
for featVec in dataSet:
if featVec[axis] == value:
rFeatVec = featVec[:axis]  # list slice: elements 0 .. axis-1
rFeatVec.extend(featVec[axis + 1:])
rtnList.append(rFeatVec)
return rtnList
# Compute the split information (intrinsic value) of a feature
def computeSplitInfo(self, featureVList):
numEntries = len(featureVList)
featureValueSetList = list(
set(featureVList))
valueCounts = [featureVList.count(featVec) for featVec in featureValueSetList]
pList = [float(item) / numEntries for item in valueCounts]
lList = [item * math.log(item, 2) for item in pList]
splitInfo = -sum(lList)
return splitInfo, featureValueSetList
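# C4.5 relates the quantities computed above and below:
#   GainRatio(f) = (H(D) - H(D|f)) / SplitInfo(f)
# where H(D) is computeEntropy on the full set and H(D|f) is the weighted
# entropy of the subsets produced by splitting on feature f (the
# ConditionEntropy accumulated in getBestFeat).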
# Select the best feature by maximum information gain ratio
def getBestFeat(self, dataSet):
Num_Feats = len(dataSet[0][:-1]) # 4
totality = len(dataSet) # 1024
BaseEntropy = self.computeEntropy(dataSet)
ConditionEntropy = []
splitInfo = []
allFeatVList = []
for f in range(Num_Feats):
featList = [example[f] for example in dataSet]
[splitI, featureValueList] = self.computeSplitInfo(featList)
allFeatVList.append(featureValueList) # ['0','1','2'],['0','1','2'],['0','1']
splitInfo.append(splitI)
resultGain = 0.0
for value in featureValueList:
subSet = self.splitDataSet(dataSet, f, value)
appearNum = float(len(subSet))
subEntropy = self.computeEntropy(subSet)
resultGain += (appearNum / totality) * subEntropy
ConditionEntropy.append(resultGain)
infoGainArray = BaseEntropy * ones(Num_Feats) - array(ConditionEntropy)
# infoGainRatio = infoGainArray / array(splitInfo)
infoGainRatio = array([0 if j == 0 else i / j for i, j in zip(infoGainArray, splitInfo)])
bestFeatureIndex = argsort(-infoGainRatio)[0]
return bestFeatureIndex, allFeatVList[bestFeatureIndex]
def predict(self, inputTree, featLabels, testVec):  # classifier
root = list(inputTree.keys())[0]
secondDict = inputTree[root]  # value is either a subtree or a class label
featIndex = featLabels.index(root)  # position of the root feature in the label list
key = testVec[featIndex]
valueOfFeat = secondDict[key]  # common pitfall: index by the test value, not the label
if isinstance(valueOfFeat, dict):
classLabel = self.predict(valueOfFeat, featLabels, testVec)  # recurse into the subtree
else:
classLabel = valueOfFeat
return classLabel
def storeTree(self, inputTree, filename):
fw = open(filename, 'wb')
# persist the tree object with pickle
pickle.dump(inputTree, fw)
fw.close()
def grabTree(self, filename):
fr = open(filename, 'rb')
return pickle.load(fr)
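# A minimal usage sketch (hypothetical path, labels, and record format; the
# loader expects a delimited text file whose last column is the class label):
#
#   dt = C4_5DTree()
#   dt.loadDataSet('train.dat', ['age', 'income', 'student', 'credit'], '\t')
#   dt.train()
#   label = dt.predict(dt.tree, ['age', 'income', 'student', 'credit'],
#                      ['0', '1', '0', '1'])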
|
nilq/baby-python
|
python
|
import logging
import subprocess
class Spotify():
def __init__(self, args):
logging.info("Spotify Connect client started.")
command = "spotifyd --no-daemon"
self.process = subprocess.Popen(command, shell=True)
def stop(self):
self.process.kill()
logging.info("Stopped Spotify Connect client.")
|
nilq/baby-python
|
python
|
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Behaviors represent a task that Cozmo may perform for an
indefinite amount of time.
For example, the "LookAroundInPlace" behavior causes Cozmo to start looking
around him (without driving), which will cause events such as
:class:`cozmo.objects.EvtObjectObserved` to be generated as he comes across
objects.
Behaviors must be explicitly stopped before having the robot do something else
(for example, pick up the object he just observed).
Behaviors are started by a call to :meth:`cozmo.robot.Robot.start_behavior`,
which returns a :class:`Behavior` object. Calling the :meth:`~Behavior.stop`
method on that object terminates the behavior.
The :class:`BehaviorTypes` class in this module holds a list of all available
behaviors.
'''
# __all__ should order by constants, event classes, other classes, functions.
__all__ = ['BEHAVIOR_IDLE', 'BEHAVIOR_REQUESTED', 'BEHAVIOR_RUNNING',
'BEHAVIOR_STOPPED',
'EvtBehaviorRequested', 'EvtBehaviorStarted', 'EvtBehaviorStopped',
'Behavior', 'BehaviorTypes']
import collections
from . import logger
from . import event
from ._clad import _clad_to_engine_cozmo, CladEnumWrapper
#: string: Behavior idle state (not requested to run)
BEHAVIOR_IDLE = 'behavior_idle'
#: string: Behavior requested state (waiting for engine to start it)
BEHAVIOR_REQUESTED = 'behavior_requested'
#: string: Behavior running state
BEHAVIOR_RUNNING = 'behavior_running'
#: string: Behavior stopped state
BEHAVIOR_STOPPED = 'behavior_stopped'
class EvtBehaviorRequested(event.Event):
'''Triggered when a behavior is requested to start.'''
behavior = 'The Behavior object'
behavior_type_name = 'The behavior type name - equivalent to behavior.type.name'
class EvtBehaviorStarted(event.Event):
'''Triggered when a behavior starts running on the robot.'''
behavior = 'The Behavior object'
behavior_type_name = 'The behavior type name - equivalent to behavior.type.name'
class EvtBehaviorStopped(event.Event):
'''Triggered when a behavior stops.'''
behavior = 'The Behavior object'
behavior_type_name = 'The behavior type name - equivalent to behavior.type.name'
class Behavior(event.Dispatcher):
'''A Behavior instance describes a behavior the robot is currently performing.
Returned by :meth:`cozmo.robot.Robot.start_behavior`.
'''
def __init__(self, robot, behavior_type, is_active=False, **kw):
super().__init__(**kw)
self.robot = robot
self.type = behavior_type
self._state = BEHAVIOR_IDLE
if is_active:
self._state = BEHAVIOR_REQUESTED
self.dispatch_event(EvtBehaviorRequested, behavior=self, behavior_type_name=self.type.name)
def __repr__(self):
return '<%s type="%s">' % (self.__class__.__name__, self.type.name)
def _on_engine_started(self):
if self._state != BEHAVIOR_REQUESTED:
# has not been requested (is an unrelated behavior transition)
if self.is_running:
logger.warning("Behavior '%s' unexpectedly reported started when already running")
return
self._state = BEHAVIOR_RUNNING
self.dispatch_event(EvtBehaviorStarted, behavior=self, behavior_type_name=self.type.name)
def _set_stopped(self):
if not self.is_active:
return
self._state = BEHAVIOR_STOPPED
self.dispatch_event(EvtBehaviorStopped, behavior=self, behavior_type_name=self.type.name)
def stop(self):
'''Requests that the robot stop performing the behavior.
Has no effect if the behavior is not presently active.
'''
if not self.is_active:
return
self.robot._set_none_behavior()
self._set_stopped()
@property
def is_active(self):
'''bool: True if the behavior is currently active and may run on the robot.'''
return self._state == BEHAVIOR_REQUESTED or self._state == BEHAVIOR_RUNNING
@property
def is_running(self):
'''bool: True if the behavior is currently running on the robot.'''
return self._state == BEHAVIOR_RUNNING
@property
def is_completed(self):
return self._state == BEHAVIOR_STOPPED
async def wait_for_started(self, timeout=5):
'''Waits for the behavior to start.
Args:
timeout (int or None): Maximum time in seconds to wait for the event.
Pass None to wait indefinitely. If a behavior can run it should
usually start within ~0.2 seconds.
Raises:
:class:`asyncio.TimeoutError`
'''
if self.is_running or self.is_completed:
# Already started running
return
await self.wait_for(EvtBehaviorStarted, timeout=timeout)
async def wait_for_completed(self, timeout=None):
'''Waits for the behavior to complete.
Args:
timeout (int or None): Maximum time in seconds to wait for the event.
Pass None to wait indefinitely.
Raises:
:class:`asyncio.TimeoutError`
'''
if self.is_completed:
# Already complete
return
# Wait for behavior to start first - it can't complete without starting,
# and if it doesn't start within a fraction of a second it probably
# never will
await self.wait_for_started()
await self.wait_for(EvtBehaviorStopped, timeout=timeout)
_BehaviorType = collections.namedtuple('_BehaviorType', ['name', 'id'])
class BehaviorTypes(CladEnumWrapper):
'''Defines all executable robot behaviors.
For use with :meth:`cozmo.robot.Robot.start_behavior`.
'''
_clad_enum = _clad_to_engine_cozmo.ExecutableBehaviorType
_entry_type = _BehaviorType
#: Turn and move head, but don't drive, with Cozmo's head angled
#: upwards where faces are likely to be.
FindFaces = _entry_type("FindFaces", _clad_enum.FindFaces)
#: Knock over a stack of cubes.
KnockOverCubes = _entry_type("KnockOverCubes", _clad_enum.KnockOverCubes)
#: Turn and move head, but don't drive, to see what is around Cozmo.
LookAroundInPlace = _entry_type("LookAroundInPlace", _clad_enum.LookAroundInPlace)
#: Tries to "pounce" (drive forward and lower lift) when it detects
#: nearby motion on the ground plane.
PounceOnMotion = _entry_type("PounceOnMotion", _clad_enum.PounceOnMotion)
#: Roll a block, regardless of orientation.
RollBlock = _entry_type("RollBlock", _clad_enum.RollBlock)
#: Pickup one block, and stack it onto another block.
StackBlocks = _entry_type("StackBlocks", _clad_enum.StackBlocks)
# Enroll a Face - for internal use by Face.name_face (requires additional pre/post setup)
_EnrollFace = _entry_type("EnrollFace", _clad_enum.EnrollFace)
# This enum deliberately only exposes a sub-set of working behaviors
BehaviorTypes._init_class(warn_on_missing_definitions=False, add_missing_definitions=False)
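# A minimal usage sketch (assumes a connected cozmo.robot.Robot instance
# named `robot`, per the module docstring above):
#
#   behavior = robot.start_behavior(BehaviorTypes.LookAroundInPlace)
#   # ... let events such as cozmo.objects.EvtObjectObserved arrive ...
#   behavior.stop()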
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from django.contrib.auth.decorators import login_required
from children.views import ChildrenTemplateView, ChildrenDetailView, ChildrenUpdateView
from children.views import ChildrenCreateView, ChildrenDeleteView, ChildListJson
urlpatterns = [
url(r'^$', ChildrenTemplateView.as_view(), name='list'),
url(r'^add/$', ChildrenCreateView.as_view(), name='add'),
url(r'^(?P<child_id>[0-9]+)/$', ChildrenDetailView.as_view(), name='detail'),
url(r'^(?P<child_id>[0-9]+)/edit/$', ChildrenUpdateView.as_view(), name='edit'),
url(r'^(?P<child_id>[0-9]+)/delete/$', ChildrenDeleteView.as_view(), name='delete'),
url(r'^(?P<child_id>[0-9]+)/params/', include('history.urls')),
url(r'^data-table/$', login_required(ChildListJson.as_view()), name='child_list_json')
]
|
nilq/baby-python
|
python
|
from biosimulators_utils.model_lang.bngl.validation import validate_model, read_model
from biosimulators_utils.utils.core import flatten_nested_list_of_strings
import os
import shutil
import tempfile
import unittest
class BnglValidationTestCase(unittest.TestCase):
FIXTURE_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'fixtures', 'bngl')
def test(self):
errors, warnings, _ = validate_model(os.path.join(self.FIXTURE_DIR, 'valid.bngl'))
self.assertEqual(errors, [])
self.assertEqual(warnings, [])
fid, filename = tempfile.mkstemp()
os.close(fid)
shutil.copyfile(os.path.join(self.FIXTURE_DIR, 'valid.bngl'), filename)
errors, warnings, _ = validate_model(filename)
self.assertEqual(errors, [])
self.assertEqual(warnings, [])
os.remove(filename)
filename = os.path.join(self.FIXTURE_DIR, 'invalid.bngl2')
_, errors, _ = read_model(filename, '')
self.assertIn("not a valid BNGL or BGNL XML file", flatten_nested_list_of_strings(errors))
filename = os.path.join(self.FIXTURE_DIR, 'invalid.bngl')
errors, warnings, _ = validate_model(filename)
self.assertIn("not a valid BNGL", flatten_nested_list_of_strings(errors))
self.assertEqual(warnings, [])
filename = os.path.join(self.FIXTURE_DIR, 'does-not-exist')
errors, warnings, _ = validate_model(filename)
self.assertIn('is not a file', flatten_nested_list_of_strings(errors))
self.assertEqual(warnings, [])
filename = None
errors, warnings, _ = validate_model(filename)
self.assertIn('must be a path', flatten_nested_list_of_strings(errors))
self.assertEqual(warnings, [])
filename = os.path.join(self.FIXTURE_DIR, '..', 'BIOMD0000000075.xml')
errors, warnings, _ = validate_model(filename)
self.assertIn('does not appear to a valid', flatten_nested_list_of_strings(errors))
self.assertEqual(warnings, [])
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/security_keys.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import binascii
import copy
import hashlib
import json
import logging
from king_phisher import find
from king_phisher import serializers
from king_phisher import utilities
import cryptography.hazmat.primitives.ciphers
import cryptography.hazmat.primitives.ciphers.algorithms
import cryptography.hazmat.primitives.ciphers.modes
import cryptography.hazmat.primitives.padding as padding
import cryptography.hazmat.backends as backends
import ecdsa
import ecdsa.curves
import ecdsa.keys
ciphers = cryptography.hazmat.primitives.ciphers
ecdsa_curves = dict((c.name, c) for c in ecdsa.curves.curves)
"""
A dictionary of :py:class:`ecdsa.curves.Curve` objects keyed by their
:py:mod:`ecdsa` and OpenSSL compatible names.
"""
ecdsa_curves.update((c.openssl_name, c) for c in ecdsa.curves.curves)
def _decode_data(value, encoding=None):
if isinstance(encoding, str):
encoding = encoding.lower()
if encoding == 'base64':
value = binascii.a2b_base64(value)
elif encoding == 'hex':
value = binascii.a2b_hex(value)
elif encoding is not None:
raise ValueError('unknown encoding: ' + encoding)
return value
def _encoding_data(value, encoding=None):
if isinstance(encoding, str):
encoding = encoding.lower()
if encoding == 'base64':
value = binascii.b2a_base64(value).decode('utf-8').strip()
elif encoding == 'hex':
value = binascii.b2a_hex(value).decode('utf-8').strip()
elif encoding is not None:
raise ValueError('unknown encoding: ' + encoding)
return value
def _key_cls_from_dict(cls, value, encoding=None):
key_data = _decode_data(value['data'], encoding=encoding)
return cls.from_string(key_data, curve=value['type'])
def _kwarg_curve(kwargs):
if 'curve' not in kwargs:
return kwargs
curve = kwargs.pop('curve')
if isinstance(curve, str):
if curve not in ecdsa_curves:
raise ValueError('unknown curve: ' + curve)
curve = ecdsa_curves[curve]
elif not isinstance(curve, ecdsa.curves.Curve):
raise TypeError('curve must either be a curve name or ecdsa.curves.Curve instance')
kwargs['curve'] = curve
return kwargs
def openssl_decrypt_data(ciphertext, password, digest='sha256', encoding='utf-8'):
"""
Decrypt *ciphertext* in the same way as OpenSSL. For the meaning of
*digest* see the :py:func:`.openssl_derive_key_and_iv` function
documentation.
.. note::
This function can be used to decrypt ciphertext created with the
``openssl`` command line utility.
.. code-block:: none
openssl enc -e -aes-256-cbc -in file -out file.enc -md sha256
:param bytes ciphertext: The encrypted data to decrypt.
:param str password: The password to use when deriving the decryption key.
:param str digest: The name of hashing function to use to generate the key.
:param str encoding: The name of the encoding to use for the password.
:return: The decrypted data.
:rtype: bytes
"""
salt = b''
if ciphertext[:8] == b'Salted__':
salt = ciphertext[8:16]
ciphertext = ciphertext[16:]
my_key, my_iv = openssl_derive_key_and_iv(password, salt, 32, 16, digest=digest, encoding=encoding)
cipher = ciphers.Cipher(
ciphers.algorithms.AES(my_key),
ciphers.modes.CBC(my_iv),
backend=backends.default_backend()
)
decryptor = cipher.decryptor()
plaintext = decryptor.update(ciphertext) + decryptor.finalize()
unpadder = padding.PKCS7(cipher.algorithm.block_size).unpadder()
return unpadder.update(plaintext) + unpadder.finalize()
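# Note: the 'Salted__' prefix handled above is the header that `openssl enc`
# writes when a salt is in use; the 8-byte salt immediately follows it and
# the actual ciphertext starts at byte 16.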
def openssl_derive_key_and_iv(password, salt, key_length, iv_length, digest='sha256', encoding='utf-8'):
"""
Derive an encryption key and initialization vector (IV) in the same way as
OpenSSL.
.. note::
Different versions of OpenSSL use a different default value for the
*digest* function used to derive keys and initialization vectors. A
specific one can be used by passing the ``-md`` option to the
``openssl`` command.
:param str password: The password to use when deriving the key and IV.
:param bytes salt: A value to use as a salt for the operation.
:param int key_length: The length in bytes of the key to return.
:param int iv_length: The length in bytes of the IV to return.
:param str digest: The name of hashing function to use to generate the key.
:param str encoding: The name of the encoding to use for the password.
:return: The key and IV as a tuple.
:rtype: tuple
"""
password = password.encode(encoding)
digest_function = getattr(hashlib, digest)
chunk = b''
data = b''
while len(data) < key_length + iv_length:
chunk = digest_function(chunk + password + salt).digest()
data += chunk
return data[:key_length], data[key_length:key_length + iv_length]
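# Note: the digest-chaining loop above mirrors OpenSSL's EVP_BytesToKey()
# derivation with an iteration count of 1 (the `openssl enc` default); it is
# not a modern password KDF such as PBKDF2 or scrypt.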
class SigningKey(ecdsa.SigningKey, object):
@classmethod
def from_secret_exponent(cls, *args, **kwargs):
instance = super(SigningKey, cls).from_secret_exponent(*args, **kwargs)
orig_vk = instance.verifying_key
instance.verifying_key = VerifyingKey.from_public_point(orig_vk.pubkey.point, instance.curve, instance.default_hashfunc)
return instance
@classmethod
def from_string(cls, string, **kwargs):
kwargs = _kwarg_curve(kwargs)
return super(SigningKey, cls).from_string(string, **kwargs)
@classmethod
def from_dict(cls, value, encoding='base64'):
"""
Load the signing key from the specified dict object.
:param dict value: The dictionary to load the key data from.
:param str encoding: The encoding of the required 'data' key.
:return: The new signing key.
:rtype: :py:class:`.SigningKey`
"""
return _key_cls_from_dict(cls, value, encoding=encoding)
@classmethod
def from_file(cls, file_path, password=None, encoding='utf-8'):
"""
Load the signing key from the specified file. If *password* is
specified, the file is assumed to have been encoded using OpenSSL using
``aes-256-cbc`` with ``sha256`` as the message digest.
:param str file_path: The path to the file to load.
:param str password: An optional password to use for decrypting the file.
:param str encoding: The encoding of the data.
:return: A tuple of the key's ID, and the new :py:class:`.SigningKey` instance.
:rtype: tuple
"""
with open(file_path, 'rb') as file_h:
file_data = file_h.read()
if password:
file_data = openssl_decrypt_data(file_data, password, encoding=encoding)
file_data = file_data.decode(encoding)
file_data = serializers.JSON.loads(file_data)
utilities.validate_json_schema(file_data, 'king-phisher.security.key')
return file_data['id'], cls.from_dict(file_data['signing-key'], encoding=file_data.pop('encoding', 'base64'))
def sign_dict(self, data, signature_encoding='base64'):
"""
Sign a dictionary object. The dictionary will have a 'signature' key
added, as required by the :py:meth:`.VerifyingKey.verify_dict` method.
To serialize the dictionary to data suitable for the operation the
:py:func:`json.dumps` function is used and the resulting data is then
UTF-8 encoded.
:param dict data: The dictionary of data to sign.
:param str signature_encoding: The encoding name of the signature data.
:return: The dictionary object is returned with the 'signature' key added.
"""
utilities.assert_arg_type(data, dict, arg_pos=1)
data = copy.copy(data)
data.pop('signature', None) # remove a pre-existing signature
json_data = json.dumps(data, sort_keys=True).encode('utf-8')
data['signature'] = _encoding_data(self.sign(json_data), encoding=signature_encoding)
return data
class VerifyingKey(ecdsa.VerifyingKey, object):
@classmethod
def from_string(cls, string, **kwargs):
kwargs = _kwarg_curve(kwargs)
return super(VerifyingKey, cls).from_string(string, **kwargs)
@classmethod
def from_dict(cls, value, encoding='base64'):
return _key_cls_from_dict(cls, value, encoding=encoding)
def verify_dict(self, data, signature_encoding='base64'):
"""
Verify a signed dictionary object. The dictionary must have a
'signature' key as added by the :py:meth:`.SigningKey.sign_dict`
method. To serialize the dictionary to data suitable for the operation
the :py:func:`json.dumps` function is used and the resulting data is
then UTF-8 encoded.
:param dict data: The dictionary of data to verify.
:param str signature_encoding: The encoding name of the signature data.
"""
utilities.assert_arg_type(data, dict, arg_pos=1)
data = copy.copy(data)
signature = _decode_data(data.pop('signature'), encoding=signature_encoding)
data = json.dumps(data, sort_keys=True).encode('utf-8')
return self.verify(signature, data)
class SecurityKeys(object):
"""
The security keys that are installed on the system. These are then used to
validate the signatures of downloaded files to ensure they have not been
corrupted or tampered with.
.. note::
Keys are first loaded from the security.json file included with the
application source code and then from an optional security.local.json
file. Keys loaded from the optional file cannot overwrite keys loaded
from the system file.
"""
logger = logging.getLogger('KingPhisher.SecurityKeys')
def __init__(self):
self.keys = utilities.FreezableDict()
"""The dictionary of the loaded security keys, keyed by their identity string."""
if not self._load_key_store('security.json'):
raise RuntimeError('failed to load any keys from the primary store')
self._load_key_store('security.local.json')
self.keys.freeze()
self.logger.info("security key store initialized with {0:,} keys".format(len(self.keys)))
def _get_verifying_key(self, key_id):
key = self.keys.get(key_id)
if key is None:
self.logger.warning("verification of data with key {0} failed (unknown key)".format(key_id))
raise ecdsa.keys.BadSignatureError('unknown key for signature')
verifying_key = key.get('verifying-key')
if verifying_key is None:
self.logger.warning("verification of data with key {0} failed (missing verifying-key)".format(key_id))
raise ecdsa.keys.BadSignatureError('unknown key for signature')
return verifying_key
def _load_key_store(self, file_name):
file_path = find.data_file(file_name)
if not file_path:
return 0
with open(file_path, 'r') as file_h:
key_store = serializers.JSON.load(file_h)
utilities.validate_json_schema(key_store, 'king-phisher.security')
key_store = key_store['keys']
loaded = 0
for key_idx, key in enumerate(key_store, 1):
identifier = key['id']
if identifier in self.keys:
self.logger.warning("skipping loading {0}:{1} due to a duplicate id".format(file_name, key_idx))
continue
verifying_key = key['verifying-key']
key['verifying-key'] = VerifyingKey.from_dict(verifying_key, encoding=verifying_key.pop('encoding', 'base64'))
self.keys[identifier] = key
self.logger.debug("loaded key id: {0} from: {1}".format(identifier, file_path))
loaded += 1
return loaded
def verify(self, key_id, data, signature):
"""
Verify the data with the specified signature as signed by the specified
key. This function will raise an exception if the verification fails
for any reason, including if the key can not be found.
:param str key_id: The key's identifier.
:param bytes data: The data to verify against the signature.
:param bytes signature: The signature of the data to verify.
"""
verifying_key = self._get_verifying_key(key_id)
return verifying_key.verify(signature, data)
def verify_dict(self, data, signature_encoding='base64'):
"""
Verify the signed dictionary, using the key specified within the
'signed-by' key. This function will raise an exception if the
verification fails for any reason, including if the key can not be
found.
:param dict data: The dictionary of data to verify, including its
'signed-by' and 'signature' keys.
:param str signature_encoding: The encoding name of the signature data.
"""
key_id = data['signed-by']
verifying_key = self._get_verifying_key(key_id)
return verifying_key.verify_dict(data, signature_encoding=signature_encoding)
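# A minimal round-trip sketch (hypothetical data; NIST256p is one of the
# curves exposed via ecdsa_curves):
#
#   sk = SigningKey.generate(curve=ecdsa.curves.NIST256p)
#   signed = sk.sign_dict({'msg': 'hello'})
#   sk.verifying_key.verify_dict(signed)  # raises BadSignatureError if tampered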
|
nilq/baby-python
|
python
|
import sys
nodes = []
class node:
value = -1
visited = False
dist = 2e30
prev = None
idx = -1
neighbors = []
def __init__(self, v):
self.value = v
def getNeighbors(x,y,input):
result = []
# Boundary checks use > 0 (not > 1) so cells in row/column 1 still see
# their row-0/column-0 neighbors.
if y>0:
result.append(input[y-1][x])
if y+1<len(input):
result.append(input[y+1][x])
if x>0:
result.append(input[y][x-1])
if x+1<len(input[y]):
result.append(input[y][x+1])
return result
def loadFile(part2 = False):
f = open("Day15\\Input.txt", "r")
nodes.clear()
for x in f:
x = x.strip()
row = []
for y in x:
row.append(node(int(y)))
if part2:
wid = len(row)
for i in range(1,5):
for j in range(wid):
v = row[j].value+i
if v > 9:
v-=9
row.append(node(v))
nodes.append(row)
if part2:
hei = len(nodes)
for i in range(1,5):
for j in range(hei):
row = []
for n in nodes[j]:
v = n.value+i
if v > 9:
v-=9
row.append(node(v))
nodes.append(row)
for y in range(len(nodes)):
for x in range(len(nodes[y])):
nodes[y][x].neighbors = getNeighbors(x,y,nodes)
class priorityQueue:
queue = []
def updateNode(self,node):
idx = node.idx
if(idx <1):
return
while( idx>0 and self[idx].dist<self[idx-1].dist):
temp = self[idx]
self[idx] = self[idx-1]
self[idx-1] = temp
self[idx-1].idx = idx-1
self[idx].idx = idx
idx -= 1
def __getitem__(self,key):
return self.queue[key]
def __setitem__(self,key,value):
self.queue[key] = value
def popfront(self):
result = self[0]
for i in range(1,len(self.queue)):
self[i].idx -= 1
self[i-1] = self[i]
self.queue.pop()
result.idx = -1
return result
def addNode(self, node):
node.idx = len(self.queue)
self.queue.append(node)
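# Note: this hand-rolled queue resorts itself with O(n) shifts on every
# updateNode/popfront call; Python's heapq with (dist, counter, node) entries
# would be the idiomatic O(log n) alternative. Also, `queue = []` is a class
# attribute, so all priorityQueue instances share one list; that only works
# while a single Dijkstra() run uses it, and running part1 and part2 together
# would leak stale entries between runs.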
def Dijkstra():
Q = priorityQueue()
for y in range(len(nodes)):
for x in range(len(nodes[y])):
Q.addNode(nodes[y][x])
target = Q.queue[-1]
Q.queue[0].dist = 0  # the top-left cell is the start node
while len(Q.queue)>0:
u = Q.popfront()
if u == target:
break
for n in u.neighbors:
d2 = u.dist+n.value
if d2 < n.dist:
n.dist = d2
n.prev = u
Q.updateNode(n)
return target.dist
def part1():
loadFile()
risk = Dijkstra()
print( f"Part 1: Risk:{risk}")
def part2():
loadFile(True)
risk = Dijkstra()
print( f"Part 2: Risk:{risk}")
part1()
#part2()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='Ignition',
version='0.1.8',
description='Run multiple programs in a specific order and monitor their state',
author='Luka Cehovin',
author_email='luka.cehovin@gmail.com',
url='https://github.com/lukacu/ignition/',
packages=['ignition'],
scripts=["bin/ignite"],
requires=[],
)
|
nilq/baby-python
|
python
|
from ..class_boids import Boids
import numpy as np
from nose.tools import assert_equal,assert_almost_equal
import os
import yaml
def init_trial_boids():
pathtofile = 'fixtures/regression_fixtures.yml'
data=yaml.safe_load(open(
os.path.join(os.path.dirname(__file__),pathtofile)))
pos_start = [data['before'][0],data['before'][1]]
vel_start = [data['before'][2],data['before'][3]]
test_boids = Boids(pos_start,vel_start)
return test_boids
def check_func(test_boids,pathtofile):
test_boids.increment_positions()
answer=yaml.safe_load(open(
os.path.join(os.path.dirname(__file__),pathtofile)))
# assert_almost_equal cannot evaluate arrays
# therefore we iterate through all elements
for j in range(test_boids.Nboids):
for i in range(2):
assert_almost_equal(test_boids.positions[i][j],
answer['positions'][i][j],delta=0.01)
assert_almost_equal(test_boids.velocities[i][j],
answer['velocities'][i][j],delta=0.01)
def test_fly_towards_middle():
test_boids = init_trial_boids()
test_boids.fly_towards_middle(0.01)
check_func(test_boids,'fixtures/fly_towards_middle.yml')
def test_avoid_nearby_boids():
test_boids = init_trial_boids()
test_boids.avoid_nearby_boids(100)
check_func(test_boids,'fixtures/avoid_nearby_birds.yml')
def test_match_speeds():
test_boids = init_trial_boids()
test_boids.match_speeds(0.125,1000)
check_func(test_boids,'fixtures/match_speeds.yml')
|
nilq/baby-python
|
python
|
# Copyright (c) 2014-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import netaddr
from requests import HTTPError
from oslo_log import log as logging
from f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex
from f5_openstack_agent.lbaasv2.drivers.bigip.l2_service import \
L2ServiceBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip.network_helper import \
NetworkHelper
from f5_openstack_agent.lbaasv2.drivers.bigip import resource_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.selfips import BigipSelfIpManager
from f5_openstack_agent.lbaasv2.drivers.bigip.snats import BigipSnatManager
from f5_openstack_agent.lbaasv2.drivers.bigip.utils import strip_domain_address
LOG = logging.getLogger(__name__)
class NetworkServiceBuilder(object):
def __init__(self, f5_global_routed_mode, conf, driver, l3_binding=None):
self.f5_global_routed_mode = f5_global_routed_mode
self.conf = conf
self.driver = driver
self.l3_binding = l3_binding
self.l2_service = L2ServiceBuilder(driver, f5_global_routed_mode)
self.bigip_selfip_manager = BigipSelfIpManager(
self.driver, self.l2_service, self.driver.l3_binding)
self.bigip_snat_manager = BigipSnatManager(
self.driver, self.l2_service, self.driver.l3_binding)
self.vlan_manager = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.vlan)
self.rds_cache = {}
self.interface_mapping = self.l2_service.interface_mapping
self.network_helper = NetworkHelper(conf=self.conf)
self.service_adapter = self.driver.service_adapter
def post_init(self):
# Run and Post Initialization Tasks
# run any post initialized tasks, now that the agent
# is fully connected
self.l2_service.post_init()
def tunnel_sync(self, tunnel_ips):
self.l2_service.tunnel_sync(tunnel_ips)
def set_tunnel_rpc(self, tunnel_rpc):
# Provide FDB Connector with ML2 RPC access
self.l2_service.set_tunnel_rpc(tunnel_rpc)
def set_l2pop_rpc(self, l2pop_rpc):
# Provide FDB Connector with ML2 RPC access
self.l2_service.set_l2pop_rpc(l2pop_rpc)
def initialize_vcmp(self):
self.l2_service.initialize_vcmp_manager()
def initialize_tunneling(self, bigip):
# setup tunneling
vtep_folder = self.conf.f5_vtep_folder
vtep_selfip_name = self.conf.f5_vtep_selfip_name
bigip.local_ip = None
if not vtep_folder or vtep_folder.lower() == 'none':
vtep_folder = 'Common'
if vtep_selfip_name and \
not vtep_selfip_name.lower() == 'none':
# profiles may already exist
# create vxlan_multipoint_profile`
self.network_helper.create_vxlan_multipoint_profile(
bigip,
'vxlan_ovs',
partition='Common')
# create l2gre_multipoint_profile
self.network_helper.create_l2gre_multipoint_profile(
bigip,
'gre_ovs',
partition='Common')
# find the IP address for the selfip for each box
local_ip = self.bigip_selfip_manager.get_selfip_addr(
bigip,
vtep_selfip_name,
partition=vtep_folder
)
if local_ip:
bigip.local_ip = local_ip
else:
raise f5_ex.MissingVTEPAddress(
'device %s missing vtep selfip %s'
% (bigip.device_name,
'/' + vtep_folder + '/' +
vtep_selfip_name))
def assure_opflex_network_port(self, network_id, network):
port = None
port_name = "bigip-opflex-{}".format(network_id)
port = self.driver.plugin_rpc.create_port_on_network(
network_id=network_id,
name=port_name)
return port
def is_service_connected(self, service):
networks = service.get('networks', {})
supported_net_types = ['vlan', 'vxlan', 'gre', 'opflex']
for (network_id, network) in networks.iteritems():
if network_id in self.conf.common_network_ids:
continue
network_type = \
network.get('provider:network_type', "")
if network_type == "flat":
continue
segmentation_id = \
network.get('provider:segmentation_id', None)
if not segmentation_id:
if network_type in supported_net_types and \
self.conf.f5_network_segment_physical_network:
if network_type == "opflex":
# This is called only when the HPB config item
# f5_network_segment_physical_network is set.
self.assure_opflex_network_port(network_id, network)
return False
LOG.error("Misconfiguration: Segmentation ID is "
"missing from the service definition. "
"Please check the setting for "
"f5_network_segment_physical_network in "
"f5-openstack-agent.ini in case neutron "
"is operating in Hierarchical Port Binding "
"mode.")
raise f5_ex.InvalidNetworkDefinition(
"Network segment ID %s not defined" % network_id)
return True
def prep_service_networking(self, service, traffic_group):
"""Assure network connectivity is established on all bigips."""
if self.conf.f5_global_routed_mode:
return
if not self.is_service_connected(service):
raise f5_ex.NetworkNotReady(
"Network segment(s) definition incomplete")
if self.conf.use_namespaces:
try:
LOG.debug("Annotating the service definition networks "
"with route domain ID.")
self._annotate_service_route_domains(service)
except f5_ex.InvalidNetworkType as exc:
LOG.warning(exc.message)
except Exception as err:
LOG.exception(err)
raise f5_ex.RouteDomainCreationException(
"Route domain annotation error")
# Per Device Network Connectivity (VLANs or Tunnels)
subnetsinfo = self._get_subnets_to_assure(service)
for (assure_bigip, subnetinfo) in (
itertools.product(self.driver.get_all_bigips(), subnetsinfo)):
LOG.debug("Assuring per device network connectivity "
"for %s on subnet %s." % (assure_bigip.hostname,
subnetinfo['subnet']))
# Make sure the L2 network is established
self.l2_service.assure_bigip_network(
assure_bigip, subnetinfo['network'])
# Connect the BigIP device to network, by getting
# a self-ip address on the subnet.
self.bigip_selfip_manager.assure_bigip_selfip(
assure_bigip, service, subnetinfo)
# L3 Shared Config
assure_bigips = self.driver.get_config_bigips()
LOG.debug("Getting subnetinfo for ...")
LOG.debug(assure_bigips)
for subnetinfo in subnetsinfo:
if self.conf.f5_snat_addresses_per_subnet > 0:
self._assure_subnet_snats(assure_bigips, service, subnetinfo)
if subnetinfo['is_for_member'] and not self.conf.f5_snat_mode:
try:
self._allocate_gw_addr(subnetinfo)
except KeyError as err:
raise f5_ex.VirtualServerCreationException(err.message)
for assure_bigip in assure_bigips:
# If we are not using SNATS, attempt to become
# the subnet's default gateway.
self.bigip_selfip_manager.assure_gateway_on_subnet(
assure_bigip, subnetinfo, traffic_group)
def _annotate_service_route_domains(self, service):
# Add route domain notation to pool member and vip addresses.
tenant_id = service['loadbalancer']['tenant_id']
self.update_rds_cache(tenant_id)
if 'members' in service:
for member in service.get('members', []):
if 'address' in member:
LOG.debug("processing member %s" % member['address'])
if 'network_id' in member and member['network_id']:
member_network = (
self.service_adapter.get_network_from_service(
service,
member['network_id']
))
member_subnet = (
self.service_adapter.get_subnet_from_service(
service,
member['subnet_id']
))
if member_network:
self.assign_route_domain(
tenant_id, member_network, member_subnet)
rd_id = (
'%' + str(member_network['route_domain_id'])
)
member['address'] += rd_id
else:
member['address'] += '%0'
if 'vip_address' in service['loadbalancer']:
loadbalancer = service['loadbalancer']
if 'network_id' in loadbalancer:
lb_network = self.service_adapter.get_network_from_service(
service, loadbalancer['network_id'])
vip_subnet = self.service_adapter.get_subnet_from_service(
service, loadbalancer['vip_subnet_id'])
self.assign_route_domain(
tenant_id, lb_network, vip_subnet)
rd_id = '%' + str(lb_network['route_domain_id'])
service['loadbalancer']['vip_address'] += rd_id
else:
service['loadbalancer']['vip_address'] += '%0'
def is_common_network(self, network):
return self.l2_service.is_common_network(network)
def find_subnet_route_domain(self, tenant_id, subnet_id):
rd_id = 0
bigip = self.driver.get_bigip()
partition_id = self.service_adapter.get_folder_name(
tenant_id)
try:
tenant_rd = self.network_helper.get_route_domain(
bigip, partition=partition_id)
rd_id = tenant_rd.id
except HTTPError as error:
LOG.error(error)
return rd_id
def assign_route_domain(self, tenant_id, network, subnet):
# Assign route domain for a network
if self.l2_service.is_common_network(network):
network['route_domain_id'] = 0
return
LOG.debug("Assign route domain get from cache %s" % network)
try:
route_domain_id = self.get_route_domain_from_cache(network)
network['route_domain_id'] = route_domain_id
return
except f5_ex.RouteDomainCacheMiss as exc:
LOG.debug(exc.message)
LOG.debug("max namespaces: %s" % self.conf.max_namespaces_per_tenant)
LOG.debug("max namespaces == 1: %s" %
(self.conf.max_namespaces_per_tenant == 1))
if self.conf.max_namespaces_per_tenant == 1:
bigip = self.driver.get_bigip()
LOG.debug("bigip before get_domain: %s" % bigip)
partition_id = self.service_adapter.get_folder_name(
tenant_id)
tenant_rd = self.network_helper.get_route_domain(
bigip, partition=partition_id)
network['route_domain_id'] = tenant_rd.id
return
LOG.debug("assign route domain checking for available route domain")
check_cidr = netaddr.IPNetwork(subnet['cidr'])
placed_route_domain_id = None
for route_domain_id in self.rds_cache[tenant_id]:
LOG.debug("checking rd %s" % route_domain_id)
rd_entry = self.rds_cache[tenant_id][route_domain_id]
overlapping_subnet = None
for net_shortname in rd_entry:
LOG.debug("checking net %s" % net_shortname)
net_entry = rd_entry[net_shortname]
for exist_subnet_id in net_entry['subnets']:
if exist_subnet_id == subnet['id']:
continue
exist_subnet = net_entry['subnets'][exist_subnet_id]
exist_cidr = exist_subnet['cidr']
if check_cidr in exist_cidr or exist_cidr in check_cidr:
overlapping_subnet = exist_subnet
LOG.debug('rd %s: overlaps with subnet %s id: %s' % (
route_domain_id, exist_subnet, exist_subnet_id))
break
if overlapping_subnet:
# no need to keep looking
break
if not overlapping_subnet:
placed_route_domain_id = route_domain_id
break
if placed_route_domain_id is None:
if (len(self.rds_cache[tenant_id]) <
self.conf.max_namespaces_per_tenant):
placed_route_domain_id = self._create_aux_rd(tenant_id)
self.rds_cache[tenant_id][placed_route_domain_id] = {}
LOG.debug("Tenant %s now has %d route domains" %
(tenant_id, len(self.rds_cache[tenant_id])))
else:
raise Exception("Cannot allocate route domain")
LOG.debug("Placed in route domain %s" % placed_route_domain_id)
rd_entry = self.rds_cache[tenant_id][placed_route_domain_id]
net_short_name = self.get_neutron_net_short_name(network)
if net_short_name not in rd_entry:
rd_entry[net_short_name] = {'subnets': {}}
net_subnets = rd_entry[net_short_name]['subnets']
net_subnets[subnet['id']] = {'cidr': check_cidr}
network['route_domain_id'] = placed_route_domain_id
def _create_aux_rd(self, tenant_id):
# Create a new route domain
route_domain_id = None
bigips = self.driver.get_all_bigips()
rd_id = self.network_helper.get_next_domain_id(bigips)
for bigip in bigips:
partition_id = self.service_adapter.get_folder_name(tenant_id)
bigip_route_domain_id = self.network_helper.create_route_domain(
bigip,
rd_id,
partition=partition_id,
strictness=self.conf.f5_route_domain_strictness,
is_aux=True)
if route_domain_id is None:
route_domain_id = bigip_route_domain_id.id
elif bigip_route_domain_id.id != route_domain_id:
# FixME error
LOG.debug(
"Bigips allocated two different route domains!: %s %s"
% (bigip_route_domain_id, route_domain_id))
LOG.debug("Allocated route domain %s for tenant %s"
% (route_domain_id, tenant_id))
return route_domain_id
# The purpose of the route domain subnet cache is to
# determine whether there is an existing bigip
# subnet that conflicts with a new one being
# assigned to the route domain.
"""
# route domain subnet cache
rds_cache = {
'<tenant_id>': {
'0': {
'<network type>-<segmentation id>': {
'subnets': {
'<subnet id>': {'cidr': '<cidr>'}
}
}
},
'1': {}
}
}
"""
def update_rds_cache(self, tenant_id):
# Update the route domain cache from bigips
if tenant_id not in self.rds_cache:
LOG.debug("rds_cache: adding tenant %s" % tenant_id)
self.rds_cache[tenant_id] = {}
for bigip in self.driver.get_all_bigips():
self.update_rds_cache_bigip(tenant_id, bigip)
LOG.debug("rds_cache updated: " + str(self.rds_cache))
def update_rds_cache_bigip(self, tenant_id, bigip):
# Update the route domain cache for this tenant
# with information from bigip's vlan and tunnels
LOG.debug("rds_cache: processing bigip %s" % bigip.device_name)
route_domain_ids = self.network_helper.get_route_domain_ids(
bigip,
partition=self.service_adapter.get_folder_name(tenant_id))
# LOG.debug("rds_cache: got bigip route domains: %s" % route_domains)
for route_domain_id in route_domain_ids:
self.update_rds_cache_bigip_rd_vlans(
tenant_id, bigip, route_domain_id)
def update_rds_cache_bigip_rd_vlans(
self, tenant_id, bigip, route_domain_id):
# Update the route domain cache with information
# from the bigip vlans and tunnels from
# this route domain
LOG.debug("rds_cache: processing bigip %s rd %s"
% (bigip.device_name, route_domain_id))
# this gets tunnels too
partition_id = self.service_adapter.get_folder_name(tenant_id)
rd_vlans = self.network_helper.get_vlans_in_route_domain_by_id(
bigip,
partition=partition_id,
id=route_domain_id
)
LOG.debug("rds_cache: bigip %s rd %s vlans: %s"
% (bigip.device_name, route_domain_id, rd_vlans))
if len(rd_vlans) == 0:
LOG.debug("No vlans found for route domain: %d" %
(route_domain_id))
return
# make sure this rd has a cache entry
tenant_entry = self.rds_cache[tenant_id]
if route_domain_id not in tenant_entry:
tenant_entry[route_domain_id] = {}
# for every VLAN or TUNNEL on this bigip...
for rd_vlan in rd_vlans:
self.update_rds_cache_bigip_vlan(
tenant_id, bigip, route_domain_id, rd_vlan)
def update_rds_cache_bigip_vlan(
self, tenant_id, bigip, route_domain_id, rd_vlan):
# Update the route domain cache with information
# from the bigip vlan or tunnel
LOG.debug("rds_cache: processing bigip %s rd %d vlan %s"
% (bigip.device_name, route_domain_id, rd_vlan))
net_short_name = self.get_bigip_net_short_name(
bigip, tenant_id, rd_vlan)
# make sure this net has a cache entry
tenant_entry = self.rds_cache[tenant_id]
rd_entry = tenant_entry[route_domain_id]
if net_short_name not in rd_entry:
rd_entry[net_short_name] = {'subnets': {}}
net_subnets = rd_entry[net_short_name]['subnets']
partition_id = self.service_adapter.get_folder_name(tenant_id)
LOG.debug("Calling get_selfips with: partition %s and vlan_name %s",
partition_id, rd_vlan)
selfips = self.bigip_selfip_manager.get_selfips(
bigip,
partition=partition_id,
vlan_name=rd_vlan
)
LOG.debug("rds_cache: got selfips")
for selfip in selfips:
LOG.debug("rds_cache: processing bigip %s rd %s vlan %s self %s" %
(bigip.device_name, route_domain_id, rd_vlan,
selfip.name))
if bigip.device_name not in selfip.name:
LOG.error("rds_cache: Found unexpected selfip %s for tenant %s"
% (selfip.name, tenant_id))
continue
subnet_id = selfip.name.split(bigip.device_name + '-')[1]
# convert 10.1.1.1%1/24 to 10.1.1.1/24
(addr, netbits) = selfip.address.split('/')
addr = addr.split('%')[0]
selfip.address = addr + '/' + netbits
# selfip addresses will have slash notation: 10.1.1.1/24
netip = netaddr.IPNetwork(selfip.address)
LOG.debug("rds_cache: updating subnet %s with %s"
% (subnet_id, str(netip.cidr)))
net_subnets[subnet_id] = {'cidr': netip.cidr}
LOG.debug("rds_cache: now %s" % self.rds_cache)
def get_route_domain_from_cache(self, network):
# Get route domain from cache by network
net_short_name = self.get_neutron_net_short_name(network)
for tenant_id in self.rds_cache:
tenant_cache = self.rds_cache[tenant_id]
for route_domain_id in tenant_cache:
if net_short_name in tenant_cache[route_domain_id]:
return route_domain_id
# Not found
raise f5_ex.RouteDomainCacheMiss(
"No route domain cache entry for {0}".format(net_short_name))
def remove_from_rds_cache(self, network, subnet):
# Get route domain from cache by network
LOG.debug("remove_from_rds_cache")
net_short_name = self.get_neutron_net_short_name(network)
for tenant_id in self.rds_cache:
LOG.debug("rds_cache: processing remove for %s" % tenant_id)
deleted_rds = []
tenant_cache = self.rds_cache[tenant_id]
for route_domain_id in tenant_cache:
if net_short_name in tenant_cache[route_domain_id]:
net_entry = tenant_cache[route_domain_id][net_short_name]
if subnet['id'] in net_entry['subnets']:
del net_entry['subnets'][subnet['id']]
if len(net_entry['subnets']) == 0:
del net_entry['subnets']
if len(tenant_cache[route_domain_id][net_short_name]) == 0:
del tenant_cache[route_domain_id][net_short_name]
if len(self.rds_cache[tenant_id][route_domain_id]) == 0:
deleted_rds.append(route_domain_id)
for rd in deleted_rds:
LOG.debug("removing route domain %d from tenant %s" %
(rd, tenant_id))
del self.rds_cache[tenant_id][rd]
def get_bigip_net_short_name(self, bigip, tenant_id, network_name):
# Return <network_type>-<seg_id> for bigip network
LOG.debug("get_bigip_net_short_name: %s:%s" % (
tenant_id, network_name))
partition_id = self.service_adapter.get_folder_name(tenant_id)
LOG.debug("network_name %s", network_name.split('/'))
network_name = network_name.split("/")[-1]
if 'tunnel-gre-' in network_name:
tunnel_key = self.network_helper.get_tunnel_key(
bigip,
network_name,
partition=partition_id
)
return 'gre-%s' % tunnel_key
elif 'tunnel-vxlan-' in network_name:
LOG.debug("Getting tunnel key for VXLAN: %s", network_name)
tunnel_key = self.network_helper.get_tunnel_key(
bigip,
network_name,
partition=partition_id
)
return 'vxlan-%s' % tunnel_key
else:
LOG.debug("Getting tunnel key for VLAN: %s", network_name)
vlan_id = self.network_helper.get_vlan_id(bigip,
name=network_name,
partition=partition_id)
return 'vlan-%s' % vlan_id
@staticmethod
def get_neutron_net_short_name(network):
# Return <network_type>-<seg_id> for neutron network
net_type = network.get('provider:network_type', None)
net_seg_key = network.get('provider:segmentation_id', None)
if not net_type or not net_seg_key:
raise f5_ex.InvalidNetworkType(
'Provider network attributes not complete:'
'provider: network_type - {0} '
'and provider:segmentation_id - {1}'
.format(net_type, net_seg_key))
return net_type + '-' + str(net_seg_key)
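# Example: a neutron network with provider:network_type 'vlan' and
# provider:segmentation_id 1234 yields the short name 'vlan-1234', matching
# the names get_bigip_net_short_name() derives from BIG-IP VLANs and tunnels.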
def _assure_subnet_snats(self, assure_bigips, service, subnetinfo):
# Ensure snat for subnet exists on bigips
tenant_id = service['loadbalancer']['tenant_id']
subnet = subnetinfo['subnet']
snats_per_subnet = self.conf.f5_snat_addresses_per_subnet
lb_id = service['loadbalancer']['id']
assure_bigips = \
[bigip for bigip in assure_bigips
if tenant_id not in bigip.assured_tenant_snat_subnets or
subnet['id'] not in
bigip.assured_tenant_snat_subnets[tenant_id]]
LOG.debug("_assure_subnet_snats: getting snat addrs for: %s" %
subnet['id'])
if len(assure_bigips):
snat_addrs = self.bigip_snat_manager.get_snat_addrs(
subnetinfo, tenant_id, snats_per_subnet, lb_id)
if len(snat_addrs) != snats_per_subnet:
raise f5_ex.SNATCreationException(
"Unable to satisfy request to allocate %d "
"snats. Actual SNAT count: %d SNATs" %
(snats_per_subnet, len(snat_addrs)))
for assure_bigip in assure_bigips:
self.bigip_snat_manager.assure_bigip_snats(
assure_bigip, subnetinfo, snat_addrs, tenant_id)
def _allocate_gw_addr(self, subnetinfo):
# Create a name for the port and for the IP Forwarding
# Virtual Server as well as the floating Self IP which
# will answer ARP for the members
need_port_for_gateway = False
network = subnetinfo['network']
subnet = subnetinfo['subnet']
if not network or not subnet:
LOG.error('Attempted to create default gateway'
' for network with no id...skipping.')
return
if not subnet['gateway_ip']:
raise KeyError("attempting to create gateway on subnet without "
"gateway ip address specified.")
gw_name = "gw-" + subnet['id']
ports = self.driver.plugin_rpc.get_port_by_name(port_name=gw_name)
if len(ports) < 1:
need_port_for_gateway = True
# There was no port on this agent's host, so get one from Neutron
if need_port_for_gateway:
try:
rpc = self.driver.plugin_rpc
new_port = rpc.create_port_on_subnet_with_specific_ip(
subnet_id=subnet['id'], mac_address=None,
name=gw_name, ip_address=subnet['gateway_ip'])
LOG.info('gateway IP for subnet %s will be port %s'
% (subnet['id'], new_port['id']))
except Exception as exc:
ermsg = 'Invalid default gateway for subnet %s:%s - %s.' \
% (subnet['id'],
subnet['gateway_ip'],
exc.message)
ermsg += " SNAT will not function and load balancing"
ermsg += " support will likely fail. Enable f5_snat_mode."
LOG.exception(ermsg)
return True
def post_service_networking(self, service, all_subnet_hints):
# Assure networks are deleted from big-ips
if self.conf.f5_global_routed_mode:
return
# L2toL3 networking layer
# Non Shared Config - Local Per BIG-IP
self.update_bigip_l2(service)
# Delete shared config objects
deleted_names = set()
for bigip in self.driver.get_config_bigips():
LOG.debug('post_service_networking: calling '
'_assure_delete_networks del nets sh for bigip %s %s'
% (bigip.device_name, all_subnet_hints))
subnet_hints = all_subnet_hints[bigip.device_name]
deleted_names = deleted_names.union(
self._assure_delete_nets_shared(bigip, service,
subnet_hints))
# Delete non shared config objects
for bigip in self.driver.get_all_bigips():
LOG.debug(' post_service_networking: calling '
' _assure_delete_networks del nets ns for bigip %s'
% bigip.device_name)
subnet_hints = all_subnet_hints[bigip.device_name]
deleted_names = deleted_names.union(
self._assure_delete_nets_nonshared(
bigip, service, subnet_hints)
)
for port_name in deleted_names:
LOG.debug(' post_service_networking: calling '
' del port %s'
% port_name)
self.driver.plugin_rpc.delete_port_by_name(
port_name=port_name)
def update_bigip_l2(self, service):
# Update fdb entries on bigip
loadbalancer = service['loadbalancer']
service_adapter = self.service_adapter
bigips = self.driver.get_all_bigips()
update_members = list()
delete_members = list()
update_loadbalancer = None
delete_loadbalancer = None
if "network_id" not in loadbalancer:
LOG.error("update_bigip_l2, expected network ID")
return
if loadbalancer.get('provisioning_status', None) == \
constants_v2.F5_PENDING_DELETE:
delete_loadbalancer = loadbalancer
else:
update_loadbalancer = loadbalancer
members = service.get('members', [])
for member in members:
member['network'] = service_adapter.get_network_from_service(
service, member['network_id'])
if member.get('provisioning_status', None) == \
constants_v2.F5_PENDING_DELETE:
delete_members.append(member)
else:
update_members.append(member)
loadbalancer['network'] = service_adapter.get_network_from_service(
service,
loadbalancer['network_id']
)
if delete_loadbalancer or delete_members:
self.l2_service.delete_fdb_entries(
bigips, delete_loadbalancer, delete_members)
if update_loadbalancer or update_members:
self.l2_service.add_fdb_entries(
bigips, update_loadbalancer, update_members)
LOG.debug("update_bigip_l2 complete")
def _assure_delete_nets_shared(self, bigip, service, subnet_hints):
# Assure shared configuration (which syncs) is deleted
deleted_names = set()
tenant_id = service['loadbalancer']['tenant_id']
delete_gateway = self.bigip_selfip_manager.delete_gateway_on_subnet
for subnetinfo in self._get_subnets_to_delete(bigip,
service,
subnet_hints):
try:
if not self.conf.f5_snat_mode:
gw_name = delete_gateway(bigip, subnetinfo)
deleted_names.add(gw_name)
my_deleted_names, my_in_use_subnets = \
self.bigip_snat_manager.delete_bigip_snats(
bigip, subnetinfo, tenant_id)
deleted_names = deleted_names.union(my_deleted_names)
for in_use_subnetid in my_in_use_subnets:
subnet_hints['check_for_delete_subnets'].pop(
in_use_subnetid, None)
except f5_ex.F5NeutronException as exc:
LOG.error("assure_delete_nets_shared: exception: %s"
% str(exc.msg))
except Exception as exc:
LOG.error("assure_delete_nets_shared: exception: %s"
% str(exc.message))
return deleted_names
def _assure_delete_nets_nonshared(self, bigip, service, subnet_hints):
# Delete non shared base objects for networks
deleted_names = set()
for subnetinfo in self._get_subnets_to_delete(bigip,
service,
subnet_hints):
try:
network = subnetinfo['network']
if self.l2_service.is_common_network(network):
network_folder = 'Common'
else:
network_folder = self.service_adapter.get_folder_name(
service['loadbalancer']['tenant_id'])
subnet = subnetinfo['subnet']
if self.conf.f5_populate_static_arp:
self.network_helper.arp_delete_by_subnet(
bigip,
subnet=subnet['cidr'],
mask=None,
partition=network_folder
)
local_selfip_name = "local-" + bigip.device_name + \
"-" + subnet['id']
selfip_address = self.bigip_selfip_manager.get_selfip_addr(
bigip,
local_selfip_name,
partition=network_folder
)
if not selfip_address:
LOG.error("Failed to get self IP address %s in cleanup.",
local_selfip_name)
self.bigip_selfip_manager.delete_selfip(
bigip,
local_selfip_name,
partition=network_folder
)
if self.l3_binding and selfip_address:
self.l3_binding.unbind_address(subnet_id=subnet['id'],
ip_address=selfip_address)
deleted_names.add(local_selfip_name)
if self.conf.f5_network_segment_physical_network:
opflex_net_id = network.get('id')
if opflex_net_id:
opflex_net_port = "bigip-opflex-{}".format(
opflex_net_id)
deleted_names.add(opflex_net_port)
self.l2_service.delete_bigip_network(bigip, network)
if subnet['id'] not in subnet_hints['do_not_delete_subnets']:
subnet_hints['do_not_delete_subnets'].append(subnet['id'])
self.remove_from_rds_cache(network, subnet)
tenant_id = service['loadbalancer']['tenant_id']
if tenant_id in bigip.assured_tenant_snat_subnets:
tenant_snat_subnets = \
bigip.assured_tenant_snat_subnets[tenant_id]
if subnet['id'] in tenant_snat_subnets:
tenant_snat_subnets.remove(subnet['id'])
except f5_ex.F5NeutronException as exc:
LOG.debug("assure_delete_nets_nonshared: exception: %s"
% str(exc.msg))
except Exception as exc:
LOG.debug("assure_delete_nets_nonshared: exception: %s"
% str(exc.message))
return deleted_names
def _get_subnets_to_delete(self, bigip, service, subnet_hints):
        # Clean up any Self IPs, SNATs, networks, and folders for
        # service items that we deleted.
subnets_to_delete = []
for subnetinfo in subnet_hints['check_for_delete_subnets'].values():
subnet = self.service_adapter.get_subnet_from_service(
service, subnetinfo['subnet_id'])
subnetinfo['subnet'] = subnet
network = self.service_adapter.get_network_from_service(
service, subnetinfo['network_id'])
subnetinfo['network'] = network
route_domain = network.get('route_domain_id', None)
if not subnet:
continue
if not self._ips_exist_on_subnet(
bigip,
service,
subnet,
route_domain):
subnets_to_delete.append(subnetinfo)
return subnets_to_delete
def _ips_exist_on_subnet(self, bigip, service, subnet, route_domain):
# Does the big-ip have any IP addresses on this subnet?
LOG.debug("_ips_exist_on_subnet entry %s rd %s"
% (str(subnet['cidr']), route_domain))
route_domain = str(route_domain)
ipsubnet = netaddr.IPNetwork(subnet['cidr'])
# Are there any virtual addresses on this subnet?
folder = self.service_adapter.get_folder_name(
service['loadbalancer']['tenant_id']
)
virtual_services = self.network_helper.get_virtual_service_insertion(
bigip,
partition=folder
)
for virt_serv in virtual_services:
(_, dest) = virt_serv.items()[0]
LOG.debug(" _ips_exist_on_subnet: checking vip %s"
% str(dest['address']))
if len(dest['address'].split('%')) > 1:
vip_route_domain = dest['address'].split('%')[1]
else:
vip_route_domain = '0'
if vip_route_domain != route_domain:
continue
vip_addr = strip_domain_address(dest['address'])
if netaddr.IPAddress(vip_addr) in ipsubnet:
LOG.debug(" _ips_exist_on_subnet: found")
return True
# If there aren't any virtual addresses, are there
# node addresses on this subnet?
nodes = self.network_helper.get_node_addresses(
bigip,
partition=folder
)
for node in nodes:
LOG.debug(" _ips_exist_on_subnet: checking node %s"
% str(node))
if len(node.split('%')) > 1:
node_route_domain = node.split('%')[1]
else:
node_route_domain = '0'
if node_route_domain != route_domain:
continue
node_addr = strip_domain_address(node)
if netaddr.IPAddress(node_addr) in ipsubnet:
LOG.debug(" _ips_exist_on_subnet: found")
return True
LOG.debug(" _ips_exist_on_subnet exit %s"
% str(subnet['cidr']))
# nothing found
return False
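    # Note: BIG-IP embeds the route domain in an address as '<ip>%<rd>'
    # (e.g. '10.0.0.5%2'); the loops above split on '%' so that only
    # addresses in the matching route domain are compared against the subnet.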
def add_bigip_fdb(self, bigip, fdb):
self.l2_service.add_bigip_fdb(bigip, fdb)
def remove_bigip_fdb(self, bigip, fdb):
self.l2_service.remove_bigip_fdb(bigip, fdb)
def update_bigip_fdb(self, bigip, fdb):
self.l2_service.update_bigip_fdb(bigip, fdb)
def set_context(self, context):
self.l2_service.set_context(context)
def vlan_exists(self, bigip, network, folder='Common'):
return self.vlan_manager.exists(bigip, name=network, partition=folder)
def _get_subnets_to_assure(self, service):
# Examine service and return active networks
networks = dict()
loadbalancer = service['loadbalancer']
service_adapter = self.service_adapter
lb_status = loadbalancer['provisioning_status']
if lb_status != constants_v2.F5_PENDING_DELETE:
if 'network_id' in loadbalancer:
network = service_adapter.get_network_from_service(
service,
loadbalancer['network_id']
)
subnet = service_adapter.get_subnet_from_service(
service,
loadbalancer['vip_subnet_id']
)
networks[subnet['id']] = {'network': network,
'subnet': subnet,
'is_for_member': False}
for member in service.get('members', []):
if member['provisioning_status'] != constants_v2.F5_PENDING_DELETE:
if 'network_id' in member:
network = service_adapter.get_network_from_service(
service,
member['network_id']
)
subnet = service_adapter.get_subnet_from_service(
service,
member['subnet_id']
)
networks[subnet['id']] = {'network': network,
'subnet': subnet,
'is_for_member': True}
return networks.values()
|
nilq/baby-python
|
python
|
import numpy as np
import torch
from net import Net
from generator import Generator
import utils
EPOCHS = 100000
BATCH_SIZE = 32
LR = 1e-3
LR_STEP = 0.1
LR_FAILS = 3
SIZE = (40, 40)
MARGIN = 1
NOISE = 0.1
MAX_LENGTH = 5
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
np.set_printoptions(threshold=np.inf, precision=4, suppress=True, linewidth=160)
gen = Generator(MAX_LENGTH, SIZE, MARGIN, NOISE)
net = Net(DEVICE)
print(net)
#---- train
def get_batch(size):
batch_x = []
batch_y = []
batch_lm = np.ones((size, MAX_LENGTH), dtype=np.float32) # loss mask
for i in range(size):
chars, img, ln = gen.generate()
chars = list(map(lambda x: ord(x), chars))
chars = np.array(chars)
batch_x.append(img)
batch_y.append(chars)
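        # zero the loss mask past this sample's length; ln+1 keeps one extra
        # position, presumably for an end-of-sequence marker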
batch_lm[i, ln+1:] = 0
batch_x = np.array(batch_x, dtype=np.float32) / 255
batch_y = np.array(batch_y, dtype=np.int64) - ord('A')
return batch_x, batch_y, batch_lm
test_x, test_y, test_lm = get_batch(1024)
lr = LR
losses = []
best_loss = 1e6
lr_fails = 0
net.set_lr(lr)
print("LR: {:.2e}".format(lr))
fps = utils.Fps()
fps.start()
for e in range(EPOCHS):
train_x, train_y, train_lm = get_batch(BATCH_SIZE)
net.train(train_x, train_y, MAX_LENGTH, train_lm)
if utils.is_time(e, 100):
pred_y, msks = net(test_x, MAX_LENGTH)
pred_y = pred_y.argmax(dim=2).detach().cpu().numpy()
cond = np.logical_or( (pred_y == test_y), (1 - test_lm) )
corr = np.all(cond, 1).mean()
test_loss = net.get_loss(test_x, test_y, MAX_LENGTH, test_lm).item()
print("Epoch {}: loss {:.3f}, corr: {:.0f}%, fps: {:.1f}".format(e, test_loss, corr * 100, fps.fps(e)))
losses.append(test_loss)
if test_loss > best_loss:
lr_fails += 1
print("." * lr_fails)
if lr_fails >= LR_FAILS:
lr = lr * LR_STEP
net.set_lr(lr)
print("LR: {:.2e}".format(lr))
else:
best_loss = test_loss
lr_fails = 0
if utils.is_time(e, 1000):
torch.save(net.state_dict(), 'model')
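# Hedged inference sketch, mirroring the evaluation block above:
#   pred_y, msks = net(test_x, MAX_LENGTH)
#   chars = pred_y.argmax(dim=2).detach().cpu().numpy() + ord('A')
#   text = "".join(chr(c) for c in chars[0])  # decode the first sample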
|
nilq/baby-python
|
python
|
# under development
class NavigationHelper:
def __init__(self, app):
self.app = app
def open_home_page(self):
        wd = self.app.wd
# open home page
wd.get("http://localhost/adressbook/group.php")
def open_group_page(self):
        wd = self.app.wd
# open group page
wd.find_element_by_link_text("groups").click()
|
nilq/baby-python
|
python
|
"""ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
Implementation for simple stacked convolutional networks.
"""
import torch
import torch.nn as nn
class SimpleConvNet(nn.Module):
def __init__(self, num_classes=None, kernel_size=7, feature_pos='post'):
super(SimpleConvNet, self).__init__()
padding = kernel_size // 2
layers = [
nn.Conv2d(3, 16, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(16),
nn.ReLU(inplace=True),
nn.Conv2d(16, 32, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(32, 64, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 128, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
]
self.extracter = nn.Sequential(*layers)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # honor num_classes when given; fall back to the original 10-way head
        self.fc = nn.Linear(128, num_classes if num_classes is not None else 10)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if feature_pos not in ['pre', 'post', 'logits']:
raise ValueError(feature_pos)
self.feature_pos = feature_pos
def forward(self, x, logits_only=False):
pre_gap_feats = self.extracter(x)
post_gap_feats = self.avgpool(pre_gap_feats)
post_gap_feats = torch.flatten(post_gap_feats, 1)
logits = self.fc(post_gap_feats)
if logits_only:
return logits
elif self.feature_pos == 'pre':
feats = pre_gap_feats
elif self.feature_pos == 'post':
feats = post_gap_feats
else:
feats = logits
return logits, feats
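# Minimal usage sketch: a shape sanity check with dummy 32x32 RGB inputs
# (any spatial size works because of the adaptive average pooling):
if __name__ == '__main__':
    net = SimpleConvNet(num_classes=10, feature_pos='post')
    x = torch.randn(2, 3, 32, 32)
    logits, feats = net(x)
    print(logits.shape, feats.shape)  # torch.Size([2, 10]) torch.Size([2, 128])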
|
nilq/baby-python
|
python
|
import tensorflow as tf
import numpy as np
class Convolutional_NN(object):
def __init__(self):
pass
def lr_network(self, input_shape, label_shape):
"""
Create loss function and the list of metrics
Arguments:
input_shape: [list / tuple] input shape
label_shape: [list / tuple] output shape
"""
self.label_shape = label_shape
self.input_shape = input_shape
self.loss = tf.keras.losses.SparseCategoricalCrossentropy()
self.metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]
def build_model(self):
'''
Return a CNN model
'''
model = tf.keras.models.Sequential()
model.add( tf.keras.Input(shape=self.input_shape))
if len(self.input_shape) == 1:
model.add(tf.keras.layers.Reshape((int(np.sqrt(self.input_shape[-1])), int(np.sqrt(self.input_shape[-1])),1), input_shape=(784,)))
model.add( tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128))
model.add(tf.keras.layers.Dense(self.label_shape[-1], activation="softmax"))
return model
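# Minimal usage sketch (assumes flat 784-dimensional MNIST-style inputs,
# which the Reshape branch above converts to 28x28x1):
if __name__ == '__main__':
    cnn = Convolutional_NN()
    cnn.lr_network(input_shape=(784,), label_shape=(10,))
    model = cnn.build_model()
    model.compile(optimizer='adam', loss=cnn.loss, metrics=cnn.metrics)
    model.summary()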
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 06 15:07:49 2016
@author: Mike
"""
from __future__ import division
import pandas as pd
import numpy as np
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
import os
from geopy.distance import vincenty
from mpl_toolkits.basemap import Basemap
def findClosestStation(coords1, coords2):
'''
Function finds the closest value of coords2 to each value in coords1.
Inputs:
coords1 (iterable) - contains (lat, long) pairs
coords2 (iterable) - contains (lat, long) pairs
Outputs:
closest (list) - contains tuple of (index, distance_miles) of closest point
in coords2 to each tuple in coords1.
'''
closest = []
# for each pair of coordinates in coords1
for firLoc in coords1:
dis = []
# calculate the distance to each coordinate pair in coords2
for secLoc in coords2:
# append the distance in miles
dis.append(vincenty(firLoc,secLoc).miles)
# find the minimum distance and the index
# Uses base python, but numpy.argmin is applicable
# Check documentation on built-in functions for min and enumerate
min_index, min_distance = min(enumerate(dis), key = lambda p: p[1])
# store results
closest.append((min_index, min_distance))
return closest
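def demo_find_closest_station():
    '''
    Hedged usage sketch with made-up coordinates: print the index of, and
    distance to, the nearest point in the second list for each point in the
    first list.
    '''
    buildings = [(37.7749, -122.4194), (34.0522, -118.2437)]
    stations = [(36.7783, -119.4179), (37.3382, -121.8863)]
    for index, miles in findClosestStation(buildings, stations):
        print('closest station index: {}, distance: {:.1f} miles'.format(index, miles))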
def main():
# locate the meta data
fname = 'all_sites.csv'
metaDataPath = os.path.join(os.pardir, os.pardir, 'csv-only',
'meta', fname)
# locate tmy3 meta dat
fname = 'TMY3_StationsMeta.csv'
tmy3DataPath = os.path.join(os.pardir, os.pardir,'csv-only',
'meta', fname)
# Read the data into a pandas dataframe
tmy3MetaData = pd.DataFrame.from_csv(tmy3DataPath, index_col=None)
metaData = pd.DataFrame.from_csv(metaDataPath, index_col=None)
# get location data
lat = metaData[u'LAT'].values
lng = metaData[u'LNG'].values
# Find closest TMY3 weather data to each building
tmy3_lat = tmy3MetaData['Latitude'].values
tmy3_lng = tmy3MetaData['Longitude'].values
min_distance = findClosestStation(list(zip(lat,lng)),
list(zip(tmy3_lat,tmy3_lng)))
# store unique attributes of each minimum distance station
tmy3SiteNames = [tmy3MetaData['Site Name'][x[0]] for x in min_distance]
#---------------------------------------------------------------------------
# Find the biggest cluster of buildings
#---------------------------------------------------------------------------
'''
There is a weather station that has the most buildings for which it is the
closest weather station. Determine this station and plot the energy use
of these buildings.
'''
# list of all the indexes
index_list = [x[0] for x in min_distance]
# get the mode of the indexes (i.e., the most frequent)
most_freq = max(set(index_list), key = index_list.count)
# get the indices of the buildings that match the most frequent weather station
keepers = []
for index, tup in enumerate(min_distance):
if tup[0] == most_freq:
keepers.append(index)
# subset the pandas dataframe to pick the relevant buildings and get their data
keeperData = []
for index in keepers:
# get the site id
siteId = metaData[u'SITE_ID'][index]
# create path to data
buiFullPath = os.path.join(os.pardir, os.pardir, 'csv-only', 'csv', '{}.csv'.format(siteId))
# read energy data
energy_kWh = np.genfromtxt(buiFullPath, delimiter=',',skip_header=1,
usecols=2)
# annual energy use in kBTU
energyAnn_kBTU = np.sum(energy_kWh*3.412)
# get meta data
flrArea = metaData[u'SQ_FT'][index]
industry = metaData[u'INDUSTRY'][index]
# full building info
buiInfo = (siteId, energyAnn_kBTU, flrArea, industry, buiFullPath)
# save and append the data
keeperData.append(buiInfo)
#---------------------------------------------------------------------------
# Final plotting of the data
#---------------------------------------------------------------------------
'''
Create a scatter plot with square footage on x axis, energy use on y axis
and colored by industry type.
'''
# create a color for each of the unique industries
indNames = set([x[3] for x in keeperData])
numIndustries = len(indNames)
# get a color from a color map
cm = plt.get_cmap('Set1')
colPts = np.linspace(0.0, 0.5, numIndustries)
# relational database
type_color_map = dict(zip(indNames, colPts))
# get the data
colors = [type_color_map[x[3]] for x in keeperData]
sqFt = [x[2] for x in keeperData]
eneUse = [x[1]/1000 for x in keeperData]
areas = [np.interp(kk, [min(eneUse), np.percentile(eneUse, 25),
np.percentile(eneUse, 75),
max(eneUse)],
np.array([5, 10, 20, 40])*10) for kk in eneUse]
# plot
plt.scatter(sqFt, eneUse, c=colors, s = areas, edgecolor='')
plt.xlabel('Square Feet')
plt.ylabel('Annual Energy Use [MBTU]')
# Ensure the required directory exists
if not os.path.isdir('../../figures'):
os.mkdir('../../figures')
# Save figure
plt.savefig('../../figures/buildingsdata-session3.png')
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
a.b -= 2
a[0] += 1
a[0:2] += 1
|
nilq/baby-python
|
python
|
import re
class JSVM(object):
_memory = {}
_program = []
_js_methods = {}
def __init__(self, code=""):
# TODO: parse automatically the 'swap' method
# function Bn(a,b){var c=a[0];a[0]=a[b%a.length];a[b]=c;return a};
def _swap(args):
a = list(args[0])
b = int(args[1])
c = a[0]
a[0] = a[b % len(a)]
a[b] = c
return "".join(a)
def _split(args):
return ""
def _slice(args):
return args[0][int(args[1]):]
def _reverse(args):
return args[0][::-1]
def _join(args):
return "".join(args[0])
def _assign(args):
return args[0]
def _get(args):
return self._memory[args[0]]
self._js_methods = {
"split": _split,
"slice": _slice,
"reverse": _reverse,
"join": _join,
"$swap": _swap,
"$assign": _assign,
"$get": _get
}
if code != "":
self.compile(code)
def compile(self, code):
self._program = []
regex = re.compile(r"(\w+\.)?(\w+)\(([^)]*)\)")
code = code.replace("return ", "return=")
for instruction in code.split(";"):
#print instruction
var, method = instruction.split("=")
m = regex.match(method)
if m is None:
arguments = [method[1:-1]]
method = "$assign"
else:
m = m.groups()
#print m
arguments = []
pre_args = [m[0][:-1]] if m[0] is not None else []
pre_args += m[2].split(",")
for a in pre_args:
if a is None or a == "":
continue
# Replace variables with his value
arguments += [JSMethod(self._js_methods["$get"], a) if not a[0] == '"' and not a[0] == '' and not a.isdigit() else a]
# Suppose that an undefined method is '$swap' method
method = "$swap" if m[1] not in self._js_methods.keys() else m[1]
self._program += [(var, JSMethod(self._js_methods[method], arguments))]
return self._program
def setPreinterpreted(self, program):
self._program = program
def run(self):
for ins in self._program:
#print "%s(%s)" % (ins[1]._m.__name__, ins[1]._a)
if ins[0] not in self._memory:
self._memory[ins[0]] = None
self._memory[ins[0]] = ins[1].run()
return self._memory
class JSMethod(object):
def __init__(self, method, args):
self._m = method
self._a = args
def run(self):
args = [a.run() if isinstance(a, JSMethod) else a for a in self._a]
return self._m(args)
def __repr__(self):
return "%s(%s)" % (self._m.__name__, self._a)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
"""
entry point
"""
import datetime
import traceback
import six
import tempfile
import io
import signal
import json
import gzip
# config file
from pandaserver.config import panda_config
from pandaserver.taskbuffer.Initializer import initializer
from pandaserver.taskbuffer.TaskBuffer import taskBuffer
from pandaserver.jobdispatcher.JobDispatcher import jobDispatcher
from pandaserver.dataservice.DataService import dataService
from pandaserver.userinterface.UserIF import userIF
from pandaserver.taskbuffer.Utils import isAlive, putFile, deleteFile, getServer, updateLog, fetchLog,\
touchFile, getVomsAttr, putEventPickingRequest, getAttr, uploadLog, put_checkpoint, delete_checkpoint,\
put_file_recovery_request, put_workflow_request
from pandaserver.dataservice.DataService import datasetCompleted, updateFileStatusInDisp
from pandaserver.jobdispatcher.JobDispatcher import getJob, updateJob, getStatus, genPilotToken,\
getEventRanges, updateEventRange, getKeyPair, updateEventRanges, getDNsForS3, getProxy, getCommands, ackCommands,\
checkJobStatus, checkEventsAvailability, updateJobsInBulk, getResourceTypes
from pandaserver.userinterface.UserIF import submitJobs, getJobStatus, queryPandaIDs, killJobs, reassignJobs,\
getJobStatistics, getJobStatisticsPerSite, resubmitJobs, queryLastFilesInDataset, getPandaIDsSite,\
getJobsToBeUpdated, updateProdDBUpdateTimes, runTaskAssignment, getAssigningTask, getSiteSpecs,\
getCloudSpecs, seeCloudTask, queryJobInfoPerCloud, registerProxyKey, getProxyKey,\
getJobIDsInTimeRange, getPandIDsWithJobID, getFullJobStatus, getJobStatisticsForBamboo,\
getNUserJobs, addSiteAccess, listSiteAccess, getFilesInUseForAnal, updateSiteAccess,\
getPandaClientVer, getSlimmedFileInfoPandaIDs, getQueuedAnalJobs, getHighestPrioJobStat,\
getActiveDatasets, setCloudTaskByUser, getSerialNumberForGroupJob, getCachePrefixes,\
checkMergeGenerationStatus, getNumPilots, retryFailedJobsInActive,\
getJobStatisticsWithLabel, getPandaIDwithJobExeID, getJobStatisticsPerUserSite,\
getDisInUseForAnal, getLFNsInUseForAnal, getScriptOfflineRunning, setDebugMode,\
insertSandboxFileInfo, checkSandboxFile, changeJobPriorities, insertTaskParams,\
killTask, finishTask, getCmtConfigList, getJediTasksInTimeRange, getJediTaskDetails,\
retryTask, getRetryHistory, changeTaskPriority, reassignTask, changeTaskAttributePanda,\
pauseTask, resumeTask, increaseAttemptNrPanda, killUnfinishedJobs, changeTaskSplitRulePanda,\
changeTaskModTimePanda, avalancheTask, getPandaIDsWithTaskID, reactivateTask, getTaskStatus, \
reassignShare, listTasksInShare, getTaskParamsMap, updateWorkers, harvesterIsAlive,\
reportWorkerStats, reportWorkerStats_jobtype, addHarvesterDialogs, getJobStatisticsPerSiteResource, setNumSlotsForWP,\
reloadInput, enableJumboJobs, updateServiceMetrics, getUserJobMetadata, getJumboJobDatasets, getGShareStatus,\
sweepPQ,get_job_statistics_per_site_label_resource, relay_idds_command, send_command_to_job,\
execute_idds_workflow_command
# import error
import pandaserver.taskbuffer.ErrorCode
# initialize cx_Oracle using dummy connection
initializer.init()
# initialize TaskBuffer
taskBuffer.init(panda_config.dbhost, panda_config.dbpasswd, panda_config.nDBConnection, True)
# initialize JobDispatcher
if panda_config.nDBConnection != 0:
jobDispatcher.init(taskBuffer)
# initialize DataService
if panda_config.nDBConnection != 0:
dataService.init(taskBuffer)
# initialize UserIF
if panda_config.nDBConnection != 0:
userIF.init(taskBuffer)
# import web I/F
allowedMethods = []
allowedMethods += ['isAlive', 'putFile', 'deleteFile', 'getServer', 'updateLog', 'fetchLog',
'touchFile', 'getVomsAttr', 'putEventPickingRequest', 'getAttr',
'uploadLog', 'put_checkpoint', 'delete_checkpoint', 'put_file_recovery_request',
'put_workflow_request']
allowedMethods += ['datasetCompleted', 'updateFileStatusInDisp']
allowedMethods += ['getJob', 'updateJob', 'getStatus', 'genPilotToken',
'getEventRanges', 'updateEventRange', 'getKeyPair',
'updateEventRanges', 'getDNsForS3', 'getProxy', 'getCommands', 'ackCommands',
'checkJobStatus', 'checkEventsAvailability', 'updateJobsInBulk', 'getResourceTypes']
allowedMethods += ['submitJobs', 'getJobStatus', 'queryPandaIDs', 'killJobs', 'reassignJobs', 'getJobStatistics',
'getJobStatisticsPerSite', 'resubmitJobs', 'queryLastFilesInDataset', 'getPandaIDsSite',
'getJobsToBeUpdated', 'updateProdDBUpdateTimes', 'runTaskAssignment', 'getAssigningTask',
'getSiteSpecs', 'getCloudSpecs', 'seeCloudTask', 'queryJobInfoPerCloud', 'registerProxyKey',
'getProxyKey', 'getJobIDsInTimeRange', 'getPandIDsWithJobID', 'getFullJobStatus',
'getJobStatisticsForBamboo', 'getNUserJobs', 'addSiteAccess', 'listSiteAccess',
'getFilesInUseForAnal', 'updateSiteAccess', 'getPandaClientVer', 'getSlimmedFileInfoPandaIDs',
'getQueuedAnalJobs', 'getHighestPrioJobStat', 'getActiveDatasets', 'setCloudTaskByUser',
'getSerialNumberForGroupJob', 'getCachePrefixes', 'checkMergeGenerationStatus', 'getNumPilots',
'retryFailedJobsInActive', 'getJobStatisticsWithLabel', 'getPandaIDwithJobExeID',
'getJobStatisticsPerUserSite', 'getDisInUseForAnal', 'getLFNsInUseForAnal', 'getScriptOfflineRunning',
'setDebugMode', 'insertSandboxFileInfo', 'checkSandboxFile', 'changeJobPriorities',
'insertTaskParams', 'killTask', 'finishTask', 'getCmtConfigList', 'getJediTasksInTimeRange',
'getJediTaskDetails', 'retryTask', 'getRetryHistory', 'changeTaskPriority', 'reassignTask',
'changeTaskAttributePanda', 'pauseTask', 'resumeTask', 'increaseAttemptNrPanda',
'killUnfinishedJobs', 'changeTaskSplitRulePanda', 'changeTaskModTimePanda', 'avalancheTask',
'getPandaIDsWithTaskID', 'reactivateTask', 'getTaskStatus',
'reassignShare', 'listTasksInShare', 'getTaskParamsMap', 'updateWorkers', 'harvesterIsAlive',
'reportWorkerStats', 'reportWorkerStats_jobtype', 'addHarvesterDialogs',
'getJobStatisticsPerSiteResource', 'setNumSlotsForWP', 'reloadInput', 'enableJumboJobs',
'updateServiceMetrics', 'getUserJobMetadata', 'getJumboJobDatasets',
'getGShareStatus', 'sweepPQ', 'get_job_statistics_per_site_label_resource', 'relay_idds_command',
'send_command_to_job','execute_idds_workflow_command']
# FastCGI/WSGI entry
if panda_config.useFastCGI or panda_config.useWSGI:
import os
import cgi
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper
if panda_config.token_authType is None:
pass
elif panda_config.token_authType == 'scitokens':
import scitokens
else:
from pandaserver.srvcore import oidc_utils
# logger
_logger = PandaLogger().getLogger('Entry')
# dummy request object
class DummyReq:
def __init__(self, env, tmpLog):
# environ
self.subprocess_env = env
# header
self.headers_in = {}
# authentication
self.authenticated = True
# message
self.message = None
# content-length
if 'CONTENT_LENGTH' in self.subprocess_env:
self.headers_in["content-length"] = self.subprocess_env['CONTENT_LENGTH']
# scitoken
try:
if panda_config.token_authType in ['scitokens', 'oidc'] and 'HTTP_AUTHORIZATION' in env:
serialized_token = env['HTTP_AUTHORIZATION'].split()[1]
if panda_config.token_authType == 'scitokens':
token = scitokens.SciToken.deserialize(serialized_token, audience=panda_config.token_audience)
else:
if 'HTTP_ORIGIN' in env:
vo = env['HTTP_ORIGIN']
else:
vo = None
token = oidc_utils.deserialize_token(serialized_token, panda_config.auth_config,
vo)
# check with auth policies
if panda_config.token_authType == 'oidc':
self.authenticated = False
vo = token[ "vo"]
if vo not in panda_config.auth_policies:
self.message = 'unknown vo : {}'.format(vo)
tmpLog.error('{} - {}'.format(self.message, env['HTTP_AUTHORIZATION']))
else:
for memberStr, memberInfo in panda_config.auth_policies[vo]:
if memberStr in token["groups"]:
self.subprocess_env['PANDA_OIDC_VO'] = vo
self.subprocess_env['PANDA_OIDC_GROUP'] = memberInfo['group']
self.subprocess_env['PANDA_OIDC_ROLE'] = memberInfo['role']
self.authenticated = True
break
if not self.authenticated:
self.message = 'invalid member in {}'.format(vo)
tmpLog.error('{} - {}'.format(self.message, env['HTTP_AUTHORIZATION']))
# check issuer
if 'iss' not in token:
self.message = 'issuer is undefined in the token'
tmpLog.error(self.message)
else:
if panda_config.token_authType == 'scitokens':
items = token.claims()
else:
items = six.iteritems(token)
for c, v in items:
self.subprocess_env['PANDA_OIDC_CLAIM_{0}'.format(str(c))] = str(v)
# use sub and scope as DN and FQAN
if 'SSL_CLIENT_S_DN' not in self.subprocess_env:
if 'name' in token:
self.subprocess_env['SSL_CLIENT_S_DN'] = str(token['name'])
else:
self.subprocess_env['SSL_CLIENT_S_DN'] = str(token['sub'])
i = 0
for scope in token.get('scope', '').split():
if scope.startswith('role:'):
self.subprocess_env['GRST_CRED_AUTH_TOKEN_{0}'.format(i)] = 'VOMS ' + str(scope.split(':')[-1])
i += 1
except Exception as e:
self.message = 'invalid token: {}'.format(str(e))
tmpLog.error('{} - {}'.format(self.message, env['HTTP_AUTHORIZATION']))
# get remote host
def get_remote_host(self):
if 'REMOTE_HOST' in self.subprocess_env:
return self.subprocess_env['REMOTE_HOST']
return ""
# accept json
def acceptJson(self):
try:
if 'HTTP_ACCEPT' in self.subprocess_env:
return 'application/json' in self.subprocess_env['HTTP_ACCEPT']
except Exception:
pass
return False
# application
def application(environ, start_response):
# get method name
methodName = ''
if 'SCRIPT_NAME' in environ:
methodName = environ['SCRIPT_NAME'].split('/')[-1]
tmpLog = LogWrapper(_logger, "PID={0} {1}".format(os.getpid(), methodName), seeMem=True)
cont_length = int(environ.get('CONTENT_LENGTH', 0))
json_body = environ.get('CONTENT_TYPE', None) == 'application/json'
tmpLog.debug("start content-length={} json={}".format(cont_length, json_body))
regStart = datetime.datetime.utcnow()
retType = None
# check method name
if methodName not in allowedMethods:
tmpLog.error("is forbidden")
exeRes = "False : %s is forbidden" % methodName
else:
# get method object
tmpMethod = None
try:
tmpMethod = globals()[methodName]
except Exception:
pass
# object not found
if tmpMethod is None:
tmpLog.error("is undefined")
exeRes = "False"
else:
body = b''
try:
while cont_length > 0:
chunk = environ['wsgi.input'].read(min(cont_length, 1024*1024))
if not chunk:
break
cont_length -= len(chunk)
body += chunk
if cont_length > 0:
raise OSError('partial read from client. {} bytes remaining'.format(cont_length))
if not json_body:
# query string
environ['wsgi.input'] = io.BytesIO(body)
# get params
tmpPars = cgi.FieldStorage(environ['wsgi.input'], environ=environ,
keep_blank_values=1)
# convert to map
params = {}
for tmpKey in list(tmpPars):
if tmpPars[tmpKey].file is not None and tmpPars[tmpKey].filename is not None:
# file
params[tmpKey] = tmpPars[tmpKey]
else:
# string
params[tmpKey] = tmpPars.getfirst(tmpKey)
else:
# json
body = gzip.decompress(body)
params = json.loads(body)
if panda_config.entryVerbose:
tmpLog.debug("with %s" % str(list(params)))
# dummy request object
dummyReq = DummyReq(environ, tmpLog)
if not dummyReq.authenticated:
start_response('403 Forbidden', [('Content-Type', 'text/plain')])
return ["ERROR : token-based authentication failed on the server side with {}".format(
dummyReq.message).encode()]
param_list = [dummyReq]
# exec
exeRes = tmpMethod(*param_list, **params)
# extract return type
if isinstance(exeRes, dict):
retType = exeRes['type']
exeRes = exeRes['content']
# convert bool to string
if exeRes in [True,False]:
exeRes = str(exeRes)
except Exception as e:
tmpLog.error("execution failure : {0}\n {1}".format(str(e), traceback.format_exc()))
if hasattr(panda_config, 'dumpBadRequest') and panda_config.dumpBadRequest:
try:
with tempfile.NamedTemporaryFile(delete=False, prefix='req_dump_') as f:
environ['WSGI_INPUT_DUMP'] = f.name
f.write(body)
os.chmod(f.name, 0o775)
except Exception:
tmpLog.error(traceback.format_exc())
pass
errStr = ""
for tmpKey in environ:
tmpVal = environ[tmpKey]
errStr += "%s : %s\n" % (tmpKey,str(tmpVal))
tmpLog.error(errStr)
# return internal server error
start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/plain')])
# force kill to release memory
if type(e) == OSError:
                    tmpLog.warning('force restart due to OSError')
os.kill(os.getpid(), signal.SIGINT)
return [str(e).encode()]
if panda_config.entryVerbose:
tmpLog.debug("done")
regTime = datetime.datetime.utcnow() - regStart
tmpLog.info("exec_time=%s.%03d sec, return len=%s B" % (regTime.seconds,
regTime.microseconds/1000,
len(str(exeRes))))
# return
if exeRes == pandaserver.taskbuffer.ErrorCode.EC_NotFound:
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['not found'.encode()]
elif isinstance(exeRes, pandaserver.taskbuffer.ErrorCode.EC_Redirect):
start_response('302 Redirect', [('Location', exeRes.url)])
return ['redirect'.encode()]
else:
if retType == 'json':
start_response('200 OK', [('Content-Type', 'application/json')])
else:
start_response('200 OK', [('Content-Type', 'text/plain')])
if isinstance(exeRes, str):
exeRes = exeRes.encode()
return [exeRes]
# start server
if panda_config.useFastCGI:
from flup.server.fcgi import WSGIServer
WSGIServer(application,multithreaded=False).run()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from matplotlib.font_manager import FontProperties
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
def file2matrix(file_name):
file = open(file_name)
array_lines = file.readlines()
number_of_lines = len(array_lines)
return_matrix = np.zeros((number_of_lines,3))
class_label_vector = []
index = 0
for line in array_lines:
line = line.strip()
list_line = line.split('\t')
return_matrix[index,:] = list_line[0:3]
if list_line[-1] == 'didntLike':
class_label_vector.append(1)
if list_line[-1] == 'smallDoses':
class_label_vector.append(2)
if list_line[-1] == 'largeDoses':
class_label_vector.append(3)
index += 1
return return_matrix,class_label_vector
def show_data(dating_data_mat,dating_labels):
font = FontProperties(fname=r"c:\windows\fonts\simsun.ttc",size=14)
fig,axs = plt.subplots(nrows=2,ncols=2,sharex=False,sharey=False)#,figsize=(13,8))
number_of_labels = len(dating_labels)
labels_colors=[]
for i in dating_labels:
if i == 1:
labels_colors.append('black')
if i == 2:
labels_colors.append('orange')
if i == 3:
labels_colors.append('red')
axs[0][0].scatter(x=dating_data_mat[:,0],y=dating_data_mat[:,1],color=labels_colors,s=15,alpha=.5)
    axs0_title_text = axs[0][0].set_title(u'Frequent flyer miles earned per year vs. time spent playing video games', fontproperties=font)
    axs0_xlabel_text = axs[0][0].set_xlabel(u'', fontproperties=font)
    axs0_ylabel_text = axs[0][0].set_ylabel(u'', fontproperties=font)
plt.setp(axs0_title_text,size=9,weight='bold',color='red')
plt.setp(axs0_xlabel_text,size=7,weight='bold',color='black')
plt.setp(axs0_ylabel_text,size=7,weight='bold',color='black')
plt.show()
if __name__ == '__main__':
file_name = 'datingTestSet.txt'
dating_data_mat,dating_labels = file2matrix(file_name)
show_data(dating_data_mat,dating_labels)
|
nilq/baby-python
|
python
|
# Copyright (c) 2022 Manuel Olguín Muñoz <molguin@kth.se>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import time
from collections import deque
from os import PathLike
from typing import Any, Dict, Iterator, Sequence, Tuple
import numpy as np
import numpy.typing as npt
import pandas as pd
import yaml
class FrameSet:
"""
Abstraction of a set of video frames for the model.
Easiest way to build an instance of this class is using the
FrameSet.from_datafile class method. This method takes a tracefile in
.npz format and parses it.
"""
def __init__(
self,
name: str,
initial_frame: npt.NDArray,
steps: Sequence[Dict[str, npt.NDArray]],
):
"""
Parameters
----------
name
A name for the task represented by this trace of frames.
initial_frame
The initial video frame required by the backend to initialize
the task.
steps
A sequence containing dictionaries mapping frame tags to video
frames, in order of steps.
Note that it is expected that all steps have the same tags!
"""
self._name = name
self._init_frame = initial_frame
self._steps = tuple(steps)
self._num_steps = len(self._steps)
def __str__(self) -> str:
return yaml.safe_dump(
{
"name": self._name,
"num_steps": self.step_count,
"initial_frame": f"{len(self._init_frame.tobytes())} bytes",
"steps": [
{tag: f"{len(data.tobytes())} bytes" for tag, data in step.items()}
for step in self._steps
],
}
)
@property
def step_count(self) -> int:
return self._num_steps
@property
def name(self) -> str:
return self._name
def get_initial_frame(self) -> npt.NDArray:
"""
Returns
-------
npt.NDArray
The initial video frame for this task.
"""
return self._init_frame.copy()
def get_frame(self, step_index: Any, frame_tag: str) -> npt.NDArray:
"""
Looks up a frame for a specific tag in a step.
Parameters
----------
step_index
Step index.
frame_tag
Frame tag to look up.
Returns
-------
npt.NDArray
A video frame.
"""
return self._steps[step_index][frame_tag].copy()
@classmethod
def from_datafile(cls, task_name: str, trace_path: PathLike | str) -> FrameSet:
"""
Opens a frame tracefile and parses it.
Traces correspond to compressed numpy array files (.npz) containing
the following arrays:
- An array called "initial" corresponding to the initial frame for
the task.
- A number `M x N` of arrays, where M is the number of different
possible tags for frames during a step, and N corresponds to
the number of steps in the tag. Each of these arrays is named
following the convention "step_<step index (two digits,
0-padded)>_<frame tag>".
Parameters
----------
task_name
Task name for this trace.
trace_path
Path to the datafile.
Returns
-------
FrameSet
A FrameSet object.
"""
data = np.load(trace_path)
# trace NPZ file contains initial frame + 3 frames per step
# success, blank, and low_confidence
# TODO: this assumes 3 frame categories per step (success, low confidence and
# blank (repeat is simply the previous success)). Maybe we should add a way
# of configuring that.
assert (len(data) - 1) % 3 == 0
num_steps = (len(data) - 1) // 3
init_frame = data["initial"]
# TODO: hardcoded categories
steps = deque()
repeat = init_frame
for step in range(num_steps):
step_dict = {}
for tag in ("success", "blank", "low_confidence"):
step_dict[tag] = data[f"step{step:02d}_{tag}"]
step_dict["repeat"] = repeat
repeat = step_dict["success"]
steps.append(step_dict)
return FrameSet(name=task_name, initial_frame=init_frame, steps=steps)
class FrameModel:
def __init__(self, probabilities: pd.DataFrame, success_tag: str = "success"):
"""
Parameters
----------
probabilities
A Pandas DataFrame containing two columns 'bin_start' and
'bin_end', and an arbitrary number of additional columns.
'bin_start' and 'bin_end' correspond to the left and right limits
respectively of left-inclusive, right-exclusive bins of relative
time position (e.g. if total duration is 10 seconds, 3 seconds
would fall in bin [0.0, 0.5) and 7 seconds in bin [0.5, 1.0)).
All other columns are interpreted as relative probabilities for a
tag (identified by the column name) within a bin.
All probabilities for tags in a bin MUST add up to 1.0.
For example, a row<br><br>
<table>
<thead>
<tr>
<th>bin_start</th>
<th>bin_end</th>
<th>repeat</th>
<th>low_confidence</th>
<th>blank</th>
</tr>
</thead>
<tbody>
<tr>
<td>0.0</td>
<td>0.2</td>
<td>0.3</td>
<td>0.1</td>
<td>0.6</td>
</tr>
</tbody>
</table><br>
indicates that within bin [0, 0.2), 'repeat' frames occur with a
relative probability of 0.3, 'low_confidence' frames with a
relative probability of 0.1, and 'blank' frames with a relative
probability of 0.6.
success_tag
String to be returned by methods of this class whenever the target
step time has been achieved.
"""
# validation
columns = set(probabilities.columns)
try:
columns.remove("bin_start")
columns.remove("bin_end")
except KeyError:
raise RuntimeError(
"Probability dataframe must include bin_start " "and bin_end columns."
)
prob_sums = np.zeros(len(probabilities.index))
for column in columns:
prob_sums += probabilities[column]
if not np.all(np.isclose(prob_sums, 1.0)):
raise RuntimeError(
"Sum of probabilities for each bin must be " "equal to 1.0."
)
# process probabilities
self._probs = probabilities.copy()
self._probs["interval"] = pd.IntervalIndex.from_arrays(
left=probabilities["bin_start"],
right=probabilities["bin_end"],
closed="left",
)
self._probs = self._probs.drop(columns=["bin_start", "bin_end"]).set_index(
"interval", verify_integrity=True
)
self._rng = np.random.default_rng()
self._success_tag = success_tag
def _sample_from_distribution(self, rel_pos: float) -> str:
if rel_pos > 1:
return self._success_tag
probs = self._probs[self._probs.index.contains(rel_pos)].iloc[0]
return self._rng.choice(a=probs.index, replace=False, p=probs.values)
def get_frame_at_instant(self, instant: float | int, step_time: float | int) -> str:
"""
Return a frame sampled from a specific instant in a step.
Parameters
----------
instant
Number of seconds since the start of the step.
step_time
Total target step duration.
Returns
-------
str
A randomly sampled step tag.
"""
# purely according to distributions
try:
return self._sample_from_distribution(float(instant) / float(step_time))
except ZeroDivisionError:
# if step time is 0 we can immediately assume step is over!
return self._success_tag
def step_iterator(
self, target_time: float, infinite: bool = False
) -> Iterator[Tuple[str, float]]:
"""
An iterator over the frame tags in a step.
Any calls to next() between instants 0 and target_time will
correspond to frame tags sampled from the internal distributions.
Calls to next() after a time greater than target time has been
elapsed will always return a success tag; if infinite is False, the iterator
will additionally be closed.
Yields
------
str
Frame tags.
"""
step_start = time.monotonic()
while True:
instant = time.monotonic() - step_start
yield self.get_frame_at_instant(instant, target_time), instant
if instant > target_time and not infinite:
return
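# Minimal usage sketch (hypothetical bin layout; the column conventions
# follow the docstring of FrameModel.__init__ above):
if __name__ == "__main__":
    probs = pd.DataFrame({
        "bin_start": [0.0, 0.5],
        "bin_end": [0.5, 1.0],
        "repeat": [0.3, 0.1],
        "low_confidence": [0.1, 0.2],
        "blank": [0.6, 0.7],
    })
    model = FrameModel(probs)
    # 3 s into a 10 s step falls in bin [0.0, 0.5)
    print(model.get_frame_at_instant(3.0, 10.0))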
|
nilq/baby-python
|
python
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import torch
import poptorch
# half_stats_begin
model = torch.nn.Sequential()
model.add_module('lin', torch.nn.Linear(16, 16))
model.add_module('bn', torch.nn.BatchNorm1d(16))
model.float()
opts = poptorch.Options()
opts.Precision.runningStatisticsAlwaysFloat(False)
poptorch_model = poptorch.inferenceModel(model, opts)
# half_stats_end
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.24 on 2022-02-24 22:20
from django.db import migrations
def commit_old_currencies_deactivation(apps, schema_editor):
Currency = apps.get_model("exchange", "Currency")
db_alias = schema_editor.connection.alias
Currency.objects.using(db_alias).filter(code__in=["MRO", "VEF"]).update(active=False)
def rollback_old_currencies_deactivation(apps, schema_editor):
Currency = apps.get_model("exchange", "Currency")
db_alias = schema_editor.connection.alias
Currency.objects.using(db_alias).filter(code__in=["MRO", "VEF"]).update(active=True)
class Migration(migrations.Migration):
dependencies = [
('exchange', '0004_currency_active'),
]
operations = [
migrations.RunPython(commit_old_currencies_deactivation, rollback_old_currencies_deactivation),
]
|
nilq/baby-python
|
python
|
#! /usr/bin/python
#
# xindice_python_delete.py
#
# Apr/13/2012
#
import math
import cgi
import string
import sys
import os
import xml.dom.minidom
import pycurl
#
import json
# ------------------------------------------------------------------
sys.path.append ('/var/www/data_base/common/python_common')
#
from text_manipulate import dict_delete_proc
from xml_manipulate import xml_to_dict_proc
from xml_manipulate import dict_to_xml_proc
#
from curl_get import curl_get_proc
from curl_get import curl_put_proc
#
from cgi_manipulate import parse_parameter
# ------------------------------------------------------------------
url_base = 'http://host_dbase:8888/xindice/db/'
url_sub = 'cities/cities'
url_in = url_base + url_sub
#
str_aa = curl_get_proc (url_in)
dict_aa=xml_to_dict_proc (str_aa)
#
# ------------------------------------------------------------------
#
#
print "Content-type: text/html\n\n"
#
# ---------------------------------------------------------------
#
print "*** check pppp_qqqq ***<br />"
array_bb = parse_parameter ()
print "*** check ssss_qqqq ***<br />"
#
print "len(array_bb) = %d<br />" % len(array_bb)
for it in range (len(array_bb)):
id_in = array_bb[it]
print "id_in = %s<br />" % id_in
dict_aa=dict_delete_proc (dict_aa,id_in)
print "*** check rrrr ***<br />"
#
print "*** check ssss ***<br />"
out_str = dict_to_xml_proc (dict_aa)
print "*** check tttt ***<br />"
#
#
curl_put_proc (url_in,out_str.encode('utf-8'))
#
#
print "OK<br />"
#
# ---------------------------------------------------------------
|
nilq/baby-python
|
python
|
"""Wrapper for the CIFAR-10 dataset, which is provided in the `torchvision`
package.
The model and data transform are taken directly from:
https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
"""
# Chuan-Zheng Lee <czlee@stanford.edu>
# July 2021
from pathlib import Path
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from config import DATA_DIRECTORY
cifar10_transforms = {
# copied from https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
'norm1': transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
]),
# copied from https://github.com/akamaster/pytorch_resnet_cifar10/blob/master/trainer.py
'flip-crop-norm2': torchvision.transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, padding=4),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
]),
'norm2': torchvision.transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
]),
}
def get_cifar10_dataset(train=True, transform='norm1'):
    # We want it to download automatically, but torchvision prints a message
    # if it's already downloaded, which is kind of annoying, so check if it's
    # there first and pass download=False if it is.
cifar10_directory = Path(DATA_DIRECTORY) / "cifar10"
download = not (cifar10_directory / "cifar-10-batches-py").exists()
return torchvision.datasets.CIFAR10(
root=cifar10_directory,
train=train,
download=download,
transform=cifar10_transforms[transform],
)
# copied from https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
class Cifar10CNNSimple(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(nn.functional.relu(self.conv1(x)))
x = self.pool(nn.functional.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = nn.functional.relu(self.fc1(x))
x = nn.functional.relu(self.fc2(x))
x = self.fc3(x)
return x
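# Minimal usage sketch: a shape sanity check with a dummy CIFAR-sized batch:
if __name__ == "__main__":
    model = Cifar10CNNSimple()
    dummy = torch.randn(4, 3, 32, 32)  # batch of four 32x32 RGB images
    print(model(dummy).shape)  # torch.Size([4, 10])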
|
nilq/baby-python
|
python
|
#A student wants to know their final grade in the Algorithms course.
#The grade is made up of the following percentages:
#55% from the average of the three partial exam grades.
#30% from the final exam grade.
#15% from the final project grade.
parcial1 = float(input("Diga la nota del parcial 1: "))
parcial2 = float(input("Diga la nota del parcial 2: "))
parcial3 = float(input("Diga la nota del parcial 3: "))
examenfinal = float(input("Diga la nota del examen final: "))
trabajofinal = float(input("Diga la nota del trabajo final: "))
promedio = (parcial1 + parcial2 + parcial3)/3
calificaciónfinal = (promedio * 55)/100 + (examenfinal * 30)/100 + (trabajofinal * 15)/100
print("La calificación final de la materia de algoritmos es de %.2f"%(calificaciónfinal))
|
nilq/baby-python
|
python
|
from ..utils import Object
class EditMessageMedia(Object):
"""
Edits the content of a message with an animation, an audio, a document, a photo or a video. The media in the message can't be replaced if the message was set to self-destruct. Media can't be replaced by self-destructing media. Media in an album can be edited only to contain a photo or a video. Returns the edited message after the edit is completed on the server side
Attributes:
ID (:obj:`str`): ``EditMessageMedia``
Args:
chat_id (:obj:`int`):
The chat the message belongs to
message_id (:obj:`int`):
Identifier of the message
reply_markup (:class:`telegram.api.types.ReplyMarkup`):
The new message reply markup; for bots only
input_message_content (:class:`telegram.api.types.InputMessageContent`):
New content of the messageMust be one of the following types: InputMessageAnimation, InputMessageAudio, InputMessageDocument, InputMessagePhoto or InputMessageVideo
Returns:
Message
Raises:
:class:`telegram.Error`
"""
ID = "editMessageMedia"
def __init__(self, chat_id, message_id, reply_markup, input_message_content, extra=None, **kwargs):
self.extra = extra
self.chat_id = chat_id # int
self.message_id = message_id # int
self.reply_markup = reply_markup # ReplyMarkup
self.input_message_content = input_message_content # InputMessageContent
@staticmethod
def read(q: dict, *args) -> "EditMessageMedia":
chat_id = q.get('chat_id')
message_id = q.get('message_id')
reply_markup = Object.read(q.get('reply_markup'))
input_message_content = Object.read(q.get('input_message_content'))
return EditMessageMedia(chat_id, message_id, reply_markup, input_message_content)
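# Minimal construction sketch (argument types follow the docstring above;
# 'content' stands in for an InputMessageContent instance):
#   request = EditMessageMedia(chat_id=123456789, message_id=42,
#                              reply_markup=None,
#                              input_message_content=content)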
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.7 on 2020-09-06 08:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('exams', '0020_auto_20200906_1408'),
]
operations = [
migrations.AlterField(
model_name='questionresponse',
name='student_exam',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='responses', to='exams.StudentExam'),
),
]
|
nilq/baby-python
|
python
|
"""
Tox21 dataset loader.
"""
from __future__ import division
from __future__ import unicode_literals
import os
import logging
import deepchem
import dglt.contrib.moses.moses.data.featurizer as feat
from dglt.contrib.moses.moses.data.reader.utils import AdditionalInfo
from dglt.contrib.moses.moses.data.reader.data_loader import CSVLoader
logger = logging.getLogger(__name__)
def get_tox21_data_path():
data_dir = deepchem.utils.get_data_dir()
dataset_file = os.path.join(data_dir, "tox21.csv.gz")
if not os.path.exists(dataset_file):
deepchem.utils.download_url(
'http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/tox21.csv.gz'
)
return dataset_file
dataset_file = get_tox21_data_path()
tox21_ad = AdditionalInfo(dataset_file, smiles_field='smiles')
def load_tox21(featurizer='ECFP', split='index', reload=True, K=1):
"""Load Tox21 datasets. Does not do train/test split"""
# Featurize Tox21 dataset
tox21_tasks = [
'NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER', 'NR-ER-LBD',
'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE', 'SR-MMP', 'SR-p53'
]
data_dir = deepchem.utils.get_data_dir()
# TODO: reload should be modified to support cross vailidation cases.
if reload and K == 1:
save_dir = os.path.join(data_dir, "tox21/" + featurizer + "/" + str(split))
loaded, all_dataset, transformers = deepchem.utils.save.load_dataset_from_disk(
save_dir)
if loaded:
return tox21_tasks, all_dataset, transformers
dataset_file = get_tox21_data_path()
if featurizer == 'ECFP':
featurizer = feat.CircularFingerprint(size=1024)
elif featurizer == 'GraphConv':
featurizer = feat.ConvMolFeaturizer()
elif featurizer == 'Weave':
featurizer = feat.WeaveFeaturizer()
elif featurizer == 'Raw':
featurizer = feat.RawFeaturizer()
elif featurizer == 'AdjacencyConv':
featurizer = feat.AdjacencyFingerprint(
max_n_atoms=150, max_valence=6)
elif featurizer == 'EAGCN':
featurizer = feat.EagcnFeaturizer(tox21_ad.bond_type_dict, tox21_ad.atom_type_dict)
loader = CSVLoader(
tasks=tox21_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file, shard_size=8192)
# Initialize transformers
transformers = [
deepchem.trans.BalancingTransformer(transform_w=True, dataset=dataset)
]
logger.info("About to transform data")
for transformer in transformers:
dataset = transformer.transform(dataset)
if split == None:
return tox21_tasks, (dataset, None, None), transformers
splitters = {
'index': deepchem.splits.IndexSplitter(),
'random': deepchem.splits.RandomSplitter(),
'scaffold': deepchem.splits.ScaffoldSplitter(),
# 'butina': deepchem.splits.ButinaSplitter(),
# 'task': deepchem.splits.TaskSplitter()
}
splitter = splitters[split]
if K > 1:
fold_datasets = splitter.k_fold_split(dataset, K)
all_dataset = fold_datasets
else:
train, valid, test = splitter.train_valid_test_split(dataset)
all_dataset = (train, valid, test)
if reload:
deepchem.utils.save.save_dataset_to_disk(save_dir, train, valid, test,
transformers)
return tox21_tasks, all_dataset, transformers
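# Minimal usage sketch (featurizes and caches the dataset on first run;
# arguments follow the signature above):
if __name__ == "__main__":
    tasks, (train, valid, test), transformers = load_tox21(
        featurizer='ECFP', split='random')
    print(tasks)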
|
nilq/baby-python
|
python
|
import os
import sys
import time
from monk.pip_functionality_tests.keras.test_default_train import test_default_train
from monk.pip_functionality_tests.keras.test_default_eval_infer import test_default_eval_infer
from monk.pip_functionality_tests.keras.test_update_copy_from import test_update_copy_from
from monk.pip_functionality_tests.keras.test_update_normal import test_update_normal
from monk.pip_functionality_tests.keras.test_update_eval_infer import test_update_eval_infer
from monk.pip_functionality_tests.keras.test_expert_train import test_expert_train
from monk.pip_functionality_tests.keras.test_expert_eval_infer import test_expert_eval_infer
from monk.pip_functionality_tests.keras.test_switch_default import test_switch_default
from monk.pip_functionality_tests.keras.test_switch_expert import test_switch_expert
from monk.pip_functionality_tests.keras.test_compare import test_compare
from monk.pip_functionality_tests.keras.test_analyse import test_analyse
def run_functionality_tests():
origstdout = sys.stdout
print("Running Tests...");
sys.stdout = open("test_logs.txt", 'w');
system_dict = {};
system_dict["total_tests"] = 0;
system_dict["successful_tests"] = 0;
system_dict["failed_tests_lists"] = [];
system_dict["failed_tests_exceptions"] = [];
system_dict["skipped_tests_lists"] = [];
start = time.time()
print("Running 1/11");
system_dict = test_default_train(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 2/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_default_eval_infer(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 3/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_update_copy_from(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 4/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_update_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 5/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_update_eval_infer(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 6/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_expert_train(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("Running 7/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_expert_eval_infer(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 8/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_switch_default(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 9/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_switch_expert(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 10/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_compare(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 11/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_analyse(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
sys.stdout = open("test_logs.txt", 'a');
end = time.time();
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("");
for i in range(len(system_dict["failed_tests_lists"])):
print("{}. Failed Test:".format(i+1));
print("Name - {}".format(system_dict["failed_tests_lists"][i]));
print("Error - {}".format(system_dict["failed_tests_exceptions"][i]));
print("");
print("Skipped Tests List - {}".format(system_dict["skipped_tests_lists"]));
print("");
sys.stdout = origstdout;
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("See test_logs.txt for errors");
print("");
os.system("rm -r workspace");
def run_unit_tests():
from monk.pip_unit_tests.keras.test_optimizer_sgd import test_optimizer_sgd
from monk.pip_unit_tests.keras.test_optimizer_nesterov_sgd import test_optimizer_nesterov_sgd
from monk.pip_unit_tests.keras.test_optimizer_rmsprop import test_optimizer_rmsprop
from monk.pip_unit_tests.keras.test_optimizer_adam import test_optimizer_adam
from monk.pip_unit_tests.keras.test_optimizer_nadam import test_optimizer_nadam
from monk.pip_unit_tests.keras.test_optimizer_adamax import test_optimizer_adamax
from monk.pip_unit_tests.keras.test_optimizer_adadelta import test_optimizer_adadelta
from monk.pip_unit_tests.keras.test_optimizer_adagrad import test_optimizer_adagrad
from monk.pip_unit_tests.keras.test_loss_l1 import test_loss_l1
from monk.pip_unit_tests.keras.test_loss_l2 import test_loss_l2
from monk.pip_unit_tests.keras.test_loss_crossentropy import test_loss_crossentropy
from monk.pip_unit_tests.keras.test_loss_binary_crossentropy import test_loss_binary_crossentropy
from monk.pip_unit_tests.keras.test_loss_kldiv import test_loss_kldiv
from monk.pip_unit_tests.keras.test_loss_hinge import test_loss_hinge
from monk.pip_unit_tests.keras.test_loss_squared_hinge import test_loss_squared_hinge
origstdout = sys.stdout
print("Running Tests...");
sys.stdout = open("test_logs.txt", 'w');
system_dict = {};
system_dict["total_tests"] = 0;
system_dict["successful_tests"] = 0;
system_dict["failed_tests_lists"] = [];
system_dict["failed_tests_exceptions"] = [];
system_dict["skipped_tests_lists"] = [];
start = time.time()
    # First batch: optimizer and loss tests. Detailed output is appended to
    # test_logs.txt; a short summary is printed to the console.
    tests = [test_optimizer_sgd, test_optimizer_nesterov_sgd, test_optimizer_rmsprop,
             test_optimizer_adam, test_optimizer_nadam, test_optimizer_adamax,
             test_optimizer_adadelta, test_optimizer_adagrad,
             test_loss_l1, test_loss_l2, test_loss_crossentropy,
             test_loss_binary_crossentropy, test_loss_kldiv, test_loss_hinge,
             test_loss_squared_hinge]
    exp_num = 1
    for test in tests:
        print("Running test {}".format(exp_num))
        exp_num += 1
        sys.stdout = open("test_logs.txt", 'a')
        system_dict = test(system_dict)
        sys.stdout = origstdout
        print("Tests Completed - {}".format(system_dict["total_tests"]))
        print("Tests Successful - {}".format(system_dict["successful_tests"]))
        print("")
from monk.pip_unit_tests.keras.test_layer_convolution1d import test_layer_convolution1d
from monk.pip_unit_tests.keras.test_layer_convolution2d import test_layer_convolution2d
from monk.pip_unit_tests.keras.test_layer_convolution3d import test_layer_convolution3d
from monk.pip_unit_tests.keras.test_layer_transposed_convolution2d import test_layer_transposed_convolution2d
from monk.pip_unit_tests.keras.test_layer_transposed_convolution3d import test_layer_transposed_convolution3d
from monk.pip_unit_tests.keras.test_layer_max_pooling1d import test_layer_max_pooling1d
from monk.pip_unit_tests.keras.test_layer_max_pooling2d import test_layer_max_pooling2d
from monk.pip_unit_tests.keras.test_layer_max_pooling3d import test_layer_max_pooling3d
from monk.pip_unit_tests.keras.test_layer_average_pooling1d import test_layer_average_pooling1d
from monk.pip_unit_tests.keras.test_layer_average_pooling2d import test_layer_average_pooling2d
from monk.pip_unit_tests.keras.test_layer_average_pooling3d import test_layer_average_pooling3d
from monk.pip_unit_tests.keras.test_layer_global_max_pooling1d import test_layer_global_max_pooling1d
from monk.pip_unit_tests.keras.test_layer_global_max_pooling2d import test_layer_global_max_pooling2d
from monk.pip_unit_tests.keras.test_layer_global_max_pooling3d import test_layer_global_max_pooling3d
from monk.pip_unit_tests.keras.test_layer_global_average_pooling1d import test_layer_global_average_pooling1d
from monk.pip_unit_tests.keras.test_layer_global_average_pooling2d import test_layer_global_average_pooling2d
from monk.pip_unit_tests.keras.test_layer_global_average_pooling3d import test_layer_global_average_pooling3d
from monk.pip_unit_tests.keras.test_layer_batch_normalization import test_layer_batch_normalization
from monk.pip_unit_tests.keras.test_layer_identity import test_layer_identity
from monk.pip_unit_tests.keras.test_layer_fully_connected import test_layer_fully_connected
from monk.pip_unit_tests.keras.test_layer_dropout import test_layer_dropout
from monk.pip_unit_tests.keras.test_layer_flatten import test_layer_flatten
from monk.pip_unit_tests.keras.test_layer_concatenate import test_layer_concatenate
from monk.pip_unit_tests.keras.test_layer_add import test_layer_add
from monk.pip_unit_tests.keras.test_activation_relu import test_activation_relu
from monk.pip_unit_tests.keras.test_activation_softmax import test_activation_softmax
from monk.pip_unit_tests.keras.test_activation_thresholded_relu import test_activation_thresholded_relu
from monk.pip_unit_tests.keras.test_activation_elu import test_activation_elu
from monk.pip_unit_tests.keras.test_activation_prelu import test_activation_prelu
from monk.pip_unit_tests.keras.test_activation_leaky_relu import test_activation_leaky_relu
from monk.pip_unit_tests.keras.test_activation_selu import test_activation_selu
from monk.pip_unit_tests.keras.test_activation_softplus import test_activation_softplus
from monk.pip_unit_tests.keras.test_activation_softsign import test_activation_softsign
from monk.pip_unit_tests.keras.test_activation_tanh import test_activation_tanh
from monk.pip_unit_tests.keras.test_activation_sigmoid import test_activation_sigmoid
from monk.pip_unit_tests.keras.test_activation_hard_sigmoid import test_activation_hard_sigmoid
from monk.pip_unit_tests.keras.test_initializer_xavier_normal import test_initializer_xavier_normal
from monk.pip_unit_tests.keras.test_initializer_xavier_uniform import test_initializer_xavier_uniform
from monk.pip_unit_tests.keras.test_initializer_random_normal import test_initializer_random_normal
from monk.pip_unit_tests.keras.test_initializer_random_uniform import test_initializer_random_uniform
from monk.pip_unit_tests.keras.test_initializer_lecun_normal import test_initializer_lecun_normal
from monk.pip_unit_tests.keras.test_initializer_lecun_uniform import test_initializer_lecun_uniform
from monk.pip_unit_tests.keras.test_initializer_he_normal import test_initializer_he_normal
from monk.pip_unit_tests.keras.test_initializer_he_uniform import test_initializer_he_uniform
from monk.pip_unit_tests.keras.test_initializer_truncated_normal import test_initializer_truncated_normal
from monk.pip_unit_tests.keras.test_initializer_orthogonal import test_initializer_orthogonal
from monk.pip_unit_tests.keras.test_initializer_variance_scaling import test_initializer_variance_scaling
from monk.pip_unit_tests.keras.test_block_resnet_v1 import test_block_resnet_v1
from monk.pip_unit_tests.keras.test_block_resnet_v2 import test_block_resnet_v2
from monk.pip_unit_tests.keras.test_block_resnet_v1_bottleneck import test_block_resnet_v1_bottleneck
from monk.pip_unit_tests.keras.test_block_resnet_v2_bottleneck import test_block_resnet_v2_bottleneck
from monk.pip_unit_tests.keras.test_block_resnext import test_block_resnext
from monk.pip_unit_tests.keras.test_block_mobilenet_v2_linear_bottleneck import test_block_mobilenet_v2_linear_bottleneck
from monk.pip_unit_tests.keras.test_block_mobilenet_v2_inverted_linear_bottleneck import test_block_mobilenet_v2_inverted_linear_bottleneck
from monk.pip_unit_tests.keras.test_block_squeezenet_fire import test_block_squeezenet_fire
from monk.pip_unit_tests.keras.test_block_densenet import test_block_densenet
from monk.pip_unit_tests.keras.test_block_conv_bn_relu import test_block_conv_bn_relu
from monk.pip_unit_tests.keras.test_block_inception_a import test_block_inception_a
from monk.pip_unit_tests.keras.test_block_inception_b import test_block_inception_b
from monk.pip_unit_tests.keras.test_block_inception_c import test_block_inception_c
from monk.pip_unit_tests.keras.test_block_inception_d import test_block_inception_d
from monk.pip_unit_tests.keras.test_block_inception_e import test_block_inception_e
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_convolution1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_convolution2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_convolution3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_transposed_convolution2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_transposed_convolution3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_max_pooling1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_max_pooling2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_max_pooling3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_average_pooling1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_average_pooling2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_average_pooling3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_max_pooling1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_max_pooling2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_max_pooling3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_average_pooling1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_average_pooling2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_average_pooling3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_batch_normalization(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_identity(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_fully_connected(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_dropout(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_flatten(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_relu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_softmax(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_thresholded_relu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_elu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_prelu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_leaky_relu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_selu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_softplus(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_softsign(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_tanh(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_sigmoid(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_hard_sigmoid(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_concatenate(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_add(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_xavier_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_xavier_uniform(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_random_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_random_uniform(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_lecun_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_lecun_uniform(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_he_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_he_uniform(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_truncated_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_orthogonal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_variance_scaling(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v1(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v2(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v1_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v2_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnext(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_mobilenet_v2_linear_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_mobilenet_v2_inverted_linear_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_squeezenet_fire(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_densenet(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_conv_bn_relu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_a(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_b(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_c(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_e(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
sys.stdout = open("test_logs.txt", 'a');
end = time.time();
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("");
for i in range(len(system_dict["failed_tests_lists"])):
print("{}. Failed Test:".format(i+1));
print("Name - {}".format(system_dict["failed_tests_lists"][i]));
print("Error - {}".format(system_dict["failed_tests_exceptions"][i]));
print("");
print("Skipped Tests List - {}".format(system_dict["skipped_tests_lists"]));
print("");
sys.stdout = origstdout;
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("See test_logs.txt for errors");
print("");
os.system("rm -r workspace");
|
nilq/baby-python
|
python
|
"""
.log to view bot log
For all users
"""
import asyncio
from telethon import events
from telethon.tl.types import ChannelParticipantsAdmins
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="log"))
@borg.on(events.NewMessage(pattern=r"\.log(.*)",incoming=True))
async def _(event):
if event.fwd_from:
return
mentions = """**αℓℓυкα Zᴏʟᴅʏᴄᴋ™** //logs\n\n//8th feb 2020//\n• Fix `.kang` double reply.\n• Added new plugin `. into ur count` To view my stats 😉 **FOR SUDO USER ONLY**\n\n//10th feb 2020//\n• Added `.ai` (Your message) AI chat Bot 😉 [BUT VERY SLOW TO REPLY 😕]\n\n//11th Feb 2020//\n• Added `.slap` in reply to any message, or u gonna slap urself.\n• Added `.rnupload` file.name\n\n//12th feb 2020// \n• Added `.ft` (any emoji)
\n//13 March 2020//\n• Change prefix .ud to .mean \n• Added `.rrgb` Random RGB text Sticker\n• Added `.tagall` to tag all ppl in chat \n• Added `.commit` to upload plugins into ur github ripo (SUDO ONLY)
//26 March 2020//
•Added `.decide` to get an answer: YES or NO
•Added `.paste` pastebin
•Added `.userlist` to get all users in your chat.
•Added `.setwelcome` set a welcome message in your chat.
•Added `.clearwelcome` disable the welcome message in your chat.
"""
reply_message = None
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
await reply_message.reply(mentions)
else:
await event.reply(mentions)
await event.delete()
|
nilq/baby-python
|
python
|
import rigor.importer
import argparse
def main():
parser = argparse.ArgumentParser(description='Imports images and metadata into the database')
parser.add_argument('database', help='Database to use')
parser.add_argument('directories', metavar='dir', nargs='+', help='Directory containing images and metadata to import')
parser.add_argument('-m', '--move', action="store_true", dest='move', default=False, help='Move files into repository instead of copying')
args = parser.parse_args()
for directory in args.directories:
i = rigor.importer.Importer(directory, args.database, args.move)
i.run()
if __name__ == '__main__':
main()
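# Example invocation (a sketch; the script name, database name, and
# directories below are hypothetical):
#
#   python import_images.py mydb /data/batch1 /data/batch2 --move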
|
nilq/baby-python
|
python
|
"""
this is a transfer tool for existed oai data (train, val , test, debug)
it will crop the image into desired size
include two part
crop the data and save it into new direc ( the code will be based on dataloader)
generate a new directory which should provide the same train,val,test and debug set, but point to the cropped data
"""
from __future__ import print_function, division
import sys
import os
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../model_pool'))
from easyreg.reg_data_utils import *
from multiprocessing import *
import SimpleITK as sitk
num_of_workers = 12
import progressbar as pb
from easyreg.utils import *
class RegistrationDataset(object):
"""registration dataset."""
def __init__(self, data_path, phase=None, resize_factor=[1., 1., 1.]):
"""
the dataloader for registration task, to avoid frequent disk communication, all pairs are compressed into memory
:param data_path: string, path to the data
the data should be preprocessed and saved into txt
:param phase: string, 'train'/'val'/ 'test'/ 'debug' , debug here means a subset of train data, to check if model is overfitting
:param transform: function, apply transform on data
: seg_option: pars, settings for segmentation task, None for registration task
: reg_option: pars, settings for registration task, None for segmentation task
"""
self.data_path = data_path
self.task_output_path = None
self.data_output_path = None
self.real_img_path = None
self.real_label_path = None
self.running_read_path = None
self.phase = phase
self.data_type = '*.nii.gz'
self.turn_on_pair_regis = False
self.max_num_pair_to_load = [-1, -1, -1, -1]
""" the max number of pairs to be loaded into the memory"""
self.has_label = False
self.shared_label_set = None
self.get_file_list()
self.resize_factor = resize_factor
self.resize = not all([factor == 1 for factor in self.resize_factor])
self.pair_list = []
def process(self):
self.transfer_exist_dataset_txt_into_new_one()
#self.process_img_pool()
def set_task_output_path(self, path):
self.task_output_path = path
os.makedirs(path, exist_ok=True)
def set_data_output_path(self, path):
self.data_output_path = path
os.makedirs(path, exist_ok=True)
def set_real_data_path(self, img_path, label_path):
self.real_img_path = img_path
self.real_label_path = label_path
def set_running_read_path(self, running_read_path):
self.running_read_path = running_read_path
def set_shared_label(self, shared_label_set):
self.shared_label_set = shared_label_set
def get_file_list(self):
"""
get the all files belonging to data_type from the data_path,
:return: full file path list, file name list
"""
if not os.path.exists(self.data_path):
self.path_list = []
self.name_list = []
return
self.path_list = read_txt_into_list(os.path.join(self.data_path, 'pair_path_list.txt'))
self.name_list = read_txt_into_list(os.path.join(self.data_path, 'pair_name_list.txt'))
if len(self.path_list[0]) == 4:
self.has_label = True
if len(self.name_list) == 0:
self.name_list = ['pair_{}'.format(idx) for idx in range(len(self.path_list))]
if self.phase == 'test':
            self.path_list = [[pth.replace('zhenlinx/Data/OAI_segmentation', 'zyshen/oai_data')
                               for pth in pths] for pths in self.path_list]
def transfer_exist_dataset_txt_into_new_one(self):
source_path_list, target_path_list, l_source_path_list, l_target_path_list = self.split_file_list()
file_num = len(source_path_list)
assert len(source_path_list) == len(target_path_list)
if l_source_path_list is not None and l_target_path_list is not None:
assert len(source_path_list) == len(l_source_path_list)
file_list = [[source_path_list[i], target_path_list[i], l_source_path_list[i], l_target_path_list[i]] for i
in range(file_num)]
else:
file_list = [[source_path_list[i], target_path_list[i]] for i in range(file_num)]
img_output_path = os.path.join(self.running_read_path, 'img')
label_output_path = os.path.join(self.running_read_path, 'label')
file_list = [[pths[i].replace(self.real_img_path, img_output_path) if i in [0, 1] else pths[i].replace(
self.real_label_path[0],
label_output_path)
for i, pth in enumerate(pths)] for pths in file_list]
if len(self.real_label_path) > 1:
file_list = [[pths[i].replace(self.real_img_path, img_output_path) if i in [0, 1] else pths[i].replace(
self.real_label_path[1],
label_output_path)
for i, pth in enumerate(pths)] for pths in file_list]
output_path = self.task_output_path
pair_txt_path = os.path.join(output_path, 'pair_path_list.txt')
fn_txt_path = os.path.join(output_path, 'pair_name_list.txt')
fname_list = [generate_pair_name([file_list[i][0],file_list[i][1]]) for i in range(file_num)]
write_list_into_txt(pair_txt_path, file_list)
write_list_into_txt(fn_txt_path, fname_list)
def split_file_list(self):
path_list = self.path_list
num_pair = len(path_list)
assert len(path_list[0]) >= 2
        has_label = len(path_list[0]) == 4
source_path_list = [path_list[i][0] for i in range(num_pair)]
target_path_list = [path_list[i][1] for i in range(num_pair)]
l_source_path_list = None
l_target_path_list = None
if has_label:
l_source_path_list = [path_list[i][2] for i in range(num_pair)]
l_target_path_list = [path_list[i][3] for i in range(num_pair)]
return source_path_list, target_path_list, l_source_path_list, l_target_path_list
def process_img_pool(self):
"""img pool shoudl include following thing:
img_label_path_dic:{img_name:{'img':img_fp,'label':label_fp,...}
img_label_dic: {img_name:{'img':img_np,'label':label_np},......}
pair_name_list:[[pair1_s,pair1_t],[pair2_s,pair2_t],....]
pair_list [[s_np,t_np,sl_np,tl_np],....]
only the pair_list need to be used by get_item method
"""
img_label_path_dic = {}
pair_name_list = []
for fps in self.path_list:
for i in range(2):
fp = fps[i]
fn = get_file_name(fp)
if fn not in img_label_path_dic:
if self.has_label:
img_label_path_dic[fn] = {'img': fps[i], 'label': fps[i + 2]}
else:
img_label_path_dic[fn] = {'img': fps[i]}
pair_name_list.append([get_file_name(fps[0]), get_file_name(fps[1])])
split_dict = self.__split_dict(img_label_path_dic, num_of_workers)
procs = []
for i in range(num_of_workers):
p = Process(target=self.sub_process, args=(split_dict[i],))
p.start()
print("pid:{} start:".format(p.pid))
procs.append(p)
for p in procs:
p.join()
print("completed the processing in {}".format(self.phase))
def sub_process(self, img_label_path_dic):
pbar = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar(), pb.ETA()], maxval=len(img_label_path_dic)).start()
count = 0
        for _, img_label_path in img_label_path_dic.items():
            img_pth = img_label_path['img']
            self.resize_input_img_and_save_it(img_pth, is_label=False, fname=get_file_name(img_pth))
            # Only resample a label map when one was recorded for this image
            label_pth = img_label_path.get('label')
            if label_pth is not None:
                self.resize_input_img_and_save_it(label_pth, is_label=True, fname=get_file_name(label_pth))
            count += 1
            pbar.update(count)
pbar.finish()
def convert_label_map_into_standard_one(self, label):
label_np = sitk.GetArrayFromImage(label)
cur_label = list(np.unique(label_np))
extra_label = set(cur_label) - self.shared_label_set
# print(" the extra label is {}. ".format(extra_label))
if len(extra_label) == 0:
print("no extra label")
for elm in extra_label:
"""here we assume the value 0 is the background"""
label_np[label_np == elm] = 0
label = sitk.GetImageFromArray(label_np)
return label
def resize_input_img_and_save_it(self, img_pth, is_label=False, fname='', keep_physical=True):
"""
:param img: sitk input, factor is the outputsize/patched_sized
:return:
"""
img_org = sitk.ReadImage(img_pth)
img = self.__read_and_clean_itk_info(img_pth)
if is_label and self.shared_label_set is not None:
img = self.convert_label_map_into_standard_one(img)
dimension = 3
factor = np.flipud(self.resize_factor)
img_sz = img.GetSize()
if self.resize:
resampler = sitk.ResampleImageFilter()
affine = sitk.AffineTransform(dimension)
matrix = np.array(affine.GetMatrix()).reshape((dimension, dimension))
after_size = [int(img_sz[i] * factor[i]) for i in range(dimension)]
after_size = [int(sz) for sz in after_size]
matrix[0, 0] = 1. / factor[0]
matrix[1, 1] = 1. / factor[1]
matrix[2, 2] = 1. / factor[2]
affine.SetMatrix(matrix.ravel())
resampler.SetSize(after_size)
resampler.SetTransform(affine)
if is_label:
resampler.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resampler.SetInterpolator(sitk.sitkBSpline)
img_resampled = resampler.Execute(img)
else:
img_resampled = img
output_path = self.data_output_path
output_path = os.path.join(output_path, 'img') if not is_label else os.path.join(output_path, 'label')
os.makedirs(output_path, exist_ok=True)
fpth = os.path.join(output_path, fname + '.nii.gz')
if keep_physical:
img_resampled.SetSpacing(resize_spacing(img_sz, img_org.GetSpacing(), factor))
img_resampled.SetOrigin(img_org.GetOrigin())
img_resampled.SetDirection(img_org.GetDirection())
sitk.WriteImage(img_resampled, fpth)
return fpth
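    # Note on the resampler above: SimpleITK transforms map points from the
    # output space back to the input space, which is why the diagonal entries
    # of the affine matrix are set to 1/factor rather than factor.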
def __read_and_clean_itk_info(self, path):
if path is not None:
return sitk.GetImageFromArray(sitk.GetArrayFromImage(sitk.ReadImage(path)))
else:
return None
def __split_dict(self, dict_to_split, split_num):
index_list = list(range(len(dict_to_split)))
index_split = np.array_split(np.array(index_list), num_of_workers)
split_dict = []
dict_to_split_items = list(dict_to_split.items())
for i in range(split_num):
dj = dict(dict_to_split_items[index_split[i][0]:index_split[i][-1] + 1])
split_dict.append(dj)
return split_dict
def __inverse_name(self, name):
try:
n_parts = name.split('_image_')
inverse_name = n_parts[1] + '_' + n_parts[0] + '_image'
return inverse_name
except:
n_parts = name.split('_brain_')
inverse_name = n_parts[1] + '_' + n_parts[0] + '_brain'
return inverse_name
def __len__(self):
        return len(self.name_list)
data_path = '/playpen-raid/zyshen/data/reg_debug_labeled_oai_reg_inter'
phase_list = ['test']
""" path for saving the pair_path_list, pair_name_list"""
task_output_path = '/playpen-raid/zyshen/for_llf/reg_debug_labeled_oai_reg_inter'#'/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter'
""" path for where to read the image during running the tasks"""
running_read_path = '/pine/scr/z/y/zyshen/reg_debug_labeled_oai_reg_inter/data'#'/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter'
""" path for where to save the data"""
data_output_path = '/playpen-raid/zyshen/oai_data/reg_debug_labeled_oai_reg_inter/data'
""" img path need to be replaced with running_read_img_path"""
real_img_path = '/playpen-raid/zyshen/oai_data/Nifti_rescaled' #'/playpen-raid/zhenlinx/Data/OAI_segmentation/Nifti_6sets_rescaled'
""" label path need to be replaced with runing_read_label_path """
real_label_path = ['/playpen-raid/zyshen/oai_data/Nifti_rescaled']
#['/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_right/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038',
# '/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_left/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038']
resize_factor = [1,1,1]#[80./160.,192./384.,192./384]
shared_label_set=None
# data_path = '/playpen-raid/zyshen/data/reg_debug_3000_pair_oai_reg_intra'
# phase_list = ['train','val','debug']
# """ path for saving the pair_path_list, pair_name_list"""
# task_output_path = '/playpen-raid/zyshen/data/reg_debug_3000_pair_oai_reg_intra'
# """ path for where to read the image during running the tasks"""
# running_read_path = '/playpen-raid/zyshen/oai_data/reg_debug_labeled_oai_reg_intra/data'
# """ path for where to save the data"""
# data_output_path = '/playpen-raid/zyshen/oai_data/reg_debug_labeled_oai_reg_intra/data'
# """ img path need to be replaced with running_read_img_path"""
# real_img_path = '/playpen-raid/zhenlinx/Data/OAI_segmentation/Nifti_6sets_rescaled'
# """ label path need to be replaced with runing_read_label_path """
# real_label_path = ['/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_right/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038',
# '/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_left/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038']
# resize_factor = [80./160.,192./384.,192./384]
# shared_label_set=None
# data_path = '/playpen-raid1/zyshen/data/reg_oai_aug'
# phase_list = ['train','val','debug','test']
# """ path for saving the pair_path_list, pair_name_list"""
# task_output_path = '/playpen-raid1/zyshen/data/reg_oai_aug'
# """ path for where to read the image during running the tasks"""
# running_read_path = '/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_intra'
# """ path for where to save the data"""
# data_output_path = '/playpen-raid/zyshen/oai_data/reg_debug_labeled_oai_reg_intra/data'
# """ img path need to be replaced with running_read_img_path"""
# real_img_path = '/playpen-raid/zyshen/oai_data/Nifti_rescaled'#'/playpen-raid/zhenlinx/Data/OAI_segmentation/Nifti_6sets_rescaled'
# """ label path need to be replaced with runing_read_label_path """
# real_label_path = ['/playpen-raid/zyshen/oai_data/Nifti_rescaled']
# #['/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_right/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038',
# # '/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_left/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038']
# resize_factor = [80./160.,192./384.,192./384]
# shared_label_set=None
#
# data_path = '/playpen-raid/zyshen/data/reg_debug_3000_pair_reg_224_oasis3_reg_inter'
# phase_list = ['train','val','debug','test']
# """ path for saving the pair_path_list, pair_name_list"""
# task_output_path = '/playpen-raid/zyshen/data/croped_lfix_for_reg_debug_3000_pair_reg_224_oasis3_reg_inter'#'/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter'
# """ path for where to read the image during running the tasks"""
# running_read_path = '/playpen-raid/zyshen/oasis_data/croped_lfix_for_reg_debug_3000_pair_reg_224_oasis3_reg_inter/data'
# #'/pine/scr/z/y/zyshen/croped_for_reg_debug_3000_pair_oai_reg_inter/data'#'/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter'
# """ path for where to save the data"""
# data_output_path = '/playpen-raid/zyshen/oasis_data/todel/data'
# """ img path need to be replaced with running_read_img_path"""
# real_img_path = '/playpen-raid/xhs400/OASIS_3/processed_images_centered_224_224_224'
# """ label path need to be replaced with runing_read_label_path """
# real_label_path = ['/playpen-raid/xhs400/OASIS_3/processed_images_centered_224_224_224']
# resize_factor = [112./224.,112./224.,112./224]
# """Attention, here we manually add id 62 into list, for it is a big structure and is not absent in val, debug, test dataset"""
# #shared_label_set = {0, 2, 3, 4, 5, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 24, 26, 28, 31, 41, 42, 43, 44, 46, 47, 49, 50, 51, 52, 53, 54, 58, 60, 62, 63, 77, 80, 85, 251, 252, 253, 254, 255}
# shared_label_set = {0, 2, 3, 4, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 24, 28, 31, 41, 42, 43, 46, 47, 49, 50, 51, 52, 53, 54, 60, 63, 77}
#
# #
#
#
# data_path = '/playpen-raid/zyshen/data/syn_data'
# phase_list = ['test']
# """ path for saving the pair_path_list, pair_name_list"""
# task_output_path = '/playpen-raid/zyshen/for_llf/syn_2d' # '/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter'
# """ path for where to read the image during running the tasks"""
# running_read_path = '/pine/scr/z/y/zyshen/data/syn_data/syn_2d/data' # '/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter'
# """ path for where to save the data"""
# data_output_path = '/playpen-raid/zyshen/syn_data/2d_syn/data'
# """ img path need to be replaced with running_read_img_path"""
# real_img_path = '/playpen-raid/zyshen/debugs/syn_expr_0422_2' # '/playpen-raid/zhenlinx/Data/OAI_segmentation/Nifti_6sets_rescaled'
# """ label path need to be replaced with runing_read_label_path """
# real_label_path = ['/playpen-raid/zyshen/debugs/syn_expr_0422_2']
# # ['/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_right/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038',
# # '/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_left/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038']
# resize_factor = [1, 1, 1] # [80./160.,192./384.,192./384]
# shared_label_set = None
#
for phase in phase_list:
dataset = RegistrationDataset(data_path=os.path.join(data_path, phase), phase=phase, resize_factor=resize_factor)
dataset.set_task_output_path(os.path.join(task_output_path, phase))
dataset.set_data_output_path(os.path.join(data_output_path, phase))
dataset.set_real_data_path(real_img_path, real_label_path)
dataset.set_running_read_path(os.path.join(running_read_path, phase))
if shared_label_set is not None:
dataset.set_shared_label(shared_label_set)
dataset.process()
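# Input format sketch (an assumption inferred from split_file_list above): each
# row of pair_path_list.txt holds a source/target image pair, optionally
# followed by the two label maps, in whatever row layout easyreg's
# read_txt_into_list expects, e.g.
#
#   <source_img> <target_img> <source_label> <target_label>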
|
nilq/baby-python
|
python
|
#-*- coding: utf-8 -*-
"""
security
~~~~~~~~
Generic User class for interfacing with use accounts +
authentication.
"""
import hashlib
import random
from utils import Storage, to36, valid_email, \
ALPHANUMERICS as ALPHAS
username_regex = r'([A-Za-z0-9_%s]){%s,%s}$'
passwd_regex = r'([A-Za-z0-9%s]){%s,%s}$'
class Account(object):
"""The base Account class provides the basic functions for allowing
user account creation and authentication, however it should be
extended to allow user retrieval.
"""
@classmethod
def authenticate(cls, username, passwd, salt, uhash):
"""Authenticates/validates a user's credentials by comparing
their username and password versus their computed salted hash.
A successful authentication results in True, False otherwise.
"""
return cls._roast(username + salt + passwd) == uhash
@classmethod
def register(cls, username, passwd, passwd2=None, salt='', email='', **kwargs):
"""Creates a complete user tuple according to seed
credentials. The input undergoes a salting and roasting during
a hashing process and all values are returned for further db
handling (db insertion, etc). Note: For security reasons,
plaintext passwords are never returned and should not be
stored in a db unless there's a very specific reason to.
XXX: Consider adding the following keyword arguments:
password_validator - lambda for validating passwd (chars, len)
username_validator - lambda for validating username (len, etc)
:param salt: used for testing/verifying hash integrity in tests
:param kwargs: any addt'l info which should be saved w/ the created user
usage:
>>> from waltz import Account
# typically, salt is not provided as an argument and is instead
# generated as a side effect of Account.register("username", "password").
# A salt has been included along with the following function call to
# guarantee idempotence (i.e. a consistent/same uhash with each call)
>>> Account.register("username", "password", salt="123456789")
<Storage {'username': 'username', 'uhash': '021d98a32375ed850f459fe484c3ab2e352fc2801ef13eae274103befc9d0274', 'salt': '123456789', 'email': ''}>
# using additional kwargs, such as age (i.e. age=24) to add user attributes
>>> Account.register("username", "password", salt="123456789", age=24)
<Storage {'username': 'username', 'uhash': '021d98a32375ed850f459fe484c3ab2e352fc2801ef13eae274103befc9d0274', 'salt': '123456789', 'email': '', 'age': 24}>
"""
if not passwd: raise ValueError('Password Required')
if email and not valid_email(email):
raise ValueError("Email '%s' is malformed and does not " \
"pass rfc3696." % email)
if passwd2 and not passwd == passwd2:
raise ValueError('Passwords do not match')
salt = salt or cls._salt()
uhash = cls._roast(username + salt + passwd)
        return Storage(list(zip(('username', 'salt', 'uhash', 'email'),
                                (username, salt, uhash, email))) + list(kwargs.items()))
@classmethod
def public_key(cls, uid, username, salt):
"""Generates a public key which can be used as a public unique
identifier token in account activation emails and other
account specific situations where you wish to create a url
intended to only work for a specific user.
"""
return cls._roast(uid + username + salt)
@classmethod
def _salt(cls, length=12):
"""http://en.wikipedia.org/wiki/Salt_(cryptography)
Salting results in the generation of random padding of a
specified 'length' (12 default) which can be prepended or
appended to a password prior to hashing to increase security
and prevent against various brute force attacks, such as
rainbow-table lookups."""
return ''.join([random.choice(ALPHAS) for i in range(length)])
@classmethod
def _roast(cls, beans, chash=hashlib.sha256):
"""Computes a hash digest from username, salt, and
password. Hot swappable algo in case there are code changes
down the road."""
        return chash(beans.encode('utf-8')).hexdigest()
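# A minimal usage sketch: round-trip register() and authenticate() with a
# fixed salt (as in the docstring above) so the hash is reproducible.
# Storage is dict-like, so item access works here.
if __name__ == '__main__':
    user = Account.register("username", "password", salt="123456789")
    assert Account.authenticate("username", "password", user['salt'], user['uhash'])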
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-10-26 11:01
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth', '0008_alter_user_username_max_length'),
('moving', '0006_auto_20181026_0830'),
]
operations = [
migrations.CreateModel(
name='Booking',
fields=[
('location', models.TextField(max_length=50, null=True)),
('number', models.TextField(max_length=50, null=True)),
('email', models.TextField(max_length=50, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('payment', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='booking', to='moving.Payment')),
],
),
]
|
nilq/baby-python
|
python
|
import re
S = input()
if re.search(r'^(hi)+$', S):
print('Yes')
else:
print('No')
|
nilq/baby-python
|
python
|
# -*- Mode: Python3; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
import os
import numpy as np
PATH = "../Images/"
def organize(data):
# ndArray
print("Original:")
print(data[:2])
    # Prepare and reshape the array
    height, width, channels = data.shape
    data = data.flatten()  # flatten to a 1-D vector
    temp = [i for i in data]  # convert to a plain Python list
    temp = [temp[i:i+channels] for i in range(0, height * width * channels,
                                              channels)]  # regroup into per-pixel lists
    # Ascending sort
    for c in range(0, channels):
        # Sort from the last channel to the first
        i = channels - c - 1
        temp.sort(key=lambda value: value[i])
    npArray = np.array(temp, dtype=np.uint8)
    npArray = npArray.flatten()  # flatten again
    npArray = npArray.reshape(height, width, channels)  # reshape back
print("Result:")
print(npArray[:2])
def test(filename):
img_np = PATH + filename + ".npy"
print("Data: ", img_np)
if not os.path.exists(img_np):
print("File not found!")
return
data = np.load(img_np)
organize(data)
if __name__ == '__main__':
# ndArray
h, w, c = 5, 4, 3
numbers = [i for i in range(h*w*c, 0, -1)]
npArray = np.array(numbers).reshape(h, w, c)
organize(npArray)
# ndArray (Imagem)
test("folha_croton")
|
nilq/baby-python
|
python
|
from arrp import clear
def test_clear():
clear("test_dataset")
|
nilq/baby-python
|
python
|
from multiprocessing import Pool
from .rabbitmq.rmq_consumer import CocoRMQConsumer
from .kafka.kafka_consumer import CocoKafkaConsumer
from .redis.redis_consumer import CocoRedisConsumer
from .logger import logger
import time
# This is a mixture of a consumer factory and a thread pool.
class CocoConsumerManager(object):
CONSUMER_CLASS = {
"RMQ": CocoRMQConsumer,
"KAFKA": CocoKafkaConsumer,
"REDIS": CocoRedisConsumer
}
def __init__(self, config, worker_class, pool_size, customized_logger = None):
self._worker_class = worker_class
self._pool = None
self._pool_size = pool_size
self._config = config
        if customized_logger:
            self._logger = customized_logger
        else:
            self._logger = logger
def start(self):
# self._pool = Pool()
# _ = [self._pool.apply_async(CocoConsumerManager._start_consumer,
# args=[x, self._worker_class, self._config]) for x in range(self._pool_size)]
CocoConsumerManager._start_consumer(1, self._worker_class, self._config)
# self._pool.close()
# self._pool.join()
# logger.warning("All progress stopped!")
@staticmethod
def _start_consumer(seq, worker_class, config):
logger.debug('start consumer')
# worker = worker_class(config, seq)
consumer_type = config["MQ_TYPE"]
logger.debug('consumer type: {}'.format(consumer_type))
consumer_class = CocoConsumerManager.CONSUMER_CLASS[consumer_type]
sub_config = config[consumer_type]
# while True:
# try:
# consumer = consumer_class(sub_config, worker_class, logger)
# consumer.connect()
# except Exception as err:
# logger.error(err)
# logger.warning("Consumer Error. Reconnect after 10 seconds.")
# time.sleep(10)
        # let the consumer handle reconnecting
consumer = consumer_class(sub_config, worker_class, logger)
consumer.connect()
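# A minimal usage sketch (MyWorker and the config values are assumptions;
# the config must contain "MQ_TYPE" plus a sub-dict under that key, as read
# by _start_consumer above):
#
#   config = {"MQ_TYPE": "REDIS", "REDIS": {"host": "localhost", "port": 6379}}
#   manager = CocoConsumerManager(config, MyWorker, pool_size=1)
#   manager.start()  # currently runs a single consumer in-process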
|
nilq/baby-python
|
python
|
# Suites of test positions are normally stored in many different EPD files.
# We keep them in one big JSON file instead, with standardized IDs, because
# that makes it easier to keep track of scores over time.
#
# This program imports a new EPD test suite into that JSON file.
import sys
import json
import chess
import chess.engine
epdfile = sys.argv[1]
all_epd = []
refengine = None  # reference engine is started lazily, only if a position lacks 'bm'
line_idx = 0
with open( epdfile + ".epd" ) as epd:
for line in epd.readlines():
line_idx = line_idx + 1
line = line.strip()
# Some epd files are passive-aggressive and use 'am' instead of 'bm'
line = line.replace( "am ", "bm " )
# Some epd files have no separator between the fen and the best move
line = line.replace( "bm ", ";bm " )
# A small number of epd files don't actually *provide*
# a best move, which seems like it kind of defeats the point,
# but ok. In these cases we fire up a strong reference engine
# to get a quick opinion about what looks good. Deeper searches
# might give us better data here.
if not 'bm ' in line:
board = chess.Board( line )
if not refengine:
                refengine = chess.engine.SimpleEngine.popen_uci( r"..\Engine\stockfish-10" )
result = refengine.play( board, chess.engine.Limit( depth=10 ) )
line = line + ";bm " + str( result.move )
# After the fen it's all key/value pairs between semicolons
fields = line.split( ';' )
if len( fields ) > 0:
this_test = {}
fen = fields[0].strip()
this_test['fen'] = fen
for meta in fields[1:]:
meta = meta.strip()
if len( meta ) > 0:
if ' ' in meta:
sep = meta.index( ' ' )
key = meta[:sep].strip()
val = meta[sep:].strip()
if val.startswith( '"' ) and val.endswith( '"' ):
val = val[1:-1]
this_test[key] = val
            # Assign an id if the test doesn't provide one
if not 'id' in this_test:
this_test['id'] = epdfile.replace( '.', '-' ) + "-" + str( line_idx )
            # Validate the best move: try UCI first, fall back to SAN
            bm = this_test.get( 'bm', '' )
            board = chess.Board( fen )
            try:
                bmove = chess.Move.from_uci( bm )
            except ValueError:
                # Eww
                bmove = board.parse_san( bm )
all_epd.append( this_test )
if refengine:
refengine.quit()
ser = json.dumps( all_epd, sort_keys = True, indent = 4 )
print( ser )
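# For illustration, one imported entry in the resulting JSON might look like
# this (the values are hypothetical; the keys come from the parsing above):
#
#   {
#       "bm": "f3g5",
#       "fen": "r1bqkbnr/pppp1ppp/2n5/4p3/2B1P3/5N2/PPPP1PPP/RNBQK2R w KQkq -",
#       "id": "wac-1"
#   }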
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Test for the MDF
import os
import os.path
import sys
sys.path.append(os.path.join(os.path.dirname(__file__),".."))
from schema import Range,Variable,Table
from census_spec_scanner import CensusSpec
TEST_FNAME = os.path.join(os.path.dirname(__file__), "docx_test/test_file_layout.docx")
def test_mdf_reader():
cs = CensusSpec()
cs.load_schema_from_file(TEST_FNAME)
mdf = list(cs.tables())
assert type(mdf)==list
assert len(mdf) == 1 # demo file has but a single table
assert type(mdf[0]) == Table
assert mdf[0].name == "Test_Apples"
if __name__=="__main__":
test_mdf_reader()
|
nilq/baby-python
|
python
|
import time
import pyqtgraph as pg
class UserTestUi(object):
def __init__(self, expected_display, current_display):
pg.mkQApp()
self.widget = pg.QtGui.QSplitter(pg.QtCore.Qt.Vertical)
self.widget.resize(1600, 1000)
self.display_splitter = pg.QtGui.QSplitter(pg.QtCore.Qt.Horizontal)
self.widget.addWidget(self.display_splitter)
self.display1 = expected_display
self.display2 = current_display
self.display_splitter.addWidget(self.display1.widget)
self.display_splitter.addWidget(self.display2.widget)
self.ctrl = pg.QtGui.QWidget()
self.widget.addWidget(self.ctrl)
self.ctrl_layout = pg.QtGui.QVBoxLayout()
self.ctrl.setLayout(self.ctrl_layout)
self.diff_widget = pg.DiffTreeWidget()
self.ctrl_layout.addWidget(self.diff_widget)
self.pass_btn = pg.QtGui.QPushButton('pass')
self.fail_btn = pg.QtGui.QPushButton('fail')
self.ctrl_layout.addWidget(self.pass_btn)
self.ctrl_layout.addWidget(self.fail_btn)
self.pass_btn.clicked.connect(self.pass_clicked)
self.fail_btn.clicked.connect(self.fail_clicked)
self.last_btn_clicked = None
self.widget.setSizes([750, 250])
def pass_clicked(self):
self.last_btn_clicked = 'pass'
def fail_clicked(self):
self.last_btn_clicked = 'fail'
def user_passfail(self):
self.widget.show()
while True:
pg.QtGui.QApplication.processEvents()
last_btn_clicked = self.last_btn_clicked
self.last_btn_clicked = None
if last_btn_clicked == 'fail' or not self.widget.isVisible():
raise Exception("User rejected test result.")
elif last_btn_clicked == 'pass':
break
time.sleep(0.03)
def show_results(self, expected, current):
self.diff_widget.setData(expected, current)
self.display2.show_result(current)
self.display1.show_result(expected)
def clear(self):
self.display1.clear()
self.display2.clear()
self.diff_widget.setData(None, None)
|
nilq/baby-python
|
python
|
from autohandshake.src import HandshakeBrowser
from autohandshake.src.Pages.StudentProfilePage import StudentProfilePage
from autohandshake.src.constants import BASE_URL
class ViewAsStudent:
"""
A sub-session in which the user logs in as a student.
Should be used as a context manager. Example:
::
with HandshakeSession(school_url, email) as browser:
            with ViewAsStudent(student_id, browser):
# do something
"""
def __init__(self, student_id: int, browser: HandshakeBrowser, stay_on_page: bool = False):
"""
:param student_id: the numeric Handshake id of the student to view as
:type student_id: int
:param browser: a logged-in Handshake browser with a STAFF user type
:type browser: HandshakeBrowser
:param stay_on_page: whether or not to stay on the same page when logging
back out of the "View as Student" session. If False,
navigate back to the Handshake homepage when the
session is over. Defaults to False.
:type stay_on_page: bool
"""
self._id = student_id
self._browser = browser
self._stay_on_page = stay_on_page
def __enter__(self):
"""
Log in as the specified student.
"""
profile_page = StudentProfilePage(self._id, self._browser)
profile_page.view_as_student()
def __exit__(self, exc_type, exc_val, exc_tb):
"""Stop viewing as the student and return to the career services view."""
self._browser.click_element_by_xpath('//a[@href="/users/stop_viewing_as"]')
self._browser.update_constants()
if not self._stay_on_page:
self._browser.get(BASE_URL)
|
nilq/baby-python
|
python
|
hensu = "HelloWorld"
print(hensu)
|
nilq/baby-python
|
python
|
import os
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
project_dir = Path(__file__).resolve().parents[1]
load_dotenv(find_dotenv())
LOGLEVEL = os.getenv("LOGLEVEL", "INFO").upper()
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "[%(asctime)s][%(levelname)-5s][%(name)s] - %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "standard",
"level": "INFO",
},
"rolling_file_debug": {
"class": "logging.handlers.RotatingFileHandler",
"filename": project_dir / "logs/debug.log",
"formatter": "standard",
"level": "DEBUG",
"maxBytes": 1024 * 1024,
"backupCount": 10,
},
"rolling_file_warning": {
"class": "logging.handlers.RotatingFileHandler",
"filename": project_dir / "logs/warnings.log",
"formatter": "standard",
"level": "WARNING",
"maxBytes": 1024 * 1024,
"backupCount": 10,
},
},
"root": {
"handlers": ["console", "rolling_file_debug", "rolling_file_warning"],
"level": LOGLEVEL,
},
"loggers": {
"__main__": {"handlers": [], "propagate": True},
"{{ cookiecutter.module_name }}": {"handlers": [], "propagate": True},
},
}
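# A minimal sketch of applying this config (assumption: the logs/ directory
# exists under project_dir, since the rotating file handlers require it):
#
#   import logging.config
#   logging.config.dictConfig(LOGGING)
#   logging.getLogger(__name__).info("logging configured")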
|
nilq/baby-python
|
python
|
import numpy as np
import pytest
import torch
from thgsp.graphs.generators import random_graph
from thgsp.sampling.rsbs import (
cheby_coeff4ideal_band_pass,
estimate_lk,
recon_rsbs,
rsbs,
)
from ..utils4t import devices, float_dtypes, snr_and_mse
def test_cheby_coeff4ideal_band_pass():
order = 30
    coeff = cheby_coeff4ideal_band_pass(0, 1, 0, 2, order)
    assert coeff.shape == (order + 1,)
    print(coeff)
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("dtype", float_dtypes)
class TestRsbs:
    def test_estimate_lk_on_random_graph(self, dtype, device):
N = 100
g = random_graph(N, dtype=dtype, device=device)
lmax = g.max_frequency(lap_type="comb")
print(lmax)
band_limit = 10
lambda_k, cum_coh = estimate_lk(
g, band_limit, lmax=lmax, lap_type="comb", verbose=False, num_estimation=1
)
print(lambda_k)
print(cum_coh)
@pytest.mark.parametrize("return_list", [True, False])
def test_rsbs(self, dtype, device, return_list):
N = 100
k = 50
M = 30
appropriate_num_rv = np.int32(2 * np.round(np.log(N)))
g = random_graph(N, dtype=dtype, device=device)
nodes, coh = rsbs(g, M, k, num_rv=appropriate_num_rv, return_list=return_list)
print(nodes)
if return_list:
assert isinstance(nodes, list)
else:
assert isinstance(nodes, torch.Tensor)
def test_rsbs_recon(self, dtype, device):
N = 10
k = 5
M = 5
appropriate_num_rv = np.int32(2 * np.round(np.log(N)))
g = random_graph(N, 0.3, dtype=dtype, device=device, seed=2021)
print(g.device())
# since scikit-umfpack requires double scalars.
if dtype == torch.double:
nodes, coh = rsbs(g, M, k, num_rv=appropriate_num_rv, return_list=True)
f = torch.rand(N, 1, dtype=dtype, device=device)
f = f / f.norm()
f_hat = recon_rsbs(
f[nodes], S=nodes, L=g.L("comb"), cum_coh=coh, mu=0.1, reg_order=1
)
if torch.any(torch.isnan(f_hat)):
print(
"This case leads to numerical instability and thus would be skipped"
)
else:
s, m = snr_and_mse(f_hat, f)
assert m < 1
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from docker import errors
from oslo_config import cfg
from requests import exceptions as req_exceptions
from magnum.common import docker_utils
from magnum.tests.functional.python_client_base import BayTest
CONF = cfg.CONF
CONF.import_opt('docker_remote_api_version', 'magnum.common.docker_utils',
group='docker')
CONF.import_opt('default_timeout', 'magnum.common.docker_utils',
group='docker')
class TestSwarmAPIs(BayTest):
"""This class will cover swarm bay basic functional testing.
Will test all kinds of container action with tls_disabled=False mode.
"""
coe = "swarm"
baymodel_kwargs = {
"tls_disabled": False,
"network_driver": None,
"volume_driver": None,
"fixed_network": '192.168.0.0/24',
"labels": {}
}
@classmethod
def setUpClass(cls):
super(TestSwarmAPIs, cls).setUpClass()
cls.bay_is_ready = None
def setUp(self):
super(TestSwarmAPIs, self).setUp()
if self.bay_is_ready is True:
return
# Note(eliqiao): In our test cases, docker client or magnum client will
# try to connect to swarm service which is running on master node,
# the endpoint is bay.api_address(listen port is included), but the
# service is not ready right after the bay was created, sleep for an
# acceptable time to wait for service being started.
# This is required, without this any api call will fail as
# 'ConnectionError: [Errno 111] Connection refused'.
msg = ("If you see this error in the functional test, it means "
"the docker service took too long to come up. This may not "
"be an actual error, so an option is to rerun the "
"functional test.")
if self.bay_is_ready is False:
            # In that case, there is no need to run the tests below on the
            # gate; raise a meaningful exception message to indicate that CA
            # setup failed after bay creation. It is better to do a `recheck`.
            # We don't need to test since the bay is not ready.
raise Exception(msg)
url = self.cs.bays.get(self.bay.uuid).api_address
        # Note(eliqiao): docker_utils.CONF.docker.default_timeout is 10.
        # Testing showed this default option does not work on the gate: it
        # causes container creation to fail with a timeout, because the image
        # must be pulled the first time a container is created. The timeout
        # is therefore set to 180s.
docker_api_time_out = 180
self.docker_client = docker_utils.DockerHTTPClient(
url,
CONF.docker.docker_remote_api_version,
docker_api_time_out,
client_key=self.key_file,
client_cert=self.cert_file,
ca_cert=self.ca_file)
self.docker_client_non_tls = docker_utils.DockerHTTPClient(
url,
CONF.docker.docker_remote_api_version,
docker_api_time_out)
def _container_operation(self, func, *args, **kwargs):
# NOTE(hongbin): Swarm bay occasionally aborts the connection, so we
# re-try the operation several times here. In long-term, we need to
# investigate the cause of this issue. See bug #1583337.
for i in range(150):
try:
self.LOG.info("Calling function " + func.__name__)
return func(*args, **kwargs)
except req_exceptions.ConnectionError:
self.LOG.info("Connection aborted on calling Swarm API. "
"Will retry in 2 seconds.")
except errors.APIError as e:
if e.response.status_code != 500:
raise
self.LOG.info("Internal Server Error: " + str(e))
time.sleep(2)
raise Exception("Cannot connect to Swarm API.")
def _create_container(self, **kwargs):
image = kwargs.get('image', 'docker.io/cirros')
command = kwargs.get('command', 'ping -c 1000 8.8.8.8')
return self._container_operation(self.docker_client.create_container,
image=image, command=command)
def test_start_stop_container_from_api(self):
# Leverage docker client to create a container on the bay we created,
# and try to start and stop it then delete it.
resp = self._create_container(image="docker.io/cirros",
command="ping -c 1000 8.8.8.8")
resp = self._container_operation(self.docker_client.containers,
all=True)
container_id = resp[0].get('Id')
self._container_operation(self.docker_client.start,
container=container_id)
resp = self._container_operation(self.docker_client.containers)
self.assertEqual(1, len(resp))
resp = self._container_operation(self.docker_client.inspect_container,
container=container_id)
self.assertTrue(resp['State']['Running'])
self._container_operation(self.docker_client.stop,
container=container_id)
resp = self._container_operation(self.docker_client.inspect_container,
container=container_id)
self.assertFalse(resp['State']['Running'])
self._container_operation(self.docker_client.remove_container,
container=container_id)
resp = self._container_operation(self.docker_client.containers)
self.assertEqual([], resp)
def test_access_with_non_tls_client(self):
self.assertRaises(req_exceptions.SSLError,
self.docker_client_non_tls.containers)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#encoding=utf-8
import numpy as np
from random import choice, shuffle, uniform
#from data_factory import PlotFactory
class DataFactory():
    def __init__(self, n=1):
        self.plt_max=5
        if n <= self.plt_max:
            self.nmb_plt=n
        else:
            print("Maximum possible plots are", self.plt_max, ", n defaults to", self.plt_max)
            self.nmb_plt=self.plt_max # default to maximal possible
self.name=["temperature", "pressure", "humidity", "acceleration", "magnetic_field"]
self.func=[np.sin, np.cos, self.func1, self.func2, self.func3] #function
self.a=[1,2,3,4,5] # amplitude
self.b=[1,2,3,4,5] # bias
self.s=[1,2,3,4,5] # shift
self.f=[1,1,2,2,5] # frequency
        self.noise=[1,1,2,3,5] # noise amplitudes (scaled by 0.2 in produce_data)
self.randomize()
def randomize(self):
print("Shuffle all lists, this way iterating over\nthem has the same result as random choice.")
shuffle(self.name)
shuffle(self.func)
shuffle(self.a)
        shuffle(self.b)
        shuffle(self.s)
        shuffle(self.f)
        shuffle(self.noise)
# if n<self._max_plts:
# for p in range(n):
# key=self.rand_name.remove( choice(self.name) )
# print(self.rand_name)
# # self.plots[]
# else:
# print("Maximum number of plots available is", self._max_plts)
def produce_data(self,x):
data = dict()
for i in range(self.nmb_plt):
name=self.name[i]
func=self.func[i]
a = self.a[i] # amplitude
b = self.b[i] # bias
s = self.s[i] # shift
f = self.f[i] # frequency
u = self.noise[i]*0.2
noise = uniform(-u,u)
data[name]=np.array([ a*func( (x-s)*f ) + b + noise])
return data
def func1(self,x):
return ( np.sin(x)*np.cos(x) / 2.0 )
def func2(self,x):
return ( np.sin(x) + np.sin(x/2) + np.sin(x/4) )
def func3(self,x):
return ( np.sin(x)*np.sin(x/4) )
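# A minimal usage sketch of DataFactory (output varies run to run because of
# the shuffling and random noise above):
if __name__ == '__main__':
    factory = DataFactory(n=3)
    for x in (0.0, 0.5, 1.0):
        print(factory.produce_data(x))  # dict of {signal_name: np.array([value])}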
|
nilq/baby-python
|
python
|
from .base import BaseNewsvendor, DataDrivenMixin
from ..utils.validation import check_cu_co
from keras.models import Sequential
from keras.layers import Dense
import keras.backend as K
from sklearn.utils.validation import check_is_fitted
import numpy as np
ACTIVATIONS = ['elu', 'selu', 'linear', 'tanh', 'relu', 'softmax', 'softsign', 'softplus',
'sigmoid', 'hard_sigmoid', 'exponential']
class DeepLearningNewsvendor(BaseNewsvendor, DataDrivenMixin):
"""A newsvendor based on deep learning
Parameters
----------
cu : {array-like of shape (n_outputs,), Number or None}, default=None
The underage costs per unit. If None, then underage costs are one
for each target variable
co : {array-like of shape (n_outputs,), Number or None}, default=None
The overage costs per unit. If None, then overage costs are one
for each target variable
hidden_layers : {'auto', 'custom'}, default='auto'
Whether to use a automated or customized hidden layer structure.
- When set to 'auto' the network will use two hidden layers. The first
with 2*n_features neurons and 'relu' as activation function the second
one with n_features neurons and 'linear' as activation function
- When set to 'custom' the settings specified in both parameters 'neurons' and
'activations' will be used to build the hidden layers of the network
neurons : list, default=[100]
The ith element represents the number of neurons in the ith hidden layer
Only used when hidden_layers='custom'.
activations : list, default=['relu']
The ith element of the list represents the activation function of the ith layer.
Valid activation functions are: 'elu', 'selu', 'linear', 'tanh', 'relu', 'softmax',
'softsign', 'softplus','sigmoid', 'hard_sigmoid', 'exponential'.
Only used when hidden_layers='custom'.
optimizer: {'adam', 'sgd'}, default='adam'
The optimizer to be used.
epochs: int, default=100
Number of epochs to train the model
verbose: int 0, 1, or 2, default=1
Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch.
Attributes
----------
model_ : tensorflow.keras.Sequential
The underlying model
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs.
cu_ : ndarray, shape (n_outputs,)
Validated underage costs.
co_ : ndarray, shape (n_outputs,)
Validated overage costs.
References
----------
    .. [1] Afshin Oroojlooyjadid, Lawrence V. Snyder, Martin Takáč,
"Applying Deep Learning to the Newsvendor Problem", 2018.
Examples
--------
>>> from ddop.datasets.load_datasets import load_data
>>> from ddop.newsvendor import DeepLearningNewsvendor
>>> from sklearn.model_selection import train_test_split
>>> data = load_data("yaz_steak.csv")
>>> X = data.iloc[:,0:24]
>>> Y = data.iloc[:,24]
>>> cu,co = 15,10
>>> X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25)
>>> mdl = DeepLearningNewsvendor(cu, co)
>>> mdl.fit(X_train, Y_train)
>>> mdl.score(X_test, Y_test)
[64.62898917]
"""
def __init__(self, cu, co, hidden_layers='auto', neurons=[100],
activations=['relu'], optimizer='adam', epochs=100, verbose=1):
self.hidden_layers = hidden_layers
self.neurons = neurons
self.activations = activations
self.optimizer = optimizer
self.epochs = epochs
self.verbose = verbose
super().__init__(
cu=cu,
co=co)
def _nv_loss(self, cu, co):
"""Create a newsvendor loss function with the given under- and overage costs"""
def customized_loss(y_true, y_pred):
self.tensor_ = y_true
loss = K.switch(K.less(y_pred, y_true), cu * (y_true - y_pred), co * (y_pred - y_true))
return K.sum(loss)
return customized_loss
def _create_model(self):
hidden_layers = self.hidden_layers
neurons = self.neurons
activations = self.activations
n_features = self.n_features_
n_outputs = self.n_outputs_
model = Sequential()
if hidden_layers == 'auto':
model.add(Dense(2 * n_features, activation='relu', input_dim=n_features))
model.add(Dense(n_features))
model.add(Dense(n_outputs))
else:
for size, activation in zip(neurons, activations):
model.add(Dense(units=size, activation=activation))
model.add(Dense(n_outputs))
model.build((None, n_features))
model.compile(loss=self._nv_loss(self.cu_, self.co_), optimizer=self.optimizer)
return model
def fit(self, X, y):
"""Fit the model to the training set (X, y).
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples, n_outputs)
The target values.
Returns
----------
self : DeepLearningNewsvendor
Fitted estimator
"""
# Validate input parameters
self._validate_hyperparameters()
X, y = self._validate_data(X, y, multi_output=True)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
# Determine output settings
self.n_features_ = X.shape[1]
self.n_outputs_ = y.shape[1]
# Check and format under- and overage costs
self.cu_, self.co_ = check_cu_co(self.cu, self.co, self.n_outputs_)
model = self._create_model()
model.fit(X, y, epochs=self.epochs, verbose=self.verbose)
self.model_ = model
return self
def _validate_hyperparameters(self):
# Make sure self.neurons is a list
neurons = self.neurons
if not hasattr(neurons, "__iter__"):
neurons = [neurons]
neurons = list(neurons)
# Make sure self.activations is a list
activations = self.activations
if not hasattr(activations, "__iter__"):
activations = [activations]
activations = list(activations)
if self.hidden_layers == "custom" and np.any(np.array(neurons) <= 0):
raise ValueError("neurons must be > 0, got %s." %
self.neurons)
if self.hidden_layers == "custom" and \
np.any(np.array([activation not in ACTIVATIONS for activation in activations])):
raise ValueError("Invalid activation function in activations. Supported are %s but got %s"
% (list(ACTIVATIONS), activations))
if self.hidden_layers not in ["auto", "custom"]:
raise ValueError("hidden_layers %s is not supported." % self.hidden_layers)
if self.hidden_layers == "custom" and len(neurons) != len(activations):
raise ValueError("When customizing the hidden layers neurons and activations must have same "
"length but neurons is of length %s and activations %s"
% (len(neurons), len(activations)))
if self.verbose not in [0, 1, 2]:
raise ValueError("verbose must be either 0, 1 or 2, got %s." %
self.verbose)
def predict(self, X):
"""Predict values for X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples to predict.
Returns
----------
y : array-like of shape (n_samples, n_outputs)
The predicted values
"""
check_is_fitted(self)
pred = self.model_.predict(X)
return pred
|
nilq/baby-python
|
python
|
# Copyright 2018 eShares, Inc. dba Carta, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import threading
from typing import Iterator, Optional, Tuple, cast
from .interface import AbstractFeatureFlagStore, FlagDoesNotExistError
from .storage import FeatureFlagStoreItem, FeatureFlagStoreMeta
from .util.date import now
logger = logging.getLogger(__name__)
class ConsulFeatureFlagStore(AbstractFeatureFlagStore):
def __init__(self, consul, base_key="features"):
self._cache = {}
self._consul = consul
self.base_key = base_key
self._start()
def _start(self):
logger.debug("Spawning a thread to track changes in consul")
self._thread = threading.Thread(target=self._watch)
self._thread.daemon = True
self._thread.start()
    def _watch(self):
        index = None
        while True:
            # pass the previous index back so the call blocks until a change
            index, data = self._consul.kv.get(self.base_key, recurse=True, index=index)
            self._parse_data(data)
def _parse_data(self, data: Tuple[dict]):
if data is None:
return
for item in data:
serialized = item["Value"]
if serialized is None:
continue
deserialized = FeatureFlagStoreItem.deserialize(serialized)
self._set_item_in_cache(item["Key"], deserialized)
def _set_item_in_cache(self, key: str, item: FeatureFlagStoreItem):
self._cache[key] = item
def create(
self,
feature_name: str,
is_enabled: bool = False,
client_data: Optional[dict] = None,
) -> FeatureFlagStoreItem:
item = FeatureFlagStoreItem(
feature_name, is_enabled, FeatureFlagStoreMeta(now(), client_data)
)
return self._save(item)
    def _save(self, item: FeatureFlagStoreItem) -> FeatureFlagStoreItem:
        self._consul.kv.put(self._make_key(item.feature_name), item.serialize())
        # cache under the full consul key so get() finds it via _make_key()
        self._set_item_in_cache(self._make_key(item.feature_name), item)
        return item
def get(self, feature_name: str) -> Optional[FeatureFlagStoreItem]:
return self._cache.get(self._make_key(feature_name))
def _make_key(self, feature_name: str) -> str:
return "/".join([self.base_key, feature_name])
def set(self, feature_name: str, is_enabled: bool):
existing = self.get(feature_name)
if existing is None:
self.create(feature_name, is_enabled)
return
item = FeatureFlagStoreItem(
feature_name, is_enabled, FeatureFlagStoreMeta.from_dict(existing.meta)
)
self._save(item)
def delete(self, feature_name: str):
self._consul.kv.delete(self._make_key(feature_name))
def list(
self, limit: Optional[int] = None, offset: int = 0
) -> Iterator[FeatureFlagStoreItem]:
        keys = sorted(self._cache.keys())[offset:]
        if limit is not None:
            keys = keys[:limit]
        for key in keys:
            # cache keys are already full consul keys; read the cache directly
            yield cast(FeatureFlagStoreItem, self._cache[key])
def set_meta(self, feature_name: str, meta: FeatureFlagStoreMeta):
existing = self.get(feature_name)
if existing is None:
raise FlagDoesNotExistError(
"Feature %s does not exist" % feature_name
) # noqa: E501
item = FeatureFlagStoreItem(feature_name, existing.raw_is_enabled, meta)
self._save(item)
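# A minimal usage sketch (assumption: the python-consul client and a local
# Consul agent on the default port):
#
#   import consul
#   store = ConsulFeatureFlagStore(consul.Consul(), base_key="features")
#   store.create("new_dashboard", is_enabled=True)
#   item = store.get("new_dashboard")  # served from the watched cache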
|
nilq/baby-python
|
python
|
MAX_LENGTH_TEXT_MESSAGE = 800
MAX_LENGTH_TEXT_SUBJECT = 80
TEXT_SIZE = "The text must be between 0 and 800 characters."
SUBJECT_SIZE = "Subject must be between 0 and 80 characters."
USER_EXISTS = "User does not exist."
|
nilq/baby-python
|
python
|
default_app_config = 'features.apps.FeaturesConfig'
|
nilq/baby-python
|
python
|
import logging
import abc
import traceback
from media.monitor.pure import LazyProperty
appname = 'root'
def setup_logging(log_path):
""" Setup logging by writing log to 'log_path' """
#logger = logging.getLogger(appname)
logging.basicConfig(filename=log_path, level=logging.DEBUG)
def get_logger():
""" in case we want to use the common logger from a procedural
interface """
return logging.getLogger()
class Loggable(object):
""" Any class that wants to log can inherit from this class and
automatically get a logger attribute that can be used like:
self.logger.info(...) etc. """
__metaclass__ = abc.ABCMeta
@LazyProperty
def logger(self): return get_logger()
    def unexpected_exception(self, e):
        """ Default message for 'unexpected' exceptions """
        self.fatal_exception("'Unexpected' exception has occurred:", e)
def fatal_exception(self, message, e):
""" Prints an exception 'e' with 'message'. Also outputs the
traceback. """
self.logger.error( message )
self.logger.error( str(e) )
self.logger.error( traceback.format_exc() )
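# A minimal usage sketch of the mixin (Watcher is an illustrative name):
#
#   class Watcher(Loggable):
#       def run(self):
#           self.logger.info("watcher started")  # lazy logger from the mixin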
|
nilq/baby-python
|
python
|
import time
import telnetlib
class Telnet:
    #
    # Developed by Felipe Lyp
    #
def connect(self, host, port, username, password):
self.telnet = telnetlib.Telnet(host, port)
self.telnet.read_until(b"Login:")
self.telnet.write(username.encode('ascii') + b"\n")
if password:
self.telnet.read_until(b"Password:")
self.telnet.write(password.encode('ascii') + b"\n")
self.send('en')
self.send(password)
def send(self, cmd, encode_ascii = True):
if encode_ascii:
self.telnet.write(cmd.encode('ascii') + b"\n")
else:
self.telnet.write(cmd.encode())
time.sleep(1)
def data(self):
return str(self.telnet.read_very_eager().decode('ascii'))
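# A minimal usage sketch (host and credentials are placeholders):
#
#   t = Telnet()
#   t.connect("192.0.2.1", 23, "admin", "secret")
#   t.send("show version")
#   print(t.data())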
|
nilq/baby-python
|
python
|
import random
class Enemy:
"""
Automatically inherits object class from python3
"""
def __init__(self, name="Enemy", hit_points=0, lives=1):
self.name = name
self.hit_points = hit_points
self.lives = lives
self.alive = True
def take_damage(self, damage):
remaining_points = self.hit_points - damage
if remaining_points >= 0:
self.hit_points = remaining_points
print("I took {} points damage and have {} left".format(damage, self.hit_points))
else:
self.lives -= 1
if self.lives > 0:
print("{0.name} lost a life".format(self))
else:
print("{0.name} is dead".format(self))
self.alive = False
def __str__(self):
return """Name: {0.name}, Lives: {0.lives},Hit points: {0.hit_points} Alive: {0.alive}""".format(self)
class Troll(Enemy):
def __init__(self, name):
# super(Troll, self).__init__(name=name, lives=1, hit_points=23)
super().__init__(name=name, lives=1, hit_points=23)
def grunt(self):
print(f'{self.name} stomp you')
class Vampire(Enemy):
def __init__(self, name):
super().__init__(name=name, lives=3, hit_points=12)
def dodges(self):
if random.randint(1, 3) == 3:
print("***** {0.name} dodges *****".format(self))
return True
else:
return False
    def take_damage(self, damage):
        # only apply damage when the dodge fails
        if not self.dodges():
            super().take_damage(damage=damage)
class VampireKing(Vampire):
def __init__(self, name):
super().__init__(name=name)
self.hit_points = 140
def take_damage(self, damage):
qtr_damage = damage // 4
super().take_damage(qtr_damage)
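# A minimal demo of the hierarchy above (output varies because Vampire
# dodges randomly):
if __name__ == '__main__':
    ugly_troll = Troll("Ug")
    ugly_troll.grunt()             # Ug stomp you
    ugly_troll.take_damage(10)     # 13 hit points left
    dracula = VampireKing("Dracula")
    dracula.take_damage(80)        # quarter damage (20) is applied unless he dodges
    print(dracula)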
|
nilq/baby-python
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Service manager module """
import logging
import keystone.backends.api as api
logger = logging.getLogger(__name__) # pylint: disable=C0103
class Manager(object):
def __init__(self):
self.driver = api.SERVICE
def create(self, service):
""" Create a new service """
return self.driver.create(service)
def get(self, service_id):
""" Returns service by ID """
return self.driver.get(service_id)
def get_by_name(self, name):
""" Returns service by name """
return self.driver.get_by_name(name=name)
def get_all(self):
""" Returns all services """
return self.driver.get_all()
def get_page(self, marker, limit):
""" Get one page of services list """
return self.driver.get_page(marker, limit)
def get_page_markers(self, marker, limit):
""" Calculate pagination markers for services list """
return self.driver.get_page_markers(marker, limit)
def get_by_name_and_type(self, name, service_type):
""" Returns service by name and type """
return self.driver.get_by_name_and_type(name, service_type)
# pylint: disable=E1103
def update(self, service):
""" Update service """
return self.driver.update(service['id'], service)
def delete(self, service_id):
""" Delete service """
self.driver.delete(service_id)
|
nilq/baby-python
|
python
|
import argparse
import pickle
import time
import os
import logging
import numpy as np
import theano as th
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
import lasagne
import lasagne.layers as ll
from lasagne.layers import dnn, batch_norm
import nn
logging.basicConfig(level=logging.INFO)
# settings
parser = argparse.ArgumentParser()
parser.add_argument('--seed', default=1, type=int)
parser.add_argument('--batch_size', default=100, type=int)
parser.add_argument('--activation', default='relu', type=str)
parser.add_argument('--learning_rate', default=0.001, type=float)
args = parser.parse_args()
logging.info(args)
# fixed random seeds
rng = np.random.RandomState(args.seed)
theano_rng = MRG_RandomStreams(rng.randint(2 ** 15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2 ** 15)))
# setup output
time_str = time.strftime("%m-%d-%H-%M", time.gmtime())
exp_dir = "./data/" + args.activation + "_" + time_str + "_" + "{}".format(args.learning_rate).replace(".", "p")
try:
    os.stat(exp_dir)
except OSError:
    os.makedirs(exp_dir)
logging.info("OPENING " + exp_dir + '/results.csv')
results_file = open(exp_dir + '/results.csv', 'w')
results_file.write('epoch, time, train_error, test_error\n')
results_file.flush()
# load CIFAR-10 data
def unpickle(file):
fo = open(file, 'rb')
d = pickle.load(fo, encoding='latin1')
fo.close()
return {'x': np.cast[th.config.floatX]((-127.5 + d['data'].reshape((10000,3,32,32)))/128.), 'y': np.array(d['labels']).astype(np.uint8)}
print('Loading data')
train_data = [unpickle('/home-nfs/dan/cifar_data/cifar-10-batches-py/data_batch_' + str(i)) for i in range(1,6)]
trainx = np.concatenate([d['x'] for d in train_data],axis=0)
trainy = np.concatenate([d['y'] for d in train_data])
test_data = unpickle('/home-nfs/dan/cifar_data/cifar-10-batches-py/test_batch')
testx = test_data['x']
testy = test_data['y']
nr_batches_train = int(trainx.shape[0]/args.batch_size)
nr_batches_test = int(testx.shape[0]/args.batch_size)
print('Whitening')
# whitening
whitener = nn.ZCA(x=trainx)
trainx_white = whitener.apply(trainx)
testx_white = whitener.apply(testx)
print('Done whitening')
if args.activation == 'relu':
f = nn.relu
elif args.activation == 'elu':
f = lasagne.nonlinearities.elu
elif args.activation == 'gelu':
f = nn.gelu
else:
assert False, 'Need "relu" "elu" or "gelu" nonlinearity as input name'
x = T.tensor4()
layers = [ll.InputLayer(shape=(None, 3, 32, 32), input_var=x)]
layers.append(ll.GaussianNoiseLayer(layers[-1], sigma=0.15))
layers.append(batch_norm(dnn.Conv2DDNNLayer(layers[-1], 96, (3,3), pad=1, nonlinearity=f)))
layers.append(batch_norm(dnn.Conv2DDNNLayer(layers[-1], 96, (3,3), pad=1, nonlinearity=f)))
layers.append(batch_norm(dnn.Conv2DDNNLayer(layers[-1], 96, (3,3), pad=1, nonlinearity=f)))
layers.append(ll.MaxPool2DLayer(layers[-1], 2))
layers.append(ll.DropoutLayer(layers[-1], p=0.5))
layers.append(batch_norm(dnn.Conv2DDNNLayer(layers[-1], 192, (3,3), pad=1, nonlinearity=f)))
layers.append(batch_norm(dnn.Conv2DDNNLayer(layers[-1], 192, (3,3), pad=1, nonlinearity=f)))
layers.append(batch_norm(dnn.Conv2DDNNLayer(layers[-1], 192, (3,3), pad=1, nonlinearity=f)))
layers.append(ll.MaxPool2DLayer(layers[-1], 2))
layers.append(ll.DropoutLayer(layers[-1], p=0.5))
layers.append(batch_norm(dnn.Conv2DDNNLayer(layers[-1], 192, (3,3), pad=0, nonlinearity=f)))
layers.append(batch_norm(ll.NINLayer(layers[-1], num_units=192, nonlinearity=f)))
layers.append(batch_norm(ll.NINLayer(layers[-1], num_units=192, nonlinearity=f)))
layers.append(nn.GlobalAvgLayer(layers[-1]))
layers.append(batch_norm(ll.DenseLayer(layers[-1], num_units=10, nonlinearity=None)))
# discriminative cost & updates
output_before_softmax = ll.get_output(layers[-1], x)
y = T.ivector()
cost = nn.softmax_loss(y, output_before_softmax)
train_err = T.mean(T.neq(T.argmax(output_before_softmax,axis=1),y))
params = ll.get_all_params(layers, trainable=True)
lr = T.scalar()
mom1 = T.scalar()
param_updates = nn.adam_updates(params, cost, lr=lr, mom1=mom1)
test_output_before_softmax = ll.get_output(layers[-1], x, deterministic=True)
test_err = T.mean(T.neq(T.argmax(test_output_before_softmax,axis=1),y))
print('Compiling')
# compile Theano functions
train_batch = th.function(inputs=[x,y,lr,mom1], outputs=train_err, updates=param_updates)
test_batch = th.function(inputs=[x,y], outputs=test_err)
print('Beginning training')
# //////////// perform training //////////////
begin_all = time.time()
for epoch in range(200):
begin_epoch = time.time()
lr = np.cast[th.config.floatX](args.learning_rate * np.minimum(2. - epoch/100., 1.))
if epoch < 100:
mom1 = 0.9
else:
mom1 = 0.5
# permute the training data
inds = rng.permutation(trainx_white.shape[0])
trainx_white = trainx_white[inds]
trainy = trainy[inds]
# train
train_err = 0.
for t in range(nr_batches_train):
train_err += train_batch(trainx_white[t*args.batch_size:(t+1)*args.batch_size],
trainy[t*args.batch_size:(t+1)*args.batch_size],lr,mom1)
train_err /= nr_batches_train
# test
test_err = 0.
for t in range(nr_batches_test):
test_err += test_batch(testx_white[t*args.batch_size:(t+1)*args.batch_size],
testy[t*args.batch_size:(t+1)*args.batch_size])
test_err /= nr_batches_test
logging.info('Iteration %d, time = %ds, train_err = %.6f, test_err = %.6f' % (epoch, time.time()-begin_epoch, train_err, test_err))
results_file.write('%d, %d, %.6f, %.6f\n' % (epoch, time.time()-begin_all, train_err, test_err))
results_file.flush()
if epoch % 5 == 0:
np.savez(exp_dir + "/network.npz", *lasagne.layers.get_all_param_values(layers))
print('Saved')
|
nilq/baby-python
|
python
|
from django.contrib import admin
from bookmarks.models import Bookmark, BookmarkInstance
class BookmarkAdmin(admin.ModelAdmin):
list_display = ('url', 'description', 'added', 'adder',)
admin.site.register(Bookmark, BookmarkAdmin)
admin.site.register(BookmarkInstance)
|
nilq/baby-python
|
python
|
import os
import pickle
from smart_open import smart_open
def _split3(path):
dir, f = os.path.split(path)
fname, ext = os.path.splitext(f)
return dir, fname, ext
def get_containing_dir(path):
d, _, _ = _split3(path)
return d
def get_parent_dir(path):
if os.path.isfile(path):
path = get_containing_dir(path)
return os.path.abspath(os.path.join(path, os.pardir))
def get_file_name(path):
_, fname, _ = _split3(path)
return fname
def save_obj(obj, name):
with open(name, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with smart_open(name, 'rb') as f:
return pickle.load(f)
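# A minimal usage sketch (the paths below are placeholders):
if __name__ == '__main__':
    d, name, ext = _split3("/tmp/data/report.csv")
    print(d, name, ext)                 # /tmp/data report .csv
    save_obj({"a": 1}, "/tmp/cache.pkl")
    print(load_obj("/tmp/cache.pkl"))   # {'a': 1}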
|
nilq/baby-python
|
python
|
# coding=utf-8
from __future__ import unicode_literals
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from .models import *
class CreditNoteAdmin(admin.ModelAdmin):
model = CreditNote
search_fields = ('numero', 'invoice__id', 'invoice__contact__id')
list_display = ('invoice', 'serie', 'numero', 'get_contact_id')
raw_id_fields = ['invoice']
readonly_fields = ['invoice', 'uuid', 'serie', 'numero']
ordering = ["-id"]
class InvoiceItemInline(admin.StackedInline):
model = InvoiceItem
fields = ['amount', 'product', 'description', 'price', 'copies', 'service_from', 'service_to', 'type']
extra = 0
class InvoiceAdmin(admin.ModelAdmin):
search_fields = ('contact__id', 'contact__name')
list_display = ('id', 'contact', 'amount', 'paid', 'debited', 'canceled', 'uncollectible', 'serie', 'numero')
fieldsets = (
("", {"fields": (
'contact', 'subscription',
('creation_date', 'expiration_date'),
('service_from', 'service_to'),
('amount', 'payment_type'),
('debited', 'paid'),
('payment_date', 'payment_reference'),
'notes',
('canceled', 'cancelation_date'),
'uncollectible',
('uuid', 'serie', 'numero'),
('pdf', 'balance'),
('route', 'order'),
'print_date'
)}),
(_('Billing data'), {
'fields': (
('billing_name', 'billing_address'),
('billing_state', 'billing_city'),
'billing_document',
)}),
)
raw_id_fields = ['contact', 'subscription']
inlines = (InvoiceItemInline,)
readonly_fields = ['canceled', 'cancelation_date', 'uuid', 'serie', 'numero', 'pdf']
ordering = ['-id']
class InvoiceItemAdmin(admin.ModelAdmin):
pass
class BillingAdmin(admin.ModelAdmin):
list_display = (
'id', 'product', 'start', 'amount_billed', 'count',
'progress', 'status')
# readonly_fields = ['exclude']
def get_readonly_fields(self, request, obj=None):
if request.user.is_staff:
if request.user.is_superuser:
return (
'id', 'start', 'exclude', 'errors', 'created_by',
'started_by', 'dpp', 'billing_date', 'end',
'subscriber_amount')
else:
return [f.name for f in self.model._meta.fields]
admin.site.register(Invoice, InvoiceAdmin)
admin.site.register(Billing, BillingAdmin)
admin.site.register(InvoiceItem, InvoiceItemAdmin)
admin.site.register(CreditNote, CreditNoteAdmin)
|
nilq/baby-python
|
python
|
import logging
import logging.handlers
from logging.handlers import TimedRotatingFileHandler, MemoryHandler
import os
from datetime import datetime
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
sys.path.insert(0, os.path.dirname(__file__))
if True:
import settings
skyline_app = 'flux'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
def set_up_logging(app):
if not os.path.exists(settings.LOG_PATH):
os.makedirs(settings.LOG_PATH)
# current_time = datetime.now()
# current_date = current_time.strftime("%Y-%m-%d")
# file_name = current_date + '.log'
# file_location = log_location + file_name
# with open(logfile, 'a+'):
if app:
use_logfile = '%s/%s.%s.log' % (settings.LOG_PATH, skyline_app, app)
else:
use_logfile = logfile
with open(use_logfile, 'a+'):
pass
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s :: %(process)s :: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
handler = logging.handlers.TimedRotatingFileHandler(
use_logfile,
when="midnight",
interval=1,
backupCount=5)
memory_handler = logging.handlers.MemoryHandler(256,
flushLevel=logging.DEBUG,
target=handler)
handler.setFormatter(formatter)
logger.addHandler(memory_handler)
# logger = logging.getLogger(skyline_app)
# format = '[%(asctime)s] [%(levelname)s] [%(message)s] [--> %(pathname)s [%(process)d]:]'
# format = '%(asctime)s [%(levelname)s] %(process)d: %(message)s'
# To store in file
# logging.basicConfig(format=format, filemode='a+', filename=file_location, level=logging.DEBUG)
# logging.basicConfig(format=format, filemode='a', filename=file_location)
# logging.basicConfig(filename='app.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')
# To print only
# logging.basicConfig(format=format, level=logging.DEBUG)
return logger
|
nilq/baby-python
|
python
|
from typing import List, Dict, Callable
import numpy as np
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.decorators import tensor
from neuralmonkey.vocabulary import END_TOKEN_INDEX
from neuralmonkey.runners.base_runner import BaseRunner
from neuralmonkey.decoders.sequence_labeler import SequenceLabeler
# pylint: disable=invalid-name
Postprocessor = Callable[[List[List[str]]], List[List[str]]]
# pylint: enable=invalid-name
class LabelRunner(BaseRunner[SequenceLabeler]):
# pylint: disable=too-few-public-methods
# Pylint issue here: https://github.com/PyCQA/pylint/issues/2607
class Executable(BaseRunner.Executable["LabelRunner"]):
def collect_results(self, results: List[Dict]) -> None:
loss = results[0].get("loss", 0.)
summed_logprobs = results[0]["label_logprobs"]
input_mask = results[0]["input_mask"]
for sess_result in results[1:]:
loss += sess_result.get("loss", 0.)
summed_logprobs = np.logaddexp(summed_logprobs,
sess_result["label_logprobs"])
assert input_mask == sess_result["input_mask"]
argmaxes = np.argmax(summed_logprobs, axis=2)
# CAUTION! FABULOUS HACK BELIEVE ME
argmaxes -= END_TOKEN_INDEX
argmaxes *= input_mask.astype(int)
argmaxes += END_TOKEN_INDEX
# transpose argmaxes because vectors_to_sentences is time-major
vocabulary = self.executor.decoder.vocabulary
decoded_labels = vocabulary.vectors_to_sentences(argmaxes.T)
if self.executor.postprocess is not None:
decoded_labels = self.executor.postprocess(decoded_labels)
self.set_result(outputs=decoded_labels, losses=[loss],
scalar_summaries=None, histogram_summaries=None,
image_summaries=None)
# pylint: enable=too-few-public-methods
def __init__(self,
output_series: str,
decoder: SequenceLabeler,
postprocess: Postprocessor = None) -> None:
check_argument_types()
BaseRunner[SequenceLabeler].__init__(self, output_series, decoder)
self.postprocess = postprocess
@tensor
def fetches(self) -> Dict[str, tf.Tensor]:
return {
"label_logprobs": self.decoder.logprobs,
"input_mask": self.decoder.encoder.input_sequence.temporal_mask,
"loss": self.decoder.cost}
@property
def loss_names(self) -> List[str]:
return ["loss"]
|
nilq/baby-python
|
python
|
import json
import os
from google.auth.transport import requests
from google.oauth2 import service_account
_BASE_URL = "https://healthcare.googleapis.com/v1"
def get_session():
"""Creates an authorized Requests Session."""
credentials = service_account.Credentials.from_service_account_file(
filename=os.environ["GOOGLE_APPLICATION_CREDENTIALS"],
scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
# Create a requests Session object with the credentials.
session = requests.AuthorizedSession(credentials)
return session
def dicomweb_store_instance(
base_url, project_id, cloud_region, dataset_id, dicom_store_id, dcm_file
):
"""Handles the POST requests specified in the DICOMweb standard."""
url = "{}/projects/{}/locations/{}".format(base_url, project_id, cloud_region)
dicomweb_path = "{}/datasets/{}/dicomStores/{}/dicomWeb/studies".format(
url, dataset_id, dicom_store_id
)
# Make an authenticated API request
session = get_session()
with open(dcm_file, "rb") as dcm:
dcm_content = dcm.read()
content_type = "application/dicom"
headers = {"Content-Type": content_type}
response = session.post(dicomweb_path, data=dcm_content, headers=headers)
response.raise_for_status()
print("Stored DICOM instance:")
print(response.text)
return response
def dicomweb_search_instance(
base_url, project_id, cloud_region, dataset_id, dicom_store_id
):
"""Handles the GET requests specified in DICOMweb standard."""
url = "{}/projects/{}/locations/{}".format(base_url, project_id, cloud_region)
dicomweb_path = "{}/datasets/{}/dicomStores/{}/dicomWeb/instances".format(
url, dataset_id, dicom_store_id
)
# Make an authenticated API request
session = get_session()
headers = {"Content-Type": "application/dicom+json; charset=utf-8"}
response = session.get(dicomweb_path, headers=headers)
response.raise_for_status()
instances = response.json()
print("Instances:")
print(json.dumps(instances, indent=2))
return instances
def dicomweb_retrieve_study(
base_url, project_id, cloud_region, dataset_id, dicom_store_id, study_uid
):
"""Handles the GET requests specified in the DICOMweb standard."""
url = "{}/projects/{}/locations/{}".format(base_url, project_id, cloud_region)
dicomweb_path = "{}/datasets/{}/dicomStores/{}/dicomWeb/studies/{}".format(
url, dataset_id, dicom_store_id, study_uid
)
# When specifying the output file, use an extension like ".multipart."
# Then, parse the downloaded multipart file to get each individual
# DICOM file.
file_name = "study.multipart"
# Make an authenticated API request
session = get_session()
response = session.get(dicomweb_path)
response.raise_for_status()
with open(file_name, "wb") as f:
f.write(response.content)
print("Retrieved study and saved to {} in current directory".format(file_name))
return response
def dicomweb_search_studies(
base_url, project_id, cloud_region, dataset_id, dicom_store_id
):
"""Handles the GET requests specified in the DICOMweb standard."""
url = "{}/projects/{}/locations/{}".format(base_url, project_id, cloud_region)
dicomweb_path = "{}/datasets/{}/dicomStores/{}/dicomWeb/studies".format(
url, dataset_id, dicom_store_id
)
# Refine your search by appending DICOM tags to the
# request in the form of query parameters. This sample
# searches for studies containing a patient's name.
params = {"PatientName": "Sally Zhang"}
session = get_session()
response = session.get(dicomweb_path, params=params)
response.raise_for_status()
print("Studies found: response is {}".format(response))
# Uncomment the following lines to process the response as JSON.
# patients = response.json()
# print('Patients found matching query:')
# print(json.dumps(patients, indent=2))
# return patients
def dicomweb_retrieve_instance(
base_url,
project_id,
cloud_region,
dataset_id,
dicom_store_id,
study_uid,
series_uid,
instance_uid,
):
"""Handles the GET requests specified in the DICOMweb standard."""
url = "{}/projects/{}/locations/{}".format(base_url, project_id, cloud_region)
dicom_store_path = "{}/datasets/{}/dicomStores/{}".format(
url, dataset_id, dicom_store_id
)
dicomweb_path = "{}/dicomWeb/studies/{}/series/{}/instances/{}".format(
dicom_store_path, study_uid, series_uid, instance_uid
)
file_name = "instance.dcm"
# Make an authenticated API request
session = get_session()
headers = {"Accept": "application/dicom; transfer-syntax=*"}
response = session.get(dicomweb_path, headers=headers)
response.raise_for_status()
with open(file_name, "wb") as f:
f.write(response.content)
print(
"Retrieved DICOM instance and saved to {} in current directory".format(
file_name
)
)
return response
def dicomweb_retrieve_rendered(
base_url,
project_id,
cloud_region,
dataset_id,
dicom_store_id,
study_uid,
series_uid,
instance_uid,
):
"""Handles the GET requests specified in the DICOMweb standard."""
url = "{}/projects/{}/locations/{}".format(base_url, project_id, cloud_region)
dicom_store_path = "{}/datasets/{}/dicomStores/{}".format(
url, dataset_id, dicom_store_id
)
instance_path = "{}/dicomWeb/studies/{}/series/{}/instances/{}".format(
dicom_store_path, study_uid, series_uid, instance_uid
)
dicomweb_path = "{}/rendered".format(instance_path)
file_name = "rendered_image.png"
# Make an authenticated API request
session = get_session()
headers = {"Accept": "image/png"}
response = session.get(dicomweb_path, headers=headers)
response.raise_for_status()
with open(file_name, "wb") as f:
f.write(response.content)
print(
"Retrieved rendered image and saved to {} in current directory".format(
file_name
)
)
return response
def dicomweb_delete_study(
base_url, project_id, cloud_region, dataset_id, dicom_store_id, study_uid
):
"""Handles DELETE requests equivalent to the GET requests specified in
the WADO-RS standard.
"""
url = "{}/projects/{}/locations/{}".format(base_url, project_id, cloud_region)
dicomweb_path = "{}/datasets/{}/dicomStores/{}/dicomWeb/studies/{}".format(
url, dataset_id, dicom_store_id, study_uid
)
# Make an authenticated API request
session = get_session()
headers = {"Content-Type": "application/dicom+json; charset=utf-8"}
response = session.delete(dicomweb_path, headers=headers)
response.raise_for_status()
print("Deleted study.")
return response
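# A minimal usage sketch (project, region, and store IDs are placeholders;
# GOOGLE_APPLICATION_CREDENTIALS must point at a service account file):
#
#   dicomweb_store_instance(
#       _BASE_URL, "my-project", "us-central1",
#       "my-dataset", "my-dicom-store", "instance.dcm")
#   dicomweb_search_instance(
#       _BASE_URL, "my-project", "us-central1",
#       "my-dataset", "my-dicom-store")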
|
nilq/baby-python
|
python
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
class Isoform(models.Model):
isoform_id = models.BigAutoField(primary_key=True)
uniprot_id = models.BigIntegerField(blank=True, null=True)
accession = models.CharField(max_length=30, blank=True, null=True)
sequence = models.CharField(max_length=200, blank=True, null=True)
uniparc_accession = models.CharField(max_length=30, blank=True, null=True)
embl_acc = models.CharField(max_length=30, blank=True, null=True)
class Meta:
managed = False
db_table = 'isoform'
class Domain(models.Model):
domain_id = models.BigAutoField(primary_key=True)
isoform = models.ForeignKey('Isoform', models.DO_NOTHING, blank=True, null=True)
start = models.BigIntegerField(blank=True, null=True)
end = models.BigIntegerField(blank=True, null=True)
description = models.CharField(max_length=45, blank=True, null=True)
class Meta:
managed = False
db_table = 'domain'
class Ptm(models.Model):
ptm_id = models.BigAutoField(primary_key=True)
domain = models.ForeignKey(Domain, models.DO_NOTHING, blank=True, null=True)
description = models.CharField(max_length=45, blank=True, null=True)
start = models.BigIntegerField(blank=True, null=True)
end = models.BigIntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'ptm'
class UniprotEntry(models.Model):
uniprot_id = models.BigAutoField(primary_key=True)
uniprot_acc = models.CharField(max_length=30, blank=True, null=True)
uniprot_tax_id = models.BigIntegerField(blank=True, null=True)
userstamp = models.CharField(max_length=30, blank=True, null=True)
timestamp = models.DateTimeField(blank=True, null=True)
sequence_version = models.SmallIntegerField(blank=True, null=True)
upi = models.CharField(max_length=13, blank=True, null=True)
md5 = models.CharField(max_length=32, blank=True, null=True)
canonical_uniprot_id = models.IntegerField(blank=True, null=True)
ensembl_derived = models.NullBooleanField()
alias = models.CharField(max_length=30, blank=True, null=True)
gene_symbol = models.CharField(max_length=30, blank=True, null=True)
chromosome_line = models.CharField(max_length=50, blank=True, null=True)
entry_type = models.ForeignKey(
'CvEntryType',
models.DO_NOTHING,
blank=True,
null=True,
db_column="entry_type"
)
length = models.IntegerField(blank=True, null=True)
protein_existence_id = models.SmallIntegerField(blank=True, null=True)
def __str__(self):
return "{0} - {1}".format(self.uniprot_id, self.uniprot_acc)
class Meta:
managed = False
db_table = 'uniprot_entry'
unique_together = (('uniprot_acc', 'sequence_version'),)
class UniprotEntryHistory(models.Model):
release_version = models.CharField(max_length=30)
uniprot = models.ForeignKey(UniprotEntry, models.DO_NOTHING, primary_key=True)
grouping_id = models.BigIntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'uniprot_entry_history'
unique_together = (('uniprot', 'release_version'),)
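# Query sketch (assumes a configured Django project whose database already
# contains these unmanaged tables; the accession "P12345" is hypothetical):
#
#   entry = UniprotEntry.objects.filter(uniprot_acc="P12345").first()
#   for isoform in Isoform.objects.filter(uniprot_id=entry.uniprot_id):
#       for domain in isoform.domain_set.all():
#           print(domain.description, domain.start, domain.end)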
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import super
import unittest
from dpaycli import DPay
from dpaycliapi.websocket import DPayWebsocket
from dpaycli.instance import set_shared_dpay_instance
from dpaycli.nodelist import NodeList
wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
core_unit = "DWB"
class Testcases(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
nodelist = NodeList()
nodelist.update_nodes(dpay_instance=DPay(node=nodelist.get_nodes(normal=True, appbase=True), num_retries=10))
stm = DPay(node=nodelist.get_nodes())
self.ws = DPayWebsocket(
urls=stm.rpc.nodes,
num_retries=10
)
def test_connect(self):
ws = self.ws
self.assertTrue(len(next(ws.nodes)) > 0)
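# Standard unittest entry point so the module can be run directly
# (network access to public dPay nodes is assumed):
if __name__ == "__main__":
    unittest.main()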
|
nilq/baby-python
|
python
|
import os
from configuration import *  # supplies wavs_dir, sr, time_len, perseg, overlap, n_bins
from scipy.io import wavfile
from scipy.signal import stft,check_COLA,istft
import numpy as np
import pickle
import multiprocessing as mp
# Load the per-stem .wav files for each training folder into an in-memory
# dict (despite its name, this function only reads data; nothing is saved here)
def save_as_wav(dir_list):
dataset= {
'vocals': [],
'accompaniment': [],
'bass': [],
'drums': [],
'other': [],
'mixture': []
}
    for folder in dir_list:
        for key in dataset.keys():
            # each stem file is stereo; keep the two channels as separate tracks
            _, data = wavfile.read(os.path.join(wavs_dir, "train", folder, str(key) + ".wav"))
            dataset[key].append(data[:, 0])
            dataset[key].append(data[:, 1])
    return dataset
# read a pickled STFT dataset and return all six stems
def read_data_all(infile=wavs_dir + "/dataset_stft.pickle"):
    with open(infile, "rb") as f:
        dataset = pickle.load(f)
    return dataset['mixture'], dataset['vocals'], dataset['accompaniment'], dataset['drums'], dataset['bass'], dataset['other']
# read a pickled STFT dataset and return mixture, vocals, accompaniment only
def read_data(infile=wavs_dir + "/dataset_stft.pickle"):
    with open(infile, "rb") as f:
        dataset = pickle.load(f)
    return dataset['mixture'], dataset['vocals'], dataset['accompaniment']
def make_chunks(lis):
arr=np.hstack(lis)
chunk_len=len(arr)//int(sr*time_len)*int(sr*time_len)
return arr[:chunk_len].reshape(-1,int(sr*time_len))
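# Example of the truncation above: if sr * time_len were 66150 samples per
# chunk (hypothetical values), a 200000-sample track keeps
# 200000 // 66150 * 66150 == 198450 samples (3 chunks) and drops the
# trailing 1550 samples.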
def make_stft(lis):
arr=make_chunks(lis)
mags=[]
angles=[]
if check_COLA('hann',nperseg=perseg,noverlap = overlap):
for wav in arr:
f,t,X=stft(wav,nperseg=perseg,noverlap = overlap)
mags.append(np.transpose(np.abs(X)).astype('float32'))
angles.append(np.angle(X).astype('float32'))
    else:
        print("COLA constraint not met, in func: utils.make_stft")
        exit()
    return np.vstack(mags), angles
def get_stft_matrix(magnitudes, phases):
return magnitudes * np.exp(1.j * phases)
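# get_stft_matrix simply recombines polar components: for a complex STFT X,
# get_stft_matrix(np.abs(X), np.angle(X)) reproduces X up to rounding, which
# is how separately stored magnitudes and phase angles can be paired again
# for inversion in make_wav below.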
def make_wav(mags, phases, overlap=overlap):
    a = []
    for mag, phase in zip(mags, phases):
        # restore the (n_bins, n_frames) layout that istft expects;
        # 88 is the number of STFT frames per chunk in this configuration
        mag = mag.reshape(88, n_bins).swapaxes(1, 0)
        stft_matrix = get_stft_matrix(mag, phase)
        a.append(istft(stft_matrix, fs=sr, noverlap=overlap)[1])
    return np.hstack(a)
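# Round-trip sketch (synthetic input; assumes configuration's sr, time_len,
# perseg, overlap and n_bins are mutually consistent with the 88
# frames-per-chunk hard-coded in make_wav):
#
#   tone = np.sin(2 * np.pi * 440 * np.arange(int(sr * time_len) * 2) / sr)
#   mags, angles = make_stft([tone])            # mags: (n_chunks * 88, n_bins)
#   restored = make_wav(mags.reshape(-1, 88 * n_bins), angles)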
def save_as_stft(wavs_dir = wavs_dir):
mix,voc,acc,dru,bas,oth=read_data_all(infile = wavs_dir+"/dataset.pickle")
dataset_stft={}
dataset_stft['mixture'],dataset_stft['mixturea']=make_stft(mix)
dataset_stft['vocals'],dataset_stft['vocalsa']=make_stft(voc)
dataset_stft['accompaniment'],dataset_stft['accompanimenta']=make_stft(acc)
    dataset_stft['drums'], dataset_stft['drumsa'] = make_stft(dru)
    dataset_stft['bass'], dataset_stft['bassa'] = make_stft(bas)
dataset_stft['other'],dataset_stft['othera']=make_stft(oth)
print("Saving dataset")
pickle.dump(dataset_stft, open(wavs_dir+"/dataset_stft.pickle", "wb"),pickle.HIGHEST_PROTOCOL)
print("Dataset saved")
def multi_stft(mat, key):
    # worker: compute the magnitudes and phase angles for one stem
    mag, angle = make_stft(mat)
    print(key)
    return [key, mag, angle]
def save_diff_stft(wavs_dir, dataset, index=0):
    dataset_stft = {}
    print('starting stft')
    keylist = list(dataset.keys())
    pool = mp.Pool(processes=6)
    # apply_async dispatches the stems concurrently; the original Pool.apply
    # blocks on each call, which ran the six stems one at a time
    handles = [pool.apply_async(multi_stft, args=(dataset[key], key)) for key in keylist]
    results = [h.get() for h in handles]
    pool.close()
    pool.join()
    print("out of the wormhole!")
    for result in results:
        dataset_stft[result[0]] = result[1]
        dataset_stft[result[0] + "angle"] = result[2]
print("Saving dataset")
pickle.dump(dataset_stft, open(wavs_dir+"/dataset_stft_"+str(index)+".pickle", "wb"),pickle.HIGHEST_PROTOCOL)
print(" saved")
def read(dir_list,index):
data=save_as_wav(dir_list)
print(index)
save_diff_stft(wavs_dir,data,index)
return index
def read_mix_voc_acc(wavs_dir=wavs_dir, limit=(0, 50)):
    # limit is a (start, stop) range over the pickled shard indices (the
    # original default of 49 was a bare int and would fail on limit[0]);
    # shards are written every 5 folders, hence the step of 5 below
    mixl = []
    vocl = []
    accl = []
    for index in range(limit[0], limit[1] - 1, 5):
        print("\rGetting Data: {0:.2f}% ".format(index), end="")
        mix, voc, acc = read_data(wavs_dir + "/dataset_stft_" + str(index) + ".pickle")
        mixl.append(mix)
        vocl.append(voc)
        accl.append(acc)
    zeros = np.zeros((1, n_bins))
    mixl = np.vstack(mixl)
    vocl = np.vstack(vocl)
    accl = np.vstack(accl)
    # pad each matrix with zero rows so its length is divisible by 4
    # (e.g. a 10-row matrix gets 4 - 10 % 4 == 2 zero rows, giving 12)
    if len(mixl) % 4 != 0:
        rem = 4 - len(mixl) % 4
        padding = np.repeat(zeros, rem, axis=0)
        mixl = np.vstack([mixl, padding])
    if len(vocl) % 4 != 0:
        rem = 4 - len(vocl) % 4
        padding = np.repeat(zeros, rem, axis=0)
        vocl = np.vstack([vocl, padding])
    if len(accl) % 4 != 0:
        rem = 4 - len(accl) % 4
        padding = np.repeat(zeros, rem, axis=0)
        accl = np.vstack([accl, padding])
    return mixl, vocl, accl
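# Usage sketch (assumes shard files dataset_stft_0.pickle, dataset_stft_5.pickle,
# ... exist under wavs_dir; the batch size of 4 is inferred from the padding):
#
#   mix, voc, acc = read_mix_voc_acc(limit=(0, 50))
#   assert mix.shape[0] % 4 == 0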
if __name__ == '__main__':
    dir_list = os.listdir(os.path.join(wavs_dir, 'train'))
    # process folders in shards of 5; the start index of 95 presumably
    # resumes a previous run partway through the folder list
    results = [read(dir_list[sub_list:sub_list + 5], sub_list) for sub_list in range(95, len(dir_list) - 4, 5)]
    print(results)
print("Ta-da!")
|
nilq/baby-python
|
python
|