max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
network_objects/decorators/LimitedChargeOrDischargeCapacity.py | Jipje/local_smart_grid_simulation | 0 | 12785251 | <gh_stars>0
class LimitedChargeOrDischargeCapacity:
    """Decorator that caps another network object's charge/discharge actions.

    On construction it splices itself in front of ``network_object`` by
    replacing that object's ``check_action`` and ``take_step`` methods, so
    every requested action is first clamped to the limits read from the
    current environment step.
    """

    def __init__(self, network_object, max_charge_index, max_discharge_index):
        self.network_object = network_object
        self.max_charge_index = max_charge_index
        self.max_discharge_index = max_discharge_index
        self.maximum_charge_kwh = 0
        self.maximum_discharge_kwh = 0
        # Keep references to the wrapped object's original methods, then
        # install this decorator's implementations in their place.
        self.original_check = self.network_object.check_action
        self.original_step = self.network_object.take_step
        self.network_object.take_step = self.take_step
        self.network_object.check_action = self.check_action

    def set_maximum_charge(self, maximum_charge):
        """Set the upper bound (kWh) for charging actions."""
        self.maximum_charge_kwh = maximum_charge

    def set_maximum_discharge(self, maximum_discharge):
        """Set the lower bound (kWh, typically negative) for discharging."""
        self.maximum_discharge_kwh = maximum_discharge

    def check_action(self, action_kwh):
        """Clamp ``action_kwh`` into the current limits, then delegate."""
        adjusted_action = action_kwh
        if adjusted_action > self.maximum_charge_kwh:
            adjusted_action = self.maximum_charge_kwh
        elif adjusted_action < self.maximum_discharge_kwh:
            adjusted_action = self.maximum_discharge_kwh
        if adjusted_action != action_kwh and self.network_object.verbose_lvl > 2:
            print('\t\tAction was limited due to not enough power generation')
        return self.original_check(adjusted_action)

    def take_step(self, environment_step, action_parameters):
        """Refresh both limits from ``environment_step``, then delegate."""
        # A non-positive index means "no limit column in the environment":
        # fall back to an effectively unlimited bound.
        if self.max_charge_index > 0:
            charge_cap = int(environment_step[self.max_charge_index])
        else:
            charge_cap = 99999
        self.set_maximum_charge(charge_cap)
        if self.max_discharge_index > 0:
            discharge_cap = int(environment_step[self.max_discharge_index])
        else:
            discharge_cap = -99999
        self.set_maximum_discharge(discharge_cap)
        return self.original_step(environment_step, action_parameters)
| 2.671875 | 3 |
apps/fithm-gateway/apps/user/router.py | sergio1221/flask-backend | 3 | 12785252 | from flask_restx import Namespace, Resource
from flask import request
from .lib.parser import UserParser
from .view import UserView
from libs.depends.entry import container
from libs.middleware.auth import login_required, active_required
# All /users endpoints require an authenticated, active account.
user = Namespace('user', path='/users', decorators=[active_required(), login_required()])
view = UserView()
@user.route('')
class UserResource(Resource):
    '''User update, delete'''
    @user.doc('get user')
    def get(self):
        '''Return the current user's details.'''
        return view.get()
    @user.doc('update user')
    def put(self):
        '''Update the current user from the parsed request body.'''
        # Resolve the parser from the DI container rather than constructing it.
        parser: UserParser = container.get(UserParser)
        param = parser.parse_update(request)
        return view.update(param)
    @user.doc('delete user')
    def delete(self):
        '''Delete the current user.'''
        return view.delete()
| 2.25 | 2 |
imageprocessor/rotate.py | UBC-MDS/MDSPyGram | 0 | 12785253 | # This script is for the rotate function
import numpy as np
import matplotlib.pyplot as plt
import cv2
def rotate(image, degree, output_path):
    """
    Rotates an OpenCV 2 / NumPy image about it's centre by the given degree
    (in degrees). The returned image will be large enough to hold the entire
    new image, with a black background

    Arguments:
    -----------------------------
    image: str, path of the input image file
    degree: int, rotation angle in degrees
    output_path: str, path the rotated figure is written to

    Output:
    -----------------------------
    an image file in .png format
    """
    # exception handling
    try:
        image = plt.imread(image)
    except AttributeError:
        print("Please type in a string as the path for the input image file.")
        raise
    except TypeError:
        print("Please provide a string as the path for the input image file.")
        raise
    except FileNotFoundError:
        print("The input file/path does not exist, please double check it. ")
        raise
    except OSError:
        print("The input file is not an image.")
        raise
    except Exception as e:
        print("General Error:")
        print(e)
        raise
    # Get the image size
    # NOTE: image_size is (width, height) while image.shape is (rows, cols).
    image_size = (image.shape[1], image.shape[0])
    image_center = tuple(np.array(image_size) / 2)
    # Convert the OpenCV 3x2 rotation matrix to 3x3
    rot_mat = np.vstack([cv2.getRotationMatrix2D(image_center, degree, 1.0), [0, 0, 1]])
    rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])
    # Shorthand for below calcs
    image_w2 = image_size[0] * 0.5
    image_h2 = image_size[1] * 0.5
    # Obtain the rotated coordinates of the image corners
    rotated_coords = [
        (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],
        (np.array([image_w2, image_h2]) * rot_mat_notranslate).A[0],
        (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],
        (np.array([image_w2, -image_h2]) * rot_mat_notranslate).A[0],
    ]
    # Find the size of the new image: the canvas must contain the whole
    # axis-aligned bounding box of the rotated corners.
    x_coords = [pt[0] for pt in rotated_coords]
    x_pos = [x for x in x_coords if x > 0]
    x_neg = [x for x in x_coords if x < 0]
    y_coords = [pt[1] for pt in rotated_coords]
    y_pos = [y for y in y_coords if y > 0]
    y_neg = [y for y in y_coords if y < 0]
    right_bound = max(x_pos)
    left_bound = min(x_neg)
    top_bound = max(y_pos)
    bot_bound = min(y_neg)
    new_w = int(abs(right_bound - left_bound))
    new_h = int(abs(top_bound - bot_bound))
    # We require a translation matrix to keep the image centred
    trans_mat = np.matrix(
        [
            [1, 0, int(new_w * 0.5 - image_w2)],
            [0, 1, int(new_h * 0.5 - image_h2)],
            [0, 0, 1],
        ]
    )
    # Compute the tranform for the combined rotation and translation
    affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]
    # Apply the transform
    result = cv2.warpAffine(image, affine_mat, (new_w, new_h), flags=cv2.INTER_LINEAR)
    # exception handling: the figure (axes included) is saved via matplotlib
    try:
        plt.imshow(result)
        plt.savefig(output_path)
    except FileNotFoundError:
        print("The output path does not exist.")
        raise
    except AttributeError:
        print("Please provide a string as the path for the output image file.")
        raise
    except TypeError:
        print("Please provide a string as the path for the output image file.")
        raise
    except Exception as e:
        print("Other exceptions, please check your input and output. ")
        print(e)
        raise
| 3.90625 | 4 |
tensorflow/mantaGen/scenes/smoke_buoyant_scene.py | BrianKmdy/mantaflow | 95 | 12785254 | #******************************************************************************
#
# MantaGen
# Copyright 2018 <NAME>, <NAME>, <NAME>
#
# This program is free software, distributed under the terms of the
# Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
#******************************************************************************
from manta import *
import numpy
from random import randint
from scenes.scene import Scene
from scenes.volumes import *
from scenes.functions import *
from util.logger import *
def instantiate_scene(**kwargs):  # instantiate independent of name, TODO: replace?
    """Factory hook: build this module's scene class from keyword arguments."""
    info(kwargs)
    return SmokeBuoyantScene(**kwargs)
class SmokeBuoyantScene(Scene):
    """Buoyant smoke scene: randomly placed, noise-seeded inflow sources
    emit density that rises under buoyancy, optionally through an open
    top/bottom boundary."""
    #----------------------------------------------------------------------------------
    def __init__(self, **kwargs):
        super(SmokeBuoyantScene,self).__init__(**kwargs)
        # optionally, init more grids etc.
        self.max_iter_fac = 2    # pressure-solve CG max-iteration factor
        self.accuracy = 5e-4     # pressure-solve CG accuracy
        self.max_source_count = int(kwargs.get("max_source_count", 5))
        self.velocity_scale = float(kwargs.get("velocity_scale", self.resolution.y * 0.05))
        # string-typed flags: only the literal "True" enables them
        self.use_inflow_sources = kwargs.get("use_inflow_sources", "True") == "True"
        self.open_bound = kwargs.get("use_open_bound", "True") == "True"
        self.sources = []
        self.source_strengths = []
        # smoke sims need to track the density
        self.density = self.solver.create(RealGrid, name="Density")
        # Randomized noise field used to seed the density inflow.
        noise = self.solver.create(NoiseField, loadFromFile=True)
        noise.posScale = vec3(40) * numpy.random.uniform(low=0.25, high=1.)
        noise.posOffset = random_vec3s(vmin=0.0) * 100.
        noise.clamp = True
        noise.clampNeg = 0
        noise.clampPos = 1.
        noise.valOffset = 0.15
        noise.timeAnim = 0.4 * numpy.random.uniform(low=0.2, high=1.)
        self.noise = noise
        info("SmokeBuoyantScene initialized")

    #----------------------------------------------------------------------------------
    def set_velocity(self, volume, velocity):
        """Stamp `velocity` into the velocity grid inside `volume`
        (z-component zeroed in 2D)."""
        if self.dimension == 2:
            velocity.z = 0.0
        volume.applyToGrid(solver=self.solver, grid=self.vel, value=velocity)

    #----------------------------------------------------------------------------------
    # sources used as smoke inflow in the following
    def add_source(self, volume):
        """Register `volume` as an inflow source with a random strength."""
        shape = volume.shape(self.solver)
        self.sources.append(shape)
        self.source_strengths.append(numpy.random.uniform(low=0.5, high=1.))

    #----------------------------------------------------------------------------------
    def _create_scene(self):
        """Reset grids and populate the domain with random inflow boxes."""
        super(SmokeBuoyantScene, self)._create_scene()
        self.sources = []
        self.source_strengths = []
        self.density.setConst(0)
        self.vel.setConst(vec3(0))
        is3d = (self.dimension > 2)
        self.flags.initDomain(boundaryWidth=self.boundary)
        self.flags.fillGrid()
        if self.open_bound:
            # open top and bottom ('yY') so smoke can leave the domain
            setOpenBound(self.flags, self.boundary, 'yY', CellType_TypeOutflow|CellType_TypeEmpty)
        # formerly initialize_smoke_scene(scene):
        source_count = randint(1, self.max_source_count)
        for i in range(source_count):
            volume = random_box(center_min=[0.2, 0.1, 0.2], center_max=[0.8, 0.6, 0.8], size_min=[0.005, 0.005, 0.005], size_max=[0.2, 0.2, 0.2], is3d=is3d)
            self.add_source(volume)
            src, sstr = self.sources[-1], self.source_strengths[-1]
            densityInflow(flags=self.flags, density=self.density, noise=self.noise, shape=src, scale=2.0*sstr, sigma=0.5)
        if self.show_gui:
            # central view is more interesting for smoke
            self._gui.setPlane(self.resolution.z // 2)
        info("SmokeBuoyantScene created with {} sources".format(len(self.sources)))

    #==================================================================================
    # SIMULATION
    #----------------------------------------------------------------------------------
    def _compute_simulation_step(self):
        """One solver step: inflow, advection, confinement, buoyancy,
        wall BCs and pressure projection."""
        # Note - sources are turned off earlier, the more there are in the scene
        for i in range(len(self.sources)):
            if self.use_inflow_sources:
                src, sstr = self.sources[i], self.source_strengths[i]
                densityInflow(flags=self.flags, density=self.density, noise=self.noise, shape=src, scale=2.0*sstr, sigma=0.5)
        advectSemiLagrange(flags=self.flags, vel=self.vel, grid=self.density, order=2, clampMode=2)
        advectSemiLagrange(flags=self.flags, vel=self.vel, grid=self.vel   , order=2, clampMode=2)
        vorticityConfinement(vel=self.vel, flags=self.flags, strength=0.1)
        addBuoyancy(density=self.density, vel=self.vel, gravity=0.2*self.gravity, flags=self.flags)
        setWallBcs(flags=self.flags, vel=self.vel)
        solvePressure(flags=self.flags, vel=self.vel, pressure=self.pressure, cgMaxIterFac=self.max_iter_fac, cgAccuracy=self.accuracy)
| 2.4375 | 2 |
4/test_main.py | felixb/advent-of-code-2017 | 0 | 12785255 | #! /usr/bin/env python3
import unittest
from main import run_1, run_2
class Test(unittest.TestCase):
    """Checks the day-4 passphrase validators against the puzzle examples."""

    def test_1(self):
        """run_1 counts passphrases with no duplicate words."""
        puzzle = "aa bb cc dd ee\naa bb cc dd aa\naa bb cc dd aaa"
        self.assertEqual(run_1(puzzle), 2)

    def test_2(self):
        """run_2 counts passphrases with no words that are anagrams."""
        puzzle = ("abcde fghij\nabcde xyz ecdab\na ab abc abd abf abj\n"
                  "iiii oiii ooii oooi oooo\noiii ioii iioi iiio")
        self.assertEqual(run_2(puzzle), 3)


if __name__ == '__main__':
    unittest.main()
| 3.53125 | 4 |
atari/experiment_1_atari/main.py | arcosin/Task_Detector | 0 | 12785256 | <reponame>arcosin/Task_Detector<filename>atari/experiment_1_atari/main.py
import sys
import time
import random
import json
from collections import defaultdict
import torch
import torchvision.transforms as transforms
from torchvision.utils import save_image
from PIL import Image as Image
from datetime import datetime
from .multienv import MultiEnv
from .random_agent import RandomAgent
from .task_detector import TaskDetector
from .autoencoder import AutoEncoder
from .vae import VAE
from .vae import Encoder as VAEEncoder
from .vae import Decoder as VAEDecoder
from .aae import AAE
from .aae import Encoder as AAEEncoder
from .aae import Decoder as AAEDecoder
from .aae import Descriminator as AAEDescriminator
from .baseline_classifier import TaskClassifier
# --- Experiment defaults and hyper-parameters ------------------------------
GPU_TRAINING_ON = True   # NOTE(review): not referenced below; device comes from --device
TRAIN_RECS = 1000        # default training records per environment
TRAIN_EPOCHS = 1         # default training epochs
TEST_RECS = 100          # default test records per environment
NN_SIZE = (77, 100)      # size Atari frames are resized to for the detectors
H_DIM = 300              # hidden width of the autoencoders
AAED_H_DIM_1 = 128       # AAE descriminator hidden layer sizes
AAED_H_DIM_2 = 32
Z_DIM = 128              # latent dimensionality
DEF_ENVS = ["breakout", "pong", "space_invaders", "ms_pacman", "assault", "asteroids", "boxing", "phoenix", "alien"]
device = None            # torch.device; assigned in main()
SAMPLES_DIR = 'samples'
MODELS_DIR = 'models'
LOGS_DIR = 'logs'
class DetGen:
    """Factory producing one fresh detector autoencoder per task."""

    def __init__(self, aeMode):
        super().__init__()
        self.aeMode = aeMode

    def generateDetector(self):
        """Build an AE, VAE or AAE (per `aeMode`) on the global device."""
        if self.aeMode == "vae":
            return buildVAE().to(device)
        if self.aeMode == "aae":
            return buildAAE().to(device)
        return AutoEncoder(NN_SIZE, H_DIM, Z_DIM).to(device)
# Source: https://discuss.pytorch.org/t/how-to-add-noise-to-mnist-dataset-when-using-pytorch/59745.
class AddGaussianNoise:
    """Transform that adds element-wise Gaussian noise to a tensor."""

    def __init__(self, mean = 0.0, std = 0.5):
        self.std = std
        self.mean = mean

    def __call__(self, t):
        noise = torch.randn(t.size()) * self.std + self.mean
        return t + noise

    def __repr__(self):
        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
def buildVAE():
    """Assemble a VAE from an encoder/decoder pair sized by module constants."""
    encoder = VAEEncoder(NN_SIZE, Z_DIM, Z_DIM, h = H_DIM)
    decoder = VAEDecoder(Z_DIM, NN_SIZE, h = H_DIM)
    return VAE(encoder, decoder)
def buildAAE():
    """Assemble an AAE (encoder, decoder, descriminator) from module constants."""
    encoder = AAEEncoder(NN_SIZE, Z_DIM, h = H_DIM)
    decoder = AAEDecoder(Z_DIM, NN_SIZE, h = H_DIM)
    # The descriminator no longer takes the latent size as a positional
    # argument (an older call with Z_DIM was left commented in history).
    descriminator = AAEDescriminator(h1 = AAED_H_DIM_1, h2 = AAED_H_DIM_2)
    return AAE(encoder, decoder, descriminator, Z_DIM)
def preprocess(inputDict, addNoise = False):
    """Convert one transition dict into network-ready frame tensors.

    Parameters
    ----------
    inputDict : dict with keys "S0" (state) and "S1" (next state), each a
        torch tensor holding a raw Atari frame.
    addNoise : when True, append Gaussian noise to the pipeline (used to
        stress-test detection).

    Returns
    -------
    (x, n_x) : current and next frame as batched tensors on `device`;
        n_x is detached from the autograd graph.
    """
    # Build the pipeline once; the two branches previously duplicated the
    # whole transform list, differing only in the trailing noise step.
    steps = [
        transforms.ToPILImage(),
        transforms.Resize(NN_SIZE, Image.NEAREST),
        # Frames arrive transposed, so flip vertically to restore orientation.
        lambda x: transforms.functional.vflip(x),
        transforms.ToTensor(),
    ]
    if addNoise:
        steps.append(AddGaussianNoise())
    transform = transforms.Compose(steps)
    x = inputDict['S0'].T
    x = transform(x)
    x = torch.unsqueeze(x, dim=0)
    x = x.to(device)
    n_x = inputDict['S1'].T
    n_x = transform(n_x)
    n_x = torch.unsqueeze(n_x, dim=0)
    n_x = n_x.to(device).detach()
    return x, n_x
def convertTorch(state):
    """Wrap a NumPy observation in a torch tensor."""
    tensor = torch.from_numpy(state)
    return tensor
def test(agent, detector, env, envID, ds, log, addNoise):
    """Run the detector over test dataset `ds` and tally its predictions.

    Returns two dicts keyed by str((predicted_env, true_env)): raw counts
    and distribution-normalized counts.
    """
    detector.toggleSaving(False)
    predicteds = defaultdict(lambda: 0.0)
    predictedsNorm = defaultdict(lambda: 0.0)
    with torch.no_grad():
        for inpNum, inp in enumerate(ds):
            ts = time.time()
            x, n_x = preprocess(inp, addNoise = addNoise)
            if inpNum == 0:
                # Save one sample frame for visual inspection.
                # BUG FIX: the original code also `break`ed here, which
                # skipped the whole evaluation loop and always returned
                # empty tallies.
                save_image(torch.rot90(x, 3, [2, 3]), "%s-noise.png" % envID)
            envPred, envPredNorm = detector.detect(x)
            predicteds[str((envPred, envID))] += 1
            predictedsNorm[str((envPredNorm, envID))] += 1
            te = time.time()
            print("Env: {}   Record: {}/{}   Corr: {}={}-->{}   NCorr: {}={}-->{}   Time: {}".format(envID, inpNum, len(ds), envPred, envID, (envPred == envID), envPredNorm, envID, (envPredNorm == envID), te - ts))
    detector.toggleSaving(True)
    return predicteds, predictedsNorm
def genDataFromEnv(agent, env, datasetSize, render = False):
    """Roll out `agent` in `env` until `datasetSize` transitions are gathered.

    Returns a list of dicts {"S0": state, "S1": next_state, "A": action},
    subsampled uniformly down to exactly `datasetSize` records.
    """
    ds = []
    while True:
        state = convertTorch(env.reset())
        terminal = False
        i = 0
        while not terminal:
            if render: env.render()
            action = agent.act(state)
            nextState, reward, terminal, info = env.step(action)
            nextState = convertTorch(nextState)
            detectorInput = {"S0": state, "S1": nextState, "A": action}
            ds.append(detectorInput)
            state = nextState
            i = i + 1
            # Stop mid-episode once enough records exist; subsample to
            # decorrelate consecutive frames.
            if len(ds) >= datasetSize:
                ds = random.sample(ds, datasetSize)
                return ds
def train(agent, detector, env, envID, ds, epochs, sampleDir, log):
    """Train the detector for `envID` on dataset `ds`, then fit its
    per-task error distribution on a second pass.

    Losses and sample-image paths are appended to `log` (if not None);
    one reconstruction sample is written per epoch into `sampleDir`.
    """
    if log is not None:
        lossKey = str(envID) + "_train_loss"
        samplePathKey = str(envID) + "_train_samples"
        log[lossKey] = []
        log[samplePathKey] = []
    for epoch in range(epochs):
        for inpNum, inp in enumerate(ds):
            ts = time.time()
            out, z, loss = trainDetector(detector, inp, envID)
            te = time.time()
            print("Env: {}   Epoch: {}/{}   Record: {}/{}   Loss: {}   Time: {}".format(envID, epoch, epochs, inpNum, len(ds), loss, te - ts), flush=True)
            if log is not None:
                log[lossKey].append(loss)
        # Save one reconstruction sample (the last batch output) per epoch.
        if sampleDir[-1] == '/':
            imgPath = "{}env_{}_e_{}.png".format(sampleDir, envID, epoch)
        else:
            imgPath = "{}/env_{}_e_{}.png".format(sampleDir, envID, epoch)
        if log is not None:
            log[samplePathKey].append(imgPath)
        if out is not None:
            save_image(torch.rot90(out, 3, [2, 3]), imgPath)
        #print("Ooo   ", torch.min(z), torch.max(z), torch.mean(z), torch.std(z))
    # Second pass: fit the detector's error distribution on the same data.
    for inpNum, inp in enumerate(ds):
        trainDetectorDistro(detector, inp, envID)
def trainDetectorDistro(detector, inputDict, envLabel):
    """Feed one preprocessed frame to the detector's distribution fitter."""
    with torch.no_grad():
        x, _next_frame = preprocess(inputDict)
        detector.trainDistro(x, envLabel)
def trainDetector(detector, inputDict, envLabel):
    """Run one supervised step; returns (reconstruction, latent, loss)."""
    current, following = preprocess(inputDict)
    return detector.trainStep(current, following, envLabel)
def populateCM(taskList, cm, predicteds):
    """Accumulate per-(predicted, true) counts from `predicteds` into `cm`."""
    for true_env in taskList:
        for pred_env in taskList:
            key = str((pred_env, true_env))
            cm[key] += predicteds[key]
def printCM(taskList, cm):
    """Pretty-print confusion matrix `cm` (rows: true task, cols: predicted)."""
    print("             Predicted:")
    # Header row with one column label per task.
    print("           ", end = '')
    dotsLen = 0
    for task in taskList:
        s = task + "   "
        print(s, end = '')
        dotsLen += len(s)
    print()
    # Top border sized to the header width.
    print("          ", end = '')
    for _ in range(dotsLen):
        print('-', end = '')
    print()
    # One row per true task, counts aligned in 8-wide columns.
    for i, trueEnv in enumerate(taskList):
        print("Task " + str(i) + ":  |", end = '')
        for predEnv in taskList:
            print("{:8.3f}".format(cm[str((predEnv, trueEnv))]), end = ' ')
        print("|")
    # Bottom border.
    print("          ", end = '')
    for _ in range(dotsLen):
        print('-', end = '')
    print()
def writeLog(log, filepath):
    """Serialize the run log dictionary as JSON at `filepath`."""
    with open(filepath, "w") as log_file:
        json.dump(log, log_file)
def configCLIParser(parser):
    """Attach this experiment's CLI options to `parser` and return it."""
    parser.add_argument("--train_size", help="Number of records to generate for training.", type=int, default=TRAIN_RECS)
    parser.add_argument("--train_epochs", help="Training epochs.", type=int, default=TRAIN_EPOCHS)
    parser.add_argument("--test_size", help="Number of records to generate for testing.", type=int, default=TEST_RECS)
    parser.add_argument("--train_mode", help="If 2, train on one enviroment specified by train_env. If 1, trains all detectors. If 0, attempts to load detectors.", choices=[-1, 0, 1, 2], type=int, default=1)
    parser.add_argument("--train_env", help="Env to use if train_mode is set to 2.", choices=DEF_ENVS, default=DEF_ENVS[0])
    parser.add_argument("--test_mode", help="If 1, tests the detectors. If 0, skips testing.", type=int, choices=[0, 1], default=1)
    parser.add_argument("--gen_mode", help="If 1, generates from AE, VAE, or AAE. If 0, skips generation.", type=int, choices=[0, 1], default=0)
    parser.add_argument("--logging", help="Logs important info as JSON.", type=int, choices=[0, 1], default=1)
    parser.add_argument("--ae_type", help="Type of AE to use.", choices=["aae", "vae", "ae", "base"], default="aae")
    parser.add_argument("--detector_cache", help="Size of the detector cache. The default -1 maps to no limit.", type=int, default=-1)
    parser.add_argument("--test_noise", help="If 1, add noise to images during testing.", type=int, choices=[0, 1], default=0)
    parser.add_argument("--device", help="Device to run torch on. Usually 'cpu' or 'cuda:[N]'. Defaults to cpu if cuda is not available.", type=str, default="cpu")
    parser.add_argument("--models_dir", help="Directory to store model save / load files.", type=str, default = "./%s/" % MODELS_DIR)
    parser.add_argument("--logs_dir", help="Directory to store JSON log files.", type=str, default = "./%s/" % LOGS_DIR)
    parser.add_argument("--samples_dir", help="Directory to store training reconst samples.", type=str, default = "./%s/" % SAMPLES_DIR)
    return parser
def main(args):
    """Entry point: train/load per-task detectors, optionally generate
    samples, then evaluate detection accuracy.

    `args` is the namespace produced by configCLIParser().
    """
    global device
    if args.logging:
        log = dict()
    else:
        log = None
    print("Starting.", flush=True)
    if torch.cuda.is_available():
        print("Cuda is available.")
        print("Using device: %s." % args.device)
        device = torch.device(args.device)
    else:
        print("Cuda is not available.")
        print("Using device: cpu.")
        device = torch.device("cpu")
    # Modes 2 and -1 operate on a single environment; otherwise use all.
    if args.train_mode == 2 or args.train_mode == -1:
        envNameList = [args.train_env]
    else:
        envNameList = DEF_ENVS
    atariGames = MultiEnv(envNameList)
    agent = RandomAgent(atariGames.actSpace)
    if args.ae_type != "base":
        gen = DetGen(args.ae_type)
        taskDetector = TaskDetector(gen, args.models_dir, args.ae_type, detectorCache = args.detector_cache, device = args.device)
    else:
        taskDetector = TaskClassifier(NN_SIZE, len(DEF_ENVS), args.models_dir, h = H_DIM, device = args.device)
    if args.train_mode > 0:
        # Train one detector per environment, expelling each from memory
        # after training to bound memory usage.
        for i, env in enumerate(atariGames.getEnvList()):
            ds = genDataFromEnv(agent, env, args.train_size)
            print("Mem size of ds now: %s." % sys.getsizeof(ds))
            taskDetector.addTask(env.game)
            print("Mem size of task detector now: %s." % sys.getsizeof(taskDetector))
            train(agent, taskDetector, env, env.game, ds, args.train_epochs, args.samples_dir, log)
            taskDetector.expelDetector(env.game)
        print("Training complete.\n\n")
    else:
        taskDetector.loadAll(envNameList)
        print("Loaded envs %s." % str(envNameList))
    if args.gen_mode == 1 and args.ae_type in ["ae", "vae", "aae"]:
        for n in taskDetector.getNames():
            m = taskDetector.getDetectorModel(n)
            g = m.generate()
            if args.samples_dir[-1] == '/':
                imgPath = "{}env_{}_gen.png".format(args.samples_dir, n)
            else:
                # BUG FIX: this branch previously omitted the '/' separator,
                # making both branches identical (cf. the same logic in train()).
                imgPath = "{}/env_{}_gen.png".format(args.samples_dir, n)
            save_image(torch.rot90(g, 3, [2, 3]), imgPath)
    if args.test_mode == 1:
        print("Testing with and without normalization.")
        cm = defaultdict(lambda: 0.0)
        cmn = defaultdict(lambda: 0.0)
        for i, env in enumerate(atariGames.getEnvList()):
            ds = genDataFromEnv(agent, env, args.test_size)
            predicteds, predictedsNorm = test(agent, taskDetector, env, env.game, ds, log, addNoise = (args.test_noise == 1))
            populateCM(envNameList, cm, predicteds)
            populateCM(envNameList, cmn, predictedsNorm)
        if log is not None:
            log["env_names"] = envNameList
            log["cm"] = dict(cm)
            log["cmn"] = dict(cmn)
        print("Testing complete.\n")
        print("Not normalized:\n\n")
        printCM(envNameList, cm)
        print("\n\nNormalized:\n\n")
        printCM(envNameList, cmn)
    if log is not None:
        ts = datetime.now().strftime(r"%m-%d-%Y_%H-%M-%S")
        writeLog(log, "{}log-{}-{}".format(args.logs_dir, args.ae_type, ts))
    print("\n\nDone.", flush=True)
if __name__ == '__main__':
    # BUG FIX: main() requires the parsed CLI namespace; the bare main()
    # call here raised TypeError. Build the parser declared above instead.
    import argparse
    main(configCLIParser(argparse.ArgumentParser()).parse_args())
#===============================================================================
| 2.140625 | 2 |
Ex028.py | CaioHRombaldo/PythonClasses | 0 | 12785257 | <reponame>CaioHRombaldo/PythonClasses<gh_stars>0
from random import randint
from time import sleep
# Guessing game: the player guesses a number 1-5 and it is compared with a
# random pick made after the guess is read.
answer = int(input('* Computer sound * Im thinking of a number from 1 to 5, try to guess: '))
rand = randint(1, 5)
print('PROCESSING...')
sleep(2)  # artificial suspense before revealing the result
if answer == rand:
    print('Congratulations, I dont know how you did it, but you got it right!')
else:
    print('It was not this time my friend, machines 1 x 0 human.')
print('The number I was thinking about was: {}.'.format(rand))
| 3.828125 | 4 |
score.py | htang22/pong_on_pygame | 0 | 12785258 | import pygame
class Score:
    """Tracks and renders both players' scores for a pong match."""

    def __init__(self):
        self.right_score = 0
        self.left_score = 0
        self.right_cord = (465, 50)
        self.left_cord = (300, 50)
        self.left_color = (0, 0, 0)
        self.right_color = (0, 0, 0)

    def show_score(self, window, color):
        """Render both score numbers and the two mid-court divider segments onto `window` in `color`."""
        score_font = pygame.font.Font("freesansbold.ttf", 80)
        right_text = score_font.render(f"{self.right_score}", True, self.right_color)
        left_text = score_font.render(f"{self.left_score}", True, self.left_color)
        # Divider: two vertical bars with a gap between them.
        pygame.draw.rect(window, color, (400, 0, 10, 275))
        pygame.draw.rect(window, color, (400, 350, 10, 275))
        window.blit(right_text, self.right_cord)
        window.blit(left_text, self.left_cord)

    def game_over(self):
        """Return True once either side reaches 7 points."""
        return self.left_score == 7 or self.right_score == 7

    def reset_score(self):
        """Zero both players' scores."""
        self.right_score = 0
        self.left_score = 0
| 3.671875 | 4 |
python/celerite2/__init__.py | jacksonloper/celerite2 | 38 | 12785259 | <filename>python/celerite2/__init__.py
# -*- coding: utf-8 -*-
__all__ = ["__version__", "terms", "GaussianProcess"]
from . import terms
from .celerite2_version import __version__
from .numpy import GaussianProcess
__uri__ = "https://celerite2.readthedocs.io"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
__description__ = "Fast and scalable Gaussian Processes in 1D"
__bibtex__ = __citation__ = r"""
@article{celerite1,
author = {{<NAME>. and {<NAME>. and {<NAME>. and
{<NAME>.},
title = "{Fast and Scalable Gaussian Process Modeling with Applications to
Astronomical Time Series}",
journal = {\aj},
year = 2017,
month = dec,
volume = 154,
pages = {220},
doi = {10.3847/1538-3881/aa9332},
adsurl = {http://adsabs.harvard.edu/abs/2017AJ....154..220F},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
@article{celerite2,
author = {{Foreman-Mackey}, D.},
title = "{Scalable Backpropagation for Gaussian Processes using Celerite}",
journal = {Research Notes of the American Astronomical Society},
year = 2018,
month = feb,
volume = 2,
number = 1,
pages = {31},
doi = {10.3847/2515-5172/aaaf6c},
adsurl = {http://adsabs.harvard.edu/abs/2018RNAAS...2a..31F},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
"""
| 1.96875 | 2 |
scripts/get_seqs.py | MonashBioinformaticsPlatform/iffyRna | 1 | 12785260 | <reponame>MonashBioinformaticsPlatform/iffyRna
#!/usr/bin/env python3
# -*- coding: iso-8859-15 -*-
import sys
import gzip
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
# Usage: get_seqs.py <features.tsv> <ref_genome.fa.gz>
# Extracts each feature's sequence from the (gzipped) reference genome and
# writes FASTA records to stdout.
feats = sys.argv[1]
ref_genome = sys.argv[2]
# feats_dict: chrom -> {gene_id: [start, end, n_reads, feat_size, frac]}
feats_dict = {}
with open(feats) as feats_handl:
    for i in feats_handl:
        line = i.strip()
        gene_id,chrom,start,end,strand,n_reads,feat_size,frac,mean,std = line.split('\t')
        # header line
        if gene_id.startswith("GeneId"):
            continue
        if chrom not in feats_dict:
            feats_dict[chrom] = {}
        if gene_id in feats_dict[chrom]:
            sys.exit("ERROR: can't happen")
        feats_dict[chrom][gene_id] = [start, end, n_reads, feat_size, frac]
for seq in SeqIO.parse(gzip.open(ref_genome, 'rt'), 'fasta'):
    # assuming chrom name is the same
    # which it should be
    feats = feats_dict.get(seq.id)
    if feats is None:
        continue
    for feat, v in feats.items():
        start, end, n_reads, feat_size, frac = v
        # end is treated as inclusive, hence the +1 in the slice
        frag = seq.seq[int(start):int(end)+1]
        info = "%s|%s|%s|%s|%s" % (start, end, feat_size, n_reads, frac)
        # NOTE(review): 'id' and 'info' shadow builtins/earlier names
        id = "%s_%s" % (seq.id, feat)
        #record = SeqRecord(frag,
        record = SeqRecord(frag.upper(),
                           id=id,
                           name=feat,
                           description=info)
        SeqIO.write(record, sys.stdout, 'fasta')
| 2.171875 | 2 |
app/utils/navigation.py | tch1bo/viaduct | 11 | 12785261 | <reponame>tch1bo/viaduct<filename>app/utils/navigation.py<gh_stars>10-100
# -*- coding: utf-8 -*-
import datetime
import re
from flask import render_template, request, url_for
from flask_login import current_user
from app import db
from app.forms.user import SignInForm
from app.models.activity import Activity
from app.models.navigation import NavigationEntry
from app.models.page import Page
from app.service import page_service
class NavigationAPI:
    """Static helpers for building, ordering and rendering the site
    navigation tree (NavigationEntry records, optionally backed by Pages)."""

    @staticmethod
    def get_navigation_bar():
        """Render the top navigation bar for the current user."""
        entries = NavigationAPI.get_entries(True)
        entries = NavigationAPI.remove_unauthorized(entries)
        login_form = SignInForm()
        return render_template('navigation/view_bar.htm', bar_entries=entries,
                               login_form=login_form)

    @staticmethod
    def _get_entry_by_url(url):
        """Look up the navigation entry for `url`, preferring a Page match."""
        page = Page.query.filter_by(path=url.lstrip('/')).first()
        if page and page.navigation_entry:
            return page.navigation_entry[0]
        else:
            return NavigationEntry.query.filter_by(url=url).first()

    @staticmethod
    def get_navigation_menu():
        """Render the sidebar menu for the current request path."""
        my_path = request.path
        # Strip an optional trailing "/<number>/" (e.g. pagination) suffix.
        my_path = re.sub(r'(/[0-9]+)?/$', '', my_path)
        me = NavigationAPI._get_entry_by_url(my_path)
        if me:
            parent = me.parent
        else:
            # No entry for this exact path: fall back to the parent path.
            parent_path = my_path.rsplit('/', 1)[0]
            me = NavigationAPI._get_entry_by_url(parent_path)
            parent = me.parent if me else None
        if parent:
            entries = parent.get_children()
        else:
            entries = [me] if me else []
        entries = NavigationAPI.remove_unauthorized(entries)
        return render_template('navigation/view_sidebar.htm', back=parent,
                               pages=entries, current=me)

    @staticmethod
    def current_entry():
        """Return the navigation entry matching the current request path."""
        my_path = request.path
        # Drop a trailing numeric path segment, if present, then the slash.
        temp_strip = my_path.rstrip('0123456789')
        if temp_strip.endswith('/'):
            my_path = temp_strip
        my_path = my_path.rstrip('/')
        return NavigationAPI._get_entry_by_url(my_path)

    @staticmethod
    def order(entries, parent):
        """Recursively persist the order/parent structure given as nested
        dicts ({'id': ..., 'children': [...]}) under `parent`."""
        position = 1
        for entry in entries:
            db_entry = db.session.query(NavigationEntry)\
                .filter_by(id=entry['id']).first()
            db_entry.parent_id = parent.id if parent else None
            db_entry.position = position
            NavigationAPI.order(entry['children'], db_entry)
            position += 1
            db.session.add(db_entry)
        db.session.commit()

    @staticmethod
    def get_root_entries():
        """Return all top-level entries ordered by position."""
        return NavigationEntry.query.filter_by(parent_id=None)\
            .order_by(NavigationEntry.position).all()

    @staticmethod
    def get_entries(inc_activities=False):
        """Build the full entry tree in memory (children_fast links).

        NOTE(review): `inc_activities` is never read below — activity lists
        are filled for every entry flagged activity_list; confirm intent.
        """
        entries_all = NavigationEntry.query.order_by(NavigationEntry.position)\
            .all()
        entry_dict = dict((entry.id, entry) for entry in entries_all)
        entries = []
        for entry in entries_all:
            if entry.parent_id is not None:
                entry_dict[entry.parent_id].children_fast.append(entry)
            else:
                entries.append(entry)
            # Fill in activity lists.
            if entry.activity_list:
                entry.activities = []
                # Upcoming activities only (end_time in the future).
                activities = db.session.query(Activity)\
                    .filter(Activity.end_time > datetime.datetime.now())\
                    .order_by("start_time").all()
                for activity in activities:
                    url = url_for('activity.get_activity',
                                  activity_id=activity.id)
                    entry.activities.append(
                        NavigationEntry(entry, activity.nl_name,
                                        activity.en_name, url, None, False,
                                        False, 0, activity.till_now()))
        return entries

    @staticmethod
    def can_view(entry):
        """
        Check whether the current user can view the entry.

        Note: currently only works with pages.
        """
        if entry.external or entry.activity_list or not entry.page:
            return True
        return page_service.can_user_read_page(entry.page, current_user)

    @staticmethod
    def remove_unauthorized(entries):
        """Return a copy of `entries` with unviewable entries removed."""
        authorized_entries = list(entries)
        for entry in entries:
            if not NavigationAPI.can_view(entry):
                authorized_entries.remove(entry)
        return authorized_entries

    @staticmethod
    def get_navigation_backtrack():
        """Render the breadcrumb trail from the root down to the current entry."""
        backtrack = []
        tracker = NavigationAPI.current_entry()
        while tracker:
            backtrack.append(tracker)
            tracker = tracker.parent
        backtrack.reverse()
        return render_template('navigation/view_backtrack.htm',
                               backtrack=backtrack)

    @staticmethod
    def alphabeticalize(parent_entry):
        """Re-number the children of `parent_entry` alphabetically by
        their Dutch title (positions are set but not committed here)."""
        entries = NavigationEntry.query\
            .filter(NavigationEntry.parent_id == parent_entry.id)\
            .order_by(NavigationEntry.nl_title)\
            .all()
        position = 1
        for entry in entries:
            entry.position = position
            position += 1
| 2.21875 | 2 |
autofit/plot/__init__.py | vishalbelsare/PyAutoFit | 0 | 12785262 | from autofit.plot.mat_wrap.wrap.wrap_base import Units
from autofit.plot.mat_wrap.wrap.wrap_base import Figure
from autofit.plot.mat_wrap.wrap.wrap_base import Axis
from autofit.plot.mat_wrap.wrap.wrap_base import Cmap
from autofit.plot.mat_wrap.wrap.wrap_base import Colorbar
from autofit.plot.mat_wrap.wrap.wrap_base import ColorbarTickParams
from autofit.plot.mat_wrap.wrap.wrap_base import TickParams
from autofit.plot.mat_wrap.wrap.wrap_base import YTicks
from autofit.plot.mat_wrap.wrap.wrap_base import XTicks
from autofit.plot.mat_wrap.wrap.wrap_base import Title
from autofit.plot.mat_wrap.wrap.wrap_base import YLabel
from autofit.plot.mat_wrap.wrap.wrap_base import XLabel
from autofit.plot.mat_wrap.wrap.wrap_base import Legend
from autofit.plot.mat_wrap.wrap.wrap_base import Output
from autofit.plot.mat_wrap.mat_plot import MatPlot1D
from autofit.plot.mat_wrap.include import Include1D
from autofit.plot.mat_wrap.visuals import Visuals1D
from autofit.plot.mat_wrap.include import Include2D
from autofit.plot.mat_wrap.visuals import Visuals2D
from autofit.plot.samples_plotters import SamplesPlotter
from autofit.plot.dynesty_plotter import DynestyPlotter
from autofit.plot.ultranest_plotter import UltraNestPlotter
from autofit.plot.emcee_plotter import EmceePlotter
from autofit.plot.zeus_plotter import ZeusPlotter
from autofit.plot.pyswarms_plotter import PySwarmsPlotter | 1.4375 | 1 |
tests/test_spyd/test_authentication/test_services/test_vanilla/test_auth_success.py | DanSeraf/spyd | 4 | 12785263 | import unittest
from spyd.authentication.services.vanilla.auth_success import VanillaAuthSuccess
class TestVanillaAuthSuccess(unittest.TestCase):
    """Unit tests for VanillaAuthSuccess group naming."""
    def setUp(self):
        # auth domain 'localhost', account name 'chasm'
        self.instance = VanillaAuthSuccess('localhost', 'chasm')
    def test_get_group_names(self):
        '''The provider exposes both the domain group and the user group.'''
        group_names = self.instance.group_provider.get_group_names()
        self.assertEqual(group_names, ('localhost.auth', 'chasm@localhost'))
    def test_repr(self):
        '''repr identifies the provider by user@domain.'''
        self.assertEqual(repr(self.instance.group_provider), '<VanillaGroupProvider chasm@localhost>')
| 2.484375 | 2 |
your_projects/aymen_api_music/main.py | kdj309/H4ckT0b3rF3st-2k21 | 23 | 12785264 | import tekore as tk
# Obtain an app-level (client credentials) token and build the Spotify client.
# SECURITY(review): the client id/secret are hard-coded in source; they should
# be loaded from the environment or a config file instead of being committed.
app_token = tk.request_client_token("b516728497b34264afab4e995b4e2569", "<KEY>")
spotify = tk.Spotify(app_token)
def menu():
    """Interactive console menu: ask for a search category and query,
    run the Spotify track search, and print the results.

    NOTE(review): invalid input re-enters menu() recursively (see the else
    branch); repeated bad input grows the call stack, harmless interactively.
    """
    print("search for:\n1. Artist\n2. Album\n3. Track ")
    num = input("type your input there : ")
    if num == "1":
        artist_name = input("what's the artist name : ")
        # 'artist:' field filter restricts the track search to this artist
        searh_string = "artist:" + artist_name
        artists, = spotify.search(searh_string, types=('track',), limit=50)
        print_article(artists)
    elif num == "2":
        album_name = input("what's the album name : ")
        # 'album:' field filter restricts the track search to this album
        searh_string = "album:" + album_name
        album, = spotify.search(searh_string, types=('track',), limit=50)
        print_article(album)
    elif num == "3":
        track_name = input("what's the track name : ")
        tracks, = spotify.search(track_name, types=('track',), limit=50)
        print_article(tracks)
    else:
        # Unrecognized choice: tell the user and start over.
        print("what did you just type? try again!")
        menu()
def print_article(element):
    """Print a fixed-width table (popularity, name, artist) for a Spotify
    paging result object (anything exposing .items of track-like objects)."""
    row_fmt = "{:<10} {:<70} {:<40}"
    print(row_fmt.format("popularity", "name", "artist"))
    for track in element.items:
        print(row_fmt.format(track.popularity, track.name, track.artists[0].name))
# Entry point: start the interactive search menu.
if __name__ == '__main__':
    menu()
| 3.5 | 4 |
rasa_core/channels/rasa_chat.py | tupac56/rasa_core | 0 | 12785265 | <filename>rasa_core/channels/rasa_chat.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import requests
from flask import Blueprint, request, jsonify, abort
from flask_cors import CORS, cross_origin
from rasa_core.channels import CollectingOutputChannel
from rasa_core.channels.channel import UserMessage
from rasa_core.channels.rest import HttpInputComponent
logger = logging.getLogger(__name__)
class RasaChatInput(HttpInputComponent):
    """Chat input channel for Rasa Platform.

    Exposes a small Flask blueprint with a health check and a /send endpoint;
    every request is authenticated against the platform's /users/me API.
    """

    def __init__(self, host):
        # Base URL of the Rasa Platform Admin API, e.g. "https://host:port".
        self.host = host

    def _check_token(self, token):
        """Validate *token* against the admin API; return the user dict or None."""
        url = "{}/users/me".format(self.host)
        headers = {"Authorization": token}
        result = requests.get(url, headers=headers)
        if result.status_code == 200:
            return result.json()
        else:
            logger.info("Failed to check token: {}. "
                        "Content: {}".format(token, request.data))
            return None

    def fetch_user(self, req):
        """Fetch user from the Rasa Platform Admin API.

        Tries the Authorization header first, then a ?token= query parameter;
        aborts the request with HTTP 401 if neither validates.
        """
        if req.headers.get("Authorization"):
            user = self._check_token(req.headers.get("Authorization"))
            if user:
                return user
        user = self._check_token(req.args.get('token', default=None))
        if user:
            return user
        abort(401)

    def blueprint(self, on_new_message):
        """Build the Flask blueprint; *on_new_message* receives each UserMessage."""
        rasa_chat = Blueprint('rasa_chat', __name__)
        CORS(rasa_chat)

        @rasa_chat.route("/", methods=['GET'])
        def health():
            # Liveness probe endpoint.
            return jsonify({"status": "ok"})

        @rasa_chat.route("/send", methods=['GET', 'POST'])
        @cross_origin()
        def receive():
            # Authenticate, then forward the message to the Rasa core handler,
            # keyed by the platform username as the conversation sender id.
            user = self.fetch_user(request)
            msg = request.json["message"]
            on_new_message(UserMessage(msg, CollectingOutputChannel(),
                                       sender_id=user["username"]))
            return jsonify({"status": "ok"})

        return rasa_chat
| 2.203125 | 2 |
project/submissions/avneesh/astar_path.py | naveenmoto/lablet102 | 1 | 12785266 | """
Generates a path on the given occupancy grid (map of
the environment)
"""
import networkx as nx
from grid_loader import Grid
import numpy as np
def euclidean(node1, node2):
    """Straight-line (L2) distance between two 2-D grid nodes."""
    ax, ay = node1
    bx, by = node2
    dx, dy = ax - bx, ay - by
    return (dx * dx + dy * dy) ** 0.5
class AStar:
    """A* path planner over a 2-D occupancy grid, backed by networkx."""
    # Constructor
    def __init__(self):
        self.graph = None       # networkx grid graph (built by load_grid)
        self.grid_res = None    # m / pixel
    def load_grid(self, grid_obj: Grid, occ_thresh = 0.5):
        """
        Load a given Grid object into a networkx graph.
        Edges are given weight 1 (stored as attribute "cost") and occupied
        cells are removed from the graph.
        Parameters:
            - grid_obj: Grid
                Grid object to be loaded for path finding
            - occ_thresh: float (default: 0.5)
                Cells with value >= occ_thresh are considered occupied
                and removed
        Returns:
            - removed_nodes: int
                Number of nodes removed from the grid (occupied cells)
        """
        self.grid_res = grid_obj.grid_res # Useful for translation from px to m and back
        self.graph = nx.grid_2d_graph(grid_obj.w, grid_obj.h)
        removed_nodes = 0
        for i in range(grid_obj.w):
            for j in range(grid_obj.h):
                if grid_obj.grid_data[i, j] >= occ_thresh: # Occupied
                    self.graph.remove_node((i, j))
                    removed_nodes += 1
        # Set edge properties of the graph
        nx.set_edge_attributes(self.graph, {e: 1 for e in self.graph.edges()}, "cost")
        return removed_nodes
    # Return a route of [x, y] points (in metres); start/end are in metres too
    def get_route(self, start, end, heuristic = euclidean, weight = 0.5):
        # Convert metre coordinates to integer pixel nodes of the grid graph.
        start_px = tuple((np.array(start) / self.grid_res).astype(int))
        end_px = tuple((np.array(end) / self.grid_res).astype(int))
        # weight < 1 de-emphasizes the heuristic (more Dijkstra-like search).
        astar_path = nx.astar_path(self.graph, start_px, end_px,
            heuristic=lambda n1, n2: weight*heuristic(n1, n2), weight="cost")
        astar_path = np.array(astar_path)
        # Back to metres for the caller.
        astar_path_m = astar_path * self.grid_res
        return astar_path_m
| 3.453125 | 3 |
riaps-x86runtime/env_setup_tests/WeatherMonitor/TempSensor.py | timkrentz/riaps-integration | 7 | 12785267 | '''
Created on Jan 25, 2017
@author: metelko
'''
# riaps:keep_import:begin
from riaps.run.comp import Component
import logging
import time
import os
# riaps:keep_import:end
class TempSensor(Component):
    """RIAPS component publishing a monotonically increasing fake temperature
    reading on every clock tick (WeatherMonitor example app)."""
    # riaps:keep_constr:begin
    def __init__(self):
        super(TempSensor, self).__init__()
        self.pid = os.getpid()       # logged with every message for debugging
        self.temperature = 65        # initial (fake) temperature reading
        now = time.ctime(int(time.time()))
        self.logger.info("(PID %s)-starting TempSensor, %s" % (str(self.pid),str(now)))
        self.logger.info("Initial temp:%d, %s" % (self.temperature,str(now)))
    # riaps:keep_constr:end
    # riaps:keep_clock:begin
    def on_clock(self):
        """Clock-port handler: bump the reading and publish (timestamp, temp)."""
        now = time.ctime(int(time.time()))
        msg = self.clock.recv_pyobj()   # drain the tick; payload unused
        self.temperature = self.temperature + 1
        msg = str(self.temperature)
        msg = (now,msg)                 # published as (ctime string, temperature string)
        self.logger.info("on_clock(): Temperature - %s, PID %s, %s" % (str(msg[1]),str(self.pid),str(now)))
        self.ready.send_pyobj(msg)
    # riaps:keep_clock:end
    # riaps:keep_impl:begin
    def __destroy__(self):
        """Teardown hook: log the stop time."""
        now = time.time()
        self.logger.info("%s - stopping TempSensor, %s" % (str(self.pid),now))
    # riaps:keep_impl:end
| 2.125 | 2 |
tests/assign.py | fangyuchen86/mini-pysonar | 22 | 12785268 | def f():
    # Test fixture for the mini-pysonar static analyzer (Python 2 syntax).
    # NOTE: `global x` inside g binds the MODULE-level name x, not f's local
    # x, so g(2) does not touch the local below and `print x` shows 0.
    x = 0
    def g(y):
        global x
        x = y
    g(2)
    print x
f()
| 2.984375 | 3 |
boards/iotSDR-IOT_Z7020/iotSDR_IOT/notebooks/.ipynb_checkpoints/iotSDR_Device-checkpoint.py | mfkiwl/iotSDR | 12 | 12785269 | <reponame>mfkiwl/iotSDR
import iotFrontEndCore
import iotSDR_defs as defs
from pynq import MMIO
import pynq.lib.dma
from pynq import Xlnk
from pynq import Overlay
import numpy as np
import threading
import time
from queue import Queue
class iotSDR():
    """Top-level driver for the iotSDR board: owns the two transceiver
    channels (A/B), caches per-channel radio settings, and holds the PL
    DMA handles for sample transfer."""
    def __init__(self,overlay,args):
        """Bit file settings"""
        self.ol = overlay   # PYNQ Overlay with the iotSDR bitstream loaded
        """Cache Settings"""
        # Last-requested settings, keyed by channel (and direction where relevant).
        self._cacheSampleRate = dict()
        self._cacheBandwidth = dict()
        self._cacheFreq = dict()
        self._cacheGain = dict()
        self._checheTxLUT = dict()   # NOTE(review): name is a typo of _cacheTxLUT, kept for compatibility
        """DMA bindings"""
        self.total_words = 2048
        #self.dma = self.ol.axi_dma_0
        #self.xlnk = Xlnk()
        #self.input_buffer = self.xlnk.cma_array(shape=(self.total_words,), dtype=np.uint32)
        """Initialize defs"""
        # One Transceiver object per physical channel; sub-GHz and 2.4 GHz
        # share the same underlying channel object (see createChannel).
        self.chA = self.createChannel(defs.chan_subGHz_A)
        self.chB = self.createChannel(defs.chan_subGHz_B)
        """Initialize DMA """
        self._dma_txrx_a = self.chA.pl_dma_recv
        self._dma_txrx_b = self.chB.pl_dma_recv
    """initialize transceiever"""
    def createChannel(self,chan):
        """Map a channel constant to its Transceiver ("ch1" for A, "ch2" for B).

        Returns None for an unrecognized channel constant.
        """
        if chan == defs.chan_subGHz_A or \
           chan == defs.chan_24GHz_A:
            return iotFrontEndCore.Transceiver("ch1",self.ol)
        if chan == defs.chan_subGHz_B or \
           chan == defs.chan_24GHz_B:
            return iotFrontEndCore.Transceiver("ch2",self.ol)
"""
Sample Rate API
"""
    def setSampleRate(self,direction,chan,rate):
        """Set the sample rate for (direction, chan).

        The requested rate (Hz) is snapped to the nearest supported hardware
        rate via adjustRate(); the actual rate is cached and retrievable with
        getSampleRate().
        """
        rate_act,val = self.adjustRate(rate)
        print("iotSDR Device: Requested rate: {}, Actual Rate: {:.2f} ".format(rate,rate_act))
        if direction == defs.IOTSDR_RX:
            if chan == defs.chan_subGHz_A:
                self.chA.setRxSampleRate(val,"subGHz")
            elif chan == defs.chan_24GHz_A:
                self.chA.setRxSampleRate(val,"24GHz")
            elif chan == defs.chan_subGHz_B:
                self.chB.setRxSampleRate(val,"subGHz")
            elif chan == defs.chan_24GHz_B:
                self.chB.setRxSampleRate(val,"24GHz")
            else:
                print("iotSDR Device: Channel is not correctly selected",chan)
        elif direction == defs.IOTSDR_TX:
            # NOTE(review): the TX branch also calls setRxSampleRate — looks
            # like a copy/paste from the RX branch; confirm whether a
            # TX-specific rate setter exists on Transceiver.
            if chan == defs.chan_subGHz_A:
                self.chA.setRxSampleRate(val,"subGHz")
            elif chan == defs.chan_24GHz_A:
                self.chA.setRxSampleRate(val,"24GHz")
            elif chan == defs.chan_subGHz_B:
                self.chB.setRxSampleRate(val,"subGHz")
            elif chan == defs.chan_24GHz_B:
                self.chB.setRxSampleRate(val,"24GHz")
            else:
                print("iotSDR Device: Channel is not correctly selected",chan)
        else:
            print("iotSDR Device: Diection is not correctly selected",direction)
        self._cacheSampleRate[direction,chan] = rate_act
        #self._cacheSampleRate[chan] = rate
    def getSampleRate(self,direction,chan):
        """Return the last actual sample rate set for (direction, chan), or None."""
        return self._cacheSampleRate.get((direction,chan))
def listSampleRates(self,chan):
"""
0x1 fS = 4000kHz
0x2 fS = 2000kHz
0x3 fS = (4000/3)kHz
0x4 fS = 1000kHz
0x5 fS = 800kHz
0x6 fS = (2000/3)kHz
0x8 fS = 500kHz
0xA fS = 400kHz
"""
# iot channels
if chan < 4:
return [4e6,
2e6,
round((4000/3)*1e3,2),
1e6,800e3,
round((2000/3)*1e3,2),
500e3,
400e3]
# GPS channel
if chan == 4:
return [4.092e6,16.368e6]
def adjustRate(self,rate):
if rate >= 4e6:
return 4e6,0x1
elif rate >= 2e6 and rate < 4e6:
return 2e6,0x2
elif rate >= (4000/3)*1e3 and rate < 2e6:
return round((4000/3)*1e3,2),0x3
elif rate >= 1e6 and rate < (4000/3)*1e3:
return 1e6,0x4
elif rate >= 800e3 and rate < 1e6:
return 800e3,0x5
elif rate >= (2000/3)*1e3 and rate < 800e3:
return round((2000/3)*1e3,2),0x6
elif rate >= 500e3 and rate < (2000/3)*1e3:
return 500e3,0x8
elif rate >= 400e3 and rate < 500e3:
return 400e3,0xA
else:
return 4e6,0x1
"""
Frequency Setting API
"""
    def setFrequency(self,chan,freq):
        """Tune a channel to *freq* (Hz) if it lies in a supported band.

        adjustFreq() returns 0 for out-of-band requests, in which case no
        hardware call is made.  The REQUESTED frequency (not the validated
        one) is cached for getFrequency().
        """
        freq_act = int(self.adjustFreq(freq,chan))
        if freq_act:
            if chan == defs.chan_subGHz_A:
                self.chA.txrx_frequency(5000,freq_act,11)
            elif chan == defs.chan_24GHz_A:
                self.chA.txrx_frequency_24g(5000,freq_act,11)
            elif chan == defs.chan_subGHz_B:
                self.chB.txrx_frequency(5000,freq_act,11)
            elif chan == defs.chan_24GHz_B:
                self.chB.txrx_frequency_24g(5000,freq_act,11)
            print("iotSDR Device: frequency:{} updated for channel:{}".format(freq_act,chan))
        self._cacheFreq[chan] = freq
    def getFrequency(self,chan):
        """Return the last frequency requested for *chan*, or None."""
        return self._cacheFreq.get(chan)
    def adjustFreq(self,freq,chan):
        """Validate *freq* (Hz) against the bands supported by *chan*.

        Supported bands:
          band1 = 389.5 MHz ... 510 MHz   (sub-GHz channels)
          band2 = 779 MHz ... 1020 MHz    (sub-GHz channels)
          band3 = 2400 MHz ... 2483.5 MHz (2.4 GHz channels)

        Returns the frequency unchanged if in-band, 0 if unsupported
        (and prints a diagnostic).  Implicitly returns None for an
        unrecognized channel constant.
        """
        if chan == defs.chan_subGHz_A or chan == defs.chan_subGHz_B:
            if freq >= 389.5e6 and freq <= 510e6:
                return freq
            elif freq > 389.5e6 and freq < 779e6:
                # Gap between the two sub-GHz bands (510-779 MHz).
                print("iotSDR Device: frequency:{} not supported for channel:{}".format(freq,chan))
                return 0
            elif freq >= 779e6 and freq <= 1020e6:
                return freq
            else:
                print("iotSDR Device: frequency:{} not supported for channel:{}".format(freq,chan))
                return 0
        if chan == defs.chan_24GHz_A or chan == defs.chan_24GHz_B:
            if freq >= 2400e6 and freq <= 2483.5e6:
                return freq
            else:
                print("iotSDR Device: frequency:{} not supported for channel:{}".format(freq,chan))
                return 0
    def listFrequencies(self,chan):
        """Return the supported frequency band limits (Hz) for *chan*.

        Sub-GHz channels get a pair of (low, high) bands; 2.4 GHz channels
        get a single (low, high) tuple.  None for unknown channels.
        """
        freqBands = {"subGhzband" : ((389.5e6,510e6),(779e6,1020e6)),
                     "24Ghzband" : (2400e6,2483.5e6)
                    }
        if chan == defs.chan_subGHz_A or chan == defs.chan_subGHz_B:
            return freqBands["subGhzband"]
        if chan == defs.chan_24GHz_A or chan == defs.chan_24GHz_B:
            return freqBands["24Ghzband"]
"""
BW Filter API
"""
def setBandwidth(self,direction,chan,bw):
#radio_rx_bandwidth(self,bw)
self._cacheBandwidth[direction,chan] = bw
def getBandwidth(self,direction,chan):
return self._cacheBandwidth.get((direction,chan))
def listBandwidths(self,direction):
pass
if direction == IOTSDR_RX:
"""
0x0 fBW=160kHz; fIF=250kHz
0x1 fBW=200kHz; fIF=250kHz
0x2 fBW=250kHz; fIF=250kHz
0x3 fBW=320kHz; fIF=500kHz
0x4 fBW=400kHz; fIF=500kHz
0x5 fBW=500kHz; fIF=500kHz
0x6 fBW=630kHz; fIF=1000kHz
0x7 fBW=800kHz; fIF=1000kHz
0x8 fBW=1000kHz; fIF=1000kHz
0x9 fBW=1250kHz; fIF=2000kHz
0xA fBW=1600kHz; fIF=2000kHz
0xB fBW=2000kHz; fIF=2000kHz
"""
pass
if direction == IOTSDR_TX:
pass
"""
Gain Setting API
"""
    def setGain(self,chan,gain):
        """Set TX output power for *chan*, clamped to the valid 0..31 range.

        The clamped value is cached and retrievable with getGain().
        """
        # Clamp to the hardware's 5-bit power range.
        gain_adj = 0 if gain < 0 else 31 if gain > 31 else gain
        if chan == defs.chan_subGHz_A:
            self.chA.radio_tx_power(gain_adj)
        elif chan == defs.chan_24GHz_A:
            self.chA.radio_tx_power_24g(gain_adj)
        elif chan == defs.chan_subGHz_B:
            self.chB.radio_tx_power(gain_adj)
        elif chan == defs.chan_24GHz_B:
            self.chB.radio_tx_power_24g(gain_adj)
        print("iotSDR Device: Gain:{} updated for channel:{}".format(gain_adj,chan))
        self._cacheGain[chan] = gain_adj
    def getGain(self,chan):
        """Return the last clamped gain set for *chan*, or None."""
        return self._cacheGain.get(chan)
    def getGainRange(self,chan):
        """Return the (min, max) settable gain; identical for all channels."""
        return (0,31)
"""
tramsmitter Fixed IQ LUT API
"""
    def setTxLUT(self,chan,IQsamples):
        """Load a fixed IQ sample look-up table into the TX path of *chan*
        and cache it for later retrieval via getTxLUT()."""
        if chan == defs.chan_subGHz_A or chan == defs.chan_24GHz_A:
            self.chA.set_tx_LUT(IQsamples)
        elif chan == defs.chan_subGHz_B or chan == defs.chan_24GHz_B:
            self.chB.set_tx_LUT(IQsamples)
        self._checheTxLUT[chan] = IQsamples
def getTxLUT(self,chan):
return self._checheTxLUT[chan] | 2.4375 | 2 |
cogs/help.py | aNOOBisTheGod/upgraded-discord-bot | 3 | 12785270 | <reponame>aNOOBisTheGod/upgraded-discord-bot
import discord
from discord.ext import commands
from discord.errors import Forbidden
async def send_embed(ctx, embed):
    """Send *embed* to the invoking channel with graceful permission fallback.

    If the bot lacks embed permission it tries a plain-text hint in the same
    channel; if even that is forbidden, it DMs the author instead.
    """
    try:
        await ctx.send(embed=embed)
    except Forbidden:
        try:
            await ctx.send("Hey, seems like I can't send embeds. Please check my permissions :)")
        except Forbidden:
            # Last resort: direct-message the command author.
            await ctx.author.send(
                f"Hey, seems like I can't send any message in {ctx.channel.name} on {ctx.guild.name}\n"
                f"May you inform the server team about this issue? :slight_smile: ", embed=embed)
class Help(commands.Cog):
    """
    Sends this help message
    """
    def __init__(self, bot):
        self.bot = bot
    @commands.command()
    async def help(self, ctx, *input):
        """Shows all modules of that bot"""
        # Hard-coded bot metadata displayed in the embeds.
        prefix = '!!'
        version = '0.0.5'
        owner = 661157502989238282     # Discord user id of the bot owner
        owner_name = 'aNOOBis#1420'    # NOTE(review): currently unused
        if not input:
            # No argument: list every cog plus loose commands.
            try:
                owner = ctx.guild.get_member(owner).mention
            except AttributeError as e:
                # get_member returned None (owner not in guild) or no guild;
                # fall back to the raw id.  NOTE(review): `owner = owner` is a no-op.
                owner = owner
            emb = discord.Embed(title='Commands and modules', color=0x1af1ad,
                                description=f'Use `{prefix}help <module>` to gain more information about that module '
                                            f':smiley:\n')
            cogs_desc = ''
            for cog in self.bot.cogs:
                # Each cog's class docstring serves as its description.
                cogs_desc += f'`{cog}` {self.bot.cogs[cog].__doc__}\n'
            emb.add_field(name='Modules', value=cogs_desc, inline=False)
            commands_desc = ''
            for command in self.bot.walk_commands():
                # Commands without a cog are listed separately.
                if not command.cog_name and not command.hidden:
                    commands_desc += f'{command.name} - {command.help}\n'
            if commands_desc:
                emb.add_field(name='Not belonging to a module', value=commands_desc, inline=False)
            emb.set_footer(text=f"Bot is running {version}")
        elif len(input) == 1:
            # One argument: show the commands of the matching cog (case-insensitive).
            for cog in self.bot.cogs:
                if cog.lower() == input[0].lower():
                    emb = discord.Embed(title=f'{cog} - Commands', description=self.bot.cogs[cog].__doc__,
                                        color=0xdd64e8)
                    for command in self.bot.get_cog(cog).get_commands():
                        if not command.hidden:
                            emb.add_field(name=f"`{prefix}{command.name}`", value=command.help, inline=False)
                    break
            else:
                # for-else: no cog matched the requested name.
                emb = discord.Embed(title="What's that?!",
                                    description=f"I've never heard from a module called `{input[0]}` before :scream:",
                                    color=0xff002a)
        elif len(input) > 1:
            emb = discord.Embed(title="That's too much.",
                                description="Please request only one module at once :sweat_smile:",
                                color=0xff002a)
        else:
            emb = discord.Embed(title="It's a magical place.",
                                description="Something went wrong",
                                color=0xff002a)
        # sending reply embed using our own function defined above
        await send_embed(ctx, emb)
def setup(bot):
    # discord.py extension entry point: register the Help cog.
    bot.add_cog(Help(bot))
| 2.359375 | 2 |
generate_samples.py | mnielsen/iGAN | 14 | 12785271 | <reponame>mnielsen/iGAN
from __future__ import print_function
import argparse, iGAN_predict
from pydoc import locate
from lib import utils
from lib.rng import np_rng
import cv2
import numpy as np
from lib.theano_utils import floatX
import requests
from PIL import Image
from StringIO import StringIO
def parse_args():
    """Parse command-line options for random sample generation.

    Returns an argparse.Namespace; model_file and output_image may be None
    and are filled with derived defaults by the __main__ block below.
    """
    parser = argparse.ArgumentParser(description='generated random samples (dcgan_theano)')
    parser.add_argument('--model_name', dest='model_name', help='the model name', default='outdoor_64', type=str)
    parser.add_argument('--model_type', dest='model_type', help='the generative models and its deep learning framework', default='dcgan_theano', type=str)
    parser.add_argument('--framework', dest='framework', help='deep learning framework', default='theano')
    parser.add_argument('--model_file', dest='model_file', help='the file that stores the generative model', type=str, default=None)
    parser.add_argument('--output_image', dest='output_image', help='the name of output image', type=str, default=None)
    args = parser.parse_args()
    return args
def interpolate(url0, url1, output_image):
    """Render an 8x8 grid interpolating between the latents of two images.

    Downloads the two images, inverts each into the handbag DCGAN's latent
    space, linearly blends (with extrapolation beyond both endpoints), and
    writes the generated grid to *output_image*.

    NOTE(review): assumes a 'pics/' directory exists for the downloaded
    copies — verify before calling.
    """
    model_class = locate('model_def.dcgan_theano')
    model_file = './models/handbag_64.dcgan_theano'
    model = model_class.Model(
        model_name="handbag_64", model_file=model_file)
    # save images
    for j, url in enumerate([url0, url1]):
        r = requests.get(url)
        i = Image.open(StringIO(r.content))
        i.save("pics/url"+str(j)+".jpg")
    # Invert both images into 100-dim latent vectors.
    z0 = iGAN_predict.find_latent(url=url0).reshape((100,))
    z1 = iGAN_predict.find_latent(url=url1).reshape((100,))
    # 64 blend coefficients from -0.5 to 1.5 in steps of 1/32 (extrapolates
    # past both endpoints); the -0.0001 guards against float end overshoot.
    delta = 1.0/32.0
    arrays = [p*z0+(1-p)*z1 for p in np.arange(-16*delta, 1+16*delta-0.0001, delta)]
    z = np.stack(arrays)
    print(z.shape)
    zmb = floatX(z[0 : 64, :])
    xmb = model._gen(zmb)
    samples = [xmb]
    samples = np.concatenate(samples, axis=0)
    print(samples.shape)
    samples = model.inverse_transform(samples, npx=model.npx, nc=model.nc)
    samples = (samples * 255).astype(np.uint8)
    # generate grid visualization
    im_vis = utils.grid_vis(samples, 8, 8)
    # write to the disk (OpenCV expects BGR channel order)
    im_vis = cv2.cvtColor(im_vis, cv2.COLOR_BGR2RGB)
    cv2.imwrite(output_image, im_vis)
if __name__ == '__main__':
    # Generate a 14x14 grid of random samples from the selected model.
    args = parse_args()
    if not args.model_file: #if model directory is not specified
        args.model_file = './models/%s.%s' % (args.model_name, args.model_type)
    if not args.output_image:
        args.output_image = '%s_%s_samples.png' % (args.model_name, args.model_type)
    for arg in vars(args):
        print('[%s] =' % arg, getattr(args, arg))
    # initialize model and constrained optimization problem
    model_class = locate('model_def.%s' % args.model_type)
    model = model_class.Model(model_name=args.model_name, model_file=args.model_file)
    # generate samples in batches from uniform latent noise
    #def gen_samples(self, z0=None, n=32, batch_size=32, use_transform=True):
    samples = []
    n = 32
    batch_size = 32
    z0 = np_rng.uniform(-1., 1., size=(n, model.nz))
    n_batches = int(np.ceil(n/float(batch_size)))
    for i in range(n_batches):
        zmb = floatX(z0[batch_size * i:min(n, batch_size * (i + 1)), :])
        xmb = model._gen(zmb)
        samples.append(xmb)
    samples = np.concatenate(samples, axis=0)
    samples = model.inverse_transform(samples, npx=model.npx, nc=model.nc)
    samples = (samples * 255).astype(np.uint8)
    #samples = model.gen_samples(z0=None, n=196, batch_size=49, use_transform=True)
    # generate grid visualization
    # NOTE(review): grid_vis is asked for 14x14=196 tiles but only n=32
    # samples are generated above — confirm intended grid size.
    im_vis = utils.grid_vis(samples, 14, 14)
    # write to the disk
    im_vis = cv2.cvtColor(im_vis, cv2.COLOR_BGR2RGB)
    cv2.imwrite(args.output_image, im_vis)
    print('samples_shape', samples.shape)
    print('save image to %s' % args.output_image)
| 2.578125 | 3 |
tasfie/migrations/0007_auto_20190820_0007.py | mablue/Specialized-Procurement-and-Sales-Management-System-for-East-Azarbaijan-Gas-Company | 30 | 12785272 | <reponame>mablue/Specialized-Procurement-and-Sales-Management-System-for-East-Azarbaijan-Gas-Company<filename>tasfie/migrations/0007_auto_20190820_0007.py
# Generated by Django 2.2.4 on 2019-08-20 00:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter the `bank` field on the `tasfie` model
    (CharField, max_length 500, Persian default/verbose name)."""
    dependencies = [
        ('tasfie', '0006_auto_20190818_1448'),
    ]
    operations = [
        migrations.AlterField(
            model_name='tasfie',
            name='bank',
            field=models.CharField(default='ملت', max_length=500, verbose_name='نام بانک'),
        ),
    ]
| 1.054688 | 1 |
dashunderscoredash/cogs/develop.py | rtao258/-_- | 1 | 12785273 | # -*- coding: utf-8 -*-
from discord import Embed, Member
from discord.ext.commands import command, Cog, Context
import time
from dataclasses import dataclass
@dataclass
class Job:
    """A job posting rendered by the `jobs` command below."""
    # NOTE(review): 'responsibilites' is misspelled, but the jobs() command
    # accesses it by this name — keep both in sync if renaming.
    title: str
    salary: float
    responsibilites: str
    requirements: str
# Current job postings displayed by the `jobs` command
# (title, salary in $/hr, responsibilities, requirements).
JOBS = [
    Job(
        "Backend Specialist",
        0.0,
        "Design, develop, and maintain a persistent data storage solution for the `-_-` bot. "
        "Also, seek a more reliable hosting platform on which to deploy the bot.",
        "Must have some experience with backend development, whether in a web development "
        "context or otherwise. Must have solid understanding of Python programming and "
        "working understanding of git and GitHub. Experience with discord.py and asynchronous "
        "programming is beneficial but not required; on-the-job training is available.",
    ),
    Job(
        "Discord Bot Developer",
        0.0,
        "Skip the tutorial - work right at the cutting edge! Develop the newest and coolest "
        "features for our very own `-_-` bot. Enjoy both an educational and rewarding work "
        "environment. Aditionally, perform a basic beta testing and quality assurance role. ",
        "Must have proficient level of Python understanding and basic level of git/GitHub "
        "experience. No experience with discord.py necessary. Significant on-the-job training "
        "is available. Specify additional qualifications upon application.",
    ),
    Job(
        "Senior Marketing Manager",
        0.0,
        "Encourage more server members to use the `-_-` bot on a regular basis. Coordinate with "
        "frequent users to gather and prioritize requested features. Recruit more developer to "
        "fill vacancies on the development team. Communicate results directly with the "
        "development team.",
        "Must have excellent communication and teamwork skills. No technical skills required. "
        "An excellent entry-level position for aspiring members.",
    ),
]
class Develop(Cog):
    """Tools and utilites for bot developers."""
    def __init__(self, bot):
        self.bot = bot
        self.start_time = time.time()   # used by `stats` to compute uptime
    @property
    def latency(self):
        """Returns the latency in milliseconds"""
        return round(1000 * self.bot.latency, 3)
    @command()
    async def ping(self, ctx: Context):
        """Ping the bot for a response and latency."""
        await ctx.send(embed=Embed(
            title="Pong!",
            description=f"Latency: {self.latency} ms",
        ))
    @command()
    async def stats(self, ctx: Context):
        """Returns some stats about this bot."""
        time_delta = time.time() - self.start_time
        result = Embed(
            title="-_- Bot Stats",
            description=f"Up time: {round(time_delta, 3)} s\n"
                        f"Latency: {self.latency} ms\n"
                        f"Guilds: {len(self.bot.guilds)}"
        )
        await ctx.send(embed=result)
    @command()
    async def demoji(self, ctx: Context, emoji: str):
        """Get the Unicode codepoint of an emoji (or any character)."""
        # NOTE(review): ord() handles a single code point only; multi-codepoint
        # emoji (skin tones, ZWJ sequences) will raise TypeError here.
        hexpoint = str(hex(ord(emoji)))[2:]
        codepoint = "\\U" + "0" * (8 - len(hexpoint)) + hexpoint
        await ctx.send(f"`{codepoint}`")
    @command()
    async def develop(self, ctx: Context, member: Member = None):
        """Grant the developer role to *member* (defaults to the caller)."""
        # default to person who called the command
        if member is None:
            if not isinstance(ctx.author, Member):
                # e.g. invoked from a DM: author is a User, not a guild Member.
                await ctx.send("That's a user but not a member. "
                               "Please try again or report a bug.")
                return
            member = ctx.author
        # Hard-coded id of this server's developer role.
        developer_role = ctx.guild.get_role(731262064391356487)
        # member already has role
        if developer_role in member.roles:
            await ctx.send(f"{member.mention}, you're already a {developer_role.mention}! "
                           f"Congratulations!")
            return
        await member.add_roles(developer_role)
        await ctx.send(f"Congratulations, {member.mention}, you are now an official "
                       f"{developer_role.mention} member! Please see `CONTRIBUTING.md` "
                       f"in `-_- source` to get started. Please also reach out to another "
                       f"developer at your earliest convenience. ")
    @command(aliases=['job'])
    async def jobs(self, ctx: Context):
        """List current job openings from the JOBS table."""
        if JOBS:
            description = ("Exciting job offers are currently available!\n"
                           "To apply, do `-_- develop`, then contact any developer.\n\n")
            description += "\n\n".join([
                f"**{job.title}**\n"
                f"*Salary*: ${job.salary}/hr\n"
                f"*Responsibilities*: {job.responsibilites}\n"
                f"*Requirements*: {job.requirements}"
                for job in JOBS
            ])
        else:
            description = ("No jobs are available at this time.\n"
                           "Check back later for updates!")
        await ctx.send(embed=Embed(
            title="-_- Job Opportunities",
            description=description,
        ))
))
def setup(bot):
    # discord.py extension entry point: register the Develop cog.
    bot.add_cog(Develop(bot))
| 2.875 | 3 |
main.py | yptheangel/human-skin-segmentation | 0 | 12785274 | import cv2
from model import PersonSegmentation
if __name__ == '__main__':
    # change 'cpu' to 'cuda' if you have pytorch cuda and your discrete GPU has enough VRAM
    # output size will autoscale to fit input image aspect ratio
    # if you want full image resolution set 'is_resize=False'
    ps = PersonSegmentation('cpu', is_resize=True, resize_size=480)
    filename = r"test_image.png"
    # Person mask first, then skin pixels within the person region.
    seg_map = ps.person_segment(filename)
    frame, frame_original = ps.decode_segmap(seg_map, filename)
    # skin_frame, skin2img_ratio = ps.skin_segment(frame)
    skin_frame, skin2img_ratio = ps.skin_segment_pro(frame)
    print(f"Skin to Image Percentage: {100 * skin2img_ratio:.2f}%")
    # Stack original / person-segmented / skin-segmented vertically for display.
    cv2.imshow("Original vs Person Seg vs Skin segmented", cv2.vconcat([frame_original, frame, skin_frame]))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 3 | 3 |
def test_version():
    """The packaged version string must be importable and non-empty.

    Fix: the original first line was corrupted by dataset metadata fused onto
    the `def` statement, making the file unparseable.
    """
    from datadog_api_client.version import __version__
    assert __version__
| 1.351563 | 1 |
configs/CCNuE/config/AnalysisConfig.py | ryounsumiko/CC-NuE-XSec | 0 | 12785276 | """
AnalysisConfig.py:
Centralized configuration for the nu_e CCQE analysis:
signal definitions (nu vs. anti-nu),
file locations,
etc.
Original author: <NAME> (<EMAIL>)
January 2014
"""
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True  # keep ROOT from consuming argparse's argv
#ROOT.gErrorIgnoreLevel = ROOT.kWarning
ROOT.gROOT.SetBatch()  # batch mode: no GUI canvases (needed for grid jobs)
import math
import argparse
import os
import sys
import pprint
import re
# Default BlueArc data area for the current user's nu_e files.
BLUEARC = "/minerva/data/users/{}/nu_e".format(os.environ["USER"])
# Grid output staging area.  NOTE(review): 'GIRDOUTPUT' is a typo of
# GRIDOUTPUT; kept as-is since other modules may reference this name.
GIRDOUTPUT ="/pnfs/minerva/persistent/"
# Sideband sample names used by the selection/fitting machinery.
SIDEBANDS=["Excess_High_Inline","Excess_Low_Inline","Pi0"]
class _AnalysisConfig(object):
    """Centralized configuration object for the nu_e CCQE analysis.

    Constructed from keyword arguments layered over Defaults; every supplied
    key becomes an attribute and is tracked in _config_keys.
    """
    Defaults = {
        "data_types": ["data", "mc",],
    }
    ALLOWED_HELICITIES = ["FHC", "RHC"]
    def __init__(self, **kwargs):
        params = _AnalysisConfig.Defaults.copy()
        params.update(kwargs)
        self._config_keys = set()
        for key, value in params.items():
            self._config_keys.add(key)
            setattr(self, key, value)
        # Apply the --data_only / --mc_only shortcuts by filtering data_types.
        # NOTE(review): assumes 'mc_only' and 'data_only' are always present
        # in kwargs (they are set by the argparse options below) — confirm.
        retained_dts = []
        for dt in self.data_types:
            if (self.mc_only and "data" in dt.lower()) or (self.data_only and "mc" in dt.lower()):
                continue
            retained_dts.append(dt)
        self.data_types = retained_dts
    def __repr__(self):
        # NOTE(review): reads the module-level name 'AnalysisConfig' —
        # presumably a singleton instance created after this class definition
        # (not visible in this chunk); verify it exists before relying on repr.
        my_dict = dict([ (k, getattr(AnalysisConfig, k)) for k in AnalysisConfig._config_keys ])
        return pprint.pformat(my_dict)
#### properties ####
#@property
#def bknd_constraint_method(self):
# return BKND_CONSTRAINT_METHOD
@property
def helicity(self):
return self._helicity
@helicity.setter
def helicity(self, value):
value_upper = value.upper()
if value_upper not in _AnalysisConfig.ALLOWED_HELICITIES:
raise NameError("Allowed helicities are '%s', not '%s'" % (value, _AnalysisConfig.ALLOWED_HELICITIES))
if "helicity" not in self._config_keys:
self._config_keys.add("helicity")
self._helicity = value_upper
    # Disabled POT-bookkeeping API, kept for reference until POT data exists
    # for processings other than Resurrection.
    #@property
    #def POT(self):
    #    if self.processing_pass == "Resurrection":
    #        return POT
    #    else:
    #        raise Exception("Don't have POT data for processings other than Resurrection!")
    #@property
    #def POT_string(self, precision=2):
    #    exponent = int(math.log10(self.POT["data"]))
    #    mantissa = self.POT["data"] / 10**exponent
    #    fmt_string = "%%.%(precision)df #times 10^{%%d} P.O.T." % {"precision": precision}
    #    return fmt_string% (mantissa, exponent)
    # @property
    # def data_MC_POT_ratio(self):
    #     return self.POT["data"] / self.POT["MC"]
    # @property
    # def processing_pass(self):
    #     return self._processing_pass
    # @processing_pass.setter
    # def processing_pass(self, value):
    #     value_cap = value.lower().capitalize()
    #     if value_cap not in _AnalysisConfig.ALLOWED_PROCESSING_PASSES:
    #         raise NameError("Allowed processing passes are '%s', not '%s'" % (value, _AnalysisConfig.ALLOWED_PROCESSING_PASSES))
    #     if "processing_pass" not in self._config_keys:
    #         self._config_keys.add("processing_pass")
    #     self._processing_pass = value_cap
    @property
    def right_sign_electron_pdg(self):
        """ Sign of the PDG code for "right sign" in this configuration """
        return 1 if self.helicity == "FHC" else -1 # electron is +11; positron is -11
    #### public interface ####
    def DataTypeNtupleList(self):
        """Yield (data_type, path) for each configured data type's ntuple list.

        NOTE(review): relies on module-level NTUPLE_FILENAME_FORMAT and
        NTUPLE_PATH, which are not defined in this chunk — presumably defined
        elsewhere in the module; verify before use.
        """
        filename_config = {
            "proc_pass": self.processing_pass,
            "helicity": self.helicity,
        }
        for dt in self.data_types:
            filename_config.update( { "data_type": dt } )
            filename = NTUPLE_FILENAME_FORMAT % filename_config
            # print " searching for filename:", filename
            fullpaths = []
            # print "looking here:", NTUPLE_PATH
            for root, dirs, files in os.walk(NTUPLE_PATH):
                # print "considering filenames:", files
                if filename in files:
                    fullpaths.append(os.path.join(root, filename))
            # Exactly one match -> yield it; several -> ambiguous, fail loudly;
            # none -> silently skip this data type.
            if len(fullpaths) == 1:
                # print "Found ntuple list for specification '%s':" % dt, fullpaths[-1]
                yield dt, fullpaths[-1]
            elif len(fullpaths) > 1:
                raise Exception("Multiple matches for data type specification '%s': %s" % (dt, fullpaths))
            else:
                continue
    def FilePath(self, top_dir,tier_tag, playlist, data_tag, type_tag):
        """Assemble "{top_dir}/{tier_tag}_{data_tag}{playlist}_{type_tag}"."""
        return "{}/{}_{}{}_{}".format(top_dir,tier_tag, data_tag,
                                      playlist, type_tag)
        # print signal_defn
        # signal_defn = signal_defn if signal_defn is not None else self.signal_defn
        # signal_defn = SIGNAL_DEFN_TEXT[signal_defn]
        # print signal_defn
        # path = os.path.join(TOP_DIR, signal_defn, dirname)
        # if subdir:
        #     path = os.path.join(path, subdir)
        # if helicity:
        #     path = os.path.join(path, self.helicity)
        # return path
    # The following helpers build the canonical file paths for each analysis
    # stage; is_output selects output_dir vs input_dir, and self.testing
    # appends a "test" marker to the tier tag.
    def SelectionHistoPath(self, playlist, is_data, is_output = True):
        """Path of the kinematic-distribution (event selection) histogram file."""
        return self.FilePath(self.output_dir if is_output else self.input_dir,
                             "kin_dist"+("test" if self.testing else ""), playlist,
                             "data" if is_data else "mc",
                             self.selection_tag+"_"+self.ntuple_tag+
                             ("_"+str(self.count[0]) if self.count[0] is not None else "")+
                             ".root")
    def CutStudyPath(self, playlist, is_data, is_output = True):
        """Path of the cut-study histogram file."""
        return self.FilePath(self.output_dir if is_output else self.input_dir,
                             "cut_study"+("test" if self.testing else ""), playlist,
                             "data" if is_data else "mc",
                             self.selection_tag+"_"+self.ntuple_tag+
                             ("_"+str(self.count[0]) if self.count[0] is not None else "")+
                             ".root")
    def TruthHistoPath(self,playlist,is_output = True):
        """Path of the truth-level (MC only) distribution file."""
        return self.FilePath(self.output_dir if is_output else self.input_dir,
                             "truth_dist"+("test" if self.testing else ""),playlist,
                             "mc", self.selection_tag+"_"+self.ntuple_tag+("_"+str(self.count[0]) if self.count[0] is not None else "")+
                             ".root")
    def UnfoldedHistoPath(self,playlist,tag,is_output=True):
        """Path of the unfolded-distribution file; *tag* labels the unfolding."""
        return self.FilePath(self.output_dir if is_output else self.input_dir,
                             "unfolded"+("test" if self.testing else ""),playlist,
                             "", self.selection_tag+"_"+self.ntuple_tag+"_"+tag+
                             ".root")
    def XSecHistoPath(self,playlist,is_output=True):
        """Path of the extracted cross-section file."""
        return self.FilePath(self.output_dir if is_output else self.input_dir,
                             "xsec"+("test" if self.testing else ""),playlist,
                             "", self.selection_tag+"_"+self.ntuple_tag+
                             ".root")
    def BackgroundFitPath(self, playlist, tag, is_output = True):
        """Path of the background-fit result file; *tag* labels the fit."""
        return self.FilePath(self.output_dir if is_output else self.input_dir,
                             "bkgfit", playlist, "" , tag+"_"+self.selection_tag+"_"+self.ntuple_tag+".root")
    def PlotPath(self, plot_name, sideband,tag=""):
        """Path (sans extension) of a plot for *plot_name* in *sideband*."""
        return self.FilePath(self.output_dir,"plot/"+plot_name, sideband, "" , self.selection_tag+"_"+tag)
#### entry point ####
# Command-line interface for the event-selection / histogram-production job.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-p", "--playlist",
                    dest="playlist",
                    help="Process given playlists."
                    )
parser.add_argument("--grid",
                    action="store_true",
                    default = False,
                    help = "Run macro on grid, Input/Output path must be updated to avoid direct access to BlueArc"
                    )
parser.add_argument("-d", "--data_types",
                    dest="data_types",
                    action="append",
                    help="Data types to process. Supply this once for every data type you want to use.",
                    default=argparse.SUPPRESS,
                    )
parser.add_argument("--use-sideband", "--use_sideband",
                    dest="sidebands",
                    nargs='*',
                    help="Use this sideband rather than defalut. (If you use this option at all, you must use it to specify ALL the sidebands you want.) ",
                    default=SIDEBANDS,
                    )
parser.add_argument("--data_only", "--data-only",
                    dest="data_only",
                    action="store_true",
                    help="Shortcut option to process only data from the 'data_types' option. If you supply both '--data_only' and '--mc_only', '--mc-only' takes precedence.",
                    default=False,
                    )
parser.add_argument("--mc_only", "--mc-only",
                    dest="mc_only",
                    action="store_true",
                    help="Shortcut option to process only MC from the 'data_types' option. If you supply both '--data_only' and '--mc_only', '--mc-only' takes precedence.",
                    default=False,
                    )
parser.add_argument("--pc",
                    dest="is_pc",
                    action="store_true",
                    help="running over particle cannon sample",
                    default=False,
                    )
parser.add_argument("--signal",
                    dest="signal",
                    action="store_true",
                    help="Use only the extracted true signal event samples.",
                    default=argparse.SUPPRESS,
                    )
parser.add_argument("-t", "--test", "--testing",
                    dest="testing",
                    action="store_true",
                    help="Use a smaller sample for testing purposes. ",
                    default=False
                    )
parser.add_argument("-o", "--output",
                    dest = "output_dir",
                    help="Use alternate location for output file.",
                    default=BLUEARC
                    )
parser.add_argument("-i", "--input",
                    dest = "input_dir",
                    help="Use alternate location for input files other than ntuple.",
                    default=BLUEARC
                    )
parser.add_argument("--ntuple_tag", "--ntuple-tag",
                    help="Use ntuple playlist tagged by given tag.",
                    default="LLR"
                    )
parser.add_argument("--selection_tag","--selection-tag",
                    help="Use event selection histograms tagged by given tag.",
                    default="collab1"
                    )
parser.add_argument("--bkgTune_tag","--bkgTune-tag",
                    help="Use event selection histograms tagged by given tag.",
                    default="Global"
                    )
parser.add_argument("--count",
                    help="process arg1 subruns starting from arg0 entry of playlist.",
                    type = int,
                    nargs = 2,
                    default = [None,None])
parser.add_argument("--cal_POT","--cal-POT",
                    help="recount POT even if POT info is available",
                    dest = "POT_cal",
                    action="store_true",
                    default=False)
parser.add_argument("--only_cal_POT","--only-cal-POT",
                    help="do not run selection but only count POT",
                    dest = "run_reco",
                    action="store_false",
                    default=True)
parser.add_argument("--exclude_universes","--exclude-universes",
                    help="do not want some systematics universes, list their ShortName()",
                    nargs="*",
                    )
parser.add_argument("--skip_2p2h","--skip_2p2h",
                    help="do not want 2p2h events,(use this when you are going to run delicate 2p2h sample.)",
                    action="store_true",
                    default=False)
parser.add_argument("--truth",
                    help="run truth loop: more correct efficiency demominator",
                    action="store_true",
                    default=False)
parser.add_argument("--extra_weighter",
                    help="Name of extra weighter you want to use",
                    default=None)
options = parser.parse_args()
# Resolve the conflicting shortcut flags: --mc-only takes precedence.
if options.data_only and options.mc_only:
    options.data_only = False
#if options.playlist is None:
    #print "Please specify a playlist."
    #sys.exit(1)
# If only cal POT, doesn't make sense to read from record.
if not options.run_reco:
    options.POT_cal=True
# On the grid, the $PROCESS job index selects this job's slice of subruns.
if options.grid:
    #override start variable by $PROCESS variable
    nth_job = int(os.environ["PROCESS"])
    options.count[0]=nth_job*options.count[1]+options.count[0]
# Test runs always process exactly one subrun starting at entry 0.
if options.testing:
    options.count = [0,1]
# Singleton configuration object consumed by the rest of the module.
AnalysisConfig = _AnalysisConfig(**vars(options))
print("Using analysis configuration:")
print(AnalysisConfig)
| 2.171875 | 2 |
crawler/browser/condition.py | CvvT/crawler_sqlmap | 9 | 12785277 | <reponame>CvvT/crawler_sqlmap<gh_stars>1-10
#! /usr/bin/env python
# -*- coding: utf-8 -*-
class jQuery_load(object):
    """Selenium wait-condition: true once jQuery has no active AJAX requests."""

    def __init__(self):
        pass

    def __call__(self, driver):
        """Return True when ``jQuery.active`` is 0.

        If the script raises (e.g. jQuery is absent from the page), the
        condition is treated as satisfied: there is nothing to wait for.
        """
        try:
            ret = driver.execute_script("return jQuery.active")
            return int(ret) == 0
        # BUG FIX: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; only script/conversion errors should be caught.
        except Exception:
            return True
class jScript_load(object):
    """Selenium wait-condition: true once the document has finished loading."""

    def __init__(self):
        pass

    def __call__(self, driver):
        """Return True when ``document.readyState`` is "complete".

        If the script raises (e.g. the page is mid-navigation), the document
        is considered not yet loaded.
        """
        try:
            ret = driver.execute_script("return document.readyState")
            return str(ret) == "complete"
        # BUG FIX: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; only script errors should be caught.
        except Exception:
            return False
| 2.140625 | 2 |
XAF V2.py | BasVVisser/XAF-Auditfiles | 1 | 12785278 | <reponame>BasVVisser/XAF-Auditfiles
#XAF V2 (CLAIR2.00) XML inlezen en exporteren naar CSV
#Dit programma leest een XAF V2 (CLAIR2.00) bestand in een Pandas Dataframe en maakt van alle tabellen 1 totaaltabel en exporteert deze naar een CSV-bestand.
#Benodigde modules
import pandas as pd
import numpy as np
import sys
import os
import xml.etree.ElementTree as ET
#Selecteren van bestanden, als dit via IDEA moet kan dit weg.
import tkinter as tk
from tkinter import filedialog
#Algemene functies
def XAF_Parsen(file):
    """Parse the XAF/XML document at *file* and return its root element."""
    boom = ET.parse(file)
    return boom.getroot()
def XAF_element_vinden(root, tag, ns):
    """Return the first descendant of *root* matching *tag* (namespace map *ns*), or None."""
    gevonden = root.find(tag, ns)
    return gevonden
def namespace_ombouwen_transacties(root):
    """Extract the XML namespace URI from *root*'s tag, wrapped in braces.

    "{uri}auditfile" -> "{uri}", ready to strip from child tags.
    """
    achter_accolade = root.tag.split('{')[1]
    uri = achter_accolade.split('}')[0]
    return "{" + uri + "}"
def namespace_ombouwen_algemeen(root):
    """Build the namespace map for ADF v3 lookups (xsd, xsi and the file's own URI as "ADF")."""
    eigen_uri = root.tag.split('{')[1].split('}')[0]
    return {
        'xsd': "http://www.w3.org/2001/XMLSchema",
        'xsi': "http://www.w3.org/2001/XMLSchema-instance",
        "ADF": eigen_uri,
    }
def hoofdlaag_informatie(root, ns):
    """Collect the *leaf* children of *root* into a dict (tag -> text).

    Children that themselves contain sub-elements are skipped.
    Returns None when *root* is None (element absent from the auditfile).
    """
    if root is None:
        return None
    return {
        kind.tag.replace(ns, ''): kind.text
        for kind in root
        if len(kind) == 0
    }
def informatie_tweede_laag(root, ns):
    """Flatten a two-level element list into a DataFrame, one row per child.

    Grandchildren that have their own sub-elements are flattened one extra
    level into the same row; later duplicate tags overwrite earlier ones.
    """
    regels = dict()
    for regelnummer, kind in enumerate(root):
        rij = dict()
        for veld in kind:
            if len(veld) != 0:
                for subveld in veld:
                    rij[subveld.tag.replace(ns, '')] = subveld.text
            else:
                rij[veld.tag.replace(ns, '')] = veld.text
        regels[regelnummer] = rij
    return pd.DataFrame(regels).transpose()
def btw_codes_oplossen(vatcode):
    """Split VAT codes into claim/pay rows and recombine them per vatID.

    Produces one row per (vatID, vatDesc) holding both the to-claim and
    to-pay ledger account, where available.
    """
    te_vorderen = vatcode[['vatID', 'vatDesc', 'vatToClaimAccID']]
    te_vorderen = te_vorderen[te_vorderen['vatToClaimAccID'].notna()]
    te_betalen = vatcode[['vatID', 'vatDesc', 'vatToPayAccID']]
    te_betalen = te_betalen[te_betalen['vatToPayAccID'].notna()]
    return pd.merge(te_vorderen, te_betalen, on=['vatID', 'vatDesc'], how='outer')
def dagboek_informatie(dagboeken, ns):
    """Return one DataFrame row per journal with its leaf-level fields.

    *dagboeken* is an iterable of <journal> elements; children with their
    own sub-elements (the transactions) are ignored here.
    """
    rijen = []
    for dagboek in dagboeken:
        dagboekinfo = dict()
        for regels in dagboek:
            if len(regels) == 0:
                dagboekinfo[regels.tag.replace(ns, '')] = regels.text
        rijen.append(dagboekinfo)
    # BUG FIX: DataFrame.append was removed in pandas 2.0 (and was O(n^2));
    # collect the dicts first and build the frame once.
    return pd.DataFrame(rijen)
def transactie_informatie(dagboeken, ns):
    """Flatten every transaction element in *dagboeken* into DataFrame rows.

    Journal-level leaf fields are merged into each record; transaction-level
    leaf fields are prefixed with "TR_"; line-level fields keep their tag.

    NOTE(review): record_dict is deliberately kept across journals and
    transactions, so a record emits one row per transaction child and
    inherits fields from earlier records when a tag is absent -- this
    carry-over behaviour is preserved from the original implementation.
    """
    total_records = list()
    record_dict = dict()
    for dagboek in dagboeken:
        for regels in dagboek:
            if len(regels) == 0:
                # Journal-level leaf field.
                record_dict[regels.tag.replace(ns, '')] = regels.text
            else:
                for record in regels:
                    if len(record) == 0:
                        # Transaction-level leaf field.
                        record_dict["TR_" + record.tag.replace(ns, '')] = record.text
                    else:
                        for subfields in record:
                            if len(subfields) == 0:
                                record_dict[subfields.tag.replace(ns, '')] = subfields.text
                            else:
                                for subfields_1 in subfields:
                                    if len(subfields_1) == 0:
                                        record_dict[subfields_1.tag.replace(ns, '')] = subfields_1.text
                                    else:
                                        print('nog een sublaag!')
                    total_records.append(record_dict.copy())
    # BUG FIX: DataFrame.append was removed in pandas 2.0; build the frame once.
    return pd.DataFrame(total_records)
def openingsbalans_samenvoegen(transacties_df, openingsbalans, openingbalance):
    """Prepend the opening-balance rows to the transactions, when present.

    *openingsbalans* is the <openingBalance> element (or None when absent);
    *openingbalance* is its already-flattened DataFrame.
    """
    if openingsbalans is None:
        return transacties_df
    return pd.concat([openingbalance, transacties_df], axis=0)
def bedrag_corrigeren(transacties_df):
    """Add a signed 'amount' column: credit ('C') lines negative, debit positive."""
    teken = np.where(transacties_df['amntTp'] == 'C', -1.0, 1.0)
    transacties_df['amount'] = teken * transacties_df['amnt'].astype(float)
    return transacties_df
def debet_toevoegen(transacties_df):
    """Add a 'debet' column: the amount for debit lines, 0 for credit lines."""
    factor = np.where(transacties_df['amntTp'] == 'C', 0.0, 1.0)
    transacties_df['debet'] = factor * transacties_df['amnt'].astype(float)
    return transacties_df
def credit_toevoegen(transacties_df):
    """Add a 'credit' column: the amount for credit lines, 0 for debit lines."""
    factor = np.where(transacties_df['amntTp'] == 'D', 0.0, 1.0)
    transacties_df['credit'] = factor * transacties_df['amnt'].astype(float)
    return transacties_df
def btw_bedrag_corrigeren(transacties_df):
    """Add a signed 'vat_amount' column when the VAT columns are present.

    Frames without a 'vatAmnt' column are returned untouched.
    """
    if "vatAmnt" not in transacties_df.columns:
        return transacties_df
    teken = np.where(transacties_df["vatAmntTp"] == "C", -1.0, 1.0)
    transacties_df["vat_amount"] = teken * transacties_df["vatAmnt"].astype(float)
    return transacties_df
def algemene_informatie_samenvoegen(openingsbalans, openingsbalansinfo, headerinfo, companyinfo, transactionsinfo):
    """Combine the one-row metadata frames side by side.

    The opening-balance summary is only appended when the <openingBalance>
    element exists; None frames (e.g. companyinfo in ADF v2) are dropped
    silently by pd.concat.
    """
    delen = [headerinfo, companyinfo, transactionsinfo]
    if openingsbalans is not None:
        delen.append(openingsbalansinfo)
    return pd.concat(delen, axis=1)
def tabellen_samenvoegen(transacties_df, periods, vatcode, custsup, genledg, dagboek_df):
    """Left-join the lookup tables (periods, VAT, customers/suppliers,
    general ledger, journals) onto the transaction lines.

    Each join only happens when the lookup table is non-empty and the
    matching key column exists; column names differ between ADF v2
    ("accountID"/"journalID") and v3 ("accID"/"jrnID"), hence the elif
    branches. The order of the joins is significant: later joins test
    columns of the progressively widened frame.
    """
    # Accounting periods (v3 only; v2 passes an empty frame).
    if periods.empty:
        transacties_df = transacties_df
    else:
        transacties_df = pd.merge(transacties_df, periods, left_on="TR_periodNumber",right_on = "periodNumber", how="left")
    # VAT codes, keyed on vatID.
    if len(vatcode) != 0 and "vatID" in transacties_df.columns:
        transacties_df = pd.merge(transacties_df, vatcode, on="vatID", how="left")
    else:
        transacties_df = transacties_df
    # Customers/suppliers; columns prefixed "cs_" to avoid name clashes.
    if "custSupID" in transacties_df.columns and len(custsup) != 0:
        transacties_df = pd.merge(transacties_df, custsup.add_prefix("cs_"), left_on="custSupID", right_on="cs_custSupID", how="left")
    else:
        transacties_df = transacties_df
    # General ledger accounts: v3 key first, v2 key as fallback.
    if "accID" in transacties_df.columns:
        transacties_df = pd.merge(transacties_df, genledg, on="accID", how="left")
    elif "accountID" in transacties_df.columns:
        transacties_df = pd.merge(transacties_df, genledg, on="accountID", how="left")
    # Journals; columns prefixed "jrn_". Same v3/v2 key fallback.
    if "jrnID" in transacties_df.columns:
        transacties_df = pd.merge(transacties_df, dagboek_df.add_prefix("jrn_"), left_on="jrnID", right_on="jrn_jrnID", how="left")
    elif "journalID" in transacties_df.columns:
        transacties_df = pd.merge(transacties_df, dagboek_df.add_prefix("jrn_"), left_on="journalID", right_on="jrn_journalID", how="left")
    return transacties_df
def exportlocatie_bepalen(file):
    """Derive the CSV export path next to the input .xaf file.

    "/dir/name.xaf" -> "/dir/name.csv". Paths are assumed to use forward
    slashes (as produced by the tkinter file dialog).
    """
    exportpad, _, exportnaam = file.rpartition("/")
    # BUG FIX: str.strip(".xaf") removed any leading/trailing '.', 'x', 'a'
    # or 'f' characters (e.g. "data.xaf" -> "dat"); remove only the real
    # ".xaf" suffix.
    if exportnaam.endswith(".xaf"):
        exportnaam = exportnaam[:-len(".xaf")]
    return exportpad + "/" + exportnaam + ".csv"
def entiteit_boekjaar_toevoegen(file, transacties_df):
    """Add 'Entiteit'/'Boekjaar' columns parsed from an "Entity - Year.xaf" name.

    The frame is returned unchanged when the file name contains no '-'.
    """
    exportnaam = file.rpartition("/")[2]
    # BUG FIX: str.strip(".xaf") removed trailing '.', 'x', 'a', 'f'
    # characters from the name itself; remove only the real ".xaf" suffix.
    if exportnaam.endswith(".xaf"):
        exportnaam = exportnaam[:-len(".xaf")]
    if "-" not in exportnaam:
        return transacties_df
    # Split on '-' and trim the surrounding spaces around each part.
    kolomwaarden = [deel.strip() for deel in exportnaam.split("-")]
    transacties_df["Entiteit"] = kolomwaarden[0]
    transacties_df["Boekjaar"] = kolomwaarden[1]
    return transacties_df
def exportlocatie__geconsolideerd_bepalen(file):
    """Return "<folder of *file*>/Geconsolideerd.csv" for the combined export."""
    omgekeerde_delen = file[::-1].split("/", 1)
    map_pad = omgekeerde_delen[1][::-1]
    return map_pad + "/" + "Geconsolideerd.csv"
def transacties_V2(root):
    """Flatten an ADF v2 <transactions> tree into one DataFrame row per line.

    Each transaction line carries the journal-level leaf fields (prefixed
    "jrn_") and the transaction-level leaf fields (prefixed "TR_").

    NOTE(review): the journal/transaction dicts are kept across iterations,
    so later lines inherit fields from earlier records when a tag is
    absent -- preserved from the original implementation.
    """
    record_dict_dagboek = dict()
    record_dict_transacties = dict()
    record_dict_totaal = list()
    for child in root:
        if child.tag != "transactions":
            continue
        # BUG FIX: the original iterated the module-global ``transactions``
        # instead of ``child`` and only worked because the __main__ block
        # happened to define that global before calling this function.
        for journal in child:
            if len(journal) == 0:
                continue
            for dagboek in journal:
                if len(dagboek) == 0:
                    # Journal-level leaf field.
                    record_dict_dagboek["jrn_" + dagboek.tag] = dagboek.text
                for i in dagboek:
                    if len(i) == 0:
                        # Transaction-level leaf field.
                        record_dict_transacties["TR_" + i.tag] = i.text
                    if len(i) > 0:
                        # Line level: the most detailed record.
                        record_dict_lijn = dict()
                        for j in i:
                            record_dict_lijn[j.tag] = j.text
                        # Merge order: transaction fields override line
                        # fields, journal fields override both.
                        samengevoegd = {**record_dict_lijn,
                                        **record_dict_transacties,
                                        **record_dict_dagboek}
                        record_dict_totaal.append(samengevoegd.copy())
    # BUG FIX: DataFrame.append was removed in pandas 2.0; build the frame once.
    return pd.DataFrame(record_dict_totaal)
def bedrag_toevoegen_V2(transacties_df):
    """Add a signed 'Bedrag' column (debit minus credit) for ADF v2 frames.

    Missing debit/credit amounts count as zero.
    """
    for kolom in ("debitAmount", "creditAmount"):
        transacties_df[kolom] = transacties_df[kolom].astype(float).fillna(0)
    transacties_df["Bedrag"] = (
        transacties_df["debitAmount"] - transacties_df["creditAmount"]
    )
    return transacties_df
def merge(dict1, dict2):
    """Return a new dict combining both inputs; *dict2* wins on duplicate keys."""
    samengevoegd = dict(dict1)
    samengevoegd.update(dict2)
    return samengevoegd
def dataframe_opschonen(transacties_df):
    """Drop the raw 'amnt' column (if any) and every all-empty column."""
    if "amnt" in transacties_df:
        transacties_df = transacties_df.drop(columns=['amnt'])
    return transacties_df.dropna(axis=1, how="all")
# Start of the main program: pick one or more .xaf files via a dialog,
# flatten each into a single transaction table and export it as CSV.
if __name__ == "__main__":
    main = tk.Tk()
    filez = filedialog.askopenfilenames(filetypes=[("XAF","*.xaf")],multiple=True)
    filenames = main.tk.splitlist(filez)
    main.destroy()
    geconsolideerd = pd.DataFrame()
    for filename in filenames:
        file = filename
        # Parse the XML file.
        root = XAF_Parsen(file)
        if root.tag == "auditfile": #ADF 2
            # ADF v2 uses no XML namespaces.
            namespaces = ""
            ns = ""
            # General information (v2). NOTE: `transactions` is a global
            # that transacties_V2 below happens to rely on.
            header = XAF_element_vinden(root, "header", namespaces)
            headerinfo = pd.DataFrame(hoofdlaag_informatie(header, ns), index = [0])
            transactions = XAF_element_vinden(root, "transactions", namespaces)
            transactionsinfo = pd.DataFrame(hoofdlaag_informatie(transactions, ns), index = [0])
            companyinfo = None
            openingsbalans = XAF_element_vinden(root, "openingBalance", namespaces)
            openingsbalansinfo = pd.DataFrame(hoofdlaag_informatie(openingsbalans, ns), index = [0])
            openingbalance = informatie_tweede_laag(root.findall("openingBalance/",namespaces),ns)
            auditfile_algemene_informatie = algemene_informatie_samenvoegen(openingsbalans, openingsbalansinfo, headerinfo, companyinfo, transactionsinfo)
            # Remaining lookup tables (periods/VAT do not exist in v2).
            genledg = informatie_tweede_laag(root.findall("generalLedger/",""),"")
            custsup = informatie_tweede_laag(root.findall("customersSuppliers/",""),"")
            periods = pd.DataFrame()
            vatcode = pd.DataFrame()
            # Journals and transactions.
            dagboeken = root.findall("transactions/journal")
            dagboek_df = dagboek_informatie(dagboeken, ns)
            transacties_df = transacties_V2(root)
            # Combine the transactions with the lookup tables + opening balance.
            transacties_df = openingsbalans_samenvoegen(transacties_df,openingsbalans,openingbalance)
            transacties_df = tabellen_samenvoegen(transacties_df, periods, vatcode, custsup, genledg, dagboek_df)
            # Add derived columns (signed amount) and clean up empty columns.
            transacties_df = bedrag_toevoegen_V2(transacties_df)
            transacties_df = dataframe_opschonen(transacties_df)
        # Write the per-file CSV next to the input file (Dutch locale:
        # decimal comma, semicolon separator).
        exportbestand = exportlocatie_bepalen(file)
        transacties_df.to_csv(exportbestand,
                              index=False,
                              decimal=",",
                              sep=";")
        if len(filenames) > 1:
            geconsolideerd = pd.concat([geconsolideerd, transacties_df], axis=0, ignore_index=True)
    # Export the consolidated audit file when several inputs were selected.
    if len(filenames) > 1:
        exportbestand_geconsolideerd = exportlocatie__geconsolideerd_bepalen(filenames[0])
        geconsolideerd.to_csv(exportbestand_geconsolideerd,
                              index=False,
                              decimal=",",
                              sep=";")
| 2.421875 | 2 |
geolib/models/dsheetpiling/internal_partial_factors.py | Deltares/geolib | 4 | 12785279 | from geolib.models import BaseDataClass
from geolib.models.dseries_parser import DSeriesInlineMappedProperties
class PartialFactorsEurocodeDa1Set1(DSeriesInlineMappedProperties):
    """Default partial safety factors for Eurocode 7, Design Approach 1 set 1.

    Pure data container serialized to/from the D-Sheet Piling input file by
    the parent class. NOTE(review): values copied verbatim from the source;
    confirm against the Eurocode 7 / national annex tables before relying
    on them.
    """
    safetyfactorgammad: float = 1.00
    safetyfactoreta: float = 1.00
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.00
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.00
    loadfactorvarunfavmultiply: float = 1.00
    loadfactorpermunfavcalc: float = 0.0000
    loadfactorvarunfavcalc: float = 0.0000
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 1.00
    constructloadfactorpermunfavmultiply: float = 1.00
    constructloadfactorvarunfavmultiply: float = 1.00
    constructloadfactorpermunfavcalc: float = 1.0000
    constructloadfactorvarunfavcalc: float = 1.0000
    effectfactor: float = 1.35
    effectfactorvarunfav: float = 1.10
    materialfactorcohesion: float = 1.00
    materialfactorundrainedshearstrength: float = 1.00
    materialfactortgphi: float = 1.000
    materialfactorsubgradereaction: float = 1.00
    resistancefactor: float = 1.00
    geometryincretainingheight: float = 10.00
    geometrymaxincretainingheight: float = 0.50
    geometrydeltapassivesurfacelevel: float = 0.00
    geometrydeltapassivephreaticline: float = 0.00
    geometrydeltaactivephreaticline: float = 0.00
    overallstabilityfactordrivingmoment: float = 1.00
    overallstabilityfactorcohesion: float = 1.00
    overallstabilityfactortgphi: float = 1.00
    overallstabilityfactorunitweight: float = 1.00
    factorrepvaluesmdpmax: float = 0.00
    verticalbalancegammamb: float = 1.25
class PartialFactorsEurocodeDa1Set2(DSeriesInlineMappedProperties):
    """Default partial safety factors for Eurocode 7, Design Approach 1 set 2.

    Pure data container serialized by the parent class. NOTE(review):
    values copied verbatim; confirm against the Eurocode 7 tables.
    """
    safetyfactorgammad: float = 1.00
    safetyfactoreta: float = 1.00
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.30
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.00
    loadfactorvarunfavmultiply: float = 1.00
    loadfactorpermunfavcalc: float = 0.0000
    loadfactorvarunfavcalc: float = 0.0000
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 1.00
    constructloadfactorpermunfavmultiply: float = 1.00
    constructloadfactorvarunfavmultiply: float = 1.00
    constructloadfactorpermunfavcalc: float = 1.0000
    constructloadfactorvarunfavcalc: float = 1.0000
    effectfactor: float = 1.00
    effectfactorvarunfav: float = 1.00
    materialfactorcohesion: float = 1.25
    materialfactorundrainedshearstrength: float = 1.00
    materialfactortgphi: float = 1.250
    materialfactorsubgradereaction: float = 1.00
    resistancefactor: float = 1.00
    geometryincretainingheight: float = 10.00
    geometrymaxincretainingheight: float = 0.50
    geometrydeltapassivesurfacelevel: float = 0.00
    geometrydeltapassivephreaticline: float = 0.00
    geometrydeltaactivephreaticline: float = 0.00
    overallstabilityfactordrivingmoment: float = 1.00
    overallstabilityfactorcohesion: float = 1.00
    overallstabilityfactortgphi: float = 1.00
    overallstabilityfactorunitweight: float = 1.00
    factorrepvaluesmdpmax: float = 0.00
    verticalbalancegammamb: float = 1.25
class PartialFactorsEurocodeDa2(DSeriesInlineMappedProperties):
    """Default partial safety factors for Eurocode 7, Design Approach 2.

    Pure data container serialized by the parent class. NOTE(review):
    values copied verbatim; confirm against the Eurocode 7 tables.
    """
    safetyfactorgammad: float = 1.00
    safetyfactoreta: float = 1.00
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.00
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.00
    loadfactorvarunfavmultiply: float = 1.00
    loadfactorpermunfavcalc: float = 0.0000
    loadfactorvarunfavcalc: float = 0.0000
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 1.00
    constructloadfactorpermunfavmultiply: float = 1.00
    constructloadfactorvarunfavmultiply: float = 1.00
    constructloadfactorpermunfavcalc: float = 1.0000
    constructloadfactorvarunfavcalc: float = 1.0000
    effectfactor: float = 1.35
    effectfactorvarunfav: float = 1.10
    materialfactorcohesion: float = 1.00
    materialfactorundrainedshearstrength: float = 1.00
    materialfactortgphi: float = 1.000
    materialfactorsubgradereaction: float = 1.00
    resistancefactor: float = 1.40
    geometryincretainingheight: float = 10.00
    geometrymaxincretainingheight: float = 0.50
    geometrydeltapassivesurfacelevel: float = 0.00
    geometrydeltapassivephreaticline: float = 0.00
    geometrydeltaactivephreaticline: float = 0.00
    overallstabilityfactordrivingmoment: float = 1.00
    overallstabilityfactorcohesion: float = 1.00
    overallstabilityfactortgphi: float = 1.00
    overallstabilityfactorunitweight: float = 1.00
    factorrepvaluesmdpmax: float = 0.00
    verticalbalancegammamb: float = 1.25
class PartialFactorsEurocodeDa3(DSeriesInlineMappedProperties):
    """Default partial safety factors for Eurocode 7, Design Approach 3.

    Pure data container serialized by the parent class. NOTE(review):
    values copied verbatim; confirm against the Eurocode 7 tables.
    """
    safetyfactorgammad: float = 1.00
    safetyfactoreta: float = 1.00
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.30
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.00
    loadfactorvarunfavmultiply: float = 1.00
    loadfactorpermunfavcalc: float = 0.0000
    loadfactorvarunfavcalc: float = 0.0000
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 1.00
    constructloadfactorpermunfavmultiply: float = 1.00
    constructloadfactorvarunfavmultiply: float = 1.00
    constructloadfactorpermunfavcalc: float = 1.0000
    constructloadfactorvarunfavcalc: float = 1.0000
    effectfactor: float = 1.00
    effectfactorvarunfav: float = 1.00
    materialfactorcohesion: float = 1.25
    materialfactorundrainedshearstrength: float = 1.00
    materialfactortgphi: float = 1.250
    materialfactorsubgradereaction: float = 1.00
    resistancefactor: float = 1.00
    geometryincretainingheight: float = 10.00
    geometrymaxincretainingheight: float = 0.50
    geometrydeltapassivesurfacelevel: float = 0.00
    geometrydeltapassivephreaticline: float = 0.00
    geometrydeltaactivephreaticline: float = 0.00
    overallstabilityfactordrivingmoment: float = 1.00
    overallstabilityfactorcohesion: float = 1.00
    overallstabilityfactortgphi: float = 1.00
    overallstabilityfactorunitweight: float = 1.00
    factorrepvaluesmdpmax: float = 0.00
    verticalbalancegammamb: float = 1.25
class PartialFactorsEc7Nl0(DSeriesInlineMappedProperties):
    """Default partial factors, EC7-NL (Dutch annex), class 0 (presumably RC0 -- confirm).

    Pure data container serialized by the parent class. NOTE(review):
    values copied verbatim; confirm against the NEN 9997-1 tables.
    """
    safetyfactorgammad: float = 1.00
    safetyfactoreta: float = 1.00
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.00
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.00
    loadfactorvarunfavmultiply: float = 1.00
    loadfactorpermunfavcalc: float = 0.0000
    loadfactorvarunfavcalc: float = 0.0000
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 1.00
    constructloadfactorpermunfavmultiply: float = 1.00
    constructloadfactorvarunfavmultiply: float = 1.00
    constructloadfactorpermunfavcalc: float = 1.0000
    constructloadfactorvarunfavcalc: float = 1.0000
    effectfactor: float = 1.00
    effectfactorvarunfav: float = 1.00
    materialfactorcohesion: float = 1.00
    materialfactorundrainedshearstrength: float = 1.00
    materialfactortgphi: float = 1.050
    materialfactorsubgradereaction: float = 1.30
    resistancefactor: float = 1.00
    geometryincretainingheight: float = 10.00
    geometrymaxincretainingheight: float = 0.50
    geometrydeltapassivesurfacelevel: float = 0.00
    geometrydeltapassivephreaticline: float = 0.15
    geometrydeltaactivephreaticline: float = 0.05
    overallstabilityfactordrivingmoment: float = 0.90
    overallstabilityfactorcohesion: float = 1.30
    overallstabilityfactortgphi: float = 1.20
    overallstabilityfactorunitweight: float = 1.00
    factorrepvaluesmdpmax: float = 1.20
    verticalbalancegammamb: float = 1.20
class PartialFactorsEc7Nl1(DSeriesInlineMappedProperties):
    """Default partial factors, EC7-NL (Dutch annex), class 1 (presumably RC1 -- confirm).

    Pure data container serialized by the parent class. NOTE(review):
    values copied verbatim; confirm against the NEN 9997-1 tables.
    """
    safetyfactorgammad: float = 1.00
    safetyfactoreta: float = 1.00
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.00
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.00
    loadfactorvarunfavmultiply: float = 1.00
    loadfactorpermunfavcalc: float = 0.0000
    loadfactorvarunfavcalc: float = 0.0000
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 1.00
    constructloadfactorpermunfavmultiply: float = 1.00
    constructloadfactorvarunfavmultiply: float = 1.00
    constructloadfactorpermunfavcalc: float = 1.0000
    constructloadfactorvarunfavcalc: float = 1.0000
    effectfactor: float = 1.00
    effectfactorvarunfav: float = 1.00
    materialfactorcohesion: float = 1.15
    materialfactorundrainedshearstrength: float = 1.00
    materialfactortgphi: float = 1.150
    materialfactorsubgradereaction: float = 1.30
    resistancefactor: float = 1.00
    geometryincretainingheight: float = 10.00
    geometrymaxincretainingheight: float = 0.50
    geometrydeltapassivesurfacelevel: float = 0.00
    geometrydeltapassivephreaticline: float = 0.20
    geometrydeltaactivephreaticline: float = 0.05
    overallstabilityfactordrivingmoment: float = 1.00
    overallstabilityfactorcohesion: float = 1.30
    overallstabilityfactortgphi: float = 1.20
    overallstabilityfactorunitweight: float = 1.00
    factorrepvaluesmdpmax: float = 1.20
    verticalbalancegammamb: float = 1.25
class PartialFactorsEc7Nl2(DSeriesInlineMappedProperties):
    """Default partial factors, EC7-NL (Dutch annex), class 2 (presumably RC2 -- confirm).

    Pure data container serialized by the parent class. NOTE(review):
    values copied verbatim; confirm against the NEN 9997-1 tables.
    """
    safetyfactorgammad: float = 1.00
    safetyfactoreta: float = 1.00
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.00
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.00
    loadfactorvarunfavmultiply: float = 1.00
    loadfactorpermunfavcalc: float = 0.0000
    loadfactorvarunfavcalc: float = 0.0000
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 1.00
    constructloadfactorpermunfavmultiply: float = 1.00
    constructloadfactorvarunfavmultiply: float = 1.00
    constructloadfactorpermunfavcalc: float = 1.0000
    constructloadfactorvarunfavcalc: float = 1.0000
    effectfactor: float = 1.00
    effectfactorvarunfav: float = 1.00
    materialfactorcohesion: float = 1.25
    materialfactorundrainedshearstrength: float = 1.00
    materialfactortgphi: float = 1.175
    materialfactorsubgradereaction: float = 1.30
    resistancefactor: float = 1.00
    geometryincretainingheight: float = 10.00
    geometrymaxincretainingheight: float = 0.50
    geometrydeltapassivesurfacelevel: float = 0.00
    geometrydeltapassivephreaticline: float = 0.25
    geometrydeltaactivephreaticline: float = 0.05
    overallstabilityfactordrivingmoment: float = 1.00
    overallstabilityfactorcohesion: float = 1.45
    overallstabilityfactortgphi: float = 1.25
    overallstabilityfactorunitweight: float = 1.00
    factorrepvaluesmdpmax: float = 1.20
    verticalbalancegammamb: float = 1.25
class PartialFactorsEc7Nl3(DSeriesInlineMappedProperties):
    """Default partial factors, EC7-NL (Dutch annex), class 3 (presumably RC3 -- confirm).

    Pure data container serialized by the parent class. NOTE(review):
    values copied verbatim; confirm against the NEN 9997-1 tables.
    """
    safetyfactorgammad: float = 1.00
    safetyfactoreta: float = 1.00
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.25
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.00
    loadfactorvarunfavmultiply: float = 1.00
    loadfactorpermunfavcalc: float = 0.0000
    loadfactorvarunfavcalc: float = 0.0000
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 1.00
    constructloadfactorpermunfavmultiply: float = 1.00
    constructloadfactorvarunfavmultiply: float = 1.00
    constructloadfactorpermunfavcalc: float = 1.0000
    constructloadfactorvarunfavcalc: float = 1.0000
    effectfactor: float = 1.00
    effectfactorvarunfav: float = 1.00
    materialfactorcohesion: float = 1.40
    materialfactorundrainedshearstrength: float = 1.00
    materialfactortgphi: float = 1.200
    materialfactorsubgradereaction: float = 1.30
    resistancefactor: float = 1.00
    geometryincretainingheight: float = 10.00
    geometrymaxincretainingheight: float = 0.50
    geometrydeltapassivesurfacelevel: float = 0.00
    geometrydeltapassivephreaticline: float = 0.25
    geometrydeltaactivephreaticline: float = 0.05
    overallstabilityfactordrivingmoment: float = 1.00
    overallstabilityfactorcohesion: float = 1.60
    overallstabilityfactortgphi: float = 1.30
    overallstabilityfactorunitweight: float = 1.00
    factorrepvaluesmdpmax: float = 1.35
    verticalbalancegammamb: float = 1.25
class PartialFactorsEc7BSet1(DSeriesInlineMappedProperties):
    """Default partial factors, EC7-B (Belgian annex), set 1.

    Pure data container serialized by the parent class. NOTE(review):
    values copied verbatim; confirm against the Belgian annex tables.
    """
    safetyfactorgammad: float = 1.00
    safetyfactoreta: float = 1.00
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.00
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.00
    loadfactorvarunfavmultiply: float = 1.00
    loadfactorpermunfavcalc: float = 0.0000
    loadfactorvarunfavcalc: float = 0.0000
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 1.00
    constructloadfactorpermunfavmultiply: float = 1.00
    constructloadfactorvarunfavmultiply: float = 1.00
    constructloadfactorpermunfavcalc: float = 1.0000
    constructloadfactorvarunfavcalc: float = 1.0000
    effectfactor: float = 1.35
    effectfactorvarunfav: float = 1.10
    materialfactorcohesion: float = 1.00
    materialfactorundrainedshearstrength: float = 1.00
    materialfactortgphi: float = 1.000
    materialfactorsubgradereaction: float = 1.00
    resistancefactor: float = 1.00
    geometryincretainingheight: float = 10.00
    geometrymaxincretainingheight: float = 0.50
    geometrydeltapassivesurfacelevel: float = 0.00
    geometrydeltapassivephreaticline: float = 0.00
    geometrydeltaactivephreaticline: float = 0.00
    overallstabilityfactordrivingmoment: float = 1.00
    overallstabilityfactorcohesion: float = 1.00
    overallstabilityfactortgphi: float = 1.00
    overallstabilityfactorunitweight: float = 1.00
    factorrepvaluesmdpmax: float = 0.00
    verticalbalancegammamb: float = 1.20
class PartialFactorsEc7BSet2(DSeriesInlineMappedProperties):
    """Default partial factors, EC7-B (Belgian annex), set 2.

    Pure data container serialized by the parent class. NOTE(review):
    values copied verbatim; confirm against the Belgian annex tables.
    """
    safetyfactorgammad: float = 1.00
    safetyfactoreta: float = 1.00
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.10
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.00
    loadfactorvarunfavmultiply: float = 1.00
    loadfactorpermunfavcalc: float = 0.0000
    loadfactorvarunfavcalc: float = 0.0000
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 1.00
    constructloadfactorpermunfavmultiply: float = 1.00
    constructloadfactorvarunfavmultiply: float = 1.00
    constructloadfactorpermunfavcalc: float = 1.0000
    constructloadfactorvarunfavcalc: float = 1.0000
    effectfactor: float = 1.00
    effectfactorvarunfav: float = 1.00
    materialfactorcohesion: float = 1.25
    materialfactorundrainedshearstrength: float = 1.00
    materialfactortgphi: float = 1.250
    materialfactorsubgradereaction: float = 1.00
    resistancefactor: float = 1.00
    geometryincretainingheight: float = 10.00
    geometrymaxincretainingheight: float = 0.50
    geometrydeltapassivesurfacelevel: float = 0.00
    geometrydeltapassivephreaticline: float = 0.00
    geometrydeltaactivephreaticline: float = 0.00
    overallstabilityfactordrivingmoment: float = 1.00
    overallstabilityfactorcohesion: float = 1.25
    overallstabilityfactortgphi: float = 1.25
    overallstabilityfactorunitweight: float = 1.00
    factorrepvaluesmdpmax: float = 0.00
    verticalbalancegammamb: float = 1.20
class PartialFactorsCurI(DSeriesInlineMappedProperties):
    """Default partial factor values for the 'CUR I' verification scheme
    (name suggests CUR 166 safety class I -- confirm against the
    D-Series/D-Sheet Piling documentation)."""
    # Overall safety factors.
    safetyfactorgammad: float = 1.00
    safetyfactoreta: float = 1.00
    # Load factors (permanent/variable, favourable/unfavourable).
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.00
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.00
    loadfactorvarunfavmultiply: float = 1.00
    loadfactorpermunfavcalc: float = 0.0000
    loadfactorvarunfavcalc: float = 0.0000
    # Load factors for the construction stage.
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 1.00
    constructloadfactorpermunfavmultiply: float = 1.00
    constructloadfactorvarunfavmultiply: float = 1.00
    constructloadfactorpermunfavcalc: float = 1.0000
    constructloadfactorvarunfavcalc: float = 1.0000
    # Effect factors.
    effectfactor: float = 1.00
    effectfactorvarunfav: float = 1.00
    # Material (soil strength/stiffness) factors.
    materialfactorcohesion: float = 1.00
    materialfactorundrainedshearstrength: float = 1.00
    materialfactortgphi: float = 1.050
    materialfactorsubgradereaction: float = 1.30
    # Resistance factor.
    resistancefactor: float = 1.00
    # Geometry modifications (retaining height, surface/phreatic levels).
    geometryincretainingheight: float = 0.00
    geometrymaxincretainingheight: float = 0.00
    geometrydeltapassivesurfacelevel: float = 0.20
    geometrydeltapassivephreaticline: float = 0.15
    geometrydeltaactivephreaticline: float = 0.05
    # Overall (slope) stability factors.
    overallstabilityfactordrivingmoment: float = 0.90
    overallstabilityfactorcohesion: float = 1.50
    overallstabilityfactortgphi: float = 1.20
    overallstabilityfactorunitweight: float = 1.00
    # Representative values and vertical balance.
    factorrepvaluesmdpmax: float = 0.00
    verticalbalancegammamb: float = 1.25
class PartialFactorsCurIi(DSeriesInlineMappedProperties):
    """Default partial factor values for the 'CUR II' verification scheme
    (name suggests CUR 166 safety class II -- confirm against the
    D-Series/D-Sheet Piling documentation)."""
    # Overall safety factors.
    safetyfactorgammad: float = 1.00
    safetyfactoreta: float = 1.00
    # Load factors (permanent/variable, favourable/unfavourable).
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.00
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.00
    loadfactorvarunfavmultiply: float = 1.00
    loadfactorpermunfavcalc: float = 0.0000
    loadfactorvarunfavcalc: float = 0.0000
    # Load factors for the construction stage.
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 1.00
    constructloadfactorpermunfavmultiply: float = 1.00
    constructloadfactorvarunfavmultiply: float = 1.00
    constructloadfactorpermunfavcalc: float = 1.0000
    constructloadfactorvarunfavcalc: float = 1.0000
    # Effect factors.
    effectfactor: float = 1.00
    effectfactorvarunfav: float = 1.00
    # Material (soil strength/stiffness) factors.
    materialfactorcohesion: float = 1.00
    materialfactorundrainedshearstrength: float = 1.00
    materialfactortgphi: float = 1.150
    materialfactorsubgradereaction: float = 1.30
    # Resistance factor.
    resistancefactor: float = 1.00
    # Geometry modifications (retaining height, surface/phreatic levels).
    geometryincretainingheight: float = 0.00
    geometrymaxincretainingheight: float = 0.00
    geometrydeltapassivesurfacelevel: float = 0.30
    geometrydeltapassivephreaticline: float = 0.20
    geometrydeltaactivephreaticline: float = 0.05
    # Overall (slope) stability factors.
    overallstabilityfactordrivingmoment: float = 1.00
    overallstabilityfactorcohesion: float = 1.50
    overallstabilityfactortgphi: float = 1.20
    overallstabilityfactorunitweight: float = 1.00
    # Representative values and vertical balance.
    factorrepvaluesmdpmax: float = 0.00
    verticalbalancegammamb: float = 1.25
class PartialFactorsCurIii(DSeriesInlineMappedProperties):
    """Default partial factor values for the 'CUR III' verification scheme
    (name suggests CUR 166 safety class III -- confirm against the
    D-Series/D-Sheet Piling documentation)."""
    # Overall safety factors.
    safetyfactorgammad: float = 1.00
    safetyfactoreta: float = 1.00
    # Load factors (permanent/variable, favourable/unfavourable).
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.25
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.00
    loadfactorvarunfavmultiply: float = 1.00
    loadfactorpermunfavcalc: float = 0.0000
    loadfactorvarunfavcalc: float = 0.0000
    # Load factors for the construction stage.
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 1.00
    constructloadfactorpermunfavmultiply: float = 1.00
    constructloadfactorvarunfavmultiply: float = 1.00
    constructloadfactorpermunfavcalc: float = 1.0000
    constructloadfactorvarunfavcalc: float = 1.0000
    # Effect factors.
    effectfactor: float = 1.00
    effectfactorvarunfav: float = 1.00
    # Material (soil strength/stiffness) factors.
    materialfactorcohesion: float = 1.10
    materialfactorundrainedshearstrength: float = 1.00
    materialfactortgphi: float = 1.200
    materialfactorsubgradereaction: float = 1.30
    # Resistance factor.
    resistancefactor: float = 1.00
    # Geometry modifications (retaining height, surface/phreatic levels).
    geometryincretainingheight: float = 0.00
    geometrymaxincretainingheight: float = 0.00
    geometrydeltapassivesurfacelevel: float = 0.35
    geometrydeltapassivephreaticline: float = 0.25
    geometrydeltaactivephreaticline: float = 0.05
    # Overall (slope) stability factors.
    overallstabilityfactordrivingmoment: float = 1.10
    overallstabilityfactorcohesion: float = 1.50
    overallstabilityfactortgphi: float = 1.20
    overallstabilityfactorunitweight: float = 1.00
    # Representative values and vertical balance.
    factorrepvaluesmdpmax: float = 0.00
    verticalbalancegammamb: float = 1.25
class PartialFactorsEc7SeVk1(DSeriesInlineMappedProperties):
    """Default partial factor values for the 'EC7 Se VK1' verification
    scheme (name suggests Eurocode 7, Swedish annex, safety class 1 --
    confirm against the D-Series/D-Sheet Piling documentation)."""
    # Overall safety factors.
    safetyfactorgammad: float = 0.83
    safetyfactoreta: float = 1.00
    # Load factors (permanent/variable, favourable/unfavourable).
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.00
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.10
    loadfactorvarunfavmultiply: float = 1.40
    loadfactorpermunfavcalc: float = 0.9130
    loadfactorvarunfavcalc: float = 1.1620
    # Load factors for the construction stage.
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 0.00
    constructloadfactorpermunfavmultiply: float = 1.35
    constructloadfactorvarunfavmultiply: float = 1.50
    constructloadfactorpermunfavcalc: float = 1.1205
    constructloadfactorvarunfavcalc: float = 1.2450
    # Effect factors.
    effectfactor: float = 1.00
    effectfactorvarunfav: float = 1.00
    # Material (soil strength/stiffness) factors.
    materialfactorcohesion: float = 1.30
    materialfactorundrainedshearstrength: float = 1.50
    materialfactortgphi: float = 1.300
    materialfactorsubgradereaction: float = 1.00
    # Resistance factor.
    resistancefactor: float = 1.00
    # Geometry modifications (retaining height, surface/phreatic levels).
    geometryincretainingheight: float = 0.00
    geometrymaxincretainingheight: float = 0.00
    geometrydeltapassivesurfacelevel: float = 0.00
    geometrydeltapassivephreaticline: float = 0.00
    geometrydeltaactivephreaticline: float = 0.00
    # Overall (slope) stability factors.
    overallstabilityfactordrivingmoment: float = 1.00
    overallstabilityfactorcohesion: float = 1.30
    overallstabilityfactortgphi: float = 1.30
    overallstabilityfactorunitweight: float = 1.00
    # Representative values and vertical balance.
    factorrepvaluesmdpmax: float = 1.00
    verticalbalancegammamb: float = 1.30
class PartialFactorsEc7SeVk2(DSeriesInlineMappedProperties):
    """Default partial factor values for the 'EC7 Se VK2' verification
    scheme (name suggests Eurocode 7, Swedish annex, safety class 2 --
    confirm against the D-Series/D-Sheet Piling documentation)."""
    # Overall safety factors.
    safetyfactorgammad: float = 0.91
    safetyfactoreta: float = 1.00
    # Load factors (permanent/variable, favourable/unfavourable).
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.00
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.10
    loadfactorvarunfavmultiply: float = 1.40
    loadfactorpermunfavcalc: float = 1.0010
    loadfactorvarunfavcalc: float = 1.2740
    # Load factors for the construction stage.
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 0.00
    constructloadfactorpermunfavmultiply: float = 1.35
    constructloadfactorvarunfavmultiply: float = 1.50
    constructloadfactorpermunfavcalc: float = 1.2285
    constructloadfactorvarunfavcalc: float = 1.3650
    # Effect factors.
    effectfactor: float = 1.00
    effectfactorvarunfav: float = 1.00
    # Material (soil strength/stiffness) factors.
    materialfactorcohesion: float = 1.30
    materialfactorundrainedshearstrength: float = 1.50
    materialfactortgphi: float = 1.300
    materialfactorsubgradereaction: float = 1.00
    # Resistance factor.
    resistancefactor: float = 1.00
    # Geometry modifications (retaining height, surface/phreatic levels).
    geometryincretainingheight: float = 0.00
    geometrymaxincretainingheight: float = 0.00
    geometrydeltapassivesurfacelevel: float = 0.00
    geometrydeltapassivephreaticline: float = 0.00
    geometrydeltaactivephreaticline: float = 0.00
    # Overall (slope) stability factors.
    overallstabilityfactordrivingmoment: float = 1.00
    overallstabilityfactorcohesion: float = 1.30
    overallstabilityfactortgphi: float = 1.30
    overallstabilityfactorunitweight: float = 1.00
    # Representative values and vertical balance.
    factorrepvaluesmdpmax: float = 1.00
    verticalbalancegammamb: float = 1.30
class PartialFactorsEc7SeVk3(DSeriesInlineMappedProperties):
    """Default partial factor values for the 'EC7 Se VK3' verification
    scheme (name suggests Eurocode 7, Swedish annex, safety class 3 --
    confirm against the D-Series/D-Sheet Piling documentation)."""
    # Overall safety factors.
    safetyfactorgammad: float = 1.00
    safetyfactoreta: float = 1.00
    # Load factors (permanent/variable, favourable/unfavourable).
    loadfactorpermunfav: float = 1.00
    loadfactorpermfav: float = 1.00
    loadfactorvarunfav: float = 1.00
    loadfactorvarfav: float = 0.00
    loadfactorpermunfavmultiply: float = 1.10
    loadfactorvarunfavmultiply: float = 1.40
    loadfactorpermunfavcalc: float = 1.1000
    loadfactorvarunfavcalc: float = 1.4000
    # Load factors for the construction stage.
    constructloadfactorpermfav: float = 1.00
    constructloadfactorvarfav: float = 0.00
    constructloadfactorpermunfavmultiply: float = 1.35
    constructloadfactorvarunfavmultiply: float = 1.50
    constructloadfactorpermunfavcalc: float = 1.3500
    constructloadfactorvarunfavcalc: float = 1.5000
    # Effect factors.
    effectfactor: float = 1.00
    effectfactorvarunfav: float = 1.00
    # Material (soil strength/stiffness) factors.
    materialfactorcohesion: float = 1.30
    materialfactorundrainedshearstrength: float = 1.50
    materialfactortgphi: float = 1.300
    materialfactorsubgradereaction: float = 1.00
    # Resistance factor.
    resistancefactor: float = 1.00
    # Geometry modifications (retaining height, surface/phreatic levels).
    geometryincretainingheight: float = 0.00
    geometrymaxincretainingheight: float = 0.00
    geometrydeltapassivesurfacelevel: float = 0.00
    geometrydeltapassivephreaticline: float = 0.00
    geometrydeltaactivephreaticline: float = 0.00
    # Overall (slope) stability factors.
    overallstabilityfactordrivingmoment: float = 1.00
    overallstabilityfactorcohesion: float = 1.30
    overallstabilityfactortgphi: float = 1.30
    overallstabilityfactorunitweight: float = 1.00
    # Representative values and vertical balance.
    factorrepvaluesmdpmax: float = 1.00
    verticalbalancegammamb: float = 1.30
| 1.90625 | 2 |
uts/uts_17_aut_py/1/J.py | viad00/code_olymp | 0 | 12785280 | <filename>uts/uts_17_aut_py/1/J.py<gh_stars>0
# Read a count and a list of integers; print the first half sorted
# ascending followed by the second half sorted descending.
count = int(input())
values = list(map(int, input().split()))
middle = count // 2
ordered = sorted(values[:middle]) + sorted(values[middle:], reverse=True)
for item in ordered:
    print(item, end=' ')
| 2.84375 | 3 |
cogs/db_optimise.py | pratheek78/Ezebot-Open-Source | 2 | 12785281 | <reponame>pratheek78/Ezebot-Open-Source
import discord, pymongo, os
from discord.ext import commands
from dotenv import load_dotenv
from DiscordBot import client
"""Optimise Database usage by removing server data when the bot leaves that server"""
# Mongo variables: the connection string is read from the environment.
load_dotenv()
MONGOCONN = os.getenv('Mongo_conn')
cl = pymongo.MongoClient(MONGOCONN)
mdb = cl['Ezebot']  # main bot database
# These are the collections in use
modcoll = mdb['Modlogging']  # moderation cases collection
prefixcoll = mdb['prefix']  # per-guild command prefix collection
msgcoll = mdb['ui_messages']  # message-log collection
class db_optimise(commands.Cog):
    """Cog that purges all guild-related documents when the bot leaves a guild."""

    def __init__(self, client):
        self.client = client

    @commands.Cog.listener()
    async def on_guild_remove(self, guild):
        # Drop every document tied to the departed guild from each collection.
        for collection in (modcoll, msgcoll, prefixcoll):
            collection.delete_many({'guild_id': guild.id})
        print(f'Bot was removed from guild- {guild.name}. All guild-related data has been removed.')
        await guild.owner.send(f'Ezebot has been removed from {guild.name}. All data of this guild(prefix, cases, message logs) have been removed from our database')
def setup(client):
    """Extension entry point used by discord.py's load_extension."""
    cog = db_optimise(client)
    client.add_cog(cog)
| 2.359375 | 2 |
enthought/chaco/shell/scaly_plot.py | enthought/etsproxy | 3 | 12785282 | <filename>enthought/chaco/shell/scaly_plot.py<gh_stars>1-10
# proxy module
from __future__ import absolute_import
from chaco.shell.scaly_plot import *
| 1.1875 | 1 |
forestsim/ForestSim.py | CharlesGaydon/forestsim | 1 | 12785283 | import numpy as np
import collections
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.manifold import MDS
from time import time
from warnings import warn
class ForestSim():
    """Compute a pairwise sample-similarity matrix from a fitted forest.

    Two samples are considered similar when they frequently fall into the
    same leaf across the trees of the forest (Breiman-style proximity).
    """

    def __init__(self, forest):
        # TODO : adapt if non sklearn forest used
        self.forest = forest

    def fit(self, X, y=2, randomize=False, nb_repet=1, keep_all_mat=False):
        """Fit the forest `nb_repet` times and average leaf co-occurrence.

        Args:
            X (2d array-like): Feature matrix.
            y (int or sequence): If an int, a random target vector drawn
                from `range(y)` is generated (unsupervised proximity);
                if a sequence, it is used directly as the target vector.
            randomize (bool): Shuffle the target vector each repetition.
            nb_repet (int): Number of forest fits to average over.
            keep_all_mat (bool): Also keep each repetition's similarity
                matrix in `self.co_ocs`.

        Returns:
            self
        """
        # BUG FIX: `collections.Sequence` was removed in Python 3.10;
        # the ABC lives in `collections.abc`.
        from collections.abc import Sequence

        self.X = np.float32(X)  # float32: dtype expected by tree.apply
        self.y = y
        self.n = self.X.shape[0]
        self.similarity_matrix = np.zeros((self.n, self.n))

        # True to keep all sim matrices.
        if keep_all_mat:
            self.co_ocs = []

        # Create the target vector if only a class count was given.
        if not isinstance(self.y, Sequence):
            self.y_ = np.random.choice(self.y, size=(self.n,))
        else:
            self.y_ = self.y

        t0 = time()
        for repet_id in range(nb_repet):
            t = time()
            print("Fitting - {}/{} iteration".format(repet_id, nb_repet))
            # Seed so the forest's bootstrapping changes per repetition
            # in a reproducible way.
            np.random.seed(repet_id)
            if randomize:
                np.random.shuffle(self.y_)
            self.forest.fit(self.X, self.y_)
            sim = self.calculate_a_sim_mat()
            self.similarity_matrix += sim
            if keep_all_mat:
                self.co_ocs.append(sim)
            print("Took {} seconds".format(np.round(time() - t, decimals=2)))
        print("Total time : {} seconds".format(np.round(time() - t0, decimals=2)))

        self.similarity_matrix /= nb_repet
        return self

    def calculate_a_sim_mat(self):
        """Return the leaf co-occurrence matrix of the currently fitted forest."""
        co_oc = np.zeros((self.n, self.n))
        for dt in self.forest.estimators_:
            leafs_id = dt.tree_.apply(self.X)
            # One-hot encode leaf membership; the dot product with its own
            # transpose marks sample pairs landing in the same leaf.
            ser = pd.DataFrame(data={"ser": leafs_id, "ones": 1})
            ser = ser.pivot(columns="ser").fillna(0)
            ser = ser.dot(ser.T)
            co_oc += ser.values
        # Average over trees.
        # NOTE(review): the original suggested also weighting by the unique
        # number of leaves ("ponderation par unique n of leaf").
        co_oc = co_oc / len(self.forest.estimators_)
        return co_oc

    # should we return a copy ?
    def get_similarity_matrix(self):
        """Return the accumulated similarity matrix (not a copy)."""
        return self.similarity_matrix

    def get_distance_matrix(self):
        """Return sqrt(1 - similarity), the corresponding distance matrix."""
        return np.sqrt(1 - self.similarity_matrix)

    # use sklearn.manifold.MDS kwargs
    def apply_MDS(self, n_instance=100, dissimilarity="precomputed", **kwargs):
        """Embed (a subsample of) the data in low dimension with MDS.

        Args:
            n_instance (int or float): Number (int in [0, self.n]) or
                fraction (float in ]0, 1]) of instances to embed.
            dissimilarity (str): Passed to sklearn's MDS.  With the default
                'precomputed', the forest-based distance matrix is used;
                any other value runs MDS directly on the raw features.
            **kwargs: Extra sklearn.manifold.MDS keyword arguments.

        Returns:
            The embedded coordinates (also stored in `self.X_mds`).
        """
        np.random.seed(0)
        if isinstance(n_instance, int) and 0 < n_instance <= self.n:
            idx = np.random.choice(self.n, n_instance, replace=False)
        elif isinstance(n_instance, float) and 0 < n_instance <= 1:
            idx = np.random.choice(self.n, int(self.n * n_instance), replace=False)
        else:
            warn("invalid n_instance argument - should be in [0.0;1.0] or [0,self.n]")
            idx = np.arange(self.n)

        if len(idx) == self.n:
            print("Computing MDS on all {} instances.".format(self.n))
        else:
            print("Computing MDS on {} / {} instances.".format(len(idx), self.n))

        # BUG FIX: the original updated kwargs with `dissimilarity` *before*
        # testing for its presence, so the non-precomputed branch was
        # unreachable.  Branch on the requested mode instead.
        kwargs.update({"dissimilarity": dissimilarity})
        if dissimilarity != "precomputed":
            print("Computing non precomputed MDS - set dissimilarity to precomputed to use the distance matrix")
            mds = MDS(**kwargs)
            self.X_mds = mds.fit_transform(self.X[idx, :])
        else:
            print("Computing MDS on precomputed dissimilarities.")
            mds = MDS(**kwargs)
            dist_mat_ = self.get_distance_matrix()[idx][:, idx]
            self.X_mds = mds.fit_transform(dist_mat_)
        return self.X_mds

    def project_MDS_2D(self, **kwargs):
        """Scatter-plot the 2-D MDS embedding computed by `apply_MDS`."""
        # TODO : add saving options
        # TODO : add the necessary sampling, then stratified sampling...
        plt.figure(figsize=(8, 8))
        sns.scatterplot(x=self.X_mds[:, 0],
                        y=self.X_mds[:, 1])
        plt.show()
def main():
    # Placeholder CLI: should eventually take a standard csv file, apply
    # one of the two methods, and output the similarity matrix to csv.
    status = "work in progress"
    print(status)


if __name__ == "__main__":
    main()
tests/test_sections_Fillet.py | iamlikeme/sections | 0 | 12785284 | <reponame>iamlikeme/sections<gh_stars>0
import unittest
import sys
from math import pi
sys.path.insert(0, "..")
from sections.sections import Fillet
import test_sections_generic as generic
class TestPhysicalProperties(generic.TestPhysicalProperties, unittest.TestCase):
    """Physical-property checks for the Fillet section."""

    @classmethod
    def setUpClass(cls):
        # Reference section and the expected analytic values.
        cls.sectclass = Fillet
        cls.dimensions = dict(r=3.0, phi0=pi/3, phi1=pi*2/3)
        cls.angular = ["phi0", "phi1"]
        cls.rp = 5.0, 4.0
        cls.A = 6.16367930735052
        cls._I0 = 4.07970918156066, 5.09977122822122, 0.0
        cls._I = 34.08506759985616, 5.09977122822122, 0.0
        cls._cog = 0.0, 2.20637532613114

    def test_check_dimensions(self):
        # Each kwargs set below describes an invalid fillet geometry.
        invalid_dimensions = [dict(r=-1),
                              dict(r=0),
                              dict(phi0=1, phi1=0),
                              dict(phi0=1, phi1=1),
                              dict(phi0=1, phi1=1 + 2.1*pi)]
        for kwargs in invalid_dimensions:
            self.assertRaises(ValueError, self.section.set_dimensions, **kwargs)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 2.40625 | 2 |
connection.py | pizzapanther/gae_boto | 1 | 12785285 |
import gae_boto.settings as settings
from .apis.sqs import SQS
from .apis.route53 import Route53
class AmazonConnection(object):
    """Holds AWS credentials and lazily constructs one client per API."""

    def __init__(self, aws_id, aws_key, aws_acct=None, region=None):
        self.aws_id = aws_id
        self.aws_key = aws_key
        self.aws_acct = aws_acct
        # Fall back to the configured default region when none is given.
        self.region = settings.DEFAULT_REGION if region is None else region

    @property
    def sqs(self):
        return self.build_api('sqs', SQS)

    @property
    def route53(self):
        return self.build_api('route53', Route53)

    def build_api(self, api, api_class):
        # Cache a single instance per API under a '_<api>' attribute.
        cache_name = '_' + api
        try:
            return getattr(self, cache_name)
        except AttributeError:
            instance = api_class(self)
            setattr(self, cache_name, instance)
            return instance
| 2.28125 | 2 |
pyinfra/facts/yum.py | blarghmatey/pyinfra | 1,532 | 12785286 | from pyinfra.api import FactBase
from .util import make_cat_files_command
from .util.packaging import parse_yum_repositories
class YumRepositories(FactBase):
    '''
    Returns a list of installed yum repositories:

    .. code:: python

        [
            {
                'name': 'CentOS-$releasever - Apps',
                'baseurl': 'http://mirror.centos.org/$contentdir/$releasever/Apps/$basearch/os/',
                'gpgcheck': '1',
                'enabled': '1',
                'gpgkey': 'file:///<KEY>',
            },
        ]
    '''

    # Cat the global config plus every .repo file so that one shell
    # command yields all repository definitions.
    command = make_cat_files_command(
        '/etc/yum.conf',
        '/etc/yum.repos.d/*.repo',
    )
    # The fact is only gathered on hosts that actually have yum.
    requires_command = 'yum'
    default = list

    def process(self, output):
        # Parse the concatenated INI-style repository definitions.
        return parse_yum_repositories(output)
| 2.234375 | 2 |
Others/code_festival/cf17-final-open/a.py | KATO-Hiro/AtCoder | 2 | 12785287 | <filename>Others/code_festival/cf17-final-open/a.py<gh_stars>1-10
# -*- coding: utf-8 -*-
def main():
    """Read a string from stdin and print 'YES' when it can be obtained
    from 'AKIHABARA' by deleting some of the optional 'A's, else 'NO'."""
    from re import search
    s = input()
    # Each 'A' in AKIHABARA may independently be absent.
    matched = search(r'^A?KIHA?BA?RA?$', s)
    print('YES' if matched else 'NO')


if __name__ == '__main__':
    main()
| 2.921875 | 3 |
Scripts/python/scripts mundo 2/teste052.py | BrenoNAlmeida/Scripts-Escola | 0 | 12785288 | x= int(input('bla bla bla = '))
e_primo = True
for n in range (2 , x):
if x % n == 0:
e_primo = False
break
if e_primo:
print('o numero é primo ')
else:
print('nao é primo')
| 3.59375 | 4 |
paca_distance.py | nntrongnghia/chatenoud_delorme | 0 | 12785289 | <filename>paca_distance.py
from bs4 import BeautifulSoup as soup
import requests
import re
import time
import sqlite3 as sql
import pandas as pd
# Scrape the list of communes of Bouches-du-Rhone from French Wikipedia.
wiki = 'https://fr.wikipedia.org/wiki/Liste_des_communes_des_Bouches-du-Rh%C3%B4ne'
page = requests.get(wiki)
html = soup(page.content,'html.parser')
content = html.find(id='mw-content-text')
content_children = list(content.children)
main = content_children[0]
main_tab = main.find_all('table')
# The second table on the page holds the commune rows.
lst = list(main_tab[1].children)
url = []
url_wiki = 'https://fr.wikipedia.org'
# Extract every commune article link from the raw table markup.
r = re.compile(r'<a href="(.*)" title=')
for i in lst:
    text = str(i)
    url.append(r.findall(text))
url2 = []
# Keep only rows where a link was actually found.
for i in url:
    try:
        url2.append(i[0])
    except:
        pass
# Drop the first entry (non-commune header link).
url2 = url2[1:]
ville_coor = []
labels = ['ville','lat','lon']
# Visit each commune page and read its name and map coordinates.
for u in url2:
    ville_page = requests.get(url_wiki+u)
    ville_html = soup(ville_page.content,'html.parser')
    ville = ville_html.find(id='firstHeading').text
    lat = float(ville_html.find('a',class_='mw-kartographer-maplink')['data-lat'])
    lon = float(ville_html.find('a',class_='mw-kartographer-maplink')['data-lon'])
    row = [ville,lat,lon]
    ville_coor.append(row)
    print('{} {} {}'.format(ville,lat,lon))
    time.sleep(0.5)  # be polite to Wikipedia between requests
#====== DO NOT RUN AGAIN - Creates the database; the data has already been created and inserted. DO NOT RUN AGAIN
df = pd.DataFrame(ville_coor,columns=labels)
conn = sql.connect('lbc.db') #make a connection with database
#c = conn.cursor() # an object to operate with database
df.to_sql('paca_distance', conn, if_exists='replace')
conn.commit() # to save the changes we made
conn.close() # to close the connection, if not, the database is blocked
#=====================
data/test/python/3c5cd7a0d3d83c56ea0eb09bf56765f8b2cf2fe5main.py | harshp8l/deep-learning-lang-detection | 84 | 12785290 | <reponame>harshp8l/deep-learning-lang-detection
import sys
from resources.resource_manager import GetImagePath
def GetPygameWindowAndController():
    """ Return the Pygame Window and Controller """
    from Screen.Pygame.Menu.MainMenu.main_menu_controller import MainMenuController
    from kao_gui.pygame.window import BuildWindow
    from InputProcessor import pygame_bindings
    icon_path = GetImagePath('pokeball3.bmp')
    window = BuildWindow(width=640,
                         height=480,
                         caption='Pokemon',
                         iconFilename=icon_path,
                         bindings=pygame_bindings.keyBindings)
    controller = MainMenuController()
    return window, controller
def GetConsoleWindowAndController():
    """ Return the Console Window and Controller """
    from Screen.Console.Menu.MainMenu.main_menu_controller import MainMenuController
    from kao_gui.console.window import BuildWindow
    window = BuildWindow()
    controller = MainMenuController()
    return window, controller
def main(args):
    """ Start the game """
    # '-c' as the first argument selects the console front-end.
    use_console = bool(args) and args[0] == "-c"
    builder = GetConsoleWindowAndController if use_console else GetPygameWindowAndController
    window, start_controller = builder()
    try:
        start_controller.run()
    finally:
        # Always release the window, even when the controller raises.
        window.close()


if __name__ == "__main__":
    main(sys.argv[1:])
mpglue/mpglue/rad_calibration.py | siu-panh/mapeo-uso-del-suelo | 0 | 12785291 | <reponame>siu-panh/mapeo-uso-del-suelo
#!/usr/bin/env python
"""
@author: <NAME>
Date Created: 9/24/2011
"""
from __future__ import division, print_function
from future.utils import viewitems
from builtins import int, map
import math
from copy import copy
import datetime
from collections import OrderedDict
import calendar
# MapPy
from . import raster_tools
# NumPy
try:
import numpy as np
except ImportError:
raise ImportError('NumPy must be installed')
# Pandas
try:
import pandas as pd
except ImportError:
raise ImportError('Pandas must be installed')
# Numexpr
try:
import numexpr as ne
except ImportError:
raise ImportError('Numexpr must be installed')
# Scikit-image
try:
from skimage.exposure import rescale_intensity
except ImportError:
raise ImportError('Scikit-image must be installed')
# Globally silence NumPy floating-point warnings; the previous error
# settings are kept so they could be restored if needed.
old_settings = np.seterr(all='ignore')
def earth_sun_distance(julian_day):
    """
    Return the *squared* Earth-Sun distance for a Julian Day, using the
    standard cosine approximation.

    Args:
        julian_day (int): Day of the year (DOY).

    Returns:
        d**2, the squared Earth-Sun distance in astronomical units
        (callers use it directly in the TOA reflectance formula).
    """
    angle_deg = 0.9856 * (float(julian_day) - 4.0)
    distance_au = 1.0 - 0.01672 * math.cos(math.radians(angle_deg))
    return distance_au ** 2.0
def julian_day_dictionary(start_year=1980, end_year=2050, store='st_jd'):
    """
    Build a lookup of continuously increasing Julian Days.

    Args:
        start_year (Optional[int]): First year (inclusive).
        end_year (Optional[int]): Last year (exclusive).
        store (Optional[str]): 'st_jd' maps 'yyyy-ddd' -> int yyyyddd;
            'date' maps 'yyyy-ddd' -> 'yyyy.mm.ddd' (2-digit month,
            3-digit zero-padded day of month).

    Returns:
        OrderedDict keyed by 'yyyy-ddd'.
    """
    jd_dict = OrderedDict()

    for year in range(start_year, end_year):
        # All calendar days of the current year.
        stamps = pd.date_range('{:d}-01-01'.format(year),
                               '{:d}-12-31'.format(year),
                               name='time',
                               freq='D')
        for day in stamps.to_pydatetime():
            tt = day.timetuple()
            key = '{:d}-{:03d}'.format(year, tt.tm_yday)
            if store == 'st_jd':
                # Equivalent to int('{:d}{:03d}'.format(year, doy)).
                jd_dict[key] = year * 1000 + tt.tm_yday
            elif store == 'date':
                jd_dict[key] = '{:d}.{:02d}.{:03d}'.format(year, tt.tm_mon, tt.tm_mday)

    return jd_dict
def julian_day_dictionary_r(start_year=1980, end_year=2050, jd_dict=None):
    """
    Build the reverse Julian Day lookup.

    Args:
        start_year (Optional[int])
        end_year (Optional[int])
        jd_dict (Optional[dict]): Pre-computed forward lookup (from
            `julian_day_dictionary`); built on the fly when not supplied.

    Returns:
        OrderedDict of {yyyyddd: 'yyyy-ddd'}.
    """
    if not isinstance(jd_dict, dict):
        jd_dict = julian_day_dictionary(start_year=start_year,
                                        end_year=end_year)

    # Invert key/value pairs, preserving insertion order.
    return OrderedDict((value, key) for key, value in jd_dict.items())
def get_leap_years(start_year=1980, end_year=2050):
    """
    Map each year in [start_year, end_year) to its number of calendar
    days: 366 for leap years, 365 otherwise.

    Args:
        start_year (Optional[int])
        end_year (Optional[int])

    Returns:
        dict of {yyyy: number of days}.
    """
    return {year: 366 if calendar.isleap(year) else 365
            for year in range(start_year, end_year)}
def jd_interp(the_array, length, skip_factor):
    """
    Generate `length` `yyyyddd` codes starting at the_array[0], stepping
    by `skip_factor` days and rolling over year boundaries.

    Args:
        the_array (1d array-like): `yyyyddd` codes; only the first is used.
        length (int): Number of codes to generate.
        skip_factor (int): Day step between consecutive codes.
    """
    # Days per year (365/366), used to detect year overflow.
    days_in_year = get_leap_years()

    current_year = int(str(the_array[0])[:4])
    current_code = the_array[0]

    generated = []
    for _ in range(length):
        generated.append(current_code)
        current_code += skip_factor
        current_doy = int(str(current_code)[4:])
        # Roll surplus days over into the next calendar year.
        if current_doy > days_in_year[current_year]:
            surplus = current_doy - days_in_year[current_year]
            current_year += 1
            current_code = int('{:d}{:03d}'.format(current_year, surplus))

    return generated
def rescale_scaled_jds(the_array, counter=1000):
    """
    Rescale `yyyyddd` Julian Day codes to a monotonically increasing
    integer sequence; year boundaries contribute the true day gap.

    Args:
        the_array (1d array-like, str or int): The `yyyyddd` codes.
        counter (Optional[int]): Starting value of the rescaled sequence.
    """
    days_in_year = get_leap_years()

    rescaled = []
    current_year = int(str(the_array[0])[:4])

    for previous, following in zip(the_array[:-1], the_array[1:]):
        rescaled.append(counter)
        next_year = int(str(following)[:4])
        if next_year != current_year:
            # Gap across the year boundary:
            # (days left in the old year) + (day of year in the new year).
            counter += int(str(following)[4:]) + (days_in_year[current_year] - int(str(previous)[4:]))
        else:
            counter += following - previous
        current_year = next_year

    rescaled.append(counter)
    return rescaled
def date2julian(month, day, year):
    """
    Convert a calendar date to its Julian Day (day of year).

    Args:
        month (int or str): The month.
        day (int or str): The day.
        year (int or str): The year.

    Returns:
        int: Day of year (1-366).
    """
    the_date = datetime.datetime(int(year), int(month), int(day))
    return int(the_date.timetuple().tm_yday)
def julian2date(julian_day, year, jd_dict_date=None):
    """
    Convert a Julian Day (day of year) to (month, day).

    Args:
        julian_day (int or str): The Julian Day.
        year (int or str): The year.
        jd_dict_date (Optional[dict]): Pre-computed 'date' lookup from
            `julian_day_dictionary(store='date')`; built when not given.

    Returns:
        (month, day) tuple of ints.
    """
    if not isinstance(jd_dict_date, dict):
        jd_dict_date = julian_day_dictionary(store='date')

    key = '{:d}-{:03d}'.format(int(year), int(julian_day))
    _, month, day = jd_dict_date[key].split('.')
    return int(month), int(day)
def yyyyddd2months(yyyyddd_list):
    """
    Convert `yyyyddd` codes to `yyyymmm` codes (month zero-padded to 3).
    """
    date_lookup = julian_day_dictionary(store='date')

    converted = []
    for code in yyyyddd_list:
        year_part = str(code)[:4]
        doy_part = str(code)[4:]
        month = julian2date(doy_part, year_part, jd_dict_date=date_lookup)[0]
        converted.append('{}{:03d}'.format(year_part, month))
    return converted
def scaled_jd2jd(scaled_jds, return_jd=True):
    """
    Convert scaled Julian Day integers (`yyyyddd`) back to labels.

    Args:
        scaled_jds (int list): The Julian Day codes to convert.
        return_jd (Optional[bool]): When True (default), return
            'yyyy-ddd' labels; otherwise 'month-day-year' labels.
    """
    jd_dict_r = julian_day_dictionary_r()

    labels = []
    for code in scaled_jds:
        code_int = int(code)
        if code_int in jd_dict_r:
            labels.append(jd_dict_r[code_int])
        else:
            # Day 366 of a non-leap year rolls over to day 1 of the
            # following year.
            year = int(str(code)[:4])
            doy = int(str(code)[4:])
            if doy == 366:
                labels.append(jd_dict_r[int('{:d}001'.format(year + 1))])

    if return_jd:
        return labels

    jd_dict_date = julian_day_dictionary(store='date')
    month_day_year = []
    for label in labels:
        year_str, doy_str = label.split('-')
        month, day = julian2date(doy_str, year_str, jd_dict_date=jd_dict_date)
        month_day_year.append('{}-{}-{}'.format(month, day, year_str))
    return month_day_year
class Conversions(object):
"""
A class for sensor-specific radiometric calibration
"""
    def dn2radiance(self, dn_array, band, aster_gain_setting='high',
                    cbers_series='CBERS2B', cbers_sensor='HRCCD', landsat_gain=None, landsat_bias=None,
                    wv2_abs_calibration_factor=None, wv2_effective_bandwidth=None):
        """
        Converts digital numbers (DN) to radiance

        Args:
            dn_array (ndarray): The array to calibrate.
            band (int): The band to calibrate.
            aster_gain_setting (Optional[str]): The gain setting for ASTER. Default is 'high'.
            cbers_series (Optional[str]): The CBERS series. Default is 'CBERS2B'.
            cbers_sensor (Optional[str]): The CBERS sensor. Default is 'HRCCD'.
            landsat_gain (Optional[float]): The gain setting for Landsat. Default is None.
            landsat_bias (Optional[float]): The bias setting for Landsat. Default is None.
            wv2_abs_calibration_factor (Optional[float]): The absolute calibration factor for WorldView2. Default is None.
            wv2_effective_bandwidth (Optional[float]): The effective bandwidth for WorldView2. Default is None.

        Formulas:
            ASTER:
                L = (DN-1) * UCC

                Where,
                    L = Spectral radiance at the sensor's aperture
                    UCC = Unit Conversion Coefficient

            CBERS:
                L = DN / CC

                Where,
                    L = Spectral radiance at the sensor's aperture
                    DN = Quantized calibrated pixel value
                    CC = Absolute calibration coefficient

            WorldView2:
                L = (absCalFactor * DN) / eB

                Where,
                    L = TOA spectral radiance
                    absCalFactor = Absolute radiometric calibration factor
                    DN = digital counts
                    eB = Effective bandwidth

        Returns:
            Radiance as ndarray.
        """
        if self.sensor == 'ASTER':
            # Column index into the UCC table for the requested gain setting.
            gain_setting_dict = {'high': 0, 'normal': 1, 'low1': 2, 'low2': 3}
            # Unit Conversion Coefficients: one row per ASTER band,
            # columns = high / normal / low1 / low2 gain.
            ucc = np.array([[.676, 1.688, 2.25, .0],
                            [.708, 1.415, 1.89, .0],
                            [.423, .862, 1.15, .0],
                            [.1087, .2174, .2900, .2900],
                            [.0348, .0696, .0925, .4090],
                            [.0313, .0625, .0830, .3900],
                            [.0299, .0597, .0795, .3320],
                            [.0209, .0417, .0556, .2450],
                            [.0159, .0318, .0424, .2650]], dtype='float32')
            # L = (DN - 1) * UCC
            radiance = np.float32(np.subtract(dn_array, 1.))
            radiance = np.float32(np.multiply(radiance, ucc[int(band)-1][gain_setting_dict[aster_gain_setting]]))
        elif self.sensor == 'CBERS':
            # NOTE(review): here the check is `== 'CBERS'`, while
            # `radiance2reflectance` checks `== 'CBERS2'` -- confirm which
            # spelling `self.sensor` actually carries.
            if '2B' in cbers_series and 'HRCCD' in cbers_sensor:
                # Absolute calibration coefficients per band.
                ucc = {'1': .97, '2': 1.74, '3': 1.083, '4': 2.105}
            else:
                raise NameError('\nSeries not recoginized.\n')
            # L = DN / CC
            radiance = np.float32(np.divide(dn_array, ucc[str(band)]))
        elif self.sensor.lower() in ['tm', 'etm', 'oli_tirs']:
            if not isinstance(landsat_gain, float) or not isinstance(landsat_bias, float):
                raise ValueError('\nCalibration coefficients not set.\n')
            # L = gain * DN + bias (numexpr resolves the local variable
            # names from this frame).
            radiance = ne.evaluate('(landsat_gain * dn_array) + landsat_bias')
        elif self.sensor == 'WorldView2':
            if not wv2_abs_calibration_factor or not wv2_effective_bandwidth:
                raise ValueError('\nCalibration coefficients not set.\n')
            # L = (absCalFactor * DN) / effective bandwidth
            radiance = np.float32(np.divide(np.multiply(wv2_abs_calibration_factor[band-1], dn_array),
                                            wv2_effective_bandwidth[band-1]))
        # Mask non-positive digital numbers.
        radiance[dn_array <= 0] = 0
        return radiance
    def radiance2reflectance(self, radiance_array, band, solar_angle=None, julian_day=None, bd_esun=None,
                             landsat_gain=None, landsat_bias=None, aster_solar_scheme='Smith',
                             cbers_series='CBERS2', cbers_sensor='HRCCD'):
        """
        Converts radiance to top of atmosphere reflectance

        Args:
            radiance_array (ndarray): The ndarray to calibrate.
            band (int): The band to calibrate.
            solar_angle (float)
            julian_day (int)
            bd_esun (float): Band solar irradiance; only used when the
                sensor branch below does not set it from a lookup table.
            landsat_gain (float)
            landsat_bias (float)
            aster_solar_scheme (Optional[str]): The solar scheme for ASTER. Default is 'Smith'.
                Choices are ['Smith', 'ThomeEtAlA', 'ThomeEtAlB'].
            cbers_series (Optional[str]): The CBERS series. Default is 'CBERS2'.
            cbers_sensor (Optional[str]): The CBERS sensor. Default is 'HRCCD'.

        Returns:
            Reflectance as ndarray.
        """
        # earth_sun_distance returns the *squared* distance d**2, which is
        # exactly what the TOA reflectance formula needs.
        d_sq = earth_sun_distance(julian_day)
        pi = math.pi
        # NOTE(review): (90 - angle) implies `solar_angle` is the solar
        # elevation; confirm callers do not pass the zenith angle.
        cos0 = np.cos(np.radians(90. - solar_angle))
        if self.sensor == 'ASTER':
            # Column index for the chosen published ESUN scheme.
            scheme = {'Smith': 0,
                      'ThomeEtAlA': 1,
                      'ThomeEtAlB': 2}
            # Solar spectral irradiance values
            # for each ASTER band.
            esun = np.array([[1845.99, 1847., 1848.],
                             [1555.74, 1553., 1549.],
                             [1119.47, 1118., 1114.],
                             [231.25, 232.5, 225.4 ],
                             [79.81, 80.32, 86.63 ],
                             [74.99, 74.92, 81.85 ],
                             [68.66, 69.20, 74.85 ],
                             [59.74, 59.82, 66.49 ],
                             [56.92, 57.32, 59.85]], dtype='float32')
            bd_esun = esun[band-1][scheme[aster_solar_scheme]]
        elif self.sensor == 'CBERS2':
            # NOTE(review): `dn2radiance` tests `self.sensor == 'CBERS'`;
            # confirm which spelling is actually stored on the instance.
            # Solar spectral irradiance values for each sensor
            if '2B' in cbers_series and 'HRCCD' in cbers_sensor:
                esun = {1: 1934.03, 2: 1787.1, 3: 1548.97, 4: 1069.21}
                bd_esun = esun[band]
            # NOTE(review): when the series/sensor pair does not match,
            # `bd_esun` silently keeps its argument value (possibly None).
        elif self.sensor == 'WorldView2':
            # band names
            # coastal, blue, green, yellow, red, red edge, NIR1, NIR2
            esun = {'BAND_P': 1580.814, 'BAND_C': 1758.2229, 'BAND_B': 1974.2416, 'BAND_G': 1856.4104,
                    'BAND_Y': 1738.4791, 'BAND_R': 1559.4555, 'BAND_RE': 1342.0695, 'BAND_N': 1069.7302,
                    'BAND_N2': 861.2866}
            band_positions = {1: 'BAND_P', 2: 'BAND_C', 3: 'BAND_B', 4: 'BAND_G', 5: 'BAND_Y', 6: 'BAND_R',
                              7: 'BAND_RE', 8: 'BAND_N', 9: 'BAND_N2'}
            bd_esun = esun[band_positions[band]]
        else:
            raise NameError('\n{} is not a supported sensor.'.format(self.sensor))
        if self.sensor.lower() == 'oli_tirs':
            # NOTE(review): this branch looks unreachable -- an 'OLI_TIRS'
            # sensor raises NameError in the chain above; confirm intent.
            reflectance_equation = '((radiance_array * landsat_gain) + landsat_bias) / cos0'
        else:
            # Standard TOA reflectance: (pi * L * d^2) / (ESUN * cos(theta)).
            reflectance_equation = '(radiance_array * pi * d_sq) / (bd_esun * cos0)'
        # numexpr resolves the variable names in the expression string from
        # this method's local scope.
        reflectance = ne.evaluate(reflectance_equation)
        # Mask pixels with non-positive radiance.
        reflectance[radiance_array <= 0] = 0.
        return reflectance
def get_gain_bias(self, series, sensor, band_position, l_max, l_min, coeff_check):
    """Compute the radiometric gain and bias for a Landsat band.

    Args:
        series (str): The Landsat series identifier (e.g. 'LANDSAT_5' or 'sat5').
        sensor (str): The sensor name (e.g. 'TM').
        band_position (int): The band number.
        l_max (float): The maximum spectral radiance (LMAX) from the metadata.
        l_min (float): The minimum spectral radiance (LMIN) from the metadata.
        coeff_check (int): 999 signals that no coefficients were found in the
            metadata, in which case the published defaults below are used.

    Returns:
        tuple: ``(gain, bias)`` as floats.

    Raises:
        NameError: If the series/sensor pair is not recognized.
    """
    # Published LMAX/LMIN defaults, used only when the metadata did not
    # provide calibration coefficients (coeff_check == 999).
    max_min_dict = {'TM4': {'lmax': {'1': 163., '2': 336., '3': 254., '4': 221.,
                                     '5': 31.4, '6': 15.3032, '7': 16.6},
                            'lmin': {'1': -1.52, '2': -2.84, '3': -1.17, '4': -1.51,
                                     '5': -.37, '6': 1.2378, '7': -.15}},
                    'TM5': {'lmax': {'1': 169., '2': 333., '3': 264., '4': 221.,
                                     '5': 30.2, '6': 15.3032, '7': 16.5},
                            'lmin': {'1': -1.52, '2': -2.84, '3': -1.17, '4': -1.51,
                                     '5': -.37, '6': 1.2378, '7': -.15}},
                    'ETM': {'lmax': {'1': 191.6, '2': 196.6, '3': 152.9, '4': 157.4,
                                     '5': 31.06, '6': 12.65, '7': 10.8},
                            'lmin': {'1': -6.2, '2': -6.4, '3': -5., '4': -5.1,
                                     '5': -1., '6': 3.2, '7': -.35}}}
    # Get standard coefficients if none were obtained from the metadata.
    if coeff_check == 999:
        # BUG FIX: parenthesized the series checks -- `and` binds tighter
        # than `or`, so the original tested 'sat4'/'sat5' without the TM
        # sensor requirement.
        if ('sat4' in series or 'LANDSAT_4' in series) and 'TM' in sensor:
            sensor_series = 'TM4'
        elif ('sat5' in series or 'LANDSAT_5' in series) and 'TM' in sensor:
            sensor_series = 'TM5'
        elif 'sat7' in series or 'LANDSAT_7' in series:
            sensor_series = 'ETM'
        else:
            raise NameError('The Landsat sensor could not be found.')
        l_max = max_min_dict[sensor_series]['lmax'][str(band_position)]
        l_min = max_min_dict[sensor_series]['lmin'][str(band_position)]
    gain = (l_max - l_min) / 254.
    # Equivalent to l_min - ((l_max - l_min) / 254.) in the original.
    bias = l_min - gain
    return gain, bias
def get_kelvin_coefficients(self, series, sensor, band_position):
    """Return the thermal calibration constants (K1, K2) for a Landsat sensor.

    Args:
        series (str): The Landsat series identifier (e.g. 'LANDSAT_5' or 'sat5').
        sensor (str): The sensor name (e.g. 'TM').
        band_position (int): The band number (unused; kept for a signature
            uniform with the other coefficient helpers).

    Returns:
        tuple: ``(k1, k2)`` thermal calibration constants.

    Raises:
        NameError: If the series/sensor pair is not recognized.
    """
    k_dict = {'TM4': {'k1': 671.62, 'k2': 1284.3},
              'TM5': {'k1': 607.76, 'k2': 1260.56},
              'ETM': {'k1': 666.09, 'k2': 1282.71}}
    # BUG FIX: parenthesized the series checks -- `and` binds tighter than
    # `or`, so the original applied the TM requirement to only one spelling.
    if ('sat4' in series or 'LANDSAT_4' in series) and 'TM' in sensor:
        sensor_series = 'TM4'
    elif ('sat5' in series or 'LANDSAT_5' in series) and 'TM' in sensor:
        sensor_series = 'TM5'
    elif 'sat7' in series or 'LANDSAT_7' in series:
        sensor_series = 'ETM'
    else:
        raise NameError('The Landsat sensor could not be found.')
    k1 = k_dict[sensor_series]['k1']
    k2 = k_dict[sensor_series]['k2']
    return k1, k2
def get_esun(self, series, sensor, band_position):
    """Return the exoatmospheric solar irradiance (ESUN) for a Landsat band.

    Args:
        series (str): The Landsat series identifier (e.g. 'LANDSAT_5' or 'sat5').
        sensor (str): The sensor name (e.g. 'TM').
        band_position (int or str): The band number (the thermal band 6 has
            no solar irradiance entry).

    Returns:
        float: The ESUN value for the band.

    Raises:
        NameError: If the series/sensor pair is not recognized.
    """
    # Solar spectral irradiance values for Landsat sensors.
    esun_dict = {'TM4': {'1': 1983., '2': 1795., '3': 1539., '4': 1028., '5': 219.8, '7': 83.49},
                 'TM5': {'1': 1983., '2': 1796., '3': 1536., '4': 1031., '5': 220., '7': 83.44},
                 'ETM': {'1': 1997., '2': 1812., '3': 1533., '4': 1039., '5': 230.8, '7': 84.9}}
    # BUG FIX: parenthesized the series checks -- `and` binds tighter than
    # `or`, so the original applied the TM requirement to only one spelling.
    if ('sat4' in series or 'LANDSAT_4' in series) and 'TM' in sensor:
        sensor_series = 'TM4'
    elif ('sat5' in series or 'LANDSAT_5' in series) and 'TM' in sensor:
        sensor_series = 'TM5'
    elif 'sat7' in series or 'LANDSAT_7' in series:
        sensor_series = 'ETM'
    else:
        raise NameError('The Landsat sensor could not be found.')
    return esun_dict[sensor_series][str(band_position)]
def get_dn_dark(self, dn_array, min_dark):
    """Find the dark-object digital number for a band.

    Scans DN values upward from 1 and returns the first value that
    occurs at least ``min_dark`` times in ``dn_array``.

    Args:
        dn_array (ndarray): The band array to inspect.
        min_dark (int): Minimum pixel count that qualifies as the dark object.

    Returns:
        int: The dark-object DN value.
    """
    candidate = 1
    # Same test as len(dn_array[np.where(dn_array == candidate)]).
    while np.count_nonzero(dn_array == candidate) < min_dark:
        candidate += 1
    return candidate
def get_tri(self, series, sensor, band_position):
    """Return the per-band TRI constant used by :meth:`get_tr`.

    The values appear to be band-center wavelengths in micrometers,
    consumed by the Rayleigh-thickness formula in ``get_tr`` -- TODO
    confirm against the original reference.

    Args:
        series (str): The Landsat series identifier (e.g. 'LANDSAT_5' or 'sat5').
        sensor (str): The sensor name (e.g. 'TM').
        band_position (int or str): The band number (no entry for band 6).

    Returns:
        float: The TRI value for the band.

    Raises:
        NameError: If the series/sensor pair is not recognized.
    """
    tri_dict = {'TM4': {'1': .485, '2': .569, '3': .659, '4': .841, '5': 1.676, '7': 2.222},
                'TM5': {'1': .485, '2': .569, '3': .666, '4': .84, '5': 1.676, '7': 2.223},
                'ETM': {'1': .483, '2': .56, '3': .662, '4': .835, '5': 1.648, '7': 2.206}}
    # BUG FIX: parenthesized the series checks -- `and` binds tighter than
    # `or`, so the original applied the TM requirement to only one spelling.
    if ('sat4' in series or 'LANDSAT_4' in series) and 'TM' in sensor:
        sensor_series = 'TM4'
    elif ('sat5' in series or 'LANDSAT_5' in series) and 'TM' in sensor:
        sensor_series = 'TM5'
    elif 'sat7' in series or 'LANDSAT_7' in series:
        sensor_series = 'ETM'
    else:
        raise NameError('The Landsat sensor could not be found.')
    return tri_dict[sensor_series][str(band_position)]
def get_tr(self, tri):
    """Return the Rayleigh optical thickness for the value from get_tri.

    Evaluates .008569 * tri^-4 * (1 + .0113 * tri^-2 + .00013 * tri^-4).
    """
    inv_sq = tri ** -2
    inv_quart = tri ** -4
    return .008569 * inv_quart * (1. + .0113 * inv_sq + .00013 * inv_quart)
def get_path_rad(self, gain, bias, dn_dark, bd_esun, cos0, tz, tv, edown, d_sq):
    """Estimate the path radiance from the dark-object DN.

    Lp = G * DNdark + B - .01 * (Eo * cos(0) * Tz + Edown) * Tv / pi

    ``d_sq`` is accepted for signature compatibility but is not used
    in this formula.
    """
    dark_object_radiance = gain * dn_dark + bias
    one_percent_signal = .01 * (bd_esun * cos0 * tz + edown) * tv / math.pi
    return dark_object_radiance - one_percent_signal
def prepare_dark(self, dn_array, band_position, bd_esun, gain, bias, sensor_angle, dn_dark, min_dark):
    """Derive the dark-object value and atmospheric terms used by the DOS
    correction, storing them on ``self``.

    Sets ``self.d_sq``, ``self.cos0``, ``self.cosS``, ``self.tv``,
    ``self.tz``, ``self.edown`` and ``self.path_radiance`` for use by
    :meth:`radiance2reflectance_dos`.

    Args:
        dn_array (ndarray): The band array the dark object is derived from.
        band_position (int): The band number.
        bd_esun (float): Exoatmospheric solar irradiance for the band.
        gain (float): Band gain coefficient.
        bias (float): Band bias coefficient.
        sensor_angle (float): Sensor viewing angle (90 = nadir).
        dn_dark (int): Dark-object value; -999 derives it from the data.
        min_dark (int): Minimum pixel count defining the dark object.
    """
    print('\nGetting dark haze value for {} ...\n'.format(self.calibration))
    if dn_dark == -999:
        # Derive the dark-object value from the data itself.
        dn_dark = self.get_dn_dark(dn_array, min_dark)
    print('  Band {:d} haze value: {:d}\n'.format(band_position, dn_dark))
    # Squared earth-sun distance for the acquisition day.
    self.d_sq = earth_sun_distance(self.julian_day)
    # Cosine of solar zenith angle.
    self.cos0 = np.cos(np.radians(90. - self.solar_angle))
    # Cosine of sensor viewing angle (90 degrees
    # for nadir viewing sensor).
    self.cosS = np.cos(np.radians(90. - sensor_angle))
    # Rayleigh optical thickness for this band (get_tri -> get_tr).
    tr = self.get_tr(self.get_tri(self.pr.series, raster_tools.SENSOR_DICT[self.pr.sensor.lower()], band_position))
    # Atmospheric transmittance toward the sensor and along the
    # illumination direction, respectively.
    self.tv = math.exp(-tr / self.cosS)
    self.tz = math.exp(-tr / self.cos0)
    # Assumed downwelling diffuse irradiance (DOS simplification).
    self.edown = .01
    self.path_radiance = self.get_path_rad(gain, bias, dn_dark, bd_esun, self.cos0,
                                           self.tz, self.tv, self.edown, self.d_sq)
def radiance2reflectance_dos(self, radiance_array, band_position, bd_esun, gain, bias,
                             sensor_angle=90., dn_dark=-999, min_dark=1000):
    """Convert at-sensor radiance to surface reflectance using dark
    object subtraction (DOS).

    Implements the documented reference formula (Chavez 1988; Song et
    al. 2001; GRASS ``i.landsat.toar``):

        p = pi * (Lsat - Lp) / (Tv * (Eo * cos(0) * Tz + Edown))

    Args:
        radiance_array (ndarray): At-sensor radiance.
        band_position (int): The band number.
        bd_esun (float): Exoatmospheric solar irradiance for the band.
        gain (float): Band gain coefficient.
        bias (float): Band bias coefficient.
        sensor_angle (Optional[float]): Sensor viewing angle. Default is 90 (nadir).
        dn_dark (Optional[int]): Dark-object value; -999 derives it from the data.
        min_dark (Optional[int]): Minimum pixel count for the dark object.

    Returns:
        Surface reflectance as ndarray, with non-positive radiance set to 0.
    """
    # Computes self.path_radiance, self.tv, self.tz, self.cos0, self.edown.
    self.prepare_dark(radiance_array, band_position, bd_esun, gain, bias, sensor_angle, dn_dark, min_dark)
    # Local names referenced by the numexpr expression below.
    pi = math.pi
    path_radiance = self.path_radiance
    tv = self.tv
    tz = self.tz
    cos0 = self.cos0
    edown = self.edown
    # BUG FIX: the previous implementation first converted to TOA
    # reflectance and then fed that *reflectance* into the DOS equation
    # as if it were radiance (a double correction), and multiplied by
    # Edown where the reference formula adds it.
    # NOTE(review): the reference formula used here omits the
    # earth-sun-distance (d^2) scaling -- confirm whether it is wanted.
    dos_equation = '(pi * (radiance_array - path_radiance)) / (tv * (bd_esun * cos0 * tz + edown))'
    reflectance = ne.evaluate(dos_equation)
    reflectance[radiance_array <= 0] = 0.
    return reflectance
def radiance2kelvin(self, dn_array, k1=None, k2=None):
    """Convert a thermal band to at-sensor brightness temperature (Kelvin).

    Inverts the Planck relation: T = K2 / ln(K1 / L + 1).

    NOTE(review): the caller passes the raw ``dn_array`` here while the
    Planck inversion expects radiance -- confirm the intended input.

    Args:
        dn_array (ndarray): The thermal band array. numexpr resolves the
            names ``k1``, ``k2`` and ``dn_array`` from this frame's locals.
        k1 (Optional[float]): Thermal calibration constant K1.
        k2 (Optional[float]): Thermal calibration constant K2.

    Returns:
        Brightness temperature as ndarray, with non-positive input set to 0.
    """
    temperature = ne.evaluate('k2 / log((k1 / dn_array) + 1)')
    # Mask invalid (non-positive) input pixels.
    temperature[dn_array <= 0] = 0.
    return temperature
class CalibrateSensor(Conversions):
    """
    A class for radiometric calibration

    Args:
        input_image (str): An image (single or multi-band) to process.
        sensor (Optional[str]): The sensor to calibrate.
            Choices are ['TM', 'ETM', 'OLI_TIRS', 'ASTER', 'CBERS', 'WorldView2'].
        image_date (str): yyyy/mm/dd
        solar_angle (float)
        bands2process (Optional[int or int list]): Default is -1, or all bands.

    Examples:
        >>> from mappy import rad_calibration
        >>>
        >>> # Convert ASTER to radiance.
        >>> cal = rad_calibration.CalibrateSensor('/in_image.tif', 'ASTER')
        >>> cal.process('/out_image.tif', calibration='radiance')
        >>>
        >>> # Convert Landsat to top of atmosphere reflectance.
        >>> cal = rad_calibration.CalibrateSensor('/in_image.tif', 'TM')
        >>> cal.process('/out_image.tif', calibration='toar', metadata='/metadata.MTL')
    """

    def __init__(self, input_image, sensor, bands2process=-1):
        self.input_image = input_image
        self.sensor = sensor
        self.bands2process = bands2process
        # Open the input raster for block-wise reading.
        self.i_info = raster_tools.ropen(self.input_image)
        Conversions.__init__(self)

    def process(self, output_image, image_date=None, solar_angle=None, calibration='radiance', d_type='float32',
                bd_esun_list=[], aster_gain_setting='high', aster_solar_scheme='Smith', cbers_series='CBERS2B',
                cbers_sensor='HRCCD', landsat_gain_list=[], landsat_bias_list=[], k1=None, k2=None,
                wv2_abs_calibration_factor=[], wv2_effective_bandwidth=[], metadata=None):
        """
        Run the calibration block-by-block and write the output raster.

        NOTE(review): mutable default arguments ([]) are shared across
        calls -- confirm callers never mutate them.

        Args:
            output_image (str): The output image.
            calibration (Optional[str]): Choices are ['radiance', 'toar', 'cost', 'dos2', 'dos3', 'dos4', 'temp'].
                NOTE(review): the dispatch below actually checks 'dos' and
                'temperature' -- confirm which keywords are intended.
            d_type (Optional[str]): The output storage type. Default is 'float32'.
                Choices are ['float32', 'byte', 'uint16'].
            bd_esun_list (Optional[float list]): A list of ESUN coefficients (for each band to process)
                if ``metadata`` is not given.
            aster_gain_setting (Optional[str]):
            aster_solar_scheme (Optional[str]):
            cbers_series (Optional[str]):
            cbers_sensor (Optional[str]):
            landsat_gain_list (Optional[float list]): A list of gain coefficients (for each band to process)
                if ``metadata`` is not given.
            landsat_bias_list (Optional[float list]): Same as above, with bias.
            wv2_abs_calibration_factor (Optional[str]):
            wv2_effective_bandwidth (Optional[str]):
            metadata (Optional[object or str): A metadata file or object instance. Default is None.

        References:
            Chavez (1988)
            Schroeder et al. (2006), RSE
            Song et al. (2001)
            GRASS manual (http://grass.osgeo.org/grass65/manuals/html65_user/i.landsat.toar.html)
                p = pi(Lsat - Lp) / Tv(Eo * cos(0) * Tz + Edown)
                where,
                    p = At-sensor reflectance
                    Lsat = At-sensor Radiance
                    Lp = Path Radiance
                        = G * DNdark + B-.01(Eo * cos(0) * Tz + Edown)Tv/pi
                    Tv = Atmospheric transmittance from the target toward the sensor
                        = exp(-pi / cos(satellite zenith angle))
                    Tz = Atmospheric transmittance in the illumination direction
                        = exp(-pi / cos(solar zenith angle))
                    Eo = Exoatmospheric solar constant
                    0 = Zolar zenith angle
                    Edown = Downwelling diffuse irradiance
        """
        self.output_image = output_image
        self.calibration = calibration
        self.image_date = image_date
        self.solar_angle = solar_angle
        self.d_type = d_type
        self.metadata = metadata
        self.bd_esun_list = bd_esun_list
        self.landsat_gain_list = landsat_gain_list
        self.landsat_bias_list = landsat_bias_list
        self.k1 = k1
        self.k2 = k2
        self.landsat_sensors = ['tm', 'etm', 'oli_tirs']
        # Search for metadata.
        self.get_metadata()
        # Keyword bundles handed to the Conversions helpers below.
        self.rad_settings = dict(aster_gain_setting=aster_gain_setting,
                                 cbers_series=cbers_series, cbers_sensor=cbers_sensor,
                                 landsat_gain=None, landsat_bias=None,
                                 wv2_abs_calibration_factor=wv2_abs_calibration_factor,
                                 wv2_effective_bandwidth=wv2_effective_bandwidth)
        self.refl_settings = dict(solar_angle=self.solar_angle, julian_day=self.julian_day,
                                  bd_esun=None, landsat_gain=None, landsat_bias=None,
                                  aster_solar_scheme=aster_solar_scheme,
                                  cbers_series=cbers_series, cbers_sensor=cbers_sensor)
        self.temp_settings = dict(k1=self.k1, k2=self.k2)
        self.create_output()
        # Process the raster in tiles to bound memory use.
        row_block_size, col_block_size = raster_tools.block_dimensions(self.i_info.rows, self.i_info.cols)
        for i in range(0, self.i_info.rows, row_block_size):
            n_rows = raster_tools.n_rows_cols(i, row_block_size, self.i_info.rows)
            for j in range(0, self.i_info.cols, col_block_size):
                n_cols = raster_tools.n_rows_cols(j, col_block_size, self.i_info.cols)
                # Read the array block.
                dn_array = self.i_info.read(bands2open=self.bands2process,
                                            i=i, j=j,
                                            rows=n_rows, cols=n_cols,
                                            d_type='float32')
                # NOTE(review): the loop variable shadows the block array;
                # for a single requested band confirm that iterating
                # ``dn_array`` yields per-band 2d arrays as intended.
                for out_band, band_position, dn_array in zip(self.band_range, self.bands2process, dn_array):
                    # Update radiance settings.
                    self.update_rad_settings(band_position)
                    # Convert DN to radiance.
                    if self.sensor.lower() != 'oli_tirs':
                        cal_array = self.dn2radiance(dn_array, band_position, **self.rad_settings)
                    elif (self.sensor.lower() == 'oli_tirs') and (self.calibration.lower() == 'radiance'):
                        cal_array = self.dn2radiance(dn_array, band_position, **self.rad_settings)
                    if self.calibration.lower() != 'radiance':
                        self.update_toar_settings(band_position)
                        if self.calibration.lower() == 'toar':
                            # Convert radiance to top of atmosphere reflectance.
                            if self.sensor.lower() == 'oli_tirs':
                                cal_array = self.radiance2reflectance(dn_array, band_position, **self.refl_settings)
                            else:
                                cal_array = self.radiance2reflectance(cal_array, band_position, **self.refl_settings)
                        elif self.calibration.lower() == 'dos':
                            cal_array = self.radiance2reflectance_dos(cal_array, band_position,
                                                                      self.refl_settings['bd_esun'],
                                                                      self.refl_settings['landsat_gain'],
                                                                      self.refl_settings['landsat_bias'],
                                                                      sensor_angle=90.,
                                                                      dn_dark=-999, min_dark=1000)
                        elif self.calibration.lower() == 'temperature':
                            # NOTE(review): docstring advertises 'temp' but
                            # this branch checks 'temperature' -- confirm.
                            cal_array = self.radiance2kelvin(dn_array, **self.temp_settings)
                    # Scale the data to byte or uint16 storage.
                    if self.d_type != 'float32':
                        cal_array = self.scale_data(cal_array)
                    self.out_rst.write_array(cal_array, i=i, j=j, band=out_band)
                self.out_rst.close_band()
        # Close the input image.
        self.i_info.close()
        # Close the output drivers.
        self.out_rst.close_file()
        self.out_rst = None

    def update_toar_settings(self, band_position):
        """Fill refl_settings/temp_settings for the band before a TOA,
        DOS or temperature conversion."""
        # Landsat settings
        if self.sensor.lower() in self.landsat_sensors:
            # Is there a user provided file or object?
            if isinstance(self.metadata, str) or isinstance(self.metadata, raster_tools.LandsatParser):
                if raster_tools.SENSOR_DICT[self.pr.sensor.lower()] == 'oli_tirs':
                    # Landsat 8 provides K1/K2 directly in the metadata.
                    k1 = self.pr.k1
                    k2 = self.pr.k2
                else:
                    self.refl_settings['bd_esun'] = self.get_esun(self.pr.series,
                                                                  raster_tools.SENSOR_DICT[self.pr.sensor.lower()],
                                                                  band_position)
                    k1, k2 = self.get_kelvin_coefficients(self.pr.series,
                                                          raster_tools.SENSOR_DICT[self.pr.sensor.lower()],
                                                          band_position)
                self.temp_settings['k1'] = k1
                self.temp_settings['k2'] = k2
            else:
                # NOTE(review): index(band_position) returns a position that
                # is then used to index bands2process again, so ``bi`` ends
                # up being the band number itself -- confirm the intended
                # indexing into bd_esun_list.
                bi = self.bands2process[self.bands2process.index(band_position)]
                self.refl_settings['bd_esun'] = self.bd_esun_list[bi]
                self.temp_settings['k1'] = self.k1
                self.temp_settings['k2'] = self.k2
            # Reuse the gain/bias resolved by update_rad_settings.
            self.refl_settings['landsat_gain'] = self.rad_settings['landsat_gain']
            self.refl_settings['landsat_bias'] = self.rad_settings['landsat_bias']

    def update_rad_settings(self, band_position):
        """Fill rad_settings (gain/bias) for the band before a DN-to-radiance
        conversion."""
        # Landsat settings
        if self.sensor.lower() in self.landsat_sensors:
            # Is there a user provided file or object?
            if isinstance(self.metadata, str) or isinstance(self.metadata, raster_tools.LandsatParser):
                # A value of 999 means coefficients
                # were not gathered.
                coeff_check = self.pr.no_coeff
                # Landsat 8 information pulled from metadata.
                if raster_tools.SENSOR_DICT[self.pr.sensor.lower()] == 'oli_tirs':
                    landsat_gain = self.pr.rad_mult_dict[int(band_position)]
                    landsat_bias = self.pr.rad_add_dict[int(band_position)]
                else:
                    l_max = self.pr.LMAX_dict[int(band_position)]
                    l_min = self.pr.LMIN_dict[int(band_position)]
                    landsat_gain, landsat_bias = self.get_gain_bias(self.pr.series,
                                                                    raster_tools.SENSOR_DICT[self.pr.sensor.lower()],
                                                                    band_position, l_max, l_min, coeff_check)
                self.rad_settings['landsat_gain'] = landsat_gain
                self.rad_settings['landsat_bias'] = landsat_bias
            else:
                # NOTE(review): same double-indexing pattern as in
                # update_toar_settings -- ``bi`` equals the band number.
                bi = self.bands2process[self.bands2process.index(band_position)]
                self.rad_settings['landsat_gain'] = self.landsat_gain_list[bi]
                self.rad_settings['landsat_bias'] = self.landsat_bias_list[bi]

    def scale_data(self, calibrated_array):
        """Rescale reflectance-like [0, 1] data to byte or uint16 storage.

        Returns None for any other d_type; process() only calls this when
        d_type != 'float32'.
        """
        if self.d_type == 'byte':
            return rescale_intensity(calibrated_array,
                                     in_range=(0., 1.),
                                     out_range=(0, 255)).astype(np.uint8)
        elif self.d_type == 'uint16':
            return rescale_intensity(calibrated_array,
                                     in_range=(0., 1.),
                                     out_range=(0, 10000)).astype(np.uint16)

    def create_output(self):
        """Normalize bands2process, derive the output band range and create
        the output raster."""
        if isinstance(self.bands2process, int) and self.bands2process == -1:
            # -1 means all bands of the input image.
            self.bands2process = list(range(1, self.i_info.bands+1))
        elif isinstance(self.bands2process, int) and self.bands2process > 0:
            self.bands2process = [self.bands2process]
        elif isinstance(self.bands2process, int) and self.bands2process == 0:
            raise ValueError('\nThe bands to process must be -1, int > 0, or a list of bands.\n')
        # Output bands are numbered 1..n regardless of the input band numbers.
        self.band_range = list(range(1, len(self.bands2process)+1))
        # Copy the input information.
        self.o_info = self.i_info.copy()
        # Change parameters if necessary.
        self.o_info.storage = self.d_type
        # Create the output.
        self.out_rst = raster_tools.create_raster(self.output_image, self.o_info)

    def get_metadata(self):
        """Parse Landsat metadata and set self.pr, self.solar_angle and
        self.julian_day.

        NOTE(review): the inner LandsatParser branch is unreachable because
        of the outer str check, so a metadata *object* (or None) never sets
        self.pr or self.julian_day, while process() reads self.julian_day
        unconditionally -- confirm the intended nesting.
        """
        if isinstance(self.metadata, str):
            if self.sensor.lower() in self.landsat_sensors:
                if isinstance(self.metadata, str):
                    self.pr = raster_tools.LandsatParser(self.metadata)
                elif isinstance(self.metadata, raster_tools.LandsatParser):
                    self.pr = self.metadata
                if isinstance(self.image_date, str):
                    year, month, day = self.image_date.split('/')
                else:
                    year, month, day = self.pr.year, self.pr.month, self.pr.day
                if not isinstance(self.solar_angle, float):
                    # Fall back to the solar elevation from the metadata.
                    self.solar_angle = self.pr.elev
                self.julian_day = date2julian(month, day, year)
# def search_worldview_meta(dir, type):
#
# fName = '.IMD'
#
# # search for date and elevation angle
# absCalLine = 'absCalFactor'
# effBandwLine = 'effectiveBandwidth'
# dateLine = 'earliestAcqTime'
# eleve_line = 'meanSunEl'
# group = 'BEGIN_GROUP = BAND'
#
# absCalFactor, effBandWidth, bandNames = [], [], []
#
# for root, dirs, files in os.walk(dir):
#
# if root[-3:] == type:
# print 'Walking through: %s' % root
#
# rootList = os.listdir(root)
#
# for file in rootList:
#
# if fName in file:
#
# # : open the text file
# txtFile = '%s/%s' % (root, file)
# txt = open(txtFile,'r')
#
# # : get lines in a list
# txtRead = txt.readlines()
#
# for line in txtRead:
#
# if group in line:
# # : get each band
# bandNames.append(string.strip(line[line.find('= ')+1:-1]))
#
# if absCalLine in line:
# # : get absolute calibration factor for each band
# absCalFactor.append(float(line[line.find('= ')+1:-2]))
#
# if effBandwLine in line:
# # : get effective bandwidth for each band
# effBandWidth.append(float(line[line.find('= ')+1:-2]))
#
# if dateLine in line:
# # : get the date
# date = string.strip(line[line.find('= ')+1:line.find('= ')+12])
#
# if eleve_line in line:
# # : get the elevation angle
# elev = float(line[line.find('= ')+1:-2])
#
# return date, elev, absCalFactor, effBandWidth, bandNames
def get_scan_angle(img=None, i_info=None, band2open=1, forward=True):
    """
    Gets the scan angle from an input Landsat image.

    1) get the bottom left corner, moving right, extracting each column
    2) stop if the column has data (i.e., it's a corner)
    3) get the <y> position where there is data in the column
    4) get the top left corner, moving down, extracting each row
    5) stop if the row has data
    6) get the <x> position where there is data in the row
    7) find the degree of the angle

        scan angle (a) = rad2deg[tangent(opposite / adjacent)]

        where,
            opposite = urx - llx
            adjacent = lly - ury

            /| urx | ury
           / |
          /  |
         /   |
        /    | opposite
       /     |
      /      |
  lly b/a\\_____| lrx
      adjacent

    Args:
        img (Optional[ndarray]): The input image. Default is None.
        i_info (Optional[object]): An instance of ``raster_tools.ropen``. Default is None.
        band2open (Optional[int]): The band to open. Default is 1.
        forward (Optional[bool]): Whether to rotate the image forward. Default is True.

    Returns:
        The rotation degree (0 for an axis-aligned scene).
    """
    if i_info:
        img = i_info.read(bands2open=band2open)
    else:
        if not isinstance(img, np.ndarray):
            raise TypeError('\nThe image needs to be given as an array if not given by an ropen instance.\n')
    rws, cls = img.shape
    # Bottom-left corner: scan columns west to east until one has data.
    for cl in range(0, cls):
        # all rows, current column
        clm = img[:, cl]
        if clm.max() > 0:
            break
    llx = cl
    lly_idx = clm[np.where(clm > 0)]
    # first pixel (North to South) that has data
    lly = list(clm).index(lly_idx[0])
    # Upper-right corner: scan rows north to south until one has data.
    for rw in range(0, rws):
        # all columns, current row
        rww = img[rw, :]
        if rww.max() > 0:
            break
    ury = rw
    urx_idx = rww[np.where(rww > 0)]
    # first pixel (West to East) that has data
    urx = list(rww).index(urx_idx[0])
    opposite = float(lly - ury)
    adjacent = float(urx - llx)
    # BUG FIX: the original only handled opposite == adjacent == 0 and
    # raised ZeroDivisionError for a vertical edge (adjacent == 0 with a
    # nonzero opposite); treat any zero-adjacent scene as unrotated.
    if adjacent == 0:
        deg = 0
    else:
        # get the rotation degree
        if forward:
            deg = -(90. - np.rad2deg(np.arctan(opposite / adjacent)))
        else:
            deg = 90. - np.rad2deg(np.arctan(opposite / adjacent))
    return deg
| 2.34375 | 2 |
nanoCameras/Hamamatsu_nanoCam.py | hzg-wpi/p05nano | 0 | 12785292 | <gh_stars>0
import time
import PyTango
import p05.common.PyTangoProxyConstants as proxies
import p05.tools.misc as misc
class Hamamatsu_nanoCam():
    """Convenience wrapper around the Hamamatsu camera Tango server.

    Configures the camera for externally triggered, edge-active
    acquisition with file saving enabled, and exposes helpers to
    start/stop acquisitions, fire trigger pulses, set the region of
    interest and manage image file naming.
    """

    def __init__(self, tHama=None, tTrigger=None, imageDir=None, exptime=None):
        """
        Args:
            tHama: Tango DeviceProxy of the camera (created from
                ``proxies.camera_hama`` if None).
            tTrigger: Tango DeviceProxy of the trigger output register
                (created from ``proxies.register_eh2_out03`` if None).
            imageDir (str): Directory the camera server saves images to.
                Required; a None value raises.
            exptime (float): Exposure time in seconds. Default is 0.1.
        """
        if tHama is None:
            self.tHama = PyTango.DeviceProxy(proxies.camera_hama)
        else:
            self.tHama = tHama
        if tTrigger is None:
            self.tTrigger = PyTango.DeviceProxy(proxies.register_eh2_out03)
        else:
            self.tTrigger = tTrigger
        self.CAM_Binning = 1
        time.sleep(0.1)
        # Abort any acquisition that is still running from a previous session.
        if self.tHama.state() == PyTango.DevState.EXTRACT:
            self.tHama.command_inout('AbortAcq')
            while self.tHama.state() == PyTango.DevState.EXTRACT:
                time.sleep(0.01)
        if imageDir is None:
            raise Exception('Cannot set None-type image directory!')
        else:
            self.imageDir = imageDir
        if exptime is not None:
            self.exptime = exptime
        else:
            self.exptime = 0.1
        # Cache the current region of interest in absolute pixel coordinates.
        self.CAM_xlow = self.tHama.read_attribute('SUBARRAY_HPOS').value
        self.CAM_xhigh = self.tHama.read_attribute('SUBARRAY_HSIZE').value + self.tHama.read_attribute(
            'SUBARRAY_HPOS').value
        self.CAM_ylow = self.tHama.read_attribute('SUBARRAY_VPOS').value
        self.CAM_yhigh = self.tHama.read_attribute('SUBARRAY_VSIZE').value + self.tHama.read_attribute(
            'SUBARRAY_VPOS').value
        self.tHama.write_attribute('EXPOSURE_TIME', self.exptime)
        time.sleep(0.2)
        # Configure externally triggered acquisition (rising-edge active).
        # self.tHama.write_attribute('FilePostfix', '.bin')
        self.tHama.write_attribute('TRIGGER_SOURCE', 'EXTERNAL')
        time.sleep(0.2)
        self.tHama.write_attribute('TRIGGER_ACTIVE', 'EDGE')
        time.sleep(0.2)
        self.tHama.write_attribute('OUTPUT_TRIGGER_KIND[0]', 'TRIGGER READY')
        time.sleep(0.2)
        self.tHama.write_attribute('TRIGGER_POLARITY', 'POSITIVE')
        time.sleep(0.2)
        self.tHama.write_attribute('OUTPUT_TRIGGER_POLARITY[0]', 'POSITIVE')
        time.sleep(0.2)
        # File naming and saving defaults.
        self.tHama.write_attribute('FilePrefix', 'Image')
        self.tHama.write_attribute('FileDirectory', self.imageDir)
        self.tHama.write_attribute('FileRefNumber', 0)
        self.tHama.write_attribute('SaveImageFlag', True)
        # Make sure the trigger line starts low.
        self.tTrigger.write_attribute('Value', 0)
        # self.tTrigger.write_attribute('Voltage', 0)
        time.sleep(0.2)
        self.iImage = 0
        return None

    def setExptime(self, value):
        """Set the camera exposure time (seconds), waiting for an idle server."""
        try:
            while not self.tHama.state() == PyTango.DevState.ON:
                time.sleep(0.01)
            self.tHama.write_attribute('EXPOSURE_TIME', value)
            self.exptime = value
        except Exception as e:
            print(misc.GetTimeString() + ': Hamamatsu server not responding while setting new ExposureTime:\n%s' % e)
            while not self.tHama.state() == PyTango.DevState.ON:
                time.sleep(0.01)
        return None

    def setImgNumber(self, i):
        """Set the running file number used for saved images."""
        while not self.tHama.state() == PyTango.DevState.ON:
            time.sleep(0.01)
        self.tHama.write_attribute('FileRefNumber', i)
        return None

    def sendCommand(self, command):
        """Forward a raw Tango command to the camera server."""
        self.tHama.command_inout(command)

    def state(self):
        """Return the Tango state of the camera server."""
        return self.tHama.state()

    def readAttribute(self, attribute):
        """Read a raw Tango attribute from the camera server."""
        return self.tHama.read_attribute(attribute)

    def getImgNumber(self):
        """Return the FileRefNumber attribute.

        NOTE(review): this returns the Tango DeviceAttribute object (use
        ``.value`` for the plain number), unlike getImage() which
        dereferences -- confirm whether callers expect the raw attribute.
        """
        i = self.tHama.read_attribute('FileRefNumber')
        return i

    def getImage(self):
        """Return the current image array from the camera server."""
        return self.tHama.read_attribute('IMAGE').value

    def acquireImage(self):
        """Arm the camera and fire a single trigger pulse."""
        # BUG FIX: time.clock() was removed in Python 3.8; use the
        # monotonic perf_counter for the (currently unused) timing probe.
        start = time.perf_counter()
        while not self.tHama.state() == PyTango.DevState.ON:
            time.sleep(0.005)
        end = time.perf_counter()
        # print("waiting for on state:" + str(end-start) )
        self.tHama.command_inout('StartAcq')
        while not self.tHama.state() == PyTango.DevState.EXTRACT:
            time.sleep(0.005)
        # Generate a short rising-edge pulse on the trigger register.
        # self.tTrigger.write_attribute('Voltage', 3.5)
        self.tTrigger.write_attribute('Value', 1)
        time.sleep(0.005)
        # self.tTrigger.write_attribute('Voltage', 0)
        self.tTrigger.write_attribute('Value', 0)
        self.imageTime = time.time()
        self.iImage = 0
        # while not self.tHama.state() == PyTango.DevState.ON:
        #     time.sleep(0.01)
        # time.sleep(0.01)
        return None

    def startLive(self):
        """Start a continuous (video) acquisition."""
        self.tHama.StartVideoAcq()
        return None

    def stopHamaacquisition(self):
        """Abort a running acquisition and reset the trigger line."""
        self.tTrigger.write_attribute('Value', 0)
        # self.tTrigger.write_attribute('Voltage', 0)
        while self.tHama.state() == PyTango.DevState.EXTRACT:
            self.tHama.command_inout('AbortAcq')
            time.sleep(0.1)
        self.tHama.command_inout('AbortAcq')
        self.imageTime = time.time()
        self.iImage = 0
        time.sleep(3)
        return None

    def setROI(self, xlow, xhigh, ylow, yhigh):
        """Set the camera subarray (region of interest) in absolute pixels."""
        self.CAM_xlow, self.CAM_xhigh = xlow, xhigh
        self.CAM_ylow, self.CAM_yhigh = ylow, yhigh
        self.tHama.write_attribute('SUBARRAY_MODE', 'ON')
        self.tHama.write_attribute('SUBARRAY_HPOS', int(self.CAM_xlow))
        self.tHama.write_attribute('SUBARRAY_HSIZE', int(self.CAM_xhigh - self.CAM_xlow))
        self.tHama.write_attribute('SUBARRAY_VPOS', int(self.CAM_ylow))
        # BUG FIX: was `CAM_yhigh - CAM_yhigh` (always 0); mirror the
        # horizontal size computation above.
        self.tHama.write_attribute('SUBARRAY_VSIZE', int(self.CAM_yhigh - self.CAM_ylow))
        return None

    def setImageName(self, name):
        """Set the file name prefix for saved images."""
        while not self.tHama.state() == PyTango.DevState.ON:
            time.sleep(0.01)
        self.tHama.write_attribute('FilePrefix', name)

    def finishScan(self):
        """Abort the current acquisition at the end of a scan."""
        self.tHama.command_inout('AbortAcq')
        return None

    def getCameraInfo(self):
        """Return a human-readable summary of the camera configuration."""
        _s = ''
        _s += 'ExpTime\t= externally set\n'
        _s += 'DataType\t= Uint16\n'
        # BUG FIX: missing newline ran the binning line into the ROI line,
        # and the ROI upper bound had a typo (20488 -> 2048).
        _s += 'Binning\t= 1\n'
        _s += 'ROI= [0, 2048, 0, 2048]\n'
        return _s
| 2.0625 | 2 |
scripts/models/CNN_using_persistence_images_on_patch.py | pjhartout/TDA_ADNI_MLCB | 1 | 12785293 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""CNN_using_persistence_images_on_patch.py
The aim of this script is to perform the training of a CNN using persistence
images as a input. This script is inspired from this script:
BorgwardtLab/ADNI_MRI_Analysis/blob/mixed_CNN/mixed_CNN/run_Sarah.py
To get real time information into the model training and structure, run
$ tensorboard --logdir logs/fit
once this script has been started.
NOTES:
- One loaded, the "big" 100x100x3 images aren't that big (>400MB in RAM) so
NO GENERATOR NEEDED
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import dotenv
import datetime
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import numpy as np
import pandas as pd
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from itertools import islice
import shutil
# Report which GPU (if any) TensorFlow sees after the CUDA masking above.
print(tf.test.gpu_device_name())
# Environment values (data directories etc.) loaded from the project .env file.
DOTENV_KEY2VAL = dotenv.dotenv_values()
# Fix the TF RNG for reproducible weight initializations.
tf.random.set_seed(42)
# Persistence images are loaded and reshaped as N_BINS x N_BINS x 3 arrays.
N_BINS = 1000
N_FILTERS = 4
KERNEL_SIZE = 4
DROPOUT_RATE = 0.3
################################################################################
# Functions
################################################################################
persistence_image_location = (
    DOTENV_KEY2VAL["DATA_DIR"] + "/global_persistence_images/"
)
partitions_location = DOTENV_KEY2VAL["DATA_DIR"] + "/partitions/"
diagnosis_json = (
    DOTENV_KEY2VAL["DATA_DIR"] + "/collected_diagnoses_complete.json"
)
def make_model(input_shape):
    """Build the three-tower CNN over persistence images.

    One convolutional tower per input channel; the towers are
    concatenated, flattened and fed to a two-layer dense head with a
    sigmoid output for binary classification.

    Args:
        input_shape (tuple): input shape of the neural network
            (expected N_BINS x N_BINS x 3).

    Returns:
        keras.Model: model ready to be trained
    """
    inputs = keras.Input(shape=input_shape)
    # One Conv/BN/MaxPool tower per channel (original used slices
    # 0:1, 1:2 and 2: -- identical for a 3-channel input).
    towers = []
    for channel in range(3):
        tower = layers.Conv2D(
            N_FILTERS, KERNEL_SIZE, padding="same", activation="relu"
        )(inputs[:, :, :, channel:channel + 1])
        tower = layers.BatchNormalization()(tower)
        towers.append(layers.MaxPooling2D()(tower))
    merged = layers.concatenate(towers, axis=1)
    merged = layers.Flatten()(merged)
    x = layers.Dense(500, activation="relu")(merged)
    x = layers.Dropout(DROPOUT_RATE)(x)
    # BUG FIX: the second dense layer previously consumed ``merged``
    # again, silently discarding the first dense/dropout pair.
    x = layers.Dense(500, activation="relu")(x)
    x = layers.Dropout(DROPOUT_RATE)(x)
    outputs = layers.Dense(1, activation="sigmoid")(x)
    return keras.Model(inputs, outputs)
def get_partitions(partitions_location):
    """Collect every saved train/validation split and its label dict.

    Files named ``partition_*`` hold split dictionaries, files named
    ``labels_*`` hold label dictionaries; anything else is reported.

    Args:
        partitions_location (str): Directory (with trailing separator)
            containing the ``.npy`` files.

    Returns:
        tuple: ``(partitions, labels)`` -- two lists of dicts.
    """
    partition = []
    labels = []
    for _, _, filenames in os.walk(partitions_location):
        for filename in filenames:
            kind = filename.split("_")[0]
            if kind == "partition":
                partition.append(
                    np.load(partitions_location + filename, allow_pickle=True).item()
                )
            elif kind == "labels":
                labels.append(
                    np.load(partitions_location + filename, allow_pickle=True).item()
                )
            else:
                print(f"File {filename} is neither partition nor labels file")
    return partition, labels
################################################################################
# Main
################################################################################
def main():
    """Train the three-tower CNN on persistence images for every saved
    train/validation partition, repeating each with several random
    initializations, then report aggregate validation metrics."""
    ############################################################################
    # Data loading and processing
    ############################################################################
    # Number of random re-initializations per partition.
    inits = 3
    partitions, labels = get_partitions(partitions_location)
    histories = []
    for partition, label in zip(partitions, labels):
        for i in range(inits):
            # Make sure there aren't the same patients in train and test
            X_train_lst = []
            y_train_lst = []
            for image in tqdm(partition["train"]):
                X_train_lst.append(
                    np.load(persistence_image_location + image + ".npy")
                )
                y_train_lst.append(label[image])
            # Stack the per-sample images into (n, N_BINS, N_BINS, 3).
            X_train, y_train = (
                np.stack(X_train_lst, axis=0).reshape(
                    len(X_train_lst), N_BINS, N_BINS, 3
                ),
                np.vstack(y_train_lst),
            )
            print("Training data loadede")
            X_val_lst = []
            y_val_lst = []
            for image in tqdm(partition["validation"]):
                X_val_lst.append(
                    np.load(persistence_image_location + image + ".npy")
                )
                y_val_lst.append(label[image])
            X_val, y_val = (
                np.stack(X_val_lst, axis=0).reshape(
                    len(X_val_lst), N_BINS, N_BINS, 3
                ),
                np.vstack(y_val_lst),
            )
            print("Validation data loadede")
            ####################################################################
            # Model definition
            ####################################################################
            model = make_model(input_shape=(N_BINS, N_BINS, 3))
            # Save an architecture diagram alongside the run.
            tf.keras.utils.plot_model(
                model,
                to_file="model.png",
                show_shapes=True,
                show_layer_names=True,
                rankdir="TB",
                expand_nested=True,
                dpi=96,
            )
            ####################################################################
            # Model training
            ####################################################################
            epochs = 100
            # Clear old TensorBoard runs so each training starts fresh.
            tensorboard_logs = "logs/fit"
            if os.path.exists(tensorboard_logs):
                shutil.rmtree(tensorboard_logs)
            log_dir = "logs/fit/" + datetime.datetime.now().strftime(
                "%Y%m%d-%H%M%S"
            )
            # TensorBoard logging, early stopping on validation accuracy,
            # and best-weights checkpointing.
            callbacks = [
                tf.keras.callbacks.TensorBoard(
                    log_dir=log_dir, histogram_freq=1
                ),
                tf.keras.callbacks.EarlyStopping(
                    monitor="val_accuracy",
                    min_delta=0.001,
                    patience=10,
                    verbose=0,
                    mode="auto",
                    baseline=None,
                    restore_best_weights=True,
                ),
                tf.keras.callbacks.ModelCheckpoint(
                    filepath="model_weights",
                    save_weights_only=True,
                    monitor="val_accuracy",
                    mode="max",
                    save_best_only=True,
                ),
            ]
            # Step-decayed learning rate schedule.
            lr = keras.optimizers.schedules.ExponentialDecay(
                0.01, decay_steps=30, decay_rate=0.6, staircase=True
            )
            model.compile(
                optimizer=keras.optimizers.Adam(
                    learning_rate=lr,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=1e-07,
                    amsgrad=False,
                ),
                loss="binary_crossentropy",
                # Metric names below are referenced in the history keys
                # (val_accuracy, val_precision, ...) used for aggregation.
                metrics=[
                    keras.metrics.BinaryAccuracy(name="accuracy"),
                    keras.metrics.Precision(name="precision"),
                    keras.metrics.Recall(name="recall"),
                    keras.metrics.AUC(name="auc"),
                ],
                # run_eagerly=True,
            )
            history = model.fit(
                X_train,
                y_train,
                epochs=epochs,
                callbacks=callbacks,
                batch_size=16,
                validation_data=(X_val, y_val),
            )
            histories.append(history)
    ############################################################################
    # Model evaluation
    ############################################################################
    # Mostly already included into the training procedure.
    last_acc = []
    last_val_acc = []
    last_val_prec = []
    last_val_rec = []
    last_val_auc = []
    # Aggregate the best epoch of each run across partitions and inits.
    for hist in histories:
        last_acc.append(max(hist.history["accuracy"]))
        last_val_acc.append(max(hist.history["val_accuracy"]))
        last_val_prec.append(max(hist.history["val_precision"]))
        last_val_rec.append(max(hist.history["val_recall"]))
        last_val_auc.append(max(hist.history["val_auc"]))
    print(
        f"The mean training accuracy over the folds is {np.mean(last_acc)}, pm {np.std(last_acc)}"
    )
    print(
        f"The mean validation accuracy over the folds is {np.mean(last_val_acc)}, pm {np.std(last_val_acc)}"
    )
    print(
        f"The mean validation precision over the folds is {np.mean(last_val_prec)}, pm {np.std(last_val_prec)}"
    )
    print(
        f"The mean validation recall over the folds is {np.mean(last_val_rec)}, pm {np.std(last_val_rec)}"
    )
    print(
        f"The mean validation auc over the folds is {np.mean(last_val_auc)}, pm {np.std(last_val_auc)}"
    )
    ############################################################################
    # Model evaluation
    ############################################################################
    # Here we actually extract the id of the samples that are misclassified
    # y_pred = model.predict(X_train)
    # difference = np.round(y_train - y_pred)
    # index = np.nonzero(difference)
    # y_pred = model.predict(X_val)
    # difference = np.round(y_val - y_pred)
    # index_2 = np.nonzero(difference)
    # df_misclassified_train = pd.DataFrame(
    #     np.array(partitions[0]["train"])[index[0]]
    # )
    # df_misclassified_val = pd.DataFrame(
    #     np.array(partitions[0]["validation"])[index_2[0]]
    # )
    # df_misclassified = pd.concat(
    #     [df_misclassified_train, df_misclassified_val]
    # )
    # df_misclassified.to_csv(
    #     DOTENV_KEY2VAL["GEN_DATA_DIR"] + "misclassification.csv"
    # )
# Script entry point: train and evaluate on all partitions.
if __name__ == "__main__":
    main()
| 2.640625 | 3 |
yah/types/device.py | sunsx0/yah | 0 | 12785294 | <reponame>sunsx0/yah<gh_stars>0
import typing
import dataclasses as dc
from .capability import CapabilityBase
from .properties import PropertyBase
@dc.dataclass
class Device:
    """A smart-home device: its identity plus the capabilities and
    properties it exposes."""

    # Unique device identifier.
    id: str
    # Human-readable device name.
    name: str
    # Device type string as reported by the API.
    type: str
    icon_url: str = ''
    capabilities: typing.List[CapabilityBase] = dc.field(default_factory=list)
    properties: typing.List[PropertyBase] = dc.field(default_factory=list)
    # Identifier of the skill that provides this device.
    skill_id: str = ''
    groups: typing.List[typing.Any] = dc.field(default_factory=list)
| 2.234375 | 2 |
TensorFlow_Classification/TensorFlow_AlexNet/train.py | YouthJourney/Deep-Learning-For-Image-Process | 2 | 12785295 | <reponame>YouthJourney/Deep-Learning-For-Image-Process
# -*- coding: UTF-8 -*-
"""
Author: LGD
FileName: train
DateTime: 2021/1/20 17:19
SoftWare: PyCharm

Training script for AlexNet on the flower_data image classification set.
"""
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
from TensorFlow_AlexNet.model import AlexNet_v1, AlexNet_v2
import tensorflow as tf
import json
import os
# Dataset lives two directories above this project.
data_root = os.path.abspath(os.path.join(os.getcwd(), '../..'))
image_path = data_root + '/Pytorch_Projects/data_set/flower_data/'
train_dir = image_path + 'train'
val_dir = image_path + 'val'
# Directory for checkpoint files.
if not os.path.exists('save_weights'):
    os.mkdir('save_weights')
im_height = 224
im_width = 224
batch_size = 32
epochs = 6
# data generator with data augmentation
# ImageDataGenerator yields labels in one-hot encoded format.
train_image_generator = ImageDataGenerator(
    rescale=1. / 255,  # rescale pixel values to [0, 1]
    horizontal_flip=True  # random horizontal flip (augmentation)
)
val_image_generator = ImageDataGenerator(rescale=1. / 255)
train_data_gen = train_image_generator.flow_from_directory(
    directory=train_dir,
    batch_size=batch_size,
    shuffle=True,
    target_size=(im_height, im_width),
    class_mode='categorical'
)
# number of training samples
total_train = train_data_gen.n
# get class dict
class_indices = train_data_gen.class_indices
print('class_indices', class_indices)
# transform value and key of dict
inverse_dict = dict((val, key) for key, val in class_indices.items())
# write dict into json file
json_str = json.dumps(inverse_dict, indent=4)
with open('class_indices.json', 'w') as json_file:
    json_file.write(json_str)
val_data_gen = val_image_generator.flow_from_directory(
    directory=val_dir,
    batch_size=batch_size,
    shuffle=False,
    target_size=(im_height, im_width),
    class_mode='categorical'
)
# number of validation samples
total_val = val_data_gen.n
# display the picture
# sample_training_images, sample_training_labels = next(train_data_gen)  # label is one-hot coding
# #
# #
# # This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column.
# def plotImages(images_arr):
#     fig, axes = plt.subplots(1, 5, figsize=(20, 20))
#     axes = axes.flatten()
#     for img, ax in zip(images_arr, axes):
#         ax.imshow(img)
#         ax.axis('off')
#     plt.tight_layout()
#     plt.show()
#
#
# plotImages(sample_training_images[:5])
# Train using the first model-building approach (functional API).
model = AlexNet_v1(im_height=im_height, im_width=im_width, class_num=5)
model.summary()
# using keras high level api for training
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005),
    loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),  # False when the model applies softmax() itself, True otherwise
    metrics=['acc']
)
callbacks = [tf.keras.callbacks.ModelCheckpoint(
    filepath='save_weights/AlexNet.h5',
    save_best_only=True,
    save_weights_only=True,
    monitor='val_loss'  # quantity to monitor
)]
# Since TensorFlow 2.1 fit() is recommended: it enables Dropout during
# training and disables it during validation automatically.
history = model.fit(
    x=train_data_gen,
    steps_per_epoch=total_train // batch_size,
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=total_val // batch_size,
    callbacks=callbacks
)
# plot loss and accuracy image
history_dict = history.history
train_loss = history_dict['loss']
train_acc = history_dict['acc']
val_loss = history_dict['val_loss']
val_acc = history_dict['val_acc']
# figure 1: loss curves
plt.figure()
plt.plot(range(epochs), train_loss, label='train_loss')
plt.plot(range(epochs), val_loss, label='val_loss')
plt.legend()
plt.xlabel('epochs')
plt.ylabel('loss')
plt.show()
# figure 2: accuracy curves
plt.figure()
plt.plot(range(epochs), train_acc, label='train_acc')
plt.plot(range(epochs), val_acc, label='val_acc')
plt.legend()
plt.xlabel('epochs')
plt.ylabel('acc')
plt.show()
# # Train using the second model-building approach (subclassed model).
# model = AlexNet_v2(class_num=5)
# model.build((batch_size, im_height, im_width, 3))  # the model is only truly instantiated once build() is called
# Before TensorFlow 2.1, fit_generator() was required when the data could
# not be loaded into memory all at once.
# history = model.fit_generator(generator=train_data_gen,
#                               steps_per_epoch=total_train // batch_size,
#                               epochs=epochs,
#                               validation_data=val_data_gen,
#                               validation_steps=total_val // batch_size,
#                               callbacks=callbacks)
# using keras low level api for training
# loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
# optimizer = tf.keras.optimizers.Adam(learning_rate=0.0005)
#
# train_loss = tf.keras.metrics.Mean(name='train_loss')
# train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')
#
# test_loss = tf.keras.metrics.Mean(name='test_loss')
# test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')
#
#
# @tf.function
# def train_step(images, labels):
#     with tf.GradientTape() as tape:
#         predictions = model(images, training=True)
#         loss = loss_object(labels, predictions)
#     gradients = tape.gradient(loss, model.trainable_variables)
#     optimizer.apply_gradients(zip(gradients, model.trainable_variables))
#
#     train_loss(loss)
#     train_accuracy(labels, predictions)
#
#
# @tf.function
# def test_step(images, labels):
#     predictions = model(images, training=False)
#     t_loss = loss_object(labels, predictions)
#
#     test_loss(t_loss)
#     test_accuracy(labels, predictions)
#
#
# best_test_loss = float('inf')
# for epoch in range(1, epochs+1):
#     train_loss.reset_states()  # clear history info
#     train_accuracy.reset_states()  # clear history info
#     test_loss.reset_states()  # clear history info
#     test_accuracy.reset_states()  # clear history info
#     for step in range(total_train // batch_size):
#         images, labels = next(train_data_gen)
#         train_step(images, labels)
#
#     for step in range(total_val // batch_size):
#         test_images, test_labels = next(val_data_gen)
#         test_step(test_images, test_labels)
#
#     template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
#     print(template.format(epoch,
#                           train_loss.result(),
#                           train_accuracy.result() * 100,
#                           test_loss.result(),
#                           test_accuracy.result() * 100))
#     if test_loss.result() < best_test_loss:
#         model.save_weights("./save_weights/myAlex.ckpt", save_format='tf')
| 2.734375 | 3 |
Testing/doctesting.py | lvolkmann/couch-to-coder-python-exercises | 0 | 12785296 | """
Create docstring tests for the following functions
"""
import doctest
# Example
def get_area_rect(length, width):
    """Return the area of a rectangle, rejecting non-positive sides.

    >>> get_area_rect(5 , 5)
    25
    >>> get_area_rect(5 , 0)
    Traceback (most recent call last):
    ...
    ValueError
    >>> get_area_rect(5 , -1)
    Traceback (most recent call last):
    ...
    ValueError
    """
    # Either side being zero or negative is invalid input.
    if min(length, width) <= 0:
        raise ValueError
    return length * width
# Your turn
def validate_input(x):
    """Return True when the given input converts to an int between 1 and 10.

    >>> validate_input(5)
    True
    >>> validate_input(-2)
    False
    >>> validate_input(12)
    False
    """
    # Chained comparison covers both bounds at once.
    return 1 <= int(x) <= 10
def true_if_hello(x):
    """Return True if and only if the input, as a string, is 'hello'.

    >>> true_if_hello('hello')
    True
    >>> true_if_hello('olleh')
    False
    >>> true_if_hello(7)
    False
    """
    # Coerce to str so non-string inputs compare safely.
    return str(x) == 'hello'
doctest.testmod()
| 4 | 4 |
settings.py | AtleMichaelSelberg/3dpav-desktop | 0 | 12785297 | INCHES_TO_CENIMETERS = 2.54
class Reading():
    """A single sensor reading: a measured value paired with its timestamp."""
    def __init__(self, value, stamp):
        self.value = value
        # NOTE(review): the trailing "| 2.21875 | 2 |" tokens on the next line
        # appear to be dataset-extraction residue, preserved verbatim.
        self.stamp = stamp | 2.21875 | 2 |
JobScheduleEnv/envs/JobScheduleEnv.py | changxin-ge/JobScheduleEnv | 0 | 12785298 | <filename>JobScheduleEnv/envs/JobScheduleEnv.py<gh_stars>0
# coding: utf-8
import pandas as pd
import gym
import numpy as np
import datetime
import random
class JobScheduleEnv(gym.Env):
    """Gym environment for scheduling a set of timed job demands on workers."""

    def __init__(self, config=None):
        """ Job schedule environment constructor

        Args:
            config: configuration of the job schedule environment
                (dict with 'workers' and 'demands'); a small default
                instance is used when None.
        """
        # read in configuration
        if config is None:
            self.env_config = {
                'workers': 2,
                'demands': [{'id': 0, 'start_time': 1, 'end_time': 5},
                            {'id': 1, 'start_time': 2, 'end_time': 8},
                            {'id': 2, 'start_time': 4, 'end_time': 9},
                            {'id': 3, 'start_time': 1, 'end_time': 4}]}
        else:
            self.env_config = config
        # initialization of the environment
        self._set_config()
        # BUG FIX: define the spaces BEFORE calling reset(). reset() calls
        # _get_obs(), which reads self.observation_space, so the original
        # order (reset first) raised AttributeError during construction.
        self.action_space = gym.spaces.Discrete(self.jobs + 1)
        self.observation_space = gym.spaces.Dict({
            'action_mask': gym.spaces.Box(0, 1, shape=(self.jobs + 1,), dtype=np.bool_),
            'job_obs': gym.spaces.Box(0.0, 1.0, shape=(self.jobs, 4), dtype=np.float32),
        })
        # initialization of the reset
        _ = self.reset()

    def _set_config(self):
        """ Set environment configuration from self.env_config. """
        self.workers = int(self.env_config['workers'])
        self.demands = self.env_config['demands']
        self.jobs = len(self.demands)

    def reset(self):
        """ Reset environment

        Returns:
            observations: observations of the environment
        """
        return self._get_obs()

    def _get_obs(self):
        """ Get observations

        Returns:
            observations: observations of the environment

        NOTE(review): this currently returns the observation *space*, not a
        sampled observation -- presumably a placeholder; confirm intent.
        """
        return self.observation_space

    def _get_actions(self):
        """ Get actions

        Returns:
            actions: actions to take at the moment (currently the action
            *space* itself -- see note on _get_obs).
        """
        return self.action_space

    def step(self, action):
        """ Step (not implemented yet). """
        raise NotImplementedError()

    def render(self, mode="human"):
        """ Render (not implemented yet). """
        raise NotImplementedError()
skeletor/error.py | vmasrani/skeletor | 66 | 12785299 | """ Exception handling for skeletor """
class SkeletorException(Exception):
    """General exception raised by skeletor."""
| 1.726563 | 2 |
examples/example1/example.py | bb515/probabilistic-peridynamics-project | 0 | 12785300 | """A simple, 2D peridynamics simulation example."""
import argparse
import cProfile
from io import StringIO
import numpy as np
import pathlib
from peridynamics import Model
from peridynamics.model import initial_crack_helper
from peridynamics.integrators import Euler
from pstats import SortKey, Stats
mesh_file = pathlib.Path(__file__).parent.absolute() / "test.msh"
@initial_crack_helper
def is_crack(x, y):
    """Return 1 if the bond between particles x and y crosses the crack, else 0."""
    crack_length = 0.3
    # Order the pair so that p1 is the left-most particle.
    p1, p2 = (y, x) if x[0] > y[0] else (x, y)
    # 1e-6 makes it fall one side of central line of particles
    if p1[0] < 0.5 + 1e-6 and p2[0] > 0.5 + 1e-6:
        # Straight line through the two particles.
        slope = (p2[1] - p1[1]) / (p2[0] - p1[0])
        intercept = p1[1] - slope * p1[0]
        # Height of that line at x = 0.5.
        height = slope * 0.5 + intercept
        # Bond crosses the crack when the intersection lies inside it.
        if 0.5 * (1 - crack_length) < height < 0.5 * (1 + crack_length):
            return 1
    return 0
def boundary_function(model, u, step):
    """
    Apply a load to the system.

    Particles on each of the sides of the system are pulled apart with
    increasing time step.

    :arg model: model providing ``lhs``/``rhs`` boundary index arrays.
    :arg u: displacement array of shape (nnodes, 3); modified in place.
    :arg step: current time step number.
    :returns: the (mutated) displacement array ``u``.
    """
    load_rate = 0.00001
    # Clamp the y and z displacement of the boundary particles.
    u[model.lhs, 1:3] = np.zeros((len(model.lhs), 2))
    u[model.rhs, 1:3] = np.zeros((len(model.rhs), 2))
    # Pull the two sides apart along x.
    # BUG FIX: the left-hand side must be sized with len(model.lhs); the
    # original used len(model.rhs) for both, which raises a broadcast error
    # whenever the two boundary sets have different (non-broadcastable) sizes.
    u[model.lhs, 0] = (
        -0.5 * step * load_rate * np.ones(len(model.lhs))
    )
    u[model.rhs, 0] = (
        0.5 * step * load_rate * np.ones(len(model.rhs))
    )
    return u
def main():
    """Conduct a peridynamics simulation.

    Builds a 2D model from the bundled mesh, marks the left/right boundary
    particle sets, and runs 100 explicit Euler steps. With --profile, prints
    a cumulative-time cProfile report afterwards.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--profile', action='store_const', const=True)
    args = parser.parse_args()
    if args.profile:
        profile = cProfile.Profile()
        profile.enable()
    model = Model(mesh_file, horizon=0.1, critical_strain=0.005,
                  elastic_modulus=0.05, initial_crack=is_crack)
    # Set left-hand side and right-hand side of boundary
    # (particles within 1.5 horizons of the x = 0 / x = 1 edges).
    indices = np.arange(model.nnodes)
    model.lhs = indices[model.coords[:, 0] < 1.5*model.horizon]
    model.rhs = indices[model.coords[:, 0] > 1.0 - 1.5*model.horizon]
    integrator = Euler(dt=1e-3)
    u, damage, *_ = model.simulate(
        steps=100,
        integrator=integrator,
        boundary_function=boundary_function,
        write=1000
    )
    if args.profile:
        profile.disable()
        s = StringIO()
        stats = Stats(profile, stream=s).sort_stats(SortKey.CUMULATIVE)
        stats.print_stats()
        print(s.getvalue())
if __name__ == "__main__":
main() | 2.96875 | 3 |
src/z3c/dav/exceptions/__init__.py | mkerrin/z3c.dav | 1 | 12785301 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Common WebDAV error handling code.
There are two types of error views. Ones that get caught by the WebDAV protocol
and the other which escapes to the publisher. Both these views implement
different interface which we can control through the WebDAV package via the
IPublication.handleException method.
"""
__docformat__ = 'restructuredtext'
from xml.etree import ElementTree
from zope import interface
from zope import schema
from zope import component
import zope.publisher.interfaces.http
from zope.publisher.interfaces.http import IHTTPException
import zope.publisher.defaultview
import z3c.dav.interfaces
import z3c.dav.utils
class DAVError(object):
    """Base WebDAV error widget: adapts (anything, WebDAV request) to
    IDAVErrorWidget. Subclasses override ``status``."""
    interface.implements(z3c.dav.interfaces.IDAVErrorWidget)
    component.adapts(interface.Interface,
                     z3c.dav.interfaces.IWebDAVRequest)

    def __init__(self, context, request):
        self.context = context
        self.request = request

    # HTTP status code reported for this error (set by subclasses).
    status = None
    # NOTE(review): mutable class attribute shared by all instances that do
    # not assign their own ``errors`` -- presumably always empty; confirm.
    errors = []
    propstatdescription = ""
    responsedescription = ""
# Thin DAVError subclasses: each one only pins the HTTP status code used
# when rendering the corresponding WebDAV error.
class ConflictError(DAVError):
    status = 409

class ForbiddenError(DAVError):
    status = 403

class PropertyNotFoundError(DAVError):
    status = 404

class FailedDependencyError(DAVError):
    # context is generally None for a failed dependency error.
    status = 424

class AlreadyLockedError(DAVError):
    status = 423

class UnauthorizedError(DAVError):
    status = 401
################################################################################
#
# Multi-status error view
#
################################################################################
class MultiStatusErrorView(object):
    """Render an IWebDAVErrors collection as a 207 Multi-Status response.

    Special case: a single error raised on the request's own context is
    delegated to that error's default view instead.
    """
    component.adapts(z3c.dav.interfaces.IWebDAVErrors,
                     z3c.dav.interfaces.IWebDAVRequest)
    interface.implements(IHTTPException)

    def __init__(self, error, request):
        self.error = error
        self.request = request

    def __call__(self):
        multistatus = z3c.dav.utils.MultiStatus()
        if len(self.error.errors) == 1 and \
               self.error.errors[0].resource == self.error.context:
            # If have only one error and the context on which we raised the
            # exception, then we just try and view the default view of the
            # error.
            error = self.error.errors[0]
            name = zope.publisher.defaultview.queryDefaultViewName(
                error, self.request)
            if name is not None:
                view = component.queryMultiAdapter(
                    (error, self.request), name = name)
                return view()
        # Otherwise build one <response> entry per error.
        seenContext = False
        for error in self.error.errors:
            if error.resource == self.error.context:
                seenContext = True
            davwidget = component.getMultiAdapter(
                (error, self.request), z3c.dav.interfaces.IDAVErrorWidget)
            response = z3c.dav.utils.Response(
                z3c.dav.utils.getObjectURL(error.resource, self.request))
            response.status = davwidget.status
            # we don't generate a propstat elements during this view so
            # we just ignore the propstatdescription.
            response.responsedescription += davwidget.responsedescription
            multistatus.responses.append(response)
        if not seenContext:
            # The request's own context had no error entry: report it as a
            # failed dependency per RFC 4918.
            response = z3c.dav.utils.Response(
                z3c.dav.utils.getObjectURL(self.error.context, self.request))
            response.status = 424 # Failed Dependency
            multistatus.responses.append(response)
        self.request.response.setStatus(207)
        self.request.response.setHeader("content-type", "application/xml")
        return ElementTree.tostring(multistatus(), encoding = "utf-8")
class WebDAVPropstatErrorView(object):
    """Render per-property errors (IWebDAVPropstatErrors) as a 207
    Multi-Status response with one <propstat> per failing status code."""
    interface.implements(IHTTPException)
    component.adapts(z3c.dav.interfaces.IWebDAVPropstatErrors,
                     z3c.dav.interfaces.IWebDAVRequest)

    def __init__(self, error, request):
        self.error = error
        self.request = request

    def __call__(self):
        multistatus = z3c.dav.utils.MultiStatus()
        response = z3c.dav.utils.Response(
            z3c.dav.utils.getObjectURL(self.error.context, self.request))
        multistatus.responses.append(response)
        # Group each failing property under the propstat for its status.
        for prop, error in self.error.items():
            error_view = component.getMultiAdapter(
                (error, self.request), z3c.dav.interfaces.IDAVErrorWidget)
            propstat = response.getPropstat(error_view.status)
            if z3c.dav.interfaces.IDAVProperty.providedBy(prop):
                ## XXX - not tested - but is it needed?
                prop = "{%s}%s" %(prop.namespace, prop.__name__)
            propstat.properties.append(ElementTree.Element(prop))
            ## XXX - needs testing.
            propstat.responsedescription += error_view.propstatdescription
            response.responsedescription += error_view.responsedescription
        self.request.response.setStatus(207)
        self.request.response.setHeader("content-type", "application/xml")
        return ElementTree.tostring(multistatus(), encoding = "utf-8")
################################################################################
#
# Some more generic exception view.
#
################################################################################
# Simple exception views: each adapts one error interface to IHTTPException
# and renders an empty body with the matching HTTP status code.
class HTTPForbiddenError(object):
    """403 Forbidden view."""
    interface.implements(IHTTPException)
    component.adapts(z3c.dav.interfaces.IForbiddenError,
                     zope.publisher.interfaces.http.IHTTPRequest)

    def __init__(self, error, request):
        self.error = error
        self.request = request

    def __call__(self):
        self.request.response.setStatus(403)
        return ""

class HTTPConflictError(object):
    """409 Conflict view."""
    interface.implements(IHTTPException)
    component.adapts(z3c.dav.interfaces.IConflictError,
                     zope.publisher.interfaces.http.IHTTPRequest)

    def __init__(self, error, request):
        self.error = error
        self.request = request

    def __call__(self):
        self.request.response.setStatus(409)
        return ""

class PreconditionFailed(object):
    """412 Precondition Failed view."""
    interface.implements(IHTTPException)
    component.adapts(z3c.dav.interfaces.IPreconditionFailed,
                     zope.publisher.interfaces.http.IHTTPRequest)

    def __init__(self, error, request):
        self.error = error
        self.request = request

    def __call__(self):
        self.request.response.setStatus(412)
        return ""

class HTTPUnsupportedMediaTypeError(object):
    """415 Unsupported Media Type view."""
    interface.implements(IHTTPException)
    component.adapts(z3c.dav.interfaces.IUnsupportedMediaType,
                     zope.publisher.interfaces.http.IHTTPRequest)

    def __init__(self, error, request):
        self.error = error
        self.request = request

    def __call__(self):
        self.request.response.setStatus(415)
        return ""

class UnprocessableError(object):
    """422 Unprocessable Entity view."""
    interface.implements(IHTTPException)
    component.adapts(z3c.dav.interfaces.IUnprocessableError,
                     zope.publisher.interfaces.http.IHTTPRequest)

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def __call__(self):
        self.request.response.setStatus(422)
        return ""
class AlreadyLockedErrorView(object):
    """423 Locked view.

    CONSISTENCY FIX: unlike its sibling views this class dropped its first
    constructor argument; it is now stored as ``self.context`` so adapters
    and tests can introspect the failing object like the other views.
    """
    interface.implements(IHTTPException)
    component.adapts(z3c.dav.interfaces.IAlreadyLocked,
                     zope.publisher.interfaces.http.IHTTPRequest)

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def __call__(self):
        self.request.response.setStatus(423)
        return ""
class BadGateway(object):
    """502 Bad Gateway view: empty body with the 502 status code."""
    interface.implements(IHTTPException)
    component.adapts(z3c.dav.interfaces.IBadGateway,
                     zope.publisher.interfaces.http.IHTTPRequest)

    def __init__(self, error, request):
        self.error = error
        self.request = request

    def __call__(self):
        self.request.response.setStatus(502)
        return ""
| 1.765625 | 2 |
Codeforces/C_Nice_Garland.py | anubhab-code/Competitive-Programming | 0 | 12785302 | n=int(input())
# Codeforces "Nice Garland": recolour a string of R/G/B lamps so any two
# lamps of the same colour are exactly 3 apart; a valid garland is a
# repetition of one of the 6 permutations of "RGB". (n is read on the
# preceding line.)
s=input()
l=["RGB","RBG","GRB","GBR","BGR","BRG"]
mn=10**10
ans=""
for i in l:
    t=i
    # Repeat the 3-colour pattern to cover n lamps, then trim to length n.
    t=t*((n+3)//3)
    t=t[0:n]
    # Count positions where the candidate garland differs from the input.
    c=0
    for j in range(n):
        if t[j]!=s[j]:
            c+=1
    # Keep the candidate needing the fewest recolourings.
    if c < mn:
        mn=c
        ans=t
print(mn)
print(ans) | 3.203125 | 3 |
snek/__init__.py | MaT1g3R/dot_snek | 0 | 12785303 | __license__ = 'MIT'
# Package metadata for dot_snek (consumed by setup.py / packaging tools).
__title__ = 'dot_snek'
__version__ = '0.0.5'
__url__ = 'https://github.com/MaT1g3R/dot_snek'
__author__ = 'MaT1g3R'
__description__ = 'Dot files manager'
| 0.953125 | 1 |
tf_encrypted/player/player.py | bendecoste/tf-encrypted | 0 | 12785304 | <gh_stars>0
from typing import Optional
import tensorflow as tf
class Player(object):
    """A named party in the computation, pinned to a TensorFlow device."""
    def __init__(self, name: str, index: int, device_name: str, host: Optional[str] = None) -> None:
        self.name = name  # human-readable player name
        self.index = index  # position of the player among all players
        self.device_name = device_name  # TensorFlow device string
        self.host = host  # optional network host for remote execution
def player(player: Player):
    """Return a tf.device context manager placing ops on the player's device.

    NOTE: the parameter intentionally shadows the function name.
    """
    return tf.device(player.device_name)
| 2.75 | 3 |
tools/change_model_savings.py | hli2020/turbo-boost-detection | 18 | 12785305 | <reponame>hli2020/turbo-boost-detection
import torch
import os
# Rename a Mask R-CNN checkpoint after tweaking its metadata: load the .pth,
# adjust the stored iteration counter, save under the name derived from the
# new epoch/iter, and remove the old file.
# in terminal, execute python this_file_name.py, the following path is right;
# however, in MacPycharm, it sees tools folder as root
base_name = 'results/meta_101_quick_3_l1_sig_multi/train'
file_name = 'mask_rcnn_ep_0006_iter_001238.pth'
# old file
model_path = os.path.join(base_name, file_name)
# load model
checkpoints = torch.load(model_path)
# CHANGE YOUR NEED HERE
checkpoints['iter'] -= 1
# Do **NOT** change the following
_ep = checkpoints['epoch']
_iter = checkpoints['iter']
# new file (name encodes the updated epoch and iteration)
model_file = os.path.join(base_name, 'mask_rcnn_ep_{:04d}_iter_{:06d}.pth'.format(_ep, _iter))
print('saving file: {}'.format(model_file))
torch.save(checkpoints, model_file)
# Only delete the old checkpoint when the name actually changed.
if model_path == model_file:
    print('old name and new name is the same! will not delete old file!')
else:
    print('removing old file: {}'.format(model_path))
    os.remove(model_path)
| 2.140625 | 2 |
Sim.Pro.Flow/src/clustering.py | EmmaAspland/Sim.Pro.Flow | 1 | 12785306 | <gh_stars>1-10
import numpy as np
import pandas as pd
from pyclustering.cluster.kmedoids import kmedoids
from pyclustering.utils.metric import distance_metric, type_metric
from sklearn.metrics import silhouette_samples, silhouette_score
from collections import Counter
import matplotlib.pyplot as plt
import imp
import wx
import wx.lib.scrolledpanel as scrolled
transitions = imp.load_source('transitions', 'src/transitions.py')
class ClusterFrame(wx.Frame):
    """
    Frame to display clustering results (a scrolled panel with long
    static lines so the panel has scrollable extent).
    """
    def __init__(self):
        wx.Frame.__init__(self, None, wx.ID_ANY, 'All Cluster Information')
        self.panel = scrolled.ScrolledPanel(self)
        vbox = wx.BoxSizer(wx.HORIZONTAL)
        # Tall/wide static lines give the scrolled panel its virtual size.
        vbox.Add(wx.StaticLine(self.panel, size=(-1, 5000)), 0, wx.ALL, 5)
        hbox = wx.BoxSizer(wx.VERTICAL)
        hbox.Add(wx.StaticLine(self.panel, size=(5000, -1)), 0, wx.ALL, 5)
        vbox.Add(hbox)
        self.panel.SetSizer(vbox)
        self.panel.SetupScrolling()
        # NOTE(review): SetSizer is called twice with the same sizer --
        # presumably redundant; confirm before removing.
        self.panel.SetSizer(vbox)
        self.SetSize((600, 500))
        self.Centre()
#======== Classic ================
def cluster_results(cframe, y, k, set_medoids, medoids, pathways_medoids, Frequency, Score):
    """
    Generate static text to display clustering results.

    Adds five labelled lines to ``cframe.panel`` starting at vertical
    offset ``y`` and returns the frame.
    """
    x = 20
    medoids_label = 'The initial medoids were :' + str(set_medoids)
    k_label = 'k is ' + str(k)
    center_label = 'The cluster centers are ' + str(medoids) + ': ' + str(pathways_medoids)
    freq_label = 'The frequency of pathways in each cluster is ' + str(Frequency)
    sil_label = 'The average silhouette score is ' + str(Score)
    # NOTE(review): this assignment shadows the ``medoids`` parameter (used
    # above); the widgets are created for their side effect on the panel.
    medoids = wx.StaticText(cframe.panel, id=wx.ID_ANY, label=medoids_label, pos=(x,y))
    k_is = wx.StaticText(cframe.panel, id=wx.ID_ANY, label=k_label, pos=(x,y+20))
    center = wx.StaticText(cframe.panel, id=wx.ID_ANY, label=center_label, pos=(x,y+40))
    freq = wx.StaticText(cframe.panel, id=wx.ID_ANY, label=freq_label, pos=(x,y+60))
    sil = wx.StaticText(cframe.panel, id=wx.ID_ANY, label=sil_label, pos=(x,y+80))
    return cframe
def run_clustering(comp_Matrix, set_medoids, df, k):
    """
    Run k-medoids clustering on a precomputed distance matrix.

    Returns (clusters, medoids, assign, pathways_medoids, Frequency, Score)
    where ``assign`` maps every pathway index to its cluster's medoid index.
    """
    # Seed the algorithm with the first k user-supplied medoids.
    initial_medoids = [set_medoids[n] for n in range(k)]
    kmedoids_instance = kmedoids(comp_Matrix, initial_medoids, data_type="distance_matrix")
    kmedoids_instance.process()
    clusters = kmedoids_instance.get_clusters()
    medoids = kmedoids_instance.get_medoids()
    # Map each medoid index to its pathway string.
    pathways_medoids = [df[medoid] for medoid in medoids]
    # Label every pathway with the medoid of the cluster containing it.
    assign = [0] * len(df)
    for medoid, cluster in zip(medoids, clusters):
        for member in cluster:
            assign[member] = medoid
    Frequency = Counter(assign)
    Score = silhouette_score(comp_Matrix, assign, metric='precomputed')
    return (clusters, medoids, assign, pathways_medoids, Frequency, Score)
def classic_cluster(data, df, comp_Matrix, set_medoids, max_k, save_location , save_name, results, include_centroids):
    """
    Takes in user specifications for clustering.
    Uses silhouette score to evaluate clustering.
    Saves solution as specified.

    ``results`` selects the mode: 'All' (report every k), 'k only' (just
    max_k), 'Best k' / 'Best k (ex 2)' (track the highest silhouette score,
    the latter starting from k=3).
    """
    cframe = ClusterFrame()
    y = 20
    Max_Score = -1
    Best_k = 0
    # Lower bound of the k sweep depends on the requested mode.
    if results == 'Best k (ex 2)':
        lower = 3
    elif results == 'k only':
        lower = max_k
    else:
        lower = 2
    for k in range(lower,max_k+1):
        clusters, medoids, assign, pathways_medoids, Frequency, Score = run_clustering(comp_Matrix, set_medoids, df, k)
        if include_centroids == 'Yes':
            # Append this k's centroid assignment as a sheet/column in the workbook.
            column_name = 'Centroids K = ' + str(k)
            save_centroids = pd.DataFrame({'Pathways' : df, column_name: assign})
            with pd.ExcelWriter(save_location + 'Cluster_Centroids.xlsx', engine="openpyxl", mode='a') as writer:
                save_centroids.to_excel(writer,save_name + '_df')
        if results == 'All':
            cframe = cluster_results(cframe, y, k, set_medoids, medoids, pathways_medoids, Frequency, Score)
            y += 120
        if results == 'k only':
            cframe = cluster_results(cframe, y, k, set_medoids, medoids, pathways_medoids, Frequency, Score)
        else:
            # Track the k with the best (>=) silhouette score so far.
            if Score >= Max_Score:
                Max_Score = Score
                Best_k = k
                medoids_Best_k = medoids
                pathways_medoids_Best_k = pathways_medoids
                Frequency_Best_k = Frequency
                Score_Best_k = Score
                Best_clusters = clusters
    if results == 'Best k' or results == 'Best k (ex 2)':
        cframe = cluster_results(cframe, y, Best_k, set_medoids, medoids_Best_k, pathways_medoids_Best_k, Frequency_Best_k, Score_Best_k)
    cframe.Show()
    if results == 'k only':
        return clusters
    elif results == 'All':
        return
    else:
        return Best_clusters
#======== Process =================
def violinplot(non_zero, difference_mean, k, axes, no_cons):
    """
    Generates violin plot of the transitions percentage points difference
    between the original transitions and the cluster transitions.

    The title encodes "k_numberOfConnections_meanDifference".
    """
    axes.violinplot(non_zero, showmeans=True)
    axes.set_ylim([0,1.1])
    axes.set_title(str(k) + '_' + str(no_cons) + '_' + str(round(difference_mean,3)), fontsize=14)
    axes.tick_params(axis='both', which='major', labelsize=14)
    return axes
def subplot_shape(size):
    """
    Get the most square grid shape [cols, rows] for *size* subplots.
    If *size* has only one factor pair (e.g. it is prime), increase it
    until a more square layout exists, allowing empty slots.
    """
    def factor_pairs(n):
        # All [i, n//i] pairs with i <= sqrt(n).
        return [[i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0]

    pairs = factor_pairs(size)
    while len(pairs) == 1:
        size += 1
        pairs = factor_pairs(size)
    # The pair with the smallest sum is the most square one.
    return min(pairs, key=sum)
def difference(letters, centroids, counts, original_transitions):
    """
    Calculates the difference matrix:
    Takes the absolute difference of percentage point from original to cluster transitions.

    Returns (difference_matrix, number_of_nonzero_centroid_connections).
    """
    Start, centroid_transitions, Matrix_prob = transitions.get_transitions(centroids, letters, counts)
    number_connections = np.count_nonzero(centroid_transitions)
    # Element-wise |original - centroid| over every transition cell.
    Difference_c = [[abs(a-b) for a,b in zip(original_transitions[j],centroid_transitions[j])] for j in range(len(original_transitions))]
    return(Difference_c, number_connections)
def process_cluster_results(cframe, results, set_medoids, tol, original_non_zero, highlight_results):
    """
    Generate static text to display process clustering results.

    Writes header lines followed by one line per entry of
    ``highlight_results`` ([k, connections, mean difference, silhouette]).
    """
    # NOTE(review): ``x`` is unused (positions are hard-coded to 20).
    x = 20
    y = 20
    medoids_label = 'The initial medoids were :' + str(set_medoids)
    medoids_text = wx.StaticText(cframe.panel, id=wx.ID_ANY, label=medoids_label, pos=(20,y))
    y += 20
    def_results = 'The results are displayed as: [k, number of connections, percentage points different, silhouette score]'
    def_results = wx.StaticText(cframe.panel, id=wx.ID_ANY, label=def_results, pos=(20,y))
    y += 20
    results_label_2 = ''
    if results != 'k only':
        results_label = 'The highlighted results below were within tolerance ' + str(tol) + ' of ' + str(original_non_zero) + ' connections'
        if results != 'All':
            results_label_2 = 'The suggested \'best\' results are shown results. \nNote this is just a suggestion, as highlighted is the k with the closest number of connection \nand lowest mean percentage points difference'
    else:
        results_label = 'The selected k results are shown below'
    results_text = wx.StaticText(cframe.panel, id=wx.ID_ANY, label=results_label, pos=(20,y))
    y += 20
    if results_label_2 != '':
        results_text = wx.StaticText(cframe.panel, id=wx.ID_ANY, label=results_label_2, pos=(20,y))
        # Extra space for the three-line suggestion text.
        y += 60
    for row in highlight_results:
        row_label = wx.StaticText(cframe.panel, id=wx.ID_ANY, label=str(row), pos=(20,y))
        y += 20
    return cframe
def process_cluster(data, df, letters, comp_Matrix, set_medoids, max_k, save_location, save_name, tol, results, include_centroids, adjust):
    """
    Takes in user specifications for process clustering.
    Uses difference matrix and difference mean to evaluate clustering.
    Saves solution as specified.

    Sweeps k, comparing each clustering's transition matrix against the
    original one; 'Best k' modes keep the k whose connection count is
    closest to the original (ties broken by lower mean difference).
    """
    cframe = ClusterFrame()
    # Lower bound of the k sweep depends on the requested mode.
    if results == 'Best k (ex 2)':
        lower = 3
    elif results == 'k only':
        lower = max_k
    else:
        lower = 2
    if results == 'All':
        shape = subplot_shape(max_k)
        pos = 0
        fig, ax = plt.subplots(shape[1], shape[0], figsize=[15,20])
    else:
        fig, ax = plt.subplots(1, 1, figsize=[15,20])
    # Baseline transitions computed from the full, unclustered data.
    Start, original_transitions, Matrix_prob = transitions.get_transitions(data.pathways, letters, adjust)
    original_non_zero = np.count_nonzero(original_transitions)
    all_centroids = pd.DataFrame()
    highlight_results = ['']
    # setup for best results
    best_diff_cons = original_non_zero
    best_diff_mean = 1.0
    best_k = 0
    for k in range(lower,max_k+1):
        clusters, medoids, assign, pathways_medoids, Frequency, Score = run_clustering(comp_Matrix, set_medoids, df.pathway, k)
        # get results
        counter_name = 'counter_' + str(k)
        prop_counter_name = 'prop_counter_' + str(k)
        propergated_clusters_foot = transitions.propergate_clusters(df, clusters)
        current_selected = pd.DataFrame({str(k) : pathways_medoids,
                                        counter_name : [Frequency[v] for v in medoids],
                                        prop_counter_name : [len(cluster) for cluster in propergated_clusters_foot]})
        all_centroids = pd.concat([all_centroids, current_selected], axis=1, sort=False)
        # calculate difference
        diff, no_cons = difference(letters, current_selected[str(k)], current_selected[prop_counter_name], original_transitions)
        non_zero = []
        for row in diff:
            non_zero += [r for r in row if r > 0]
        difference_value = sum([sum(row) for row in diff])
        difference_mean = difference_value/len(non_zero)
        # set up display results
        diff_cons = abs(original_non_zero - no_cons)
        if results == 'k only':
            highlight_results.append([k, no_cons, difference_mean, Score])
        elif diff_cons <= tol:
            if results == 'All':
                highlight_results.append([k, no_cons, difference_mean, Score])
            else:
                # Prefer the k whose connection count is closest to the
                # original; break ties on the lower mean difference.
                if diff_cons < best_diff_cons:
                    highlight_results[0] = [k, no_cons, difference_mean, Score]
                    best_current_selected = current_selected
                    best_diff_cons = diff_cons
                    best_diff_mean = difference_mean
                    best_k = k
                if diff_cons == best_diff_cons:
                    if difference_mean < best_diff_mean:
                        highlight_results[0] = [k, no_cons, difference_mean, Score]
                        best_current_selected = current_selected
                        best_diff_cons = diff_cons
                        best_diff_mean = difference_mean
                        best_k = k
        # produce plot
        if results == 'All':
            axes = ax[int(pos/shape[0]), pos%shape[0]]
            pos+=1
            violinplot(non_zero, difference_mean, k , axes, no_cons)
        elif results == 'k only':
            violinplot(non_zero, difference_mean, k , ax, no_cons)
    if results == 'Best k' or results == 'Best k (ex 2)':
        if best_k != 0:
            # NOTE(review): plots the metrics of the *last* k evaluated,
            # not of best_k -- presumably intentional; confirm.
            violinplot(non_zero, difference_mean, k , ax, no_cons)
    # remove blank subplots
    if results == 'All':
        for i in range(shape[0]*shape[1] - (k - 1)):
            fig.delaxes(ax.flatten()[(k-1)+i])
    fig.subplots_adjust(wspace=0.5, hspace=0.5)
    plot_name = save_name + '_' + results + '_' + str(max_k)
    fig.savefig(save_location + 'Plots/Process_Violin_Plots/' + plot_name + '.png', bbox_inches='tight', facecolor="None")
    plt.close()
    if include_centroids == 'Yes':
        column_name = 'Centroids K = ' + str(k)
        save_centroids = pd.DataFrame({'Pathways' : df.pathway, column_name: assign})
        with pd.ExcelWriter(save_location + 'Process_Centroids.xlsx', engine="openpyxl", mode='a') as writer:
            all_centroids.to_excel(writer,save_name)
            save_centroids.to_excel(writer, save_name + '_df')
    cframe = process_cluster_results(cframe, results, set_medoids, tol, original_non_zero, highlight_results)
    cframe.Show()
    if results == 'k only':
        return (k, current_selected, plot_name)
    elif results != 'All':
        if best_k != 0 :
            return (best_k, best_current_selected, plot_name)
        else:
            return (None, None, None)
    else:
        return (k, current_selected, plot_name) | 2.34375 | 2 |
model.py | chinmaydas96/CarND-Behavioral-Cloning-P3 | 0 | 12785307 | <filename>model.py
## Import the necessary libraries
import os
import csv
import cv2
import sklearn
import numpy as np
from keras.models import Sequential
from keras.layers import Lambda, Conv2D, MaxPool2D, Cropping2D, Flatten, Dense,Dropout
from keras.callbacks import ModelCheckpoint
from keras.utils import plot_model
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import random
## Read the driving log CSV and collect one row per recorded sample.
lines = []
with open('./new_data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    # Skip the header row if one is present (None avoids StopIteration on empty files).
    next(reader, None)
    for line in reader :
        lines.append(line)
## Split the samples into a training set (80%) and a validation set (20%).
training_data, validation_data = train_test_split(lines,test_size=0.2)
def make_dataset(line, images, measurements):
    """
    Parse one CSV row and append the center/left/right camera images
    (plus horizontally flipped copies) and their steering angles to the
    given output lists; both lists are mutated in place.

    :param line: CSV row [center_path, left_path, right_path, steering, ...]
    :param images: list that BGR images are appended to
    :param measurements: list that steering angles are appended to
    """
    # correction applied to the steering angle for the side cameras
    steering_angle_correction = 0.2

    def local_path(recorded_path):
        # The simulator logged absolute paths; remap them into ./new_data/.
        # strip() guards against the leading space the simulator writes
        # before the left/right columns.
        return "./new_data/" + recorded_path.strip().replace('/Users/bat/image_data/', '')

    # BUG FIX: the left and right images previously re-read line[0] (the
    # center image path); columns 1 and 2 hold the left/right camera paths.
    image_center = cv2.imread(local_path(line[0]))
    image_left = cv2.imread(local_path(line[1]))
    image_right = cv2.imread(local_path(line[2]))
    steering_angle = float(line[3])
    # correct angles for left and right image
    steering_angle_left = steering_angle + steering_angle_correction
    steering_angle_right = steering_angle - steering_angle_correction
    # add original and horizontally flipped images to the list of images
    images.append(image_center)
    images.append(cv2.flip(image_center, 1))
    images.append(image_left)
    images.append(cv2.flip(image_left, 1))
    images.append(image_right)
    images.append(cv2.flip(image_right, 1))
    # add corresponding measurements (flipped image => negated angle)
    measurements.append(steering_angle)
    measurements.append(-steering_angle)
    measurements.append(steering_angle_left)
    measurements.append(-steering_angle_left)
    measurements.append(steering_angle_right)
    measurements.append(-steering_angle_right)
### Batch generator: yields shuffled (features, labels) batches forever.
def generator(samples, batch_size=64):
    """Build augmented batches on demand so the full set never sits in RAM."""
    total = len(samples)
    while True:
        # Reshuffle the sample order at the start of every epoch.
        random.shuffle(samples)
        for start in range(0, total, batch_size):
            batch = samples[start:start + batch_size]
            batch_images = []
            batch_angles = []
            for sample in batch:
                make_dataset(sample, batch_images, batch_angles)
            features = np.array(batch_images)
            labels = np.array(batch_angles)
            yield sklearn.utils.shuffle(features, labels)
## Create the training and validation batch generators.
batch_size = 64
train_generator = generator(training_data, batch_size=batch_size)
validation_generator = generator(validation_data, batch_size=batch_size)

## Define the network architecture (NVIDIA-style CNN).
model = Sequential()
# Crop the sky (top 50 px) and hood (bottom 20 px), then normalise to [-0.5, 0.5].
model.add(Cropping2D(cropping=((50, 20), (0, 0)), input_shape=(160, 320, 3)))
model.add(Lambda(lambda x: (x / 255.0) - 0.5))
model.add(Conv2D(24, (5, 5), strides =(2,2), activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(36, (5, 5), strides =(2,2), activation="relu"))
model.add(Dropout(0.3))
model.add(Conv2D(48, (5, 5), strides =(2,2), activation="relu"))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))

model_image = './images/architecture.png'
plot_model(model, to_file=model_image, show_shapes=True)

## Checkpointer saving only the model with the best validation loss.
checkpointer = ModelCheckpoint(filepath='model.h5',
                               verbose=1, save_best_only=True)

## Compile the model with MSE loss and the Adam optimizer.
model.compile(loss='mse', optimizer='adam')

## Train the model.
# BUG FIX: steps_per_epoch / validation_steps count BATCHES, not samples.
# Passing len(training_data) / len(validation_data) made every epoch run
# batch_size times too many batches over re-shuffled data.
model.fit_generator(train_generator,
                    steps_per_epoch=int(np.ceil(len(training_data) / batch_size)),
                    epochs=10,
                    validation_data=validation_generator,
                    validation_steps=int(np.ceil(len(validation_data) / batch_size)),
                    callbacks=[checkpointer])
| 3.203125 | 3 |
codegeneration/code_manip/file_utils.py | sacceus/BabylonCpp | 277 | 12785308 | <gh_stars>100-1000
import os
import os.path
import codecs
from typing import *
from bab_types import *
def files_with_extension(folder: "Folder", extension: "Extension") -> "List[FileFullPath]":
    """Recursively collect files under *folder* whose name ends with *extension*.

    Returned paths are full paths using forward slashes on every platform.
    A missing directory is reported (os.walk would silently yield nothing).
    """
    result = []
    if not os.path.isdir(folder):
        # Make the misconfiguration visible instead of printing an opaque "ouch".
        print("files_with_extension: not a directory: " + folder)
    for root, _, files in os.walk(folder):
        for file_name in files:
            if file_name.endswith(extension):
                full_file = (root + "/" + file_name).replace("\\", "/")
                result.append(full_file)
    return result
def file_has_utf8_bom(file: "FileFullPath") -> bool:
    """Return True if *file* starts with the UTF-8 byte order mark.

    Only the first three bytes are read; the previous implementation loaded
    the entire file into memory just to inspect them.
    """
    with open(file, "rb") as f:
        return f.read(len(codecs.BOM_UTF8)) == codecs.BOM_UTF8
def file_has_windows_crlf(file: "FileFullPath") -> bool:
    """Return True when the file's line endings are predominantly CR(LF).

    Counts lone LFs versus CR characters; raises an Exception as soon as
    both kinds have been seen (mixed line endings).
    """
    LF = b'\n'[0]
    CR = b'\r'[0]
    with open(file, "rb") as f:
        data = f.read()
    lone_lf_count = 0
    cr_count = 0
    previous_was_cr = False
    for byte in data:
        if byte == LF and not previous_was_cr:
            lone_lf_count += 1
        previous_was_cr = (byte == CR)
        if previous_was_cr:
            cr_count += 1
        if lone_lf_count > 0 and cr_count > 0:
            raise Exception("Mixed CR CRLF!")
    return cr_count > lone_lf_count
def read_file_lines_no_eol(file_full_path: "FileFullPath") -> "List[CodeLine]":
    """Read a text file and return its lines split on '\\n' (no EOL kept)."""
    with open(file_full_path, "r") as source:
        return source.read().split("\n")
def write_file_lines_no_eol(file_full_path: "FileFullPath", lines: "List[CodeLine]"):
    """Write *lines* joined with '\\n'; no trailing newline is appended."""
    joined = "\n".join(lines)
    with open(file_full_path, "w") as target:
        target.write(joined)
def write_file_lines_no_eol_formatted(
    file: "FileFullPath",
    lines: "List[CodeLine]",
    has_utf8_bom: bool,
    has_windows_crlf: bool
):
    """Write *lines* as UTF-8, optionally prefixed with a BOM and using
    CRLF line endings; no trailing newline is appended."""
    newline = "\r\n" if has_windows_crlf else "\n"
    content = newline.join(lines)
    with open(file, "wb") as out:
        if has_utf8_bom:
            out.write(codecs.BOM_UTF8)
        out.write(content.encode('utf-8'))
def is_cpp(file: "FileFullPath") -> bool:
    """True when the path names a C++ implementation file (*.cpp)."""
    return file[-4:] == ".cpp"
def h_file_from_cpp(cpp_file: "FileFullPath", all_h_files: "List[FileFullPath]") -> "Optional[FileFullPath]":
    """Find the header matching *cpp_file* by its '<parent_dir>/<basename>.h'
    suffix. Returns None when nothing matches; asserts at most one match."""
    parent_and_name = "/".join(cpp_file.split("/")[-2:])
    needle = parent_and_name.replace(".cpp", "") + ".h"
    matches = [h for h in all_h_files if needle in h]
    assert len(matches) <= 1
    return matches[0] if len(matches) == 1 else None
def make_babylon_include_path(h_file: "FileFullPath"):
    """Return the path relative to the 'include/' directory that contains
    'include/babylon', or None when the file is not under one."""
    marker = "include/babylon"
    idx = h_file.find(marker)
    if idx < 0:
        return None
    return h_file[idx + len("include/"):]
| 2.734375 | 3 |
src/terra/contracts/orion.py | fentas/staketaxcsv | 0 | 12785309 | # known contracts from protocol
CONTRACTS = [
# Airdrop
"terra1rqlu6w83tzrmm04ld97muwakuegn4c09vwgk2l",
]
def handle(exporter, elem, txinfo, contract):
    """Stub handler for Orion protocol transactions; currently only logs
    the contract address (no rows are exported yet)."""
    notice = f"Orion! {contract}"
    print(notice)
| 1.507813 | 2 |
events/admin.py | NyntoFive/big-peach | 4 | 12785310 | <reponame>NyntoFive/big-peach<gh_stars>1-10
from django.contrib import admin
from events.models import Location, Event
class EventAdmin(admin.ModelAdmin):
    # Auto-fill the slug from the event name while typing in the admin form.
    prepopulated_fields = {"slug": ("name",)}
    # Columns shown on the event changelist page.
    list_display = ('name', 'start', 'location')
class LocationAdmin(admin.ModelAdmin):
    # Auto-fill the slug from the location name while typing in the admin form.
    prepopulated_fields = {"slug": ("name",)}
# Register both models with their customised admin classes.
admin.site.register(Location, LocationAdmin)
admin.site.register(Event, EventAdmin)
| 1.960938 | 2 |
test/utils/utils.py | Andres-GC/py-spiffe-HPE | 0 | 12785311 | <reponame>Andres-GC/py-spiffe-HPE<gh_stars>0
def read_file_bytes(filename):
    """Return the entire contents of *filename* as bytes."""
    with open(filename, 'rb') as source:
        contents = source.read()
    return contents
| 2.3125 | 2 |
experimentos/Experimentos_artigo_2/parse.py | dcomp-labPi/G-PETo | 0 | 12785312 | import xml.etree.ElementTree as ET
from os import sys
#Return *.c for 1, *.o for 2
#tipo = sys.argv[1]


def _strip_cu_suffix(filename):
    """Remove a trailing '.cu' extension.

    BUG FIX: the previous code used str.strip(".cu"), which removes any of
    the characters '.', 'c' and 'u' from BOTH ends of the string (e.g.
    'cuda.cu'.strip('.cu') -> 'da'), corrupting object-file names.
    """
    if filename.endswith(".cu"):
        return filename[:-len(".cu")]
    return filename


names_files = []        # *.cu source files declared in startup.xml
names_functions = []    # kernel/function names declared in startup.xml
names = ""              # space-separated .cu file list for the Makefile

root = ET.parse('startup.xml').getroot()

# Open every generated file once; the context manager guarantees they are
# flushed and closed (the original script never closed them).
with open("Makefile", "w") as arquivo, \
        open("functions_names", "w") as functions_file, \
        open("comm/hosts.cfg", "w") as hosts, \
        open("comm/config", "w") as config:

    # hosts.cfg: one "<ip> slots=<ngpus>" line per machine.
    # config:    one "<global rank>\t<local gpu index>" line per GPU.
    rank = 0
    for neighbor in root.iter('ipmachine'):
        hosts.write(neighbor.get('ip') + " slots=" + neighbor.get('ngpus') + "\n")
        for local_gpu in range(int(neighbor.get('ngpus'))):
            config.write(str(rank) + "\t" + str(local_gpu) + "\n")
            rank += 1

    cont = 0
    for neighbor in root.iter('file'):
        names_files.append(neighbor.get('name'))
    for neighbor in root.iter('func'):
        cont += 1
        names_functions.append(neighbor.get('funcName'))
    for neighbor in root.iter('file'):
        names = names + " " + neighbor.get('name')

    # functions_names: the function count followed by one name per line.
    functions_file.write(str(cont) + "\n")
    for func_name in names_functions:
        functions_file.write(func_name + "\n")

    objs = ""
    for cu_file in names_files:
        objs = objs + _strip_cu_suffix(cu_file) + ".o" + " "

    # Emit the Makefile: variables, link rule, one compile rule per .cu file.
    arquivo.write("CUFILES= " + names + "\n")
    arquivo.write("OBJS= " + objs + "\n")
    arquivo.write("TARGET= " + objs + " link.o libfw.so" + "\n")
    arquivo.write("\n")
    arquivo.write("\n")
    arquivo.write("\n")
    arquivo.write("all: $(TARGET)" + "\n")
    arquivo.write("\n")
    arquivo.write("\n")
    arquivo.write("\n")
    arquivo.write("libfw.so: link.o \n")
    arquivo.write("\tg++ -shared -Wl,-soname,libfw.so -o libfw.so " + objs + " comm/comm.o link.o -L/usr/local/cuda-8.0/lib64 -lcudart \n")
    arquivo.write("\n")
    arquivo.write("\n")
    arquivo.write("link.o: " + names + "\n")
    arquivo.write("\tnvcc -m64 -arch=sm_20 -dlink -Xcompiler -fPIC comm/comm.o " + objs + " -o link.o\n")
    arquivo.write("\n")
    arquivo.write("\n")
    for cu_file in names_files:
        base = _strip_cu_suffix(cu_file)
        arquivo.write(base + ".o: " + cu_file + " " + base + ".h comm/comm.h\n")
        arquivo.write("\tnvcc -m64 -arch=sm_20 -dc -Xcompiler -fPIC -c " + cu_file + " \n")
        arquivo.write("\n")
        arquivo.write("\n")
    arquivo.write("clean: \n")
    arquivo.write("\trm -f link.o libfw.so " + objs + " \n")
aws/approveParticipants.py | stevencdang/data_proc_lib | 0 | 12785313 | import mongohq
import mturk
# Metadata for the MTurk HIT whose assignments we want to inspect/approve.
priming_exp = {'desc': "rare vs common individual brainstorming",
               "hit_id": '3ZURAPD288NJZ4HUTVEJXT1RKF51F5'
               }

if __name__ == '__main__':
    # Connect to the MongoHQ-hosted experiment database (test credentials).
    # NOTE(review): `db` is currently unused below -- presumably kept for a
    # later approval-writing step; confirm before removing.
    params = mongohq.ideagenstest
    db = mongohq.get_mongodb(params['url'],
                             params['port'],
                             params['dbName'],
                             params['user'],
                             params['pswd'])

    m = mturk.MechanicalTurk()
    # Confirms Mturk connection is valid by fetching the account balance.
    r = m.request("GetAccountBalance")
    if r.valid:
        print r.get_response_element("AvailableBalance")

    # Get all submitted assignments for the hit
    r = m.request("GetAssignmentsForHIT",
                  {"HITId": priming_exp["hit_id"]}
                  )
    if r.valid:
        print r.get_response_element("assignment")
    else:
        print "failed"
| 2.375 | 2 |
tests/document/test_retrieve.py | constructpm/pysyncgateway | 2 | 12785314 | import pytest
from pysyncgateway.exceptions import DoesNotExist
def test(recipe_document, database):
    """A saved document can be re-fetched: data, rev and channels populate."""
    reload_document = database.get_document('butter_chicken')

    result = reload_document.retrieve()

    assert result is True
    # All keys come back as native str.
    for key in list(reload_document.data.keys()):
        assert isinstance(key, str)
    assert sorted(list(reload_document.data)) == ['ingredients', 'recipe']
    assert reload_document.data['ingredients'] == ['chicken', 'butter']
    assert reload_document.data['recipe'] == 'Mix the chicken and the butter. Voila!'
    assert isinstance(reload_document.data['recipe'], str)
    assert reload_document.rev == recipe_document.rev
    assert reload_document.channels == ()
assert reload_document.channels == ()
def test_channels(permissions_document, database):
    """
    Document with no data can be retrieved; its channels are loaded
    from the server.
    """
    reload_document = database.get_document('permission-list')

    result = reload_document.retrieve()

    assert result is True
    assert reload_document.data == {}
    assert reload_document.rev == permissions_document.rev
    assert reload_document.channels == ('acc.1234', 'acc.7882')
# --- FAILURES ---
def test_missing(empty_document):
    """Retrieving a document that does not exist raises DoesNotExist."""
    with pytest.raises(DoesNotExist):
        empty_document.retrieve()
| 2.34375 | 2 |
tests/documents.py | macrat/macracoin | 0 | 12785315 | <filename>tests/documents.py
import doctest
import unittest
import core.block
import core.chain
import core.errors
import core.message
import core.user
import peer.chainmanager
import peer.client
import peer.endpoint
import peer.peer
class DocTest(unittest.TestCase):
    """Execute the doctests embedded in every core and peer module."""

    def _check_module(self, module):
        # doctest.testmod returns (failure_count, test_count); the run
        # passes when no doctest in the module failed.
        failures, _ = doctest.testmod(module)
        self.assertEqual(failures, 0)

    def test_doctest_core_block(self):
        self._check_module(core.block)

    def test_doctest_core_chain(self):
        self._check_module(core.chain)

    def test_doctest_core_errors(self):
        self._check_module(core.errors)

    def test_doctest_core_message(self):
        self._check_module(core.message)

    def test_doctest_core_user(self):
        self._check_module(core.user)

    def test_doctest_peer_chainmanager(self):
        self._check_module(peer.chainmanager)

    def test_doctest_peer_client(self):
        self._check_module(peer.client)

    def test_doctest_peer_endpoint(self):
        self._check_module(peer.endpoint)

    def test_doctest_peer_peer(self):
        self._check_module(peer.peer)
| 2.328125 | 2 |
doc/examples/skeleton_behaviour.py | andrewbest-tri/py_trees | 201 | 12785316 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import py_trees
import random
class Foo(py_trees.behaviour.Behaviour):
    """Skeleton behaviour demonstrating the py_trees lifecycle hooks
    (setup / initialise / update / terminate). update() returns a random
    status so all three outcomes can be observed."""
    def __init__(self, name):
        """
        Minimal one-time initialisation. A good rule of thumb is
        to only include the initialisation relevant for being able
        to insert this behaviour in a tree for offline rendering to
        dot graphs.

        Other one-time initialisation requirements should be met via
        the setup() method.
        """
        super(Foo, self).__init__(name)

    def setup(self):
        """
        When is this called?
          This function should be either manually called by your program
          to setup this behaviour alone, or more commonly, via
          :meth:`~py_trees.behaviour.Behaviour.setup_with_descendants`
          or :meth:`~py_trees.trees.BehaviourTree.setup`, both of which
          will iterate over this behaviour, it's children (it's children's
          children ...) calling :meth:`~py_trees.behaviour.Behaviour.setup`
          on each in turn.

          If you have vital initialisation necessary to the success
          execution of your behaviour, put a guard in your
          :meth:`~py_trees.behaviour.Behaviour.initialise` method
          to protect against entry without having been setup.

        What to do here?
          Delayed one-time initialisation that would otherwise interfere
          with offline rendering of this behaviour in a tree to dot graph
          or validation of the behaviour's configuration.

          Good examples include:

          - Hardware or driver initialisation
          - Middleware initialisation (e.g. ROS pubs/subs/services)
          - A parallel checking for a valid policy configuration after
            children have been added or removed
        """
        self.logger.debug("  %s [Foo::setup()]" % self.name)

    def initialise(self):
        """
        When is this called?
          The first time your behaviour is ticked and anytime the
          status is not RUNNING thereafter.

        What to do here?
          Any initialisation you need before putting your behaviour
          to work.
        """
        self.logger.debug("  %s [Foo::initialise()]" % self.name)

    def update(self):
        """
        When is this called?
          Every time your behaviour is ticked.

        What to do here?
          - Triggering, checking, monitoring. Anything...but do not block!
          - Set a feedback message
          - return a py_trees.common.Status.[RUNNING, SUCCESS, FAILURE]
        """
        self.logger.debug("  %s [Foo::update()]" % self.name)
        # Random choices stand in for real sensing/decision logic.
        ready_to_make_a_decision = random.choice([True, False])
        decision = random.choice([True, False])
        if not ready_to_make_a_decision:
            return py_trees.common.Status.RUNNING
        elif decision:
            self.feedback_message = "We are not bar!"
            return py_trees.common.Status.SUCCESS
        else:
            self.feedback_message = "Uh oh"
            return py_trees.common.Status.FAILURE

    def terminate(self, new_status):
        """
        When is this called?
           Whenever your behaviour switches to a non-running state.
            - SUCCESS || FAILURE : your behaviour's work cycle has finished
            - INVALID : a higher priority branch has interrupted, or shutting down
        """
        self.logger.debug("  %s [Foo::terminate().terminate()][%s->%s]" % (self.name, self.status, new_status))
| 2.953125 | 3 |
pdbparse/construct/protocols/application/xwindows.py | ax330d/ida_pdb_loader | 50 | 12785317 | """
X-Windows (TCP/IP protocol stack)
"""
| 0.832031 | 1 |
lbrynet/schema/claim.py | abueide/lbry | 0 | 12785318 | <reponame>abueide/lbry
import os.path
import json
from string import ascii_letters
from typing import List, Tuple, Iterator, TypeVar, Generic
from decimal import Decimal, ROUND_UP
from binascii import hexlify, unhexlify
from google.protobuf.json_format import MessageToDict
from google.protobuf.message import DecodeError
from hachoir.parser import createParser as binary_file_parser
from hachoir.metadata import extractMetadata as binary_file_metadata
from hachoir.core.log import log as hachoir_log
from torba.client.hash import Base58
from torba.client.constants import COIN
from lbrynet.schema import compat
from lbrynet.schema.base import Signable
from lbrynet.schema.mime_types import guess_media_type
from lbrynet.schema.types.v2.claim_pb2 import (
Claim as ClaimMessage,
Fee as FeeMessage,
Location as LocationMessage,
Language as LanguageMessage
)
hachoir_log.use_print = False
class Claim(Signable):
    """Top-level claim message: a tagged union of a Stream or a Channel.

    ``from_bytes`` also transparently decodes the two legacy wire formats
    (v0 JSON schema and v1 protobuf types).
    """

    __slots__ = 'version',
    message_class = ClaimMessage

    def __init__(self, claim_message=None):
        super().__init__(claim_message)
        self.version = 2

    @property
    def is_stream(self):
        return self.message.WhichOneof('type') == 'stream'

    @property
    def is_channel(self):
        return self.message.WhichOneof('type') == 'channel'

    @property
    def stream_message(self):
        # Materialise the 'stream' oneof branch if the type is undecided.
        if self.is_undetermined:
            self.message.stream.SetInParent()
        if not self.is_stream:
            raise ValueError('Claim is not a stream.')
        return self.message.stream

    @property
    def stream(self) -> 'Stream':
        return Stream(self)

    @property
    def channel_message(self):
        # Materialise the 'channel' oneof branch if the type is undecided.
        if self.is_undetermined:
            self.message.channel.SetInParent()
        if not self.is_channel:
            raise ValueError('Claim is not a channel.')
        return self.message.channel

    @property
    def channel(self) -> 'Channel':
        return Channel(self)

    def to_dict(self):
        """Render the protobuf message as a dict, keeping proto field names."""
        return MessageToDict(self.message, preserving_proto_field_name=True)

    @classmethod
    def from_bytes(cls, data: bytes) -> 'Claim':
        """Decode a claim, falling back to legacy formats on protobuf failure.

        Legacy detection: a leading '{' means the old JSON schema (v0);
        a first byte other than 0/1 means the old types_v1 protobufs.
        """
        try:
            return super().from_bytes(data)
        except DecodeError:
            claim = cls()
            if data[0] == ord('{'):
                claim.version = 0
                compat.from_old_json_schema(claim, data)
            elif data[0] not in (0, 1):
                claim.version = 1
                compat.from_types_v1(claim, data)
            else:
                raise
            return claim
I = TypeVar('I')


class BaseMessageList(Generic[I]):
    """Thin list-like wrapper over a repeated protobuf field.

    Subclasses set ``item_class`` (the wrapper type for one entry) and
    implement :meth:`append` to parse a plain value into a new message.
    """

    __slots__ = 'message',

    item_class = None  # wrapper class applied to each repeated-field entry

    def __init__(self, message):
        self.message = message

    def add(self) -> I:
        """Append an empty message to the repeated field and wrap it."""
        return self.item_class(self.message.add())

    def extend(self, values: List[str]):
        """Append each value in *values* via :meth:`append`."""
        for value in values:
            self.append(value)

    def append(self, value: str):
        # BUG FIX: ``raise NotImplemented`` raised a TypeError at call time
        # (NotImplemented is not an exception class).
        raise NotImplementedError

    def __len__(self):
        return len(self.message)

    def __iter__(self) -> Iterator[I]:
        for lang in self.message:
            yield self.item_class(lang)

    def __getitem__(self, item) -> I:
        return self.item_class(self.message[item])
class Dimmensional:
    """Mixin adding width/height accessors for image and video messages.

    NOTE(review): the misspelled name ("Dimmensional") is kept; renaming
    would break subclasses and importers elsewhere in the package.
    Assumes the host class provides a ``message`` with width/height fields.
    """

    __slots__ = ()

    @property
    def width(self) -> int:
        return self.message.width

    @width.setter
    def width(self, width: int):
        self.message.width = width

    @property
    def height(self) -> int:
        return self.message.height

    @height.setter
    def height(self, height: int):
        self.message.height = height

    @property
    def dimensions(self) -> Tuple[int, int]:
        """(width, height) as a single tuple; settable the same way."""
        return self.width, self.height

    @dimensions.setter
    def dimensions(self, dimensions: Tuple[int, int]):
        self.message.width, self.message.height = dimensions
class Playable:
    """Mixin adding a playback duration for audio/video stream messages.

    Assumes the host class provides a ``message`` with a duration field.
    """

    __slots__ = ()

    @property
    def duration(self) -> int:
        return self.message.duration

    @duration.setter
    def duration(self, duration: int):
        self.message.duration = duration

    def set_duration_from_path(self, file_path):
        """Best-effort: probe the media file with hachoir and record its
        duration in seconds. Failures (missing/unreadable/unsupported file)
        are deliberately ignored -- duration is optional metadata."""
        try:
            file_metadata = binary_file_metadata(binary_file_parser(file_path))
            self.duration = file_metadata.getValues('duration')[0].seconds
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            pass
class Image(Dimmensional):
    """Image stream metadata: width/height (via Dimmensional)."""

    __slots__ = 'message',

    def __init__(self, image_message):
        self.message = image_message
class Video(Dimmensional, Playable):
    """Video stream metadata: width/height plus playback duration."""

    __slots__ = 'message',

    def __init__(self, video_message):
        self.message = video_message
class Audio(Playable):
    """Audio stream metadata: playback duration (via Playable)."""

    __slots__ = 'message',

    def __init__(self, audio_message):
        self.message = audio_message
class File:
    """Accessors for the published file's name and size."""

    __slots__ = '_file',

    def __init__(self, file_message):
        self._file = file_message

    @property
    def name(self) -> str:
        return self._file.name

    @name.setter
    def name(self, name: str):
        self._file.name = name

    @property
    def size(self) -> int:
        """File size in bytes."""
        return self._file.size

    @size.setter
    def size(self, size: int):
        self._file.size = size
class Fee:
    """Accessors for a stream's payment fee in LBC, BTC or USD.

    Each currency exposes a Decimal view (lbc/btc/usd) and an integer view
    in the currency's smallest unit (dewies/satoshis/pennies); assigning
    the integer view also sets the fee's currency field.
    """

    __slots__ = '_fee',

    def __init__(self, fee_message):
        self._fee = fee_message

    @property
    def currency(self) -> str:
        """Currency code as a string ('LBC', 'BTC' or 'USD')."""
        return FeeMessage.Currency.Name(self._fee.currency)

    @property
    def address(self) -> str:
        """Payment address, base58-encoded."""
        return Base58.encode(self._fee.address)

    @address.setter
    def address(self, address: str):
        self._fee.address = Base58.decode(address)

    @property
    def address_bytes(self) -> bytes:
        return self._fee.address

    @address_bytes.setter
    def address_bytes(self, address: bytes):
        self._fee.address = address

    @property
    def amount(self) -> Decimal:
        """Fee amount as a Decimal in whichever currency is currently set."""
        if self.currency == 'LBC':
            return self.lbc
        if self.currency == 'BTC':
            return self.btc
        if self.currency == 'USD':
            return self.usd

    # 1 LBC == COIN dewies (the smallest LBC unit).
    DEWIES = Decimal(COIN)

    @property
    def lbc(self) -> Decimal:
        if self._fee.currency != FeeMessage.LBC:
            raise ValueError('LBC can only be returned for LBC fees.')
        return Decimal(self._fee.amount / self.DEWIES)

    @lbc.setter
    def lbc(self, amount: Decimal):
        self.dewies = int(amount * self.DEWIES)

    @property
    def dewies(self) -> int:
        if self._fee.currency != FeeMessage.LBC:
            raise ValueError('Dewies can only be returned for LBC fees.')
        return self._fee.amount

    @dewies.setter
    def dewies(self, amount: int):
        self._fee.amount = amount
        self._fee.currency = FeeMessage.LBC

    # 1 BTC == COIN satoshis.
    SATOSHIES = Decimal(COIN)

    @property
    def btc(self) -> Decimal:
        if self._fee.currency != FeeMessage.BTC:
            raise ValueError('BTC can only be returned for BTC fees.')
        return Decimal(self._fee.amount / self.SATOSHIES)

    @btc.setter
    def btc(self, amount: Decimal):
        self.satoshis = int(amount * self.SATOSHIES)

    @property
    def satoshis(self) -> int:
        if self._fee.currency != FeeMessage.BTC:
            raise ValueError('Satoshies can only be returned for BTC fees.')
        return self._fee.amount

    @satoshis.setter
    def satoshis(self, amount: int):
        self._fee.amount = amount
        self._fee.currency = FeeMessage.BTC

    # 1 USD == 100 pennies; USD amounts are rounded UP to the next penny.
    PENNIES = Decimal('100.0')
    PENNY = Decimal('0.01')

    @property
    def usd(self) -> Decimal:
        if self._fee.currency != FeeMessage.USD:
            raise ValueError('USD can only be returned for USD fees.')
        return Decimal(self._fee.amount / self.PENNIES)

    @usd.setter
    def usd(self, amount: Decimal):
        self.pennies = int(amount.quantize(self.PENNY, ROUND_UP) * self.PENNIES)

    @property
    def pennies(self) -> int:
        if self._fee.currency != FeeMessage.USD:
            raise ValueError('Pennies can only be returned for USD fees.')
        return self._fee.amount

    @pennies.setter
    def pennies(self, amount: int):
        self._fee.amount = amount
        self._fee.currency = FeeMessage.USD
class Language:
    """Accessors for one language message (language / script / region).

    ``langtag`` composes or parses the three parts as a hyphenated tag,
    e.g. 'en-Latn-US'.
    """

    __slots__ = 'message',

    def __init__(self, message):
        self.message = message

    @property
    def langtag(self) -> str:
        langtag = []
        if self.language:
            langtag.append(self.language)
        if self.script:
            langtag.append(self.script)
        if self.region:
            langtag.append(self.region)
        return '-'.join(langtag)

    @langtag.setter
    def langtag(self, langtag: str):
        # Positional parts: language, optional 4-letter script,
        # optional 2-letter region.
        parts = langtag.split('-')
        self.language = parts.pop(0)
        if parts and len(parts[0]) == 4:
            self.script = parts.pop(0)
        if parts and len(parts[0]) == 2:
            self.region = parts.pop(0)
        assert not parts, f"Failed to parse language tag: {langtag}"

    @property
    def language(self) -> str:
        # Returns None when the field is unset (enum value 0).
        if self.message.language:
            return LanguageMessage.Language.Name(self.message.language)

    @language.setter
    def language(self, language: str):
        self.message.language = LanguageMessage.Language.Value(language)

    @property
    def script(self) -> str:
        if self.message.script:
            return LanguageMessage.Script.Name(self.message.script)

    @script.setter
    def script(self, script: str):
        self.message.script = LanguageMessage.Script.Value(script)

    @property
    def region(self) -> str:
        if self.message.region:
            return LocationMessage.Country.Name(self.message.region)

    @region.setter
    def region(self, region: str):
        self.message.region = LocationMessage.Country.Value(region)
class LanguageList(BaseMessageList[Language]):
    """List of Language messages; values are hyphenated language tags."""
    __slots__ = ()
    item_class = Language

    def append(self, value: str):
        # Parse the tag into language/script/region on a fresh message.
        new_language = self.add()
        new_language.langtag = value
class Location:
    """Accessors for one location message (country/state/city/code/GPS)."""

    __slots__ = 'message',

    def __init__(self, message):
        self.message = message

    def from_value(self, value):
        """Populate from a dict, a JSON object string, or a colon-joined
        'country:state:city:code:latitude:longitude' string; a short value
        starting with a digit is treated as 'latitude:longitude' only."""
        if isinstance(value, str) and value.startswith('{'):
            value = json.loads(value)

        if isinstance(value, dict):
            for key, val in value.items():
                setattr(self, key, val)

        elif isinstance(value, str):
            parts = value.split(':')
            if len(parts) > 2 or (parts[0] and parts[0][0] in ascii_letters):
                # Named form: leading fields are country/state/city/code;
                # empty segments are skipped.
                country = parts and parts.pop(0)
                if country:
                    self.country = country
                state = parts and parts.pop(0)
                if state:
                    self.state = state
                city = parts and parts.pop(0)
                if city:
                    self.city = city
                code = parts and parts.pop(0)
                if code:
                    self.code = code
            latitude = parts and parts.pop(0)
            if latitude:
                self.latitude = latitude
            longitude = parts and parts.pop(0)
            if longitude:
                self.longitude = longitude

        else:
            raise ValueError(f'Could not parse country value: {value}')

    @property
    def country(self) -> str:
        # Returns None when the field is unset (enum value 0).
        if self.message.country:
            return LocationMessage.Country.Name(self.message.country)

    @country.setter
    def country(self, country: str):
        self.message.country = LocationMessage.Country.Value(country)

    @property
    def state(self) -> str:
        return self.message.state

    @state.setter
    def state(self, state: str):
        self.message.state = state

    @property
    def city(self) -> str:
        return self.message.city

    @city.setter
    def city(self, city: str):
        self.message.city = city

    @property
    def code(self) -> str:
        return self.message.code

    @code.setter
    def code(self, code: str):
        self.message.code = code

    # Coordinates are stored as integers in 1e-7 degree units.
    GPS_PRECISION = Decimal('10000000')

    @property
    def latitude(self) -> str:
        if self.message.latitude:
            return str(Decimal(self.message.latitude) / self.GPS_PRECISION)

    @latitude.setter
    def latitude(self, latitude: str):
        latitude = Decimal(latitude)
        assert -90 <= latitude <= 90, "Latitude must be between -90 and 90 degrees."
        self.message.latitude = int(latitude * self.GPS_PRECISION)

    @property
    def longitude(self) -> str:
        if self.message.longitude:
            return str(Decimal(self.message.longitude) / self.GPS_PRECISION)

    @longitude.setter
    def longitude(self, longitude: str):
        longitude = Decimal(longitude)
        assert -180 <= longitude <= 180, "Longitude must be between -180 and 180 degrees."
        self.message.longitude = int(longitude * self.GPS_PRECISION)
class LocationList(BaseMessageList[Location]):
    """List of Location messages parsed from strings or dicts."""
    __slots__ = ()
    item_class = Location

    def append(self, value):
        # Delegate parsing of the raw value to Location.from_value.
        new_location = self.add()
        new_location.from_value(value)
class BaseClaimSubType:
    """Shared metadata accessors for Channel and Stream claims."""

    __slots__ = 'claim', 'message'

    def __init__(self, claim: Claim):
        self.claim = claim or Claim()

    @property
    def title(self) -> str:
        return self.message.title

    @title.setter
    def title(self, title: str):
        self.message.title = title

    @property
    def description(self) -> str:
        return self.message.description

    @description.setter
    def description(self, description: str):
        self.message.description = description

    @property
    def thumbnail_url(self) -> str:
        return self.message.thumbnail_url

    @thumbnail_url.setter
    def thumbnail_url(self, thumbnail_url: str):
        self.message.thumbnail_url = thumbnail_url

    @property
    def tags(self) -> List:
        return self.message.tags

    @property
    def languages(self) -> LanguageList:
        return LanguageList(self.message.languages)

    @property
    def langtags(self) -> List[str]:
        """Language tags (e.g. 'en-US') for every language entry."""
        return [l.langtag for l in self.languages]

    @property
    def locations(self) -> LocationList:
        return LocationList(self.message.locations)

    def to_dict(self):
        """Render the underlying protobuf message as a plain dict."""
        return MessageToDict(self.message, preserving_proto_field_name=True)

    def update(self, **kwargs):
        """Apply keyword updates.

        The list fields (tags/languages/locations) accept ``clear_<field>``
        booleans plus a single string or a list of values; every remaining
        kwarg is set as an attribute on this object.
        """
        for l in ('tags', 'languages', 'locations'):
            if kwargs.pop(f'clear_{l}', False):
                # BUG FIX: this previously always cleared 'tags', so
                # clear_languages / clear_locations wiped the wrong field.
                self.message.ClearField(l)
            items = kwargs.pop(l, None)
            if items is not None:
                if isinstance(items, str):
                    getattr(self, l).append(items)
                elif isinstance(items, list):
                    getattr(self, l).extend(items)
                else:
                    raise ValueError(f"Unknown {l} value: {items}")
        for key, value in kwargs.items():
            setattr(self, key, value)
class Channel(BaseClaimSubType):
    """Channel claim: identity metadata plus the channel's public key."""

    __slots__ = ()

    def __init__(self, claim: Claim = None):
        super().__init__(claim)
        self.message = self.claim.channel_message

    @property
    def public_key(self) -> str:
        """Channel public key as a hex string."""
        return hexlify(self.message.public_key).decode()

    @public_key.setter
    def public_key(self, sd_public_key: str):
        self.message.public_key = unhexlify(sd_public_key.encode())

    @property
    def public_key_bytes(self) -> bytes:
        return self.message.public_key

    @public_key_bytes.setter
    def public_key_bytes(self, public_key: bytes):
        self.message.public_key = public_key

    @property
    def contact_email(self) -> str:
        return self.message.contact_email

    @contact_email.setter
    def contact_email(self, contact_email: str):
        self.message.contact_email = contact_email

    @property
    def homepage_url(self) -> str:
        return self.message.homepage_url

    @homepage_url.setter
    def homepage_url(self, homepage_url: str):
        self.message.homepage_url = homepage_url

    @property
    def cover_url(self) -> str:
        return self.message.cover_url

    @cover_url.setter
    def cover_url(self, cover_url: str):
        self.message.cover_url = cover_url
class Stream(BaseClaimSubType):
    """Stream claim: a published file plus its media and fee metadata."""

    __slots__ = ()

    def __init__(self, claim: Claim = None):
        super().__init__(claim)
        self.message = self.claim.stream_message

    def update(
            self, file_path=None, stream_type=None,
            fee_currency=None, fee_amount=None, fee_address=None,
            **kwargs):
        """Apply keyword updates.

        ``<subtype>_<attr>`` kwargs (e.g. video_width) are routed to the
        image/video/audio sub-message and imply the stream_type;
        *file_path* fills in the media type, file size and (for playable
        types) duration; the fee_* kwargs set the payment fee.
        """
        duration_was_not_set = True
        sub_types = ('image', 'video', 'audio')
        for key in list(kwargs.keys()):
            for sub_type in sub_types:
                if key.startswith(f'{sub_type}_'):
                    stream_type = sub_type
                    sub_obj = getattr(self, sub_type)
                    sub_obj_attr = key[len(f'{sub_type}_'):]
                    setattr(sub_obj, sub_obj_attr, kwargs.pop(key))
                    if sub_obj_attr == 'duration':
                        duration_was_not_set = False
                    break

        if stream_type is not None:
            if stream_type not in sub_types:
                raise Exception(
                    f"stream_type of '{stream_type}' is not valid, must be one of: {sub_types}"
                )

            sub_obj = getattr(self, stream_type)
            if duration_was_not_set and file_path and isinstance(sub_obj, Playable):
                # Probe the media file for a duration unless one was given.
                sub_obj.set_duration_from_path(file_path)

        super().update(**kwargs)

        if file_path is not None:
            self.media_type = guess_media_type(file_path)
            if not os.path.isfile(file_path):
                raise Exception(f"File does not exist: {file_path}")
            self.file.size = os.path.getsize(file_path)
            if self.file.size == 0:
                raise Exception(f"Cannot publish empty file: {file_path}")

        if fee_amount and fee_currency:
            if fee_address:
                self.fee.address = fee_address
            if fee_currency.lower() == 'lbc':
                self.fee.lbc = Decimal(fee_amount)
            elif fee_currency.lower() == 'btc':
                self.fee.btc = Decimal(fee_amount)
            elif fee_currency.lower() == 'usd':
                self.fee.usd = Decimal(fee_amount)
            else:
                raise Exception(f'Unknown currency type: {fee_currency}')

    @property
    def sd_hash(self) -> str:
        """Stream descriptor hash as a hex string."""
        return hexlify(self.message.sd_hash).decode()

    @sd_hash.setter
    def sd_hash(self, sd_hash: str):
        self.message.sd_hash = unhexlify(sd_hash.encode())

    @property
    def sd_hash_bytes(self) -> bytes:
        return self.message.sd_hash

    @sd_hash_bytes.setter
    def sd_hash_bytes(self, sd_hash: bytes):
        self.message.sd_hash = sd_hash

    @property
    def author(self) -> str:
        return self.message.author

    @author.setter
    def author(self, author: str):
        self.message.author = author

    @property
    def license(self) -> str:
        return self.message.license

    @license.setter
    def license(self, license: str):
        self.message.license = license

    @property
    def license_url(self) -> str:
        return self.message.license_url

    @license_url.setter
    def license_url(self, license_url: str):
        self.message.license_url = license_url

    @property
    def release_time(self) -> int:
        return self.message.release_time

    @release_time.setter
    def release_time(self, release_time: int):
        self.message.release_time = release_time

    @property
    def media_type(self) -> str:
        return self.message.media_type

    @media_type.setter
    def media_type(self, media_type: str):
        self.message.media_type = media_type

    @property
    def fee(self) -> Fee:
        return Fee(self.message.fee)

    @property
    def has_fee(self) -> bool:
        return self.message.HasField('fee')

    @property
    def file(self) -> File:
        return File(self.message.file)

    @property
    def image(self) -> Image:
        return Image(self.message.image)

    @property
    def video(self) -> Video:
        return Video(self.message.video)

    @property
    def audio(self) -> Audio:
        return Audio(self.message.audio)
| 1.789063 | 2 |
apps/notifications/views/NotificationView.py | Nelson-Morais/HA-OOAD | 0 | 12785319 | <reponame>Nelson-Morais/HA-OOAD
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
NotificationView controls the Notifications of the System
@author <NAME>, <NAME> ,<NAME>
Projekt OOAD Hausarbeit WiSe 2020/21
"""
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.views import View
from apps.notifications.models import Message
class NotificationView(View):
    """View class serving and creating user notifications."""

    @login_required(login_url='login')
    def get_notifications(request):
        """
        Return the logged-in user's most recent notifications as JSON.

        :param request: HTTP request
        :return: returns Json response containing the Users notifications
        """
        # Newest first, capped at the 15 most recent messages.
        recent = (
            Message.objects
            .filter(user_owner_id=request.user.id)
            .order_by('-created_at')[:15]
        )
        payload = [
            {"title": message.title, "content": message.content}
            for message in recent
        ]
        return JsonResponse(payload, content_type='application/json', safe=False)

    def add_notification(user_id, title, content):
        """
        Create and persist a new notification for a user.

        :param user_id: User ID to be notified
        :param title: Title of the notification
        :param content: content (text) of the notification
        :return: return
        """
        Message(user_owner_id=user_id, title=title, content=content).save()
        return
| 1.664063 | 2 |
montlake/tslasso/main.py | sjkoelle/montlake | 8 | 12785320 | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/tslasso.main.ipynb (unless otherwise specified).
__all__ = ['run_exp']
# Cell
from ..atomgeom.features import get_features,get_D_feats_feats
from ..atomgeom.utils import get_atoms_4
from ..simulations.rigidethanol import get_rigid_ethanol_data
from ..utils.utils import get_234_indices, get_atoms3_full, get_atoms4_full, data_stream_custom_range, get_cosines
from ..geometry.geometry import get_geom, get_wlpca_tangent_sel, get_rm_tangent_sel
from ..statistics.normalization import normalize_L212
from ..optimization.gradientgrouplasso import get_sr_lambda_parallel
from ..optimization.utils import get_selected_function_ids,get_selected_functions_lm2
from ..utils.replicates import Replicate, get_supports_brute_tslasso,get_supports_lasso
from megaman.embedding import SpectralEmbedding
import dill as pickle
import os
import sys
import numpy as np
import itertools
from itertools import permutations,combinations
from sklearn.decomposition import TruncatedSVD
import pathos
from pathos.multiprocessing import ProcessingPool as Pool
# Cell
def run_exp(positions, hparams):
    """Run the tangent-space lasso (TS-lasso) experiment end to end.

    Featurizes atomic positions into rotation/translation-invariant geometric
    features, reduces them with truncated SVD, builds a neighborhood geometry
    and spectral embedding, computes feature gradients per replicate, runs
    group-lasso support selection (plain and two-stage), and pickles all
    results to ``hparams.outdir``.

    :param positions: array of shape (n_samples, n_atoms, 3) — assumed from the
        ``shape[0]``/``shape[1]`` usage below; TODO confirm.
    :param hparams: experiment hyper-parameter namespace (d, n_components,
        atoms*_feat/_dict flags, radius, n_neighbors, nreps, nsel, optimizer
        settings, output paths, ...).
    """
    d = hparams.d
    n_components = hparams.n_components
    atoms2_feat = hparams.atoms2_feat
    atoms3_feat = hparams.atoms3_feat
    atoms4_feat = hparams.atoms4_feat
    atoms2_dict = hparams.atoms2_dict
    atoms3_dict = hparams.atoms3_dict
    atoms4_dict = hparams.atoms4_dict
    diagram = hparams.diagram
    ii = np.asarray(hparams.ii)
    jj = np.asarray(hparams.jj)
    # NOTE(review): no separator between hparams.name and the suffix — output
    # file is "<name>results_tslasso"; presumably intentional, confirm.
    outfile = hparams.outdir + '/' + hparams.name + 'results_tslasso'
    #load geometric features
    natoms = positions.shape[1]
    n = positions.shape[0]
    # Enumerate all atom pairs/triples/quadruples as candidate features.
    atoms2 = np.asarray(list(itertools.combinations(range(natoms), 2)))
    atoms2full = atoms2
    atoms3 = np.asarray(list(itertools.combinations(range(natoms), 3)))
    atoms4 = np.asarray(list(itertools.combinations(range(natoms), 4)))
    atoms3full = get_atoms3_full(atoms3)
    atoms4full = get_atoms4_full(atoms4)
    # Feature groups are toggled by the hparams flags; an empty array disables
    # that group downstream.
    if atoms2_feat:
        atoms2_feats = atoms2full
    else:
        atoms2_feats = np.asarray([])
    if atoms3_feat:
        atoms3_feats = atoms3full
    else:
        atoms3_feats = np.asarray([])
    if atoms4_feat:
        atoms4_feats = atoms4full
    else:
        atoms4_feats = np.asarray([])
    #compute rotation/translation invariant featureization
    cores = pathos.multiprocessing.cpu_count() - 1
    pool = Pool(cores)
    print('feature dimensions',atoms2_feats.shape, atoms3_feats.shape,atoms4_feats.shape)
    #import pdb;pdb.set_trace
    results = pool.map(lambda i: get_features(positions[i],
                                              atoms2 = atoms2_feats,
                                              atoms3 = atoms3_feats,
                                              atoms4 = atoms4_feats),
                       data_stream_custom_range(list(range(n))))
    # Stack per-sample feature vectors and center them.
    data = np.vstack([np.hstack(results[i]) for i in range(n)])
    data = data - np.mean(data, axis = 0)
    #apply SVD
    # NOTE(review): the 50-component reduction is hard-coded here.
    svd = TruncatedSVD(n_components=50)
    data_svd = svd.fit_transform(data)
    #compute geometry
    radius = hparams.radius
    n_neighbors = hparams.n_neighbors
    geom = get_geom(data_svd, radius, n_neighbors)
    print('computing embedding (for comparison)')
    spectral_embedding = SpectralEmbedding(n_components=n_components,eigen_solver='arpack',geom=geom)
    embed_spectral = spectral_embedding.fit_transform(data_svd)
    embedding = embed_spectral
    #obtain gradients
    # Dictionary (candidate-function) groups are selected independently of the
    # featurization groups above.
    if atoms2_dict:
        atoms2_dicts = atoms2full
    else:
        atoms2_dicts = np.asarray([])
    if atoms3_dict:
        atoms3_dicts = atoms3full
    else:
        atoms3_dicts = np.asarray([])
    if atoms4_dict and not diagram:
        atoms4_dicts = atoms4full
    elif atoms4_dict:
        # Diagram mode restricts torsions to those induced by the (ii, jj)
        # bond diagram.
        atoms4_dicts= get_atoms_4(natoms, ii, jj)[0]
    else:
        atoms4_dicts = np.asarray([])
    p = len(atoms2_dicts) + len(atoms3_dicts) + len(atoms4_dicts)
    #get gradients
    replicates = {}
    nreps = hparams.nreps
    nsel = hparams.nsel
    for r in range(nreps):
        #print(i)
        # NOTE(review): Replicate is constructed with n=10000 here but with
        # n=n for replicates_small below — confirm this is intended.
        replicates[r] = Replicate(nsel = nsel, n = 10000)
        replicates[r].tangent_bases_M = get_wlpca_tangent_sel(data_svd, geom, replicates[r].selected_points, d)
        D_feats_feats = np.asarray([get_D_feats_feats(positions[replicates[r].selected_points[i]],
                                                      atoms2in = atoms2_feats,
                                                      atoms3in = atoms3_feats,
                                                      atoms4in = atoms4_feats,
                                                      atoms2out = atoms2_dicts,
                                                      atoms3out = atoms3_dicts,
                                                      atoms4out = atoms4_dicts) for i in range(nsel)])
        # Project dictionary-function gradients into the SVD feature space,
        # normalize, then express them in each point's tangent basis.
        replicates[r].dg_x = np.asarray([svd.transform(D_feats_feats[i].transpose()).transpose() for i in range(nsel)])
        replicates[r].dg_x_normalized = normalize_L212(replicates[r].dg_x)
        replicates[r].dg_M = np.einsum('i b p, i b d -> i d p', replicates[r].dg_x_normalized, replicates[r].tangent_bases_M)
    #run ts lasso
    gl_itermax= hparams.gl_itermax
    reg_l2 = hparams.reg_l2
    max_search = hparams.max_search
    d = hparams.d
    tol = hparams.tol
    learning_rate = hparams.learning_rate
    for r in range(nreps):
        replicates[r].results = get_sr_lambda_parallel(np.asarray([np.identity(d) for i in range(nsel)]), replicates[r].dg_M, gl_itermax,reg_l2, max_search, d, tol,learning_rate)
        replicates[r].get_ordered_axes()
        replicates[r].sel_l = replicates[r].get_selection_lambda()
    #get manifold lasso support
    selected_functions_unique = np.asarray(np.unique(get_selected_function_ids(replicates,d)), dtype = int)
    support_tensor_lasso, supports_lasso = get_supports_lasso(replicates,p,d)
    #get two stage support
    selected_functions_lm2 = get_selected_functions_lm2(replicates)
    support_tensor_ts, supports_ts = get_supports_brute_tslasso(replicates,nreps,p,d,selected_functions_lm2)
    selected_functions_unique_twostage = np.asarray(np.unique(supports_ts), dtype = int)#np.unique(np.asarray(np.where(support_tensor_ts > 0.)[0], dtype = int))
    # pathos pools must be restarted before reuse after close().
    pool.close()
    pool.restart()
    #compute function values for plotting... needs 'order234' for full computation
    # NOTE(review): both lookups below index atoms4_dicts with selected ids;
    # this assumes all selected functions are torsions (atoms2/atoms3 empty).
    print('computing selected function values lasso, ' + str(selected_functions_unique))
    selected_function_values = pool.map(
        lambda i: get_features(positions[i],
                               atoms2 = np.asarray([]),
                               atoms3 = np.asarray([]),
                               atoms4 = atoms4_dicts[selected_functions_unique]),
        data_stream_custom_range(list(range(n))))
    selected_function_values_array = np.vstack([np.hstack(selected_function_values[i]) for i in range(n)])
    print('computing selected function values two stage, ' + str(selected_functions_unique_twostage))
    selected_function_values_brute = pool.map(
        lambda i: get_features(positions[i],
                               atoms2 = np.asarray([]),
                               atoms3 = np.asarray([]),
                               atoms4 = atoms4_dicts[selected_functions_unique_twostage]),
        data_stream_custom_range(list(range(n))))
    selected_function_values_array_brute = np.vstack([np.hstack(selected_function_values_brute[i]) for i in range(n)])
    #remove large gradient arrays
    print('prep save')
    replicates_small = {}
    for r in range(nreps):
        replicates_small[r] = Replicate(nsel=nsel, n=n,
                                        selected_points=replicates[r].selected_points)
        replicates_small[r].dg_M = replicates[r].dg_M
        replicates_small[r].cs_reorder = replicates[r].cs_reorder
        replicates_small[r].xaxis_reorder = replicates[r].xaxis_reorder
    print('getting cosines')
    # Pairwise gradient cosines for the first replicate only.
    cosine = get_cosines(replicates[0].dg_M)
    replicates_small[0].cosine_abs = np.mean(np.abs(cosine), axis = 0)
    #prepare to save
    results = {}
    results['replicates_small'] = replicates_small
    results['data'] = data_svd
    results['embed'] = embedding
    results['supports_ts'] = support_tensor_ts, supports_ts
    results['supports_lasso'] = support_tensor_lasso, supports_lasso
    results['supports_lasso_values'] = selected_function_values
    results['supports_ts_values'] = selected_function_values_brute
    results['selected_lasso'] = selected_functions_unique
    results['selected_ts'] = selected_functions_unique_twostage
    results['geom'] = geom
    results['dictionary'] = {}
    results['dictionary']['atoms2'] = atoms2_dicts
    results['dictionary']['atoms3'] = atoms3_dicts
    results['dictionary']['atoms4'] = atoms4_dicts
    #save
    with open(outfile,'wb') as output:
        pickle.dump(results, output, pickle.HIGHEST_PROTOCOL)
nlpproject/main/processing.py | Hrishi2312/IR-reimagined | 0 | 12785321 | <reponame>Hrishi2312/IR-reimagined
from .words import *
from .Node import *
# NOTE(review): these module-level names are immediately shadowed by locals of
# the same names inside processing(); they appear to be dead state kept for
# backwards compatibility — confirm before removing.
connecting_words = []
cnt = 1
different_words = []
total_files = len(files_with_index)
zeroes_and_ones = []
zeroes_and_ones_of_all_words = []
query_words = []
files = []
def processing(query):
    """Evaluate a boolean retrieval query against the inverted index.

    Tokenizes ``query``, splits tokens into search terms and operators
    (and/or/not), builds a 0/1 incidence vector per term from the per-word
    posting linked lists, folds the vectors together with bitwise operators,
    and maps the surviving document ids back to file names.

    Returns [tokenized query, matched terms, operators, remaining incidence
    vectors, matching file names].
    """
    connecting_words = []
    cnt = 1
    different_words = []
    total_files = len(files_with_index)
    zeroes_and_ones = []
    zeroes_and_ones_of_all_words = []
    query_words = []
    files = []
    query = word_tokenize(query)
    # Split tokens into search terms vs. boolean operators.
    for word in query:
        if word.lower() != "and" and word.lower() != "or" and word.lower() != "not":
            different_words.append(word.lower())
        else:
            connecting_words.append(word.lower())
    # Build one incidence vector per known term by walking its posting list;
    # doc ids are 1-based, hence the "- 1" when indexing the vector.
    for word in (different_words):
        if word.lower() in unique_words_all:
            zeroes_and_ones = [0] * total_files
            linkedlist = linked_list_data[word].head
            query_words.append(word)
            while linkedlist.nextval is not None:
                zeroes_and_ones[linkedlist.nextval.doc - 1] = 1
                linkedlist = linkedlist.nextval
            zeroes_and_ones_of_all_words.append(zeroes_and_ones)
    # Fold vectors left-to-right. NOTE(review): every operator reads
    # positions [0] and [1], but "not" removes both and reinserts one while
    # "and"/"or" also consume two — queries mixing "not" with other operators
    # (e.g. "a and not b") can index past the list; confirm intended input
    # grammar before relying on multi-operator queries.
    for word in connecting_words:
        word_list1 = zeroes_and_ones_of_all_words[0]
        word_list2 = zeroes_and_ones_of_all_words[1]
        if word == "and":
            bitwise_op = [w1 & w2 for (w1,w2) in zip(word_list1,word_list2)]
            zeroes_and_ones_of_all_words.remove(word_list1)
            zeroes_and_ones_of_all_words.remove(word_list2)
            zeroes_and_ones_of_all_words.insert(0, bitwise_op);
        elif word == "or":
            bitwise_op = [w1 | w2 for (w1,w2) in zip(word_list1,word_list2)]
            zeroes_and_ones_of_all_words.remove(word_list1)
            zeroes_and_ones_of_all_words.remove(word_list2)
            zeroes_and_ones_of_all_words.insert(0, bitwise_op);
        elif word == "not":
            # "a not b" is evaluated as a AND (NOT b).
            bitwise_op = [not w1 for w1 in word_list2]
            bitwise_op = [int(b == True) for b in bitwise_op]
            zeroes_and_ones_of_all_words.remove(word_list2)
            zeroes_and_ones_of_all_words.remove(word_list1)
            bitwise_op = [w1 & w2 for (w1,w2) in zip(word_list1,bitwise_op)]
            zeroes_and_ones_of_all_words.insert(0, bitwise_op);
    lis = []
    if zeroes_and_ones_of_all_words:
        lis = zeroes_and_ones_of_all_words[0]
    # Translate surviving 1-bits back to file names (1-based index map).
    cnt = 1
    for index in lis:
        if index == 1:
            files.append(files_with_index[cnt])
        cnt = cnt+1
    return [query, query_words, connecting_words, zeroes_and_ones_of_all_words, files]
| 3 | 3 |
addonpayments/api/common/requests.py | javisenberg/addonpayments-Python-SDK | 2 | 12785322 | # -*- encoding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import attr
from attr import ib as Field
from addonpayments.validators import RequestValidator
from addonpayments.utils import GenerationUtils
from addonpayments.mixins import HashMixin
from addonpayments.api.mixins import XmlMixin
@attr.s
class ApiRequest(HashMixin, XmlMixin):
    """
    Super class representing a request to be sent to API.
    This class contains all common attributes and functions for all other classes.
    You can consult the specific documentation of all request fields on the website
    https://desarrolladores.addonpayments.com
    Subclasses values (fields to be defined in the subclasses):
        request_type        Type of the Addonpayments request (auth, receipt-in, payer-new, card-new, ...)
    Mixin HashMixin attributes:
        hash_fields         Hash a string made up of the request values
    Mixin XMLMixin attributes:
        xml_root_tag            If the object is a Request the root tag is <request attributes></ request>.
        xml_root_attributes     Normalized request objects always have timestamp and type attributes in the root tag
    """
    # Mandatory field
    merchantid = Field(validator=RequestValidator.merchant_id)
    # NOTE: field name shadows the builtin `type` — kept because it mirrors
    # the API's XML attribute name.
    type = Field(default=None)
    # Mandatory fields with auto-generation
    timestamp = Field(default=None, validator=RequestValidator.timestamp)
    orderid = Field(default=None, validator=RequestValidator.order_id)
    # Mandatory fields generated later
    sha1hash = Field(default=None, validator=RequestValidator.sha1hash)
    # Optional field
    account = Field(default='', validator=RequestValidator.account)
    # Static variables
    # Defined in subclasses
    request_type = ''
    # Default values for XmlMixin, all XML requests starts with <request type='' timestamp=''>
    xml_root_tag = 'request'
    xml_root_attributes = ['timestamp', 'type']
    def __attrs_post_init__(self):
        """
        This method will be called after the class is fully initialized.
        Uses method to set auto-generate values if they have not been initialized and request type
        """
        # Copy the subclass-defined request type onto the instance field that
        # is serialized into the XML root tag.
        self.type = self.request_type
        gen_utl = GenerationUtils()
        if not self.timestamp:
            self.timestamp = gen_utl.generate_timestamp()
        if not self.orderid:
            self.orderid = gen_utl.generate_order_id()
    def hash(self, secret):
        """
        Set and validate sha1hash
        :param secret: string
        :return: the computed sha1 hash string
        """
        self.sha1hash = self.generate_hash(secret)
        # Validate hash
        # attr.validate re-runs every field validator, raising on failure.
        attr.validate(self)
        return self.sha1hash
| 2.21875 | 2 |
removeNthFromEnd.py | xiaochuan-cd/leetcode | 0 | 12785323 | <gh_stars>0
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def removeNthFromEnd(self, head, n):
        """Remove the n-th node from the end of a singly linked list.

        Uses two pointers walking the list once: ``lead`` runs to the end
        while ``trail`` lags so that it stops just before the node to drop.

        :type head: ListNode
        :type n: int
        :rtype: ListNode
        """
        lead = trail = head
        lead_idx = trail_idx = 0
        while lead:
            lead = lead.next
            lead_idx += 1
            # Only start advancing trail once lead is more than n+1 ahead,
            # leaving trail on the predecessor of the target node.
            if lead_idx - n > trail_idx + 1:
                trail_idx += 1
                trail = trail.next
        gap = lead_idx - trail_idx
        if trail == head and n == gap:
            # Target is the head itself: drop it by returning its successor.
            head = head.next
        elif n < gap:
            # Unlink the node right after trail.
            trail.next = trail.next.next
        return head
| 3.703125 | 4 |
joblib/backports.py | aabadie/joblib | 0 | 12785324 | <gh_stars>0
"""
Backports of fixes for joblib dependencies
"""
from distutils.version import LooseVersion
try:
    import numpy as np

    def make_memmap(filename, dtype='uint8', mode='r+', offset=0,
                    shape=None, order='C'):
        """Backport of numpy memmap offset fix.

        See https://github.com/numpy/numpy/pull/8443 for more details.
        The numpy fix is available starting numpy 1.13; on older versions
        the ``offset`` attribute must be restored by hand.
        """
        mm = np.memmap(filename, dtype=dtype, mode=mode, offset=offset,
                       shape=shape, order=order)
        # distutils (and LooseVersion) was removed in Python 3.12, so parse
        # the version with a plain tuple comparison instead.
        try:
            np_major, np_minor = (int(part)
                                  for part in np.__version__.split('.')[:2])
        except ValueError:
            # Unparseable version string: assume a modern numpy that already
            # carries the fix.
            np_major, np_minor = 1, 13
        if (np_major, np_minor) < (1, 13):
            mm.offset = offset
        return mm
except ImportError:
    def make_memmap(filename, dtype='uint8', mode='r+', offset=0,
                    shape=None, order='C'):
        """Stub raised when numpy is unavailable."""
        raise NotImplementedError(
            "'joblib.backports.make_memmap' should not be used "
            'if numpy is not installed.')
| 2.09375 | 2 |
al/model/model_zoo/image_classification.py | kili-technology/active-learning | 3 | 12785325 | <filename>al/model/model_zoo/image_classification.py
import torchvision.models as models
# Instantiate torchvision architectures at import time. No pretrained-weight
# argument is passed, so these are randomly initialized models — presumably
# used as trainable backbones for active learning; confirm with callers.
resnet18 = models.resnet18()
mobilenet = models.mobilenet_v2()
vgg16 = models.vgg16()
crawler/admin.py | mental689/paddict | 1 | 12785326 | <reponame>mental689/paddict
from django.contrib import admin
from crawler.models import Author, Document, Event, Tag, TagAssignment, Comment
# Register your models here.
class MembershipInline(admin.TabularInline):
    """Inline editor for the Document<->Author many-to-many through table."""
    model = Document.authors.through
class DocumentAdmin(admin.ModelAdmin):
    """Document admin: authors are edited via the inline, not the raw M2M widget."""
    inlines = [MembershipInline]
    exclude = ['authors']
class AuthorAdmin(admin.ModelAdmin):
    """Author admin exposing the same membership inline from the author side."""
    inlines = [MembershipInline]
# Register models; Document and Author use the customized admin classes above,
# the rest use the default ModelAdmin.
admin.site.register(Event)
admin.site.register(Author, AuthorAdmin)
admin.site.register(Document, DocumentAdmin)
admin.site.register(Tag)
admin.site.register(TagAssignment)
admin.site.register(Comment)
adhmodel/simulation/iteration_parameters.py | erdc/AdhModel | 3 | 12785327 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import param
import panel as pn
class IterationParameters(param.Parameterized):
    """AdH iteration-control parameters (IP cards) with a Panel UI.

    The ``precedence`` of the two tolerance fields is toggled at runtime so
    the UI only shows the tolerance(s) matching the selected option; a
    negative precedence hides a parameter in Panel.
    """
    non_linear_iterations = param.Integer(
        default=10,
        bounds=(1, None),
        softbounds=(1, 20),
        doc='IP NIT: Maximum number of nonlinear iterations. REQUIRED.',
        precedence=1,
    )
    non_linear_tolerance_option = param.ObjectSelector(
        default='Specify residual and incremental (IP NTL & IP ITL)',
        objects=['Specify residual and incremental (IP NTL & IP ITL)', 'Specify residual (IP NTL)',
                 'Specify incremental (IP ITL)'],
        doc='Types of nonlinear tolerance(s) specified by user.',
        precedence=2,
    )
    non_linear_residual_tolerance = param.Number(
        default=0.001,
        bounds=(1e-20, None),
        softbounds=(0, 1),
        doc='IP NTL: Nonlinear residual tolerance. IP NTL and/or IP ITL REQUIRED.',
        precedence=3,
    )
    non_linear_incremental_tolerance = param.Number(
        default=0.01,
        bounds=(1e-20, None),
        softbounds=(0, 1),
        doc='IP ITL: Nonlinear incremental tolerance. IP NTL and/or IP ITL REQUIRED.',
        precedence=4,
    )
    linear_iterations = param.Integer(
        default=80,
        bounds=(1, None),
        softbounds=(1, 100),
        doc='IP MIT: Maximum number of linear iterations. REQUIRED.',
        precedence=5,
    )
    def __init__(self):
        super(IterationParameters, self).__init__()
        # Sync field visibility with the default tolerance option once at
        # construction; afterwards the watcher below keeps them in sync.
        self._update_non_linear_residual_tolerance()
    @param.depends('non_linear_tolerance_option', watch=True)
    def _update_non_linear_residual_tolerance(self):
        # Hide both tolerance fields, then re-show the one(s) matching the
        # chosen option.
        self.param.non_linear_residual_tolerance.precedence = -1  # NTL
        self.param.non_linear_incremental_tolerance.precedence = -1  # ITL
        objects = list(self.param.non_linear_tolerance_option.get_range())
        if self.non_linear_tolerance_option == objects[0]:  # both
            self.param.non_linear_residual_tolerance.precedence = 3
            self.param.non_linear_incremental_tolerance.precedence = 4
        elif self.non_linear_tolerance_option == objects[1]:  # NTL only
            self.param.non_linear_residual_tolerance.precedence = 3
        else:  # ITL only
            self.param.non_linear_incremental_tolerance.precedence = 4
    def panel(self):
        """Return a Panel pane rendering the visible parameters."""
        return pn.Pane(self.param, show_name=False)
| 2.234375 | 2 |
tests/simple_substitution_tests.py | Steven-Hall/holtzman | 0 | 12785328 | <filename>tests/simple_substitution_tests.py
"""
Test simple variable substituion, for example:
{{ variable }}
in a template should be replaced by the variables value in the rendered template
"""
import pytest
from collections import namedtuple
import holtzman
from holtzman.errors import TemplateError, MissingVariableError
class SimpleSubstitutionTests:
    """Pytest suite for `{{ variable }}` substitution, escaping and error cases."""
    @pytest.mark.parametrize('source', ['{{ variable', '{{ variable }', '{{ }}'])
    def test_invalid_variable_string_throws_error(self, source):
        # Unterminated or empty variable tags must fail at parse time.
        with pytest.raises(TemplateError):
            holtzman.from_string(source)
    @pytest.mark.parametrize('source', ['{{ variable_1 }}'])
    def test_valid_variable_names_do_not_throw_errors(self, source):
        holtzman.from_string(source)
    def test_single_opening_brace_is_ignored(self):
        # A lone '{' is literal text, not the start of a tag.
        source = "{ variable }"
        template = holtzman.from_string(source)
        result = template.render({})
        assert result == "{ variable }"
    def test_escaped_opening_brace_is_replaced(self):
        source = "\\{ variable }"
        template = holtzman.from_string(source)
        result = template.render({})
        assert result == "{ variable }"
    def test_escaped_slash_is_replaced(self):
        source = "\\\\ variable \\\\"
        template = holtzman.from_string(source)
        result = template.render({})
        assert result == "\\ variable \\"
    def test_non_escaped_back_slashes_throw_exceptions(self):
        source = "\\ variable \\"
        with pytest.raises(TemplateError):
            holtzman.from_string(source)
    def test_variable_is_substituted_correctly(self):
        source = "12345{{ variable }}12345"
        template = holtzman.from_string(source)
        result = template.render({"variable": "value"})
        assert result == "12345value12345"
    def test_error_is_thrown_if_template_variable_missing_from_dictionary(self):
        source = "{{ variable }}"
        template = holtzman.from_string(source)
        with pytest.raises(MissingVariableError) as error:
            template.render({})
        # The error must name the variable that was missing.
        assert error.value.variable == 'variable'
    def test_error_is_thrown_if_template_variable_missing_from_object(self):
        # Rendering against an object resolves variables via attributes.
        source = "{{ variable }}"
        template = holtzman.from_string(source)
        Object = namedtuple('Object', [])
        with pytest.raises(MissingVariableError) as error:
            template.render(Object())
        assert error.value.variable == 'variable'
    def test_same_variable_is_substituted_multiple_times(self):
        source = "{{ variable }} {{ variable }}"
        template = holtzman.from_string(source)
        result = template.render({"variable": "value"})
        assert result == "value value"
    def test_multiple_variables_are_substituted_correctly(self):
        source = "{{ variable1 }} {{ variable2 }} {{ variable3 }}"
        template = holtzman.from_string(source)
        result = template.render({
            "variable1": "value_1",
            "variable2": "value_2",
            "variable3": "value_3"
        })
        assert result == "value_1 value_2 value_3"
| 3.203125 | 3 |
scripts/MGCluster/mgcluster.py | fplaza/CAMISIM | 88 | 12785329 | __author__ = 'hofmann'
import os
import glob
import time
import shutil
import tempfile
from scripts.Validator.validator import Validator
class MGCluster(Validator):
    """
    Alignment and clustering of marker genes with references

    Drives mothur from a temporary working directory: aligns candidate marker
    genes against a SILVA reference alignment, computes pairwise distances and
    clusters them. NOTE(review): this module is Python-2-era code (it uses
    ``basestring``); it will not run unmodified on Python 3.
    """
    _label = "MGCluster"
    _cluster_method_choices = ['average', 'furthest', 'nearest']
    _file_name_map = "map.tsv"
    _silva_ref_files = ["mothur_ref_distances", "mothur_ref_names", "mothur_alignment_ref.fasta", _file_name_map]
    # mothur command: cluster.split
    _mothur_cmd_cluster_split = "; ".join([
        'unique.seqs(fasta={mg_fasta})',
        'align.seqs(candidate=current, template={ref_align}, align=gotoh, flip=t, processors={processors})',
        'remove.seqs(accnos=(unknown).unique.flip.accnos, fasta=current, name=current)',
        'merge.files(input=(unknown).names-{ref_names}, output=(unknown).merged.names)',
        'merge.files(input=(unknown).pick.names-{ref_names}, output=(unknown).merged.names)',
        'set.current(name=(unknown).merged.names, column={local_dist})',
        'dist.seqs(oldfasta={ref_align}, column=current, cutoff={cutoff}, processors={processors}, calc=onegap, countends=F)',
        'set.current(column={local_dist})',
        'cluster.split(cutoff={cutoff}, method={method}, precision={precision}, column={local_dist}, name=(unknown).merged.names)'
    ])
    # mothur command: cluster
    _mothur_cmd_cluster = "; ".join([
        "unique.seqs(fasta={mg_fasta})",
        "align.seqs(candidate=current, template={ref_align}, align=gotoh, flip=t, processors={processors})",
        "remove.seqs(accnos=(unknown).unique.flip.accnos, fasta=current, name=current)",
        "merge.files(input=(unknown).names-{ref_names}, output=(unknown).merged.names)",
        "merge.files(input=(unknown).pick.names-{ref_names}, output=(unknown).merged.names)",
        "set.current(name=(unknown).merged.names, column={local_dist})",
        "dist.seqs(oldfasta={ref_align}, column=current, cutoff={cutoff}, processors={processors}, calc=onegap, countends=F)",
        "set.current(column={local_dist})",
        "cluster(cutoff={cutoff}, method={method}, precision={precision}, column={local_dist}, name=(unknown).merged.names)"
    ])
    def __init__(
        self, mothur_executable, directory_silva_reference, max_processors=1, temp_directory=None,
        logfile=None, verbose=False, debug=False):
        """
        Constructor

        @param mothur_executable: File path to mothur binary
        @type mothur_executable: str | unicode
        @param directory_silva_reference: Path to directory with SILVA reference database files
        @type directory_silva_reference: str | unicode
        @param max_processors: Maximum number of available processors
        @type max_processors: int | long
        @param temp_directory: Directory for temporary data
        @type temp_directory: str | unicode
        @param logfile: file handler or file path to a log file
        @type logfile: file | FileIO | StringIO | basestring
        @param verbose: Not verbose means that only warnings and errors will be past to stream
        @type verbose: bool
        @param debug: Display debug messages
        @type debug: bool
        """
        assert self.validate_file(mothur_executable, executable=True)
        assert self.validate_dir(directory_silva_reference, file_names=self._silva_ref_files)
        assert self.validate_number(max_processors, minimum=1)
        assert self.validate_dir(temp_directory)
        # NOTE(review): super() is called with debug=False while self._debug
        # keeps the caller's value — presumably to silence the base logger
        # but keep temp-dir retention; confirm intent.
        super(MGCluster, self).__init__(logfile=logfile, verbose=verbose, debug=False)
        self._mothur_executable = mothur_executable
        self._tmp_dir = tempfile.mkdtemp(dir=temp_directory)
        self._max_processors = max_processors
        self._debug = debug
        # Symlink the reference files into the temp dir; mothur misbehaves
        # with absolute paths (see comments in cluster()).
        ref_silva_distances = self._get_symbolic_link_path(os.path.join(directory_silva_reference, "mothur_ref_distances"))
        ref_silva_names = self._get_symbolic_link_path(os.path.join(directory_silva_reference, "mothur_ref_names"))  # unique
        ref_silva_alignment = self._get_symbolic_link_path(os.path.join(directory_silva_reference, "mothur_alignment_ref.fasta"))
        self._ref_silva_distances = ref_silva_distances
        self._ref_silva_names = ref_silva_names
        self._ref_silva_alignment = ref_silva_alignment
        # local_distance = os.path.join(self._working_dir, "ref.align.dist")
        self._local_distance = "ref.align.dist"
    def __exit__(self, type, value, traceback):
        # Context-manager exit: remove the temp dir unless debugging.
        # NOTE(review): parameter name `type` shadows the builtin (kept for
        # signature compatibility).
        super(MGCluster, self).__exit__(type, value, traceback)
        if not self._debug:
            shutil.rmtree(self._tmp_dir)
    def cluster(self, marker_gene_fasta, output_cluster_file, distance_cutoff, precision=1000, method="average"):
        """
        CLuster Markergenes

        @param marker_gene_fasta: Fasta formatted file with marker genes
        @type marker_gene_fasta: str | unicode
        @param output_cluster_file: Output of mg clustering
        @type output_cluster_file: str | unicode
        @param distance_cutoff: Exclude irrelevant higher distances before clustering
        @type distance_cutoff: int | long
        @param precision: Cluster are made in steps: 10: 0.1, 100: 0.01, 1000: 0.001
        @type precision: int | long
        @param method: Cluster algorithm 'average', 'furthest', 'nearest'
        @type method: str | unicode
        @rtype: None
        """
        assert self.validate_file(marker_gene_fasta)
        assert self.validate_dir(output_cluster_file, only_parent=True)
        assert self.validate_number(distance_cutoff, minimum=0, maximum=1)
        assert self.validate_number(precision, minimum=0)
        assert method in self._cluster_method_choices
        self._logger.info("Starting clustering process")
        start = time.time()
        old_dir = os.getcwd()
        # local paths required or mothur messes up the dist.seqs command, do not use absolut paths!!!
        os.chdir(self._tmp_dir)
        local_marker_gene_fasta = self._get_symbolic_link_path(marker_gene_fasta)
        shutil.copy2(self._ref_silva_distances, self._local_distance)
        mothur_cmd = self._get_mothur_cmd(local_marker_gene_fasta, distance_cutoff, precision, method=method)
        cmd = "{mothur_executable} '#{mothur_cmd}'".format(
            mothur_executable=self._mothur_executable,
            mothur_cmd=mothur_cmd)
        # NOTE(review): os.system's return code is not checked; failures are
        # only detected below by the absence of a *.list output file.
        os.system(cmd)
        os.chdir(old_dir)
        project_folder = os.path.dirname(output_cluster_file)
        # mothur writes its clustering result as a single *.list file.
        find_mask_list = os.path.join(self._tmp_dir, "*.list")
        list_of_files = glob.glob(find_mask_list)
        if len(list_of_files) == 0:
            msg = "Clustering with mothur failed #1"
            self._logger.error(msg)
            raise RuntimeError(msg)
        elif len(list_of_files) == 1:
            local_distance = os.path.join(self._tmp_dir, "ref.align.dist")
            if os.path.exists(local_distance):
                if self._debug:
                    shutil.copy2(local_distance, os.path.join(project_folder, "mothur_distances.tsv"))
                shutil.copy2(list_of_files[0], output_cluster_file)
                self._logger.info("Clustering success")
            else:
                msg = "Clustering with mothur failed #2"
                self._logger.error(msg)
                raise RuntimeError(msg)
        else:
            msg = "Clustering with odd result, several files found!"
            self._logger.error(msg)
            raise RuntimeError(msg)
        end = time.time()
        # move logfiles
        find_mask_list = os.path.join(self._tmp_dir, "*.logfile")
        list_of_log_files = glob.glob(find_mask_list)
        for log_file in list_of_log_files:
            log_file_name = os.path.basename(log_file)
            shutil.copy2(log_file, os.path.join(project_folder, log_file_name))
        self._logger.info("Done ({}s)".format(round(end - start), 1))
    def _get_symbolic_link_path(self, original_file_path):
        """
        Get path to local symbolic link since mothur might act odd else.

        @param original_file_path:
        @type original_file_path: str | unicode

        @return: Local path (relative basename inside the temp dir)
        @rtype: str | unicode
        """
        # NOTE(review): `basestring` is Python 2 only.
        assert isinstance(original_file_path, basestring)
        basename = os.path.basename(original_file_path)
        new_path = os.path.join(self._tmp_dir, basename)
        os.symlink(original_file_path, new_path)
        # return new_path
        return basename
    def _get_mothur_cmd(self, marker_gene_fasta, cutoff, precision, method="average"):
        """
        Get command line to run mothur

        @param marker_gene_fasta: Fasta formatted file with marker genes
        @type marker_gene_fasta: str | unicode
        @param cutoff: Exclude irrelevant higher distances before clustering
        @type cutoff: int | long
        @param precision: Cluster are made in steps: 10: 0.1, 100: 0.01, 1000: 0.001
        @type precision: int | long
        @param method: Cluster algorithm 'average', 'furthest', 'nearest'
        @type method: str | unicode

        @return: Command line
        @rtype: str | unicode
        """
        assert self.validate_file(marker_gene_fasta)
        assert self.validate_number(cutoff, minimum=0, maximum=1)
        assert self.validate_number(precision, minimum=0)
        assert method in self._cluster_method_choices
        # basename = os.path.basename(marker_gene_fasta)
        # filename, extension = os.path.splitext(basename)
        filename, extension = os.path.splitext(marker_gene_fasta)
        #
        # mothur_cmd = MGCluster._mothur_cmd_cluster
        # cluster.split variant is used; the plain cluster template is kept
        # above for reference.
        mothur_cmd = MGCluster._mothur_cmd_cluster_split
        return mothur_cmd.format(
            wd=self._tmp_dir,
            debug=self._tmp_dir,
            # filename=os.path.join(self._working_dir, filename),
            filename=filename,
            mg_fasta=marker_gene_fasta,
            ref_align=self._ref_silva_alignment,
            ref_names=self._ref_silva_names,
            local_dist=self._local_distance,
            processors=self._max_processors,
            cutoff=cutoff,
            precision=precision,
            method=method)
    @staticmethod
    def get_file_name_of_map():
        # Name of the mapping file expected inside the SILVA reference dir.
        return MGCluster._file_name_map
| 2.296875 | 2 |
home/admin.py | codejay411/Wheeling_and_dealing | 1 | 12785330 | <gh_stars>1-10
from django.contrib import admin
from .models import ngodetail, donordetail, medicine
# Register your models here.
# Expose the donation-related models in the Django admin with default options.
admin.site.register(ngodetail)
admin.site.register(donordetail)
admin.site.register(medicine)
# admin.site.register(post)
# admin.site.register(postngo)
trainer/__init__.py | Neronjust2017/pytorch-classification-project | 1 | 12785331 | from .trainer import *
from trainer.Bayes_By_Backprop.trainer import *
from trainer.Variational_dropout.trainer import *
from trainer.MC_Dropout.trainer import *
from trainer.Quality_driven_PI.trainer import *
from trainer.Deep_Ensemble.trainer import *
from trainer.Deep_Ensemble.trainer_ensemble import *
from trainer.Quality_driven_PI.trainer_ensemble import * | 1.101563 | 1 |
app/pg_pipeline/retweeter_details.py | s2t2/tweet-analyzer-py | 5 | 12785332 | <filename>app/pg_pipeline/retweeter_details.py
from app.pg_pipeline import Pipeline
# Script entry point: run the retweeter-details download step of the pipeline.
if __name__ == "__main__":
    pipeline = Pipeline()
    pipeline.download_retweeter_details() # takes about 16 minutes for 2.7M users in batches of 2500
| 1.757813 | 2 |
tasks/celery.py | codeb2cc/noweibo | 1 | 12785333 | from datetime import timedelta
from celery import Celery
from ..conf import setting
# Celery application for the "noweibo" project; the broker URL comes from the
# project-level settings module.
celery = Celery(
    'noweibo',
    broker=setting.CELERY_BROKER,
)

celery.conf.update(
    CELERY_TIMEZONE='Asia/Shanghai',
    CELERY_IMPORTS=('noweibo.tasks.periodic', ),
    CELERY_RESULT_BACKEND=setting.CELERY_BACKEND,
    CELERY_IGNORE_RESULT=True,
    CELERY_ACCEPT_CONTENT=['pickle', 'json', ],
    CELERY_TASK_SERIALIZER='pickle',
    # Recycle workers to cap per-process memory growth.
    CELERYD_MAX_TASKS_PER_CHILD=100,
    # All four periodic tasks run on the same configurable interval.
    CELERYBEAT_SCHEDULE={
        'user_update': {
            'task': 'noweibo.tasks.periodic.user_update',
            'schedule': timedelta(minutes=setting.SCHEDULE_PERIODIC),
            'args': (),
        },
        'weibo_update': {
            'task': 'noweibo.tasks.periodic.weibo_update',
            'schedule': timedelta(minutes=setting.SCHEDULE_PERIODIC),
            'args': (),
        },
        'weibo_scan': {
            'task': 'noweibo.tasks.periodic.weibo_scan',
            'schedule': timedelta(minutes=setting.SCHEDULE_PERIODIC),
            'args': (),
        },
        'weibo_delete': {
            'task': 'noweibo.tasks.periodic.weibo_delete',
            'schedule': timedelta(minutes=setting.SCHEDULE_PERIODIC),
            'args': (),
        },
    },
    # Task-failure emails go to the admins below via the configured SMTP host.
    ADMINS=(('<NAME>', '<EMAIL>'), ),
    CELERY_SEND_TASK_ERROR_EMAILS=True,
    SERVER_EMAIL='<EMAIL>',
    EMAIL_HOST='smtp.163.com',
    EMAIL_PORT=25,
    EMAIL_HOST_USER='<EMAIL>',
    EMAIL_HOST_PASSWORD='<PASSWORD>',
)

if __name__ == '__main__':
    celery.start()
| 1.875 | 2 |
bcbio/ngsalign/bwa.py | brentp/bcbio-nextgen | 1 | 12785334 | """Next-gen alignments with BWA (http://bio-bwa.sourceforge.net/)
"""
import contextlib
import gzip
import os
from Bio.SeqIO.QualityIO import FastqGeneralIterator
from bcbio.pipeline import config_utils
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.ngsalign import novoalign
from bcbio.provenance import do
galaxy_location_file = "bwa_index.loc"
def align_bam(in_bam, ref_file, names, align_dir, config):
    """Perform direct alignment of an input BAM file with BWA using pipes.

    This avoids disk IO by piping between processes:

    - samtools sort of input BAM to queryname
    - bedtools conversion to interleaved FASTQ
    - bwa-mem alignment
    - samtools conversion to BAM
    - samtools sort to coordinate

    :param in_bam: path to the input BAM file
    :param ref_file: bwa-indexed reference FASTA
    :param names: dict with sample naming info (uses ``lane`` and ``sample``)
    :param align_dir: output directory for the sorted BAM
    :param config: pipeline configuration dict
    :return: path to the coordinate-sorted output BAM
    """
    out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
    samtools = config_utils.get_program("samtools", config)
    bedtools = config_utils.get_program("bedtools", config)
    bwa = config_utils.get_program("bwa", config)
    resources = config_utils.get_resources("samtools", config)
    num_cores = config["algorithm"].get("num_cores", 1)
    # adjust memory for samtools since used for input and output
    max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
                                         3, "decrease")
    rg_info = novoalign.get_rg_info(names)
    if not utils.file_exists(out_file):
        # work_dir itself is unused; the context keeps a temp dir alive for
        # the duration of the piped command.
        with utils.curdir_tmpdir() as work_dir:
            with file_transaction(out_file) as tx_out_file:
                tx_out_prefix = os.path.splitext(tx_out_file)[0]
                prefix1 = "%s-in1" % tx_out_prefix
                # Single shell pipeline: name-sort -> interleaved FASTQ ->
                # bwa mem (-p interleaved, -M mark split hits) -> BAM ->
                # coordinate sort.
                cmd = ("{samtools} sort -n -o -l 0 -@ {num_cores} -m {max_mem} {in_bam} {prefix1} "
                       "| {bedtools} bamtofastq -i /dev/stdin -fq /dev/stdout -fq2 /dev/stdout "
                       "| {bwa} mem -p -M -t {num_cores} -R '{rg_info}' -v 1 {ref_file} - "
                       "| {samtools} view -b -S -u - "
                       "| {samtools} sort -@ {num_cores} -m {max_mem} - {tx_out_prefix}")
                cmd = cmd.format(**locals())
                do.run(cmd, "bwa mem alignment from BAM: %s" % names["sample"], None,
                       [do.file_nonempty(tx_out_file)])
    return out_file
def can_pipe(fastq_file):
    """Decide whether reads are long enough to use piped bwa-mem alignment.

    bwa-mem handles longer (> 75bp) reads with improved piping.
    Default to no piping if more than half of the first 500 reads are small.

    :param fastq_file: path to a FASTQ file, optionally gzip-compressed
    :return: True when piping is appropriate (short-read fraction <= 0.5)
    """
    min_size = 75
    thresh = 0.5
    tocheck = 500
    shorter = 0
    checked = 0
    if fastq_file.endswith(".gz"):
        handle = gzip.open(fastq_file, "rb")
    else:
        handle = open(fastq_file)
    with contextlib.closing(handle) as in_handle:
        fqit = FastqGeneralIterator(in_handle)
        for i, (_, seq, _) in enumerate(fqit):
            checked += 1
            if len(seq) < min_size:
                shorter += 1
            # Stop after exactly ``tocheck`` reads (the old ``i > tocheck``
            # test examined two extra reads).
            if checked >= tocheck:
                break
    if checked == 0:
        # Empty input: no evidence reads are long enough to pipe.
        return False
    # Bug fix: divide by the number of reads actually examined rather than a
    # fixed 500 -- files with fewer reads previously underestimated the
    # short-read fraction and could wrongly enable piping.
    return (float(shorter) / float(checked)) <= thresh
def align_pipe(fastq_file, pair_file, ref_file, names, align_dir, config):
    """Perform piped alignment of fastq input files, generating sorted output BAM.

    :param fastq_file: first-end FASTQ file
    :param pair_file: second-end FASTQ file, or falsy for single-end input
    :param ref_file: bwa-indexed reference FASTA
    :param names: dict with sample naming info (uses ``lane`` and ``sample``)
    :param align_dir: output directory for the sorted BAM
    :param config: pipeline configuration dict
    :return: path to the coordinate-sorted output BAM
    """
    # Empty string keeps the command template valid for single-end input.
    pair_file = pair_file if pair_file else ""
    out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
    samtools = config_utils.get_program("samtools", config)
    bwa = config_utils.get_program("bwa", config)
    resources = config_utils.get_resources("samtools", config)
    num_cores = config["algorithm"].get("num_cores", 1)
    # adjust memory for samtools since used alongside alignment
    max_mem = config_utils.adjust_memory(resources.get("memory", "2G"),
                                         3, "decrease")
    rg_info = novoalign.get_rg_info(names)
    if not utils.file_exists(out_file):
        # work_dir itself is unused; the context keeps a temp dir alive for
        # the duration of the piped command.
        with utils.curdir_tmpdir() as work_dir:
            with file_transaction(out_file) as tx_out_file:
                tx_out_prefix = os.path.splitext(tx_out_file)[0]
                cmd = ("{bwa} mem -M -t {num_cores} -R '{rg_info}' -v 1 {ref_file} "
                       "{fastq_file} {pair_file} "
                       "| {samtools} view -b -S -u - "
                       "| {samtools} sort -@ {num_cores} -m {max_mem} - {tx_out_prefix}")
                cmd = cmd.format(**locals())
                do.run(cmd, "bwa mem alignment from fastq: %s" % names["sample"], None,
                       [do.file_nonempty(tx_out_file)])
    return out_file
def align(fastq_file, pair_file, ref_file, out_base, align_dir, config,
          names=None):
    """Perform a BWA alignment, generating a SAM file.

    Runs the classic two-step ``bwa aln`` + ``bwa samse``/``sampe`` workflow,
    reusing existing ``.sai`` intermediates when present.

    :param fastq_file: first-end FASTQ file
    :param pair_file: second-end FASTQ file, or None for single-end input
    :param ref_file: bwa-indexed reference FASTA
    :param out_base: base name for the output files
    :param align_dir: output directory
    :param config: pipeline configuration dict
    :param names: unused here; kept for a consistent aligner interface
    :return: path to the generated SAM file
    """
    sai1_file = os.path.join(align_dir, "%s_1.sai" % out_base)
    sai2_file = (os.path.join(align_dir, "%s_2.sai" % out_base)
                 if pair_file else None)
    sam_file = os.path.join(align_dir, "%s.sam" % out_base)
    if not utils.file_exists(sam_file):
        if not utils.file_exists(sai1_file):
            with file_transaction(sai1_file) as tx_sai1_file:
                _run_bwa_align(fastq_file, ref_file, tx_sai1_file, config)
        if sai2_file and not utils.file_exists(sai2_file):
            with file_transaction(sai2_file) as tx_sai2_file:
                _run_bwa_align(pair_file, ref_file, tx_sai2_file, config)
        # sampe for paired-end, samse for single-end.
        align_type = "sampe" if sai2_file else "samse"
        sam_cl = [config_utils.get_program("bwa", config), align_type, ref_file, sai1_file]
        if sai2_file:
            sam_cl.append(sai2_file)
        sam_cl.append(fastq_file)
        if sai2_file:
            sam_cl.append(pair_file)
        with file_transaction(sam_file) as tx_sam_file:
            cmd = "{cl} > {out_file}".format(cl=" ".join(sam_cl), out_file=tx_sam_file)
            do.run(cmd, "bwa {align_type}".format(**locals()), None)
    return sam_file
def _bwa_args_from_config(config):
num_cores = config["algorithm"].get("num_cores", 1)
core_flags = ["-t", str(num_cores)] if num_cores > 1 else []
qual_format = config["algorithm"].get("quality_format", "").lower()
qual_flags = ["-I"] if qual_format == "illumina" else []
return core_flags + qual_flags
def _run_bwa_align(fastq_file, ref_file, out_file, config):
    """Run ``bwa aln`` on a single FASTQ file, writing the ``.sai`` output.

    :param fastq_file: input FASTQ file
    :param ref_file: bwa-indexed reference FASTA
    :param out_file: destination ``.sai`` file
    :param config: pipeline configuration dict (uses ``algorithm.max_errors``)
    """
    # -n caps mismatches in the read; -k caps mismatches in the seed.
    aln_cl = [config_utils.get_program("bwa", config), "aln",
              "-n %s" % config["algorithm"]["max_errors"],
              "-k %s" % config["algorithm"]["max_errors"]]
    aln_cl += _bwa_args_from_config(config)
    aln_cl += [ref_file, fastq_file]
    cmd = "{cl} > {out_file}".format(cl=" ".join(aln_cl), out_file=out_file)
    do.run(cmd, "bwa aln: {f}".format(f=os.path.basename(fastq_file)), None)
| 2.203125 | 2 |
SMPL++/scripts/preprocess.py | Smorodov/SMPLpp | 4 | 12785335 | <reponame>Smorodov/SMPLpp
# Copyright 2018 <NAME>. All rights reserved.
#
# This software implements a 3D human skinning model, SMPL, with tensorflow
# and numpy.
# For more detail, see the paper - SMPL: A Skinned Multi-Person Linear Model -
# published by Max Planck Institute for Intelligent Systems on SIGGRAPH ASIA 2015.
#
# Here we provide the software for research purposes only.
# More information about SMPL is available on http://smpl.is.tue.mpg.
#
# ============================= preprocess.py =================================
# File Description:
#
# This file loads the models downloaded from the official SMPL website, grab
# data and write them in to numpy and json format.
#
# =============================================================================
#!/usr/bin/python2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import numpy as np
import pickle as pkl
def main(args):
    """Main entrance.

    Loads a SMPL-family model pickle, extracts the arrays needed for
    skinning, and re-saves them as a compressed ``.npz`` archive under
    ``result/``.

    Arguments
    ----------
    - args: list of strings
        Command line arguments (currently unused).

    Returns
    ----------
    """
    # Model to convert; switch to the commented line for the SMPL+H model.
    modelName="MANO_left"
    #modelName="SMPLH_female"
    raw_model_path = modelName+'.pkl'
    save_dir = 'result'
    NP_SAVE_FILE = modelName+'.npz'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    np_save_path = os.path.join(save_dir, NP_SAVE_FILE)
    '''
    * Model Data Description * #
    vertices_template: global vertex locations of template - (6890, 3)
    face_indices: vertex indices of each face (triangles) - (13776, 3)
    joint_regressor: joint regressor - (24, 6890)
    kinematic_tree_table: table of kinematic tree - (2, 24)
    weights: weights for linear blend skinning - (6890, 24)
    shape_blend_shapes: shape blend shapes - (6890, 3, 10)
    pose_blend_shapes: pose blend shapes - (6890, 3, 207)

    * Extra Data Description *
    Besides the data above, the official model provide the following things.
    The pickle file downloaded from SMPL website seems to be redundant or
    some of the contents are used for training the model. None of them will
    be used to generate a new skinning.
    bs_stype: blend skinning style - (default)linear blend skinning
    bs_type: blend skinning type - (default) linear rotation minimization
    J: global joint locations of the template mesh - (24, 3)
    J_regressor_prior: prior joint regressor - (24, 6890)
    pose_training_info: pose training information - string list with 6
        elements.
    vert_sym_idxs: symmetrical corresponding vertex indices - (6890, )
    weights_prior: prior weights for linear blend skinning
    '''
    # encoding='latin1' lets Python 3 read the Python 2 pickle shipped by SMPL.
    with open(raw_model_path, 'rb') as f:
        raw_model_data = pkl.load(f,encoding='latin1')
    # Re-pack every array as a C-contiguous buffer with an explicit dtype so
    # downstream (C++) consumers can read the .npz directly.
    vertices_template = np.require(raw_model_data['v_template'],dtype=np.float32,requirements=['C'])
    face_indices = np.require((raw_model_data['f'] + 1),dtype=np.int32,requirements=['C']) # starts from 1
    weights = np.require(raw_model_data['weights'],dtype=np.float32,requirements=['C'])
    shape_blend_shapes = np.require(raw_model_data['shapedirs'],dtype=np.float32,requirements=['C'])
    pose_blend_shapes = np.require(raw_model_data['posedirs'],dtype=np.float32,requirements=['C'])
    # J_regressor is stored sparse; densify before export.
    joint_regressor = np.require(raw_model_data['J_regressor'].toarray(),dtype=np.float32,requirements=['C'])
    kinematic_tree = np.require(raw_model_data['kintree_table'],dtype=np.int32,requirements=['C'])
    print(kinematic_tree)
    model_data_np = {
        'vertices_template': vertices_template,
        'face_indices': face_indices,
        'weights': weights,
        'shape_blend_shapes': shape_blend_shapes,
        'pose_blend_shapes': pose_blend_shapes,
        'joint_regressor': joint_regressor,
        'kinematic_tree': kinematic_tree
    }
    np.savez(np_save_path, **model_data_np)
    print('Save SMPL Model to: ', os.path.abspath(save_dir))
if __name__ == '__main__':
    # The legacy Python 2 version guard that used to live here was dead
    # (commented-out) code and has been removed; the script runs under
    # Python 3 (note the pickle encoding='latin1' in main()).
    main(sys.argv)
| 2.703125 | 3 |
portality/lib/query_filters.py | glauberm/doaj | 0 | 12785336 | from flask_login import current_user
from portality.core import app
from portality import models
# query sanitisers
##################
def public_query_validator(q):
    """Validate an anonymous (public) query.

    Rejects deep paging (offset > 10000) and oversized pages (> 200).
    Faceted queries are allowed; un-faceted queries must be "count"-style
    with a zero result size.
    """
    if q.from_result() > 10000:
        return False  # no deep paging
    if q.size() > 200:
        return False
    return True if q.has_facets() else q.size() == 0
# query filters
###############
def only_in_doaj(q):
    """Filter *q* down to records currently flagged as in DOAJ."""
    in_doaj_clause = {"term": {"admin.in_doaj": True}}
    q.clear_match_all()
    q.add_must(in_doaj_clause)
    return q
def owner(q):
    """Restrict *q* to records owned by the logged-in account."""
    owner_clause = {"term": {"admin.owner.exact": current_user.id}}
    q.clear_match_all()
    q.add_must(owner_clause)
    return q
def update_request(q):
    """Limit *q* to update requests newer than the configured cutoff date."""
    oldest = app.config.get("UPDATE_REQUEST_SHOW_OLDEST")
    q.clear_match_all()
    q.add_must({"range": {"created_date": {"gte": oldest}}})
    return q
def associate(q):
    """Restrict *q* to records assigned to the current user as editor."""
    editor_clause = {"term": {"admin.editor.exact": current_user.id}}
    q.clear_match_all()
    q.add_must(editor_clause)
    return q
def editor(q):
    """Restrict *q* to records in the editor groups the current user edits."""
    group_names = [g.name
                   for g in models.EditorGroup.groups_by_editor(current_user.id)]
    q.clear_match_all()
    q.add_must({"terms": {"admin.editor_group.exact": group_names}})
    return q
def private_source(q):
    """Limit the returned fields to those visible to authenticated users."""
    fields = ["admin.application_status", "suggestion", "admin.ticked",
              "admin.seal", "last_updated", "created_date", "id", "bibjson"]
    q.add_include(fields)
    return q
def public_source(q):
    """Limit the returned fields to the publicly visible subset."""
    fields = ["admin.ticked", "admin.seal", "last_updated",
              "created_date", "id", "bibjson"]
    q.add_include(fields)
    return q
# results filters
#################
def public_result_filter(results, unpacked=False):
    """Strip non-public keys from the ``admin`` section of ES results.

    Only ``ticked`` and ``seal`` survive in ``admin``.  Handles either a
    single unpacked record (``unpacked=True``) or a raw ES response with a
    ``hits.hits`` list; anything else is returned untouched.
    """
    allowed = ("ticked", "seal")

    def _prune(admin):
        # Bug fix: iterate a snapshot of the keys -- deleting from a dict
        # while iterating its .keys() view raises RuntimeError on Python 3.
        for k in list(admin.keys()):
            if k not in allowed:
                del admin[k]

    # Dealing with a single unpacked result
    if unpacked:
        if "admin" in results:
            _prune(results["admin"])
        return results

    # Dealing with a list of es results
    if "hits" not in results or "hits" not in results["hits"]:
        return results
    for hit in results["hits"]["hits"]:
        if "_source" in hit and "admin" in hit["_source"]:
            _prune(hit["_source"]["admin"])
    return results
def prune_author_emails(results, unpacked=False):
    """Remove author email addresses from ES results.

    Works on either a single unpacked record (``unpacked=True``) or a raw
    ES response with a ``hits.hits`` list; anything else is returned
    untouched.
    """
    def _strip(record):
        for author in record.get("bibjson", {}).get("author", []):
            author.pop("email", None)

    # Single unpacked record
    if unpacked:
        _strip(results)
        return results

    # Raw ES response
    if "hits" not in results or "hits" not in results["hits"]:
        return results
    for hit in results["hits"]["hits"]:
        if "_source" in hit:
            _strip(hit["_source"])
    return results
def publisher_result_filter(results, unpacked=False):
    """Strip ``admin`` keys not visible to publishers from ES results.

    The surviving keys are: ticked, seal, in_doaj, related_applications,
    current_application, current_journal, application_status.  Handles
    either a single unpacked record (``unpacked=True``) or a raw ES
    response with ``hits.hits``.
    """
    allowed = ("ticked", "seal", "in_doaj", "related_applications",
               "current_application", "current_journal", "application_status")

    def _prune(admin):
        # Bug fix: iterate a snapshot of the keys -- deleting from a dict
        # while iterating its .keys() view raises RuntimeError on Python 3.
        for k in list(admin.keys()):
            if k not in allowed:
                del admin[k]

    # Dealing with a single unpacked result
    if unpacked:
        if "admin" in results:
            _prune(results["admin"])
        return results

    # Dealing with a list of es results
    if "hits" not in results or "hits" not in results["hits"]:
        return results
    for hit in results["hits"]["hits"]:
        if "_source" in hit and "admin" in hit["_source"]:
            _prune(hit["_source"]["admin"])
    return results
| 2.203125 | 2 |
Bird-Species/train-py-ViT.py | NCcoco/kaggle-project | 0 | 12785337 | <reponame>NCcoco/kaggle-project<gh_stars>0
import os
import numpy as np
import pathlib
import pandas as pd
import keras.api._v2.keras as keras
from sklearn.metrics import confusion_matrix, classification_report
from keras.api._v2.keras import layers, \
losses, regularizers, optimizers
from keras.api._v2.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
import tensorflow_hub as hub
from util.my_tf_callback import LearningRateA, saver
import util.datasets_util as ds_util
from util.util import print_in_color
import matplotlib.pyplot as plt
import math
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
# 定义一个函数创建混淆矩阵和分类报告
def print_info(test_gen, preds, print_code, save_dir, subject):
"""
:param test_gen: 测试集数据集生成器(其指定了生成方式,通常是指向本地图片库)
:param preds: 预测结果
:param print_code:
:param save_dir: 保存目录
:param subject:
:return:
"""
# 获取类名及下标字典
class_dict = test_gen.class_indices
# 获取所有类名
labels = test_gen.labels
# 获取所有文件名称
file_names = test_gen.filenames
error_list = []
true_class = []
pred_class = []
prob_list = []
# 按下标为key 类名为value创建一个新的字典
new_dict = {}
error_indies = []
# 实际预测值数组
y_pred = []
for key, value in class_dict.items():
new_dict[value] = key
# 将所有类名作为目录存储在save_dir下
classes = list(new_dict.values())
# 记录错误的分类次数
errors = 0
for i, p in enumerate(preds):
# 预测值
pred_index = np.argmax(p)
# 实际值
true_index = labels[i]
# 如果预测错误
if pred_index != true_index:
error_list.append(file_names[i])
true_class.append(new_dict[true_index])
pred_class.append(new_dict[pred_index])
# 预测的最高概率装进prob
prob_list.append(p[pred_index])
error_indies.append(true_index)
errors = errors + 1
y_pred.append(pred_index)
if print_code != 0:
if errors > 0:
if print_code > errors:
r = errors
else:
r = print_code
msg = '{0:^28s}{1:^28s}{2:^28s}{3:^16s}' \
.format('Filename', 'Predicted Class', 'True Class', 'Probability')
print_in_color(msg, (0, 255, 0), (55, 65, 80))
for i in range(r):
# TODO 暂时不知道这几行代码干嘛的
split1 = os.path.split(error_list[i])
split2 = os.path.split(split1[0])
fname = split2[1] + '/' + split1[1]
msg = '{0:^28s}{1:^28s}{2:^28s}{3:4s}{4:^6.4f}'.format(fname, pred_class[i], true_class[i], ' ',
prob_list[i])
print_in_color(msg, (255, 255, 255), (55, 65, 60))
else:
msg = '精度为100%,没有错误'
print_in_color(msg, (0, 255, 0), (55, 65, 80))
if errors > 0:
plot_bar = []
plot_class = []
for key, value in new_dict.items():
# 获得被错误分类的类型的计数(例如:假设 丹顶鹤的下标是11,则下面的操作将获得实际为丹顶鹤的鸟被错误分类的数量)
count = error_indies.count(key)
if count != 0:
plot_bar.append(count)
plot_class.append(value)
fig = plt.figure()
fig.set_figheight(len(plot_class) / 3)
fig.set_figwidth(10)
for i in range(0, len(plot_class)):
c = plot_class[i]
x = plot_bar[i]
plt.barh(c, x, )
plt.title("测试集错误分类")
y_true = np.array(labels)
y_pred = np.array(y_pred)
# 最多显示分类错误的30个分类
if len(classes) <= 30:
# 创建混淆矩阵
cm = confusion_matrix(y_true, y_pred)
length = len(classes)
if length < 8:
fig_width = 8
fig_height = 8
else:
fig_width = int(length * 0.5)
fig_height = int(length * 0.5)
plt.figure(figsize=(fig_width, fig_height))
plt.xticks(np.array(length) + 0.5, classes, rotation=90)
plt.yticks(np.array(length) + 0.5, classes, rotation=0)
plt.xlabel("预测的")
plt.ylabel("真实的")
plt.title("混淆矩阵")
plt.show()
clr = classification_report(y_true, y_pred, target_names=classes)
print("Classification Report:\n----------------------\n", clr)
# 定义一个函数绘制训练数据
def tr_plot(tr_data, start_epoch):
# 绘制训练数据和验证数据
tacc = tr_data.history["accuracy"]
tloss = tr_data.history["loss"]
vacc = tr_data.history["val_accuracy"]
vloss = tr_data.history["val_loss"]
# 计算最终迭代了多少次
Epoch_count = len(tacc) + start_epoch
Epochs = [i + 1 for i in range(start_epoch, Epoch_count)]
index_loss = np.argmin(vloss)
val_lowest = vloss[index_loss]
index_acc = np.argmax(vacc)
acc_highest = vacc[index_acc]
sc_label = 'best epoch=' + str(index_loss + 1 + start_epoch)
vc_label = 'best epoch=' + str(index_acc + 1 + start_epoch)
# 创建图表
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 8))
axes[0].plot(Epochs, tloss, 'r', label='训练损失')
axes[0].plot(Epochs, vloss, 'g', label='验证损失')
axes[0].scatter(index_loss + 1 + start_epoch, val_lowest, s=150, c="blue", label=sc_label)
axes[0].set_title('训练和验证损失')
axes[0].set_xlabel("迭代次数")
axes[0].set_ylabel("损失")
axes[0].legend()
axes[1].plot(Epochs, tacc, 'r', label='训练准确率')
axes[1].plot(Epochs, vacc, 'g', label='验证准确率')
axes[1].scatter(index_acc + 1 + start_epoch, acc_highest, s=150, c='blue', label=val_lowest)
axes[1].set_title("训练和验证损失")
axes[1].set_xlabel("迭代次数")
axes[1].set_ylabel("准确率")
axes[1].legend()
plt.show()
# Pixel preprocessing passed to the Keras generators: rescale 0-255 pixel
# values into the [0, 1] range.  (EfficientNet-style models that expect raw
# 0-255 input would skip this.)
def scalar(img):
    """Return *img* rescaled by 1/255."""
    return img * (1. / 255.)
# Build the train / test / validation dataframes from the local dataset folder.
train_df, test_df, valid_df = ds_util.preprocessing("datasets")

# Hyper-parameters.
model_name = "ViT-B_32"
ask_epoch = None
dwell = True
stop_patience = 3
patience = 1
epochs = 10
learning_rate = 0.001
factor = 0.5
dropout_p = 0.2
threshold = 0.95
freeze = True
batch_size = 128
num_classes = 325
image_size = (224, 224)
channels = 3
max_num = 140
min_num = 0
label_column_name = "labels"
work_dir = "./datasets"

test_len = len(test_df)
# Largest divisor of the test-set size that keeps batches <= 80 samples, so
# evaluation covers the test set exactly with whole batches.
test_batch_size = sorted([int(test_len / n) for n in range(1, test_len + 1)
                          if test_len % n == 0 and test_len / n <= 80], reverse=True)[0]

# Balance per-class sample counts between min_num and max_num.
train_df = ds_util.balance(train_df, min_num, max_num, work_dir,
                           label_column_name, image_size)

trgen = ImageDataGenerator(preprocessing_function=scalar, horizontal_flip=True)
tvgen = ImageDataGenerator(preprocessing_function=scalar)

msg = '训练集生成器'
print_in_color(msg, (0, 255, 0), (55, 65, 80))
train_gen = trgen.flow_from_dataframe(
    train_df, x_col='filepaths', y_col='labels',
    target_size=image_size, class_mode='categorical',
    color_mode='rgb', shuffle=True, batch_size=batch_size)

msg = '测试集生成器'
print_in_color(msg, (0, 255, 255), (55, 65, 80))
test_gen = tvgen.flow_from_dataframe(
    test_df, x_col='filepaths', y_col='labels',
    target_size=image_size, class_mode='categorical',
    color_mode='rgb', shuffle=False, batch_size=test_batch_size)

msg = '验证集生成器'
print_in_color(msg, (0, 255, 255), (55, 65, 80))
valid_gen = tvgen.flow_from_dataframe(
    valid_df, x_col='filepaths', y_col='labels',
    target_size=image_size, class_mode='categorical',
    color_mode='rgb', shuffle=True, batch_size=batch_size)

train_steps = int(np.ceil(len(train_gen.labels) / batch_size))
test_steps = int(test_len / test_batch_size)
valid_steps = int(np.ceil(len(valid_gen.labels) / batch_size))
batches = train_steps

# Frozen ViT-B/32 backbone from a local TF-Hub export plus a small dense
# classification head.
model = tf.keras.Sequential([
    # layers.InputLayer((image_size, image_size, 3)),
    hub.KerasLayer(r"transformer/models", trainable=False),
    layers.Dropout(dropout_p),
    layers.Dense(1024, activation="relu", use_bias=True,
                 kernel_regularizer=regularizers.l2(0.02), name="fc1"),
    layers.Dense(num_classes, activation="softmax", name="fc2")
])
model.build(input_shape=(None, 224, 224, 3))
print(model.summary())

model.compile(optimizer=optimizers.Adam(learning_rate=learning_rate),
              loss=losses.CategoricalCrossentropy(),
              metrics=["accuracy"])

# Custom callback handles LR reduction, early stopping and interactive
# continuation prompts.
callbacks = [
    LearningRateA(model=model, base_model=None, patience=patience,
                  stop_patience=stop_patience, threshold=threshold,
                  factor=factor, dwell=dwell, batches=batches, initial_epoch=0,
                  epochs=epochs, ask_epoch=ask_epoch)]
history = model.fit(x=train_gen, epochs=epochs, verbose=0,
                    callbacks=callbacks, validation_data=valid_gen,
                    validation_steps=None, shuffle=False, initial_epoch=0)
tr_plot(history, 0)
# loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
# train_loss = tf.keras.metrics.Mean(name='train_loss')
# train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
#
# optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
#
# valid_loss = tf.keras.metrics.Mean(name='valid_loss')
# valid_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='valid_accuracy')
# tf.config.experimental_run_functions_eagerly(True)
# @tf.function
# def train_step(images, labels, optimizer):
# with tf.GradientTape() as tape:
# predictions = model(images, training=True)
# loss_aux = loss_object(y_true=labels, y_pred=predictions)
# loss = 0.5 * loss_aux + 0.5 * loss_object(y_true=labels, y_pred=predictions)
# gradients = tape.gradient(loss, model.trainable_variables)
# optimizer.apply_gradients(grads_and_vars=zip(gradients, model.trainable_variables))
#
# train_loss(loss)
# train_accuracy(labels, predictions)
#
#
# @tf.function
# def valid_step(images, labels):
# predictions = model(images, training=False)
# v_loss = loss_object(labels, predictions)
#
# valid_loss(v_loss)
# valid_accuracy(labels, predictions)
#
#
# # start training
# for epoch in range(epochs):
# train_loss.reset_states()
# train_accuracy.reset_states()
# valid_loss.reset_states()
# valid_accuracy.reset_states()
# step = 1
#
# while train_steps >= step:
# images, labels = next(train_gen)
# num_labels = []
# for label in labels:
# num_labels.append(np.argmax(label))
# train_step(images, num_labels, optimizer)
#
# print(f"Epoch: {epoch + 1}/{epochs}, "
# f"step: {step}/{train_steps},"
# f"learning_rate: {optimizer.lr.numpy():.7f}"
# f" loss: {train_loss.result():.5f},"
# f" accuracy: {train_accuracy.result():.5f}")
# step += 1
#
# step = 1
# while valid_steps >= step:
# valid_images, valid_labels = next(valid_gen)
# num_labels = []
# for label in valid_labels:
# num_labels.append(np.argmax(label))
# valid_step(valid_images, num_labels)
# step += 1
# print(f"Epoch: {epoch + 1}/{epochs}, "
# f"valid loss: {valid_loss.result():.5f}, "
# f"valid accuracy: {valid_accuracy.result():.5f}, ")
#
# # 每训练一轮就降低80%
# learning_rate = learning_rate * 0.6
# keras.backend.set_value(optimizer.lr, learning_rate)
subject = 'birds'
# Overall test-set accuracy, as a percentage.
acc = model.evaluate(test_gen, steps=test_steps, return_dict=False)[1] * 100
msg = f'accuracy on the test set is {acc:5.2f} %'
print_in_color(msg, (0, 255, 0), (55, 65, 80))
generator = train_gen
scale = 1
# Persist the trained model plus its class-index CSV under work_dir.
model_save_loc, csv_save_loc = saver(work_dir, model, model_name, subject, acc, image_size, scale, generator)
print_code = 0
preds = model.predict(test_gen, steps=test_steps)
print_info(test_gen, preds, print_code, work_dir, subject)
uwsgi/encorea-old/page_header/apps.py | btardio/encore-swarm | 0 | 12785338 | <reponame>btardio/encore-swarm
from django.apps import AppConfig
class PageHeaderConfig(AppConfig):
    """Django application configuration for the ``page_header`` app."""
    # Dotted app label Django uses to register this application.
    name = 'page_header'
| 1.40625 | 1 |
pretrain/training/unsup/__init__.py | archon159/elsa | 13 | 12785339 | def setup(mode, P):
fname = f'{P.dataset}_{P.model}_unsup_{mode}'
if mode == 'simclr':
from .simclr import train
elif mode == 'simclr_CSI':
from .simclr_CSI import train
fname += f'_shift_{P.shift_trans_type}'
else:
raise NotImplementedError()
if P.one_class_idx is not None:
fname += f'_one_class_{P.one_class_idx}'
if P.suffix is not None:
fname += f'_{P.suffix}'
return train, fname
def update_comp_loss(loss_dict, loss_in, loss_out, loss_diff, batch_size):
    """Record the positive/negative/difference components of a contrastive
    loss into their respective running meters."""
    for slot, value in (('pos', loss_in), ('neg', loss_out), ('diff', loss_diff)):
        loss_dict[slot].update(value, batch_size)
def summary_comp_loss(logger, tag, loss_dict, epoch):
    """Write the averaged loss components to the scalar logger under *tag*."""
    for suffix, key in (('/pos', 'pos'), ('/neg', 'neg'), ('', 'diff')):
        logger.scalar_summary(f'{tag}{suffix}', loss_dict[key].average, epoch)
| 2.203125 | 2 |
genetic_algorithm/spectroscopy.py | pozzo-research-group/HEAD | 1 | 12785340 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pyGDM2 import (structures, materials, core,
linear, fields, propagators,
tools)
def get_spectrum(geometry, step, wavelengths):
    '''Obtains a uv-vis spectra for a specified geometry.

    Runs a pyGDM2 plane-wave scattering simulation of a gold structure and
    returns the extinction spectrum normalised to its maximum.

    :param geometry: pyGDM2 mesh point array describing the structure
    :param step: mesh step size
    :param wavelengths: 1-d array of wavelengths to simulate
    :return: tuple of (normalised extinction spectrum, input geometry)
    '''
    material = materials.gold()
    struct = structures.struct(step, geometry, material, verbose=False)
    struct = structures.center_struct(struct)
    field_generator = fields.plane_wave
    kwargs = dict(theta=0, inc_angle=180)
    efield = fields.efield(field_generator,
                           wavelengths=wavelengths, kwargs=kwargs)
    # n1=n2=n3=1.33: structure embedded in a water-like homogeneous medium.
    dyads = propagators.DyadsQuasistatic123(n1 = 1.33, n2 = 1.33, n3 = 1.33)
    sim = core.simulation(struct, efield, dyads)
    sim.scatter(verbose=False)
    field_kwargs = tools.get_possible_field_params_spectra(sim)
    config_idx = 0
    wl, spectrum = tools.calculate_spectrum(sim,
                                            field_kwargs[config_idx], linear.extinct)
    # Column 2 of the spectrum is extinction; normalise to its peak.
    abs_ = spectrum.T[2]/np.max(spectrum.T[2])
    return abs_, geometry
def obtain_spectra(step, radius_mean, radius_std, wavelength):
    '''Calculates the absorption spectra of polydisperse gold spheres that
    have a normally distributed radius.

    Inputs:
        - step: The step size used for the calculation.
        - radius_mean: The mean of the normal distribution used to calculate the radius of the sphere
        - radius_std: The std of the normal distribution used to calculate the radius of the sphere
        - wavelength: A 1-d array of the wavelength values to calculate the absorption spectra
    Outputs:
        - array: A 2d array of the wavelengths and Intensity values.
    '''
    n_spheres = 7
    # Cluster layout: one central sphere plus six axis-aligned neighbours.
    loc_array = np.array([[0, 0, 0], [0, 0, 1], [0, 0, -1], [1, 0, 0],
                          [-1, 0, 0], [0, 1, 0], [0, -1, 0]])
    sample = None
    for i in range(n_spheres):
        # Draw this sphere's radius from N(radius_mean, radius_std).
        # Bug fix: the function arguments were previously shadowed here by
        # hard-coded values (mean=6, std=3), making both parameters dead.
        r = (np.random.randn(1)[0] * radius_std + radius_mean) / step
        geometry = structures.sphere(step, R=r, mesh='cube')
        sphere = np.hstack((geometry[:, 0].reshape(-1, 1) + 30 * loc_array[i, 0] * radius_mean,
                            geometry[:, 1].reshape(-1, 1) + 30 * loc_array[i, 1] * radius_mean,
                            geometry[:, 2].reshape(-1, 1) + 30 * loc_array[i, 2] * radius_mean))
        sample = sphere if sample is None else np.vstack((sample, sphere))
    # NOTE(review): as in the original, the spectrum is computed from the
    # *last* sphere's geometry only; the aggregated ``sample`` cluster is
    # built but never used.  Confirm whether ``sample`` should be passed to
    # get_spectrum instead.
    I, _ = get_spectrum(geometry, step, wavelength)
    array = np.hstack((wavelength.reshape(-1, 1), I.reshape(-1, 1)))
    return array
| 3.078125 | 3 |
pysparkling/fileio/codec/codec.py | lyle-nel/pysparkling | 0 | 12785341 | import logging
log = logging.getLogger(__name__)
class Codec(object):
    """Base codec: a no-op pass-through compressor/decompressor.

    Subclasses override :meth:`compress` and :meth:`decompress`; the base
    implementation returns its input stream unchanged.
    """

    def __init__(self):
        pass

    def compress(self, stream):
        """Return *stream* unchanged (no compression applied).

        :param io.BytesIO stream: Uncompressed input stream.
        :rtype: io.BytesIO
        """
        return stream

    def decompress(self, stream):
        """Return *stream* unchanged (no decompression applied).

        :param io.BytesIO stream: Compressed input stream.
        :rtype: io.BytesIO
        """
        return stream
| 3 | 3 |
UpdateScheduler.py | DriptaSenapati/CovidindiaServer | 5 | 12785342 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 3 16:14:46 2020
@author: Dripta
"""
from covidindia import *
import os
import pickle
# Paths for the updater log and the pickled data served by the app.
file_path = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(file_path, 'static', 'server_data')

# Append progress messages to updater.txt while refreshing every pickled
# dataset.  The ``with`` block closes the log on exit; the redundant
# explicit f.close() from the original has been removed.
with open(os.path.join(file_path, 'updater.txt'), 'a') as f:
    f.write('Update Started\nGathering Data..\n')
    init = initializer(silent=True)
    with open(os.path.join(data_path, 'init.pkl'), 'wb') as init_file:
        pickle.dump(init, init_file)
    filter_data = Data(init)
    with open(os.path.join(data_path, 'filter_data.pkl'), 'wb') as filter_file:
        pickle.dump(filter_data, filter_file)
    f.write('Gathering Demographic Data...\n')
    demo = Demographic_overview(init, silent=True)
    with open(os.path.join(data_path, 'demo.pkl'), 'wb') as demo_file:
        pickle.dump(demo, demo_file)
    f.write('Gathering tested data...\n')
    tested_df = filter_data.tested_subject_data()
    tested_df.to_csv(os.path.join(data_path, 'tested_data.csv'), index=False)
    f.write('Update Done.')
tests/example/endpoints_additional.py | tclh123/aio-openapi | 19 | 12785343 | from typing import List
from aiohttp import web
from openapi.db.path import ApiPath, SqlApiPath
from openapi.exc import JsonHttpException
from openapi.spec import op
from .models import (
MultiKey,
MultiKeyUnique,
SourcePrice,
Task,
TaskAdd,
TaskOrderableQuery,
TaskPathSchema,
TaskPathSchema2,
TaskQuery,
TaskUpdate,
)
# Route tables used by the test application; each endpoint class below
# registers itself on one of these.
additional_routes = web.RouteTableDef()
invalid_path_routes = web.RouteTableDef()
invalid_method_description_routes = web.RouteTableDef()
invalid_method_summary_routes = web.RouteTableDef()
# Bug fix: ``invalid_method_description_routes`` was assigned twice in the
# original; the duplicate (dead) assignment has been removed.
invalid_tag_missing_description_routes = web.RouteTableDef()
@additional_routes.view("/bulk/tasks")
class TaskBulkPath(SqlApiPath):
"""
---
summary: Bulk manage tasks
tags:
- Task
"""
table = "tasks"
@op(body_schema=List[TaskAdd], response_schema=List[Task])
async def post(self):
"""
---
summary: Create Tasks
description: Create a group of Tasks
responses:
201:
description: Created tasks
"""
data = await self.create_list()
return self.json_response(data, status=201)
@additional_routes.view("/transaction/tasks")
class TaskTransactionsPath(SqlApiPath):
"""
---
summary: Manage tasks with transactions
tags:
- Task
- name: Transaction
description: Endpoints that creates a new transaction
"""
table = "tasks"
@op(body_schema=TaskAdd, response_schema=Task)
async def post(self):
"""
---
summary: Create Task
description: Create a Task using transatcion
responses:
201:
description: Created Task
500:
description: Forced raised error
"""
data = await self.json_data()
async with self.db.transaction() as conn:
should_raise = data.pop("should_raise", False)
task = await self.create_one(data=data, conn=conn)
if should_raise:
raise JsonHttpException(status=500)
return self.json_response(data=task, status=201)
@op(query_schema=TaskOrderableQuery, response_schema=List[Task])
async def get(self):
"""
---
summary: Retrieve Tasks
description: Retrieve a list of Tasks using transaction
responses:
200:
description: Authenticated tasks
"""
paginated = await self.get_list()
return paginated.json_response()
@additional_routes.view("/transaction/tasks/{id}")
class TaskTransactionPath(SqlApiPath):
"""
---
summary: Manage Tasks with transactions
tags:
- Task
- Transaction
"""
table = "tasks"
path_schema = TaskPathSchema
@op(response_schema=Task)
async def get(self):
"""
---
summary: Retrieve Task
description: Retrieve an existing Task by ID using transaction
responses:
200:
description: the task
"""
async with self.db.transaction() as conn:
data = await self.get_one(conn=conn)
return self.json_response(data)
@op(body_schema=TaskUpdate, response_schema=Task)
async def patch(self):
"""
---
summary: Update Task
description: Update an existing Task by ID using transaction
responses:
200:
description: the updated task
"""
data = await self.json_data()
async with self.db.transaction() as conn:
should_raise = data.pop("should_raise", False)
task = await self.update_one(data=data, conn=conn)
if should_raise:
raise JsonHttpException(status=500)
return self.json_response(data=task, status=200)
@op()
async def delete(self):
"""
---
summary: Delete Task
description: Delete an existing task using transaction
responses:
204:
description: Task successfully deleted
"""
data = await self.json_data()
async with self.db.transaction() as conn:
should_raise = data.pop("should_raise", False)
await self.delete_one(conn=conn)
if should_raise:
raise JsonHttpException(status=500)
return self.json_response(data={}, status=204)
@additional_routes.view("/transaction/bulk/tasks")
class TaskBulkTransactionPath(SqlApiPath):
"""
---
summary: Bulk manage tasks with transactions
tags:
- Task
- Transaction
"""
table = "tasks"
@op(query_schema=TaskQuery)
async def delete(self):
"""
---
summary: Delete Tasks
description: Bulk delete a group of Tasks using transaction
responses:
204:
description: Tasks successfully deleted
"""
async with self.db.transaction() as conn:
await self.delete_list(query=dict(self.request.query), conn=conn)
return web.Response(status=204)
@op(body_schema=List[TaskAdd], response_schema=List[Task])
async def post(self):
"""
---
summary: Create Tasks
description: Bulk create Tasks using transaction
responses:
201:
description: created tasks
"""
async with self.db.transaction() as conn:
data = await self.create_list(conn=conn)
return self.json_response(data, status=201)
@additional_routes.view("/tasks2/{task_id}")
class TaskPath2(SqlApiPath):
"""
---
tags:
- Task
"""
table = "tasks"
path_schema = TaskPathSchema2
def get_filters(self):
filters = super().get_filters()
return {"id": filters["task_id"]}
@op(response_schema=Task)
async def get(self):
"""
---
summary: Retrieve a Task
description: Retrieve an existing Task by ID
responses:
200:
description: the task
"""
data = await self.get_one(filters=self.get_filters())
return self.json_response(data)
@op(response_schema=Task, body_schema=TaskUpdate)
async def patch(self):
"""
---
summary: Update a Task
description: Update an existing Task by ID
responses:
200:
description: the updated task
"""
data = await self.update_one(filters=self.get_filters())
return self.json_response(data)
@op()
async def delete(self):
"""
---
summary: Delete a Task
description: Delete an existing Task
responses:
204:
description: Task successfully deleted
"""
await self.delete_one(filters=self.get_filters())
return web.Response(status=204)
@additional_routes.view("/simple-list")
class SipleList(ApiPath):
"""
---
tags:
- Task
"""
@op(response_schema=List[int])
async def get(self):
"""
---
summary: Retrieve a list of integer
description: list of simple integers
responses:
200:
description: list
"""
return self.json_response([2, 4, 5])
@additional_routes.view("/multikey")
class MultiKeyPath(SqlApiPath):
"""
---
summary: Create rows in multikey constraint table
tags:
- name: Multikey
description: several keys
"""
table = "multi_key"
@op(response_schema=MultiKey, body_schema=MultiKey)
async def post(self):
"""
---
summary: Create row in multi-column constrained table
description: Create row in multi-column constrained table
responses:
201:
description: New row
"""
data = await self.create_one()
return self.json_response(data, status=201)
@op(response_schema=List[MultiKey])
async def get(self):
"""
---
summary: List multi-column constrained items
description: List multi-column constrained items
responses:
200:
description: List of items
"""
paginated = await self.get_list()
return paginated.json_response()
@additional_routes.view("/multikey-unique")
class MultiKeyUniquePath(SqlApiPath):
"""
---
summary: Create rows in multikey constraint table
tags:
- Multikey
"""
table = "multi_key_unique"
@op(response_schema=MultiKeyUnique, body_schema=MultiKeyUnique)
async def post(self):
"""
---
summary: Create row in multi-column constrained table
description: Create row in multi-column constrained table
responses:
201:
description: New row
"""
data = await self.create_one()
return self.json_response(data, status=201)
@op(response_schema=List[MultiKeyUnique])
async def get(self):
"""
---
summary: List multi-column constrained items
description: List multi-column constrained items
responses:
200:
description: List of items
"""
paginated = await self.get_list()
return paginated.json_response()
@additional_routes.view("/sources")
class SourcePath(ApiPath):
"""
---
summary: Sources
tags:
- name: Sources
description: Sources
"""
@op(response_schema=List[SourcePrice])
async def get(self):
"""
---
summary: List sources
description: List sources
responses:
200:
description: List of sources
"""
return self.json_response([])
@invalid_path_routes.view("/tasks")
class NoTagsTaskPath(SqlApiPath):
"""
---
"""
pass
@invalid_method_summary_routes.view("/tasks")
class NoSummaryMethodPath(SqlApiPath):
"""
---
tags:
- Tag
"""
@op(response_schema=List[Task])
def get(self):
"""
---
description: Valid method description
responses:
200:
description: Valid response description
"""
pass
@invalid_method_description_routes.view("/tasks")
class NoDescriptionMethodPath(SqlApiPath):
"""
---
tags:
- Tag
"""
@op(response_schema=List[Task])
def get(self):
"""
---
summary: Valid method summary
responses:
200:
description: Valid response description
"""
pass
@invalid_tag_missing_description_routes.view("/tasks")
class NoTagDescriptionPath(SqlApiPath):
""" "
---
tags:
- name: Task
description: Simple description
- Random
"""
pass
| 2.359375 | 2 |
lakes/misc/tarscripts/makefilelistIce.py | kojitominaga/scratch | 0 | 12785344 | import os
# Walk the current tree, record every *His.csv.gz path in 'listHis',
# and print a progress line every 1000 files found.
count = 0
with open('listHis', 'w') as f:
    for root, dirs, files in os.walk('.'):
        for name in files:
            if 'His.csv.gz' not in name:
                continue
            p = os.path.join(root, name)
            f.write(p)
            f.write('\n')
            count += 1
            if count % 1000 == 0:
                print(p)
| 2.65625 | 3 |
OZoptics/__init__.py | gregmoille/InstrumentControl | 3 | 12785345 | from .attenuator import Attenuator
| 1.109375 | 1 |
freebasics/__init__.py | praekeltfoundation/mc-freebasics | 0 | 12785346 | from .celery_app import app as the_celery_app # noqa
| 1.03125 | 1 |
migrations/versions/ac89777fdf69_add_admin_field_to_user_table.py | yuanqili/QuickMining | 0 | 12785347 | <filename>migrations/versions/ac89777fdf69_add_admin_field_to_user_table.py
"""add admin field to user table
Revision ID: <KEY>
Revises: ea6057506937
Create Date: 2018-02-25 01:05:03.266887
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'ea6057506937'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add the nullable "is_admin" flag to the user table; pre-existing rows
    # get NULL (presumably treated as non-admin by the app -- TODO confirm).
    op.add_column('user', sa.Column('is_admin', sa.Boolean(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): dropping the column discards any stored values.
    op.drop_column('user', 'is_admin')
    # ### end Alembic commands ###
| 1.359375 | 1 |
generative_structures/explainer/tests.py | RaymondDashWu/generative-structures2 | 0 | 12785348 | <reponame>RaymondDashWu/generative-structures2
from django.test import TestCase, Client
from django.urls import reverse
from django.contrib.staticfiles import finders
class TestPages(TestCase):
    """Smoke tests: the homepage renders and key static assets resolve."""

    def test_get_homepage(self):
        """The root URL responds with HTTP 200."""
        c = Client()
        response = c.get('/')
        self.assertEqual(response.status_code, 200)

    def test_find_static_file(self):
        """Each bundled static asset is discoverable by staticfiles finders."""
        for asset in (
            '../static/index.html',
            '../static/img/bg-masthead.webp',
            '../static/css/creative.min.css',
        ):
            # finders.find returns None when the asset cannot be located.
            self.assertIsNotNone(finders.find(asset))
dfirtrack_artifacts/admin.py | cclauss/dfirtrack | 273 | 12785349 | from django.contrib import admin
from dfirtrack_artifacts.models import (
Artifact,
Artifactpriority,
Artifactstatus,
Artifacttype,
)
# Register your models here.
# Expose the artifact model and its lookup tables in the Django admin with
# the default ModelAdmin. (Removed extraction junk fused onto the last line.)
admin.site.register(Artifact)
admin.site.register(Artifactpriority)
admin.site.register(Artifactstatus)
admin.site.register(Artifacttype)
packages/python/pyfora/Connection.py | ufora/ufora | 571 | 12785350 | <reponame>ufora/ufora<gh_stars>100-1000
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Connection
Manages a connection to a pyfora cluster
"""
import pyfora.Exceptions as Exceptions
import pyfora.Executor as Executor
import pyfora.ObjectConverter as ObjectConverter
import pyfora.RemotePythonObject as RemotePythonObject
import pyfora.SocketIoJsonInterface as SocketIoJsonInterface
import pyfora.ModuleDirectoryStructure as ModuleDirectoryStructure
import threading
# We defer importing SubscribableWebObjects.py to support auto doc generation
# on readthedocs.org without running a full build.
#import pyfora.SubscribableWebObjects as SubscribableWebObjects
import pyfora
import os
class Connection(object):
"""A live connection to a pyfora cluster that can execute submitted Python code.
Note:
This is an internal implementation class that is primarily used by
:class:`~pyfora.Executor.Executor`.
Args:
webObjectFactory (SubscribableWebObjects.WebObjectFactory): A factory
for subscribable web objects.
converter (Optional ObjectConverter.ObjectConverter): an optional object
converter or None for the default converter.
"""
def __init__(self, webObjectFactory, converter):
self.objectConverter = converter
self.webObjectFactory = webObjectFactory
self.closed = False
self.viewOfEntireSystem = self.webObjectFactory.ViewOfEntireCumulusSystem({})
self.subscribeToMessages()
self.logMessageHandler = None
def subscribeToMessages(self):
def onSuccess(messages):
if self.closed:
return
self.pullAllMessages()
def onChanged(messages):
if self.closed:
return
self.pullAllMessages()
self.subscribeToMessages()
def onFailure(err):
pass
self.viewOfEntireSystem.subscribe_totalMessagesEver({
'onSuccess': onSuccess,
'onFailure': onFailure,
'onChanged': onChanged
})
def pullAllMessages(self):
processed = threading.Event()
def onSuccess(messages):
try:
for m in messages:
if self.logMessageHandler:
self.logMessageHandler(m)
else:
if not m['isDeveloperFacing']:
print m['message'],
finally:
processed.set()
def onFailure(err):
processed.set()
self.viewOfEntireSystem.clearAndReturnMostRecentMessages({}, {
'onSuccess': onSuccess,
'onFailure': onFailure
})
return processed
def pullAllMessagesAndProcess(self):
self.pullAllMessages().wait()
def triggerS3DatasetExport(self,
valueAsString,
bucketname,
keyname,
onCompletedCallback):
if not isinstance(valueAsString, RemotePythonObject.ComputedRemotePythonObject):
onCompletedCallback(
Exceptions.PyforaError(
"The argument to triggerS3DatasetExport should be a ComputedRemotePythonObject"
)
)
return
import pyfora.SubscribableWebObjects as SubscribableWebObjects
if not isinstance(valueAsString.computedValue, SubscribableWebObjects.PyforaComputedValue):
onCompletedCallback(
Exceptions.PyforaError(
"The object handle in the object passed to triggerS3DatasetExport should be a ComputedValue"
)
)
return
#first, ensure that the value itself resolves
computedValue = valueAsString.computedValue
computedValueToCalculate = self.webObjectFactory.ComputedValueForMember(
{
'baseComputedValue': computedValue,
'memberName': '@pyfora_string_as_paged_vec_of_char'
})
def onFailure(err):
if not self.closed:
onCompletedCallback(Exceptions.PyforaError(err['message']))
def isFinishedChanged(isFinished):
if not self.closed and isFinished:
self.triggerS3DatasetExportOnFinishedCalculation(
computedValueToCalculate,
bucketname,
keyname,
onCompletedCallback
)
def subscribeToFinished(result):
computedValueToCalculate.subscribe_isFinished({
'onSuccess': isFinishedChanged,
'onFailure': onFailure,
'onChanged': isFinishedChanged
})
computedValueToCalculate.increaseRequestCount(
{},
{'onSuccess':subscribeToFinished, 'onFailure':onFailure}
)
def getClusterStatus(self, onCompletedCallback):
clusterStatus = self.webObjectFactory.PyforaCluster({})
def onSuccess(clusterStatus):
onCompletedCallback(clusterStatus)
def onFailure(err):
onCompletedCallback(Exceptions.PyforaError(err['message']))
clusterStatus.getClusterStatus({}, {
'onSuccess': onSuccess,
'onFailure': onFailure
})
def triggerS3DatasetExportOnFinishedCalculation(self,
computedValue,
bucketname,
keyname,
onCompletedCallback):
def onSuccess(writeToS3TaskObject):
#we have received a WriteToS3Task computed graph location
self.subscribeToWriteToS3TaskResultAndCallCallback(writeToS3TaskObject,
onCompletedCallback)
def onFailure(err):
onCompletedCallback(Exceptions.PyforaError(err['message']))
computedValue.writeToS3(
{'bucketname': bucketname, 'keyname': keyname},
{'onSuccess': onSuccess, 'onFailure': onFailure}
)
def subscribeToWriteToS3TaskResultAndCallCallback(self,
writeToS3TaskObject,
onCompletedCallback):
def onSuccess(result):
if not self.closed and result is not None:
if result['success']:
onCompletedCallback(None)
else:
onCompletedCallback(Exceptions.PyforaError(result['message']))
def onFailure(err):
onCompletedCallback(Exceptions.PyforaError(err['message']))
writeToS3TaskObject.subscribe_successOrError({
'onSuccess': onSuccess,
'onChanged': onSuccess,
'onFailure': onFailure
})
def convertObject(self, objectId, binaryObjectRegistry, callback):
def wrapper(*args, **kwargs):
if not self.closed:
callback(*args, **kwargs)
self.objectConverter.convert(objectId, binaryObjectRegistry, wrapper)
def createComputation(self, fn, args, onCreatedCallback):
"""Create a computation representing fn(*args).
onCreatedCallback - called after defining the object.
called with an Exception.PyforaError if there is an error,
otherwise, called with a ComputedValue object representing the computation
"""
assert isinstance(fn, RemotePythonObject.RemotePythonObject)
assert all([isinstance(arg, RemotePythonObject.RemotePythonObject) for arg in args])
computedValue = self.webObjectFactory.PyforaComputedValue({
'argIds': (fn._pyforaComputedValueArg(),) + tuple(
arg._pyforaComputedValueArg() for arg in args
)
})
def onFailure(err):
if not self.closed:
onCreatedCallback(Exceptions.PyforaError(err))
def onSuccess(computationId):
if not self.closed:
onCreatedCallback(computedValue)
def onChanged(computationId):
pass
computedValue.subscribe_submittedComputationId({
'onSuccess': onSuccess,
'onFailure': onFailure,
'onChanged': onChanged
})
def prioritizeComputation(self,
computedValue,
onPrioritizedCallback,
onCompletedCallback,
onFailedCallback):
"""Prioritize a given computation.
computedValue - the callback result of creating a computation.
onPrioritizedCallback - called with either an error or None on success of the prioritization
onCompletedCallback - called with the "jsonStatus" if the computation finishes with a value
onFailedCallback - called with a pyfora exception if the computation fails
or throws an exception for some reason
"""
def onFailure(err):
if not self.closed:
onPrioritizedCallback(Exceptions.PyforaError(err))
def onSuccess(result):
if not self.closed:
onPrioritizedCallback(None)
self._subscribeToComputationStatus(computedValue,
onCompletedCallback,
onFailedCallback)
computedValue.increaseRequestCount({}, {
'onSuccess': onSuccess,
'onFailure': onFailure
})
def triggerCompilationOnComputation(self, computedValue, onCompleted):
"""Trigger compilation of the code underlying a computation.
This is exclusively used for testing purposes, as it only works when
there is a single in-process cumulus node.
Returns True on success, False on failure.
"""
def onFailure(err):
onCompleted()
def onSuccess(result):
onCompleted()
computedValue.triggerCompilation({}, {
'onSuccess': onSuccess,
'onFailure': onFailure
})
@staticmethod
def cancelComputation(computedValue):
"""Cancel a computation."""
def completed(_):
pass
computedValue.cancel({}, {
'onSuccess': completed,
'onFailure': completed
})
def expandComputedValueToDictOfAssignedVarsToProxyValues(self, computedValue, onExpanded):
"""Given a computedValue that should represent a dictionary,
expand it to a dictionary of ComputedValues.
If it's not a dictionary, or something else happens, this will resolve to a PyforaError.
"""
def onResult(result):
if result is not None and not self.closed:
onExpanded(result)
def onFailure(result):
if isinstance(result, Exception):
onExpanded(result)
else:
onExpanded(
Exceptions.PyforaError(
"Unknown error translating to dictionary of proxies: %s" + str(result)
)
)
computedValue.increaseRequestCount(
{},
{'onSuccess': lambda *args: None, 'onFailure': lambda *args: None}
)
computedValue.subscribe_pyforaDictToAssignedVarsToComputedValues({
'onSuccess': onResult,
'onFailure': onFailure,
'onChanged': onResult
})
def expandComputedValueToTupleOfProxies(self, computedValue, onExpanded):
def onResult(result):
if result is not None and not self.closed:
onExpanded(result)
def onFailure(result):
if isinstance(result, Exception):
onExpanded(result)
else:
onExpanded(
Exceptions.PyforaError(
"Unknown error translating to dictionary of proxies: %s" + str(result)
)
)
computedValue.increaseRequestCount(
{},
{'onSuccess': lambda *args: None, 'onFailure': lambda *args: None}
)
computedValue.subscribe_pyforaTupleToTupleOfComputedValues({
'onSuccess': onResult,
'onFailure': onFailure,
'onChanged': onResult
})
def _subscribeToComputationStatus(self, computedValue, onCompletedCallback, onFailedCallback):
def statusChanged(jsonStatus):
if not self.closed:
if jsonStatus is not None:
if jsonStatus['status'] == 'failure':
onFailedCallback(Exceptions.PyforaError(jsonStatus['message']))
else:
onCompletedCallback(jsonStatus)
def onFailure(err):
if not self.closed:
onFailedCallback(Exceptions.PyforaError(err))
computedValue.subscribe_jsonStatusRepresentation({
'onSuccess': statusChanged,
'onFailure': onFailure,
'onChanged': statusChanged
})
def downloadComputation(self, computedValue, onResultCallback, maxBytecount=None):
"""download the result of a computation as json.
onResultCallback - called with a PyforaError if there is a problem, or
the json representation of the computation's result or exception otherwise.
"""
def onFailure(err):
if not self.closed:
onResultCallback(Exceptions.PyforaError(err['message']))
def resultChanged(jsonStatus):
if not self.closed and jsonStatus is not None:
onResultCallback(jsonStatus)
computedValue.increaseRequestCount(
{},
{'onSuccess': lambda *args: None, 'onFailure': lambda *args: None}
)
def resultStatusChanged(populated):
if not self.closed and populated:
resultComputer.getResultAsJson({}, {
'onSuccess': resultChanged,
'onFailure': onFailure
})
resultComputer = self.webObjectFactory.PyforaResultAsJson(
{'computedValue': computedValue, 'maxBytecount': maxBytecount}
)
resultComputer.subscribe_resultIsPopulated({
'onSuccess': resultStatusChanged,
'onFailure': onFailure,
'onChanged': resultStatusChanged
})
def close(self):
self.closed = True
self.webObjectFactory.getJsonInterface().close()
def createObjectConverter(webObjectFactory):
    """Build the default ObjectConverter from the bundled purePython FORA modules."""
    foraRoot = os.path.join(os.path.abspath(os.path.dirname(pyfora.__file__)), "fora")
    moduleTree = ModuleDirectoryStructure.ModuleDirectoryStructure.read(foraRoot, "purePython", "fora")
    return ObjectConverter.ObjectConverter(webObjectFactory, moduleTree.toJson())
def connect(url, timeout=30.0):
    """Opens a connection to a pyfora cluster

    Args:
        url (str): The HTTP URL of the cluster's manager
            (e.g. ``http://192.168.1.200:30000``)
        timeout (Optional float): A timeout for the operation in seconds,
            or None to wait indefinitely.

    Returns:
        An :class:`~pyfora.Executor.Executor` that can be used to submit
        work to the cluster.
    """
    interface = SocketIoJsonInterface.SocketIoJsonInterface(
        url,
        '/subscribableWebObjects'
        )
    interface.connect(timeout=timeout)
    return connectGivenSocketIo(interface)
def connectGivenSocketIo(socketIoInterface):
    """Wrap an already-connected socket.io interface in an Executor."""
    # Imported lazily (see module header) to support doc generation.
    import pyfora.SubscribableWebObjects as SubscribableWebObjects
    factory = SubscribableWebObjects.WebObjectFactory(socketIoInterface)
    return Executor.Executor(Connection(factory, createObjectConverter(factory)))
| 1.734375 | 2 |