| text (string, lengths 12–1.05M) | repo_name (string, lengths 5–86) | path (string, lengths 4–191) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12–1.05M) | keyword (list, lengths 1–23) | text_hash (string, length 64) |
|---|---|---|---|---|---|---|---|
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
r""" This module is a direct forward to the initial
'servermanager' module of ParaView. We keep it for backward compatibility only.
"""
from paraview import servermanager
for name in dir(servermanager):
if not name.startswith("__"):
globals()[name] = getattr(servermanager, name)
del servermanager
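# A minimal usage sketch (an assumption, not part of the original module):
# because of the forwarding loop above, attributes of paraview.servermanager
# are expected to be reachable from this module as well, e.g.:
#
#   import paravisSM
#   connection = paravisSM.Connect()   # same call as servermanager.Connect()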
| FedoraScientific/salome-paravis | src/PV_SWIG/no_wrap/paravisSM.py | Python | lgpl-2.1 | 1,164 | ["ParaView"] | 7ccfa30f3b793d9ae349c649c40abb8ef88a8582417d0de5e3859fa5391ec8d4 |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Example that trains a small multi-layer perceptron with fully connected layers
on MNIST.
This example has some command line arguments that enable different neon features.
Examples:
python mnist_mlp.py -b gpu -e 10
Run the example for 10 epochs of mnist data using the nervana gpu
backend
python mnist_mlp.py --validation_freq 1
After each training epoch the validation/test data set will be
processed through the model and the cost will be displayed.
python mnist_mlp.py --serialize 1 -s checkpoint.pkl
After every iteration of training the model will be dumped to a pickle
file named "checkpoint.pkl". Increase the serialize parameter to
change the frequency at which the model is saved.
python mnist_mlp.py --model_file checkpoint.pkl
Before starting to train the model, the model state is set to the
values stored in the checkpoint file named checkpoint.pkl.
"""
import logging
import os
from neon.backends import gen_backend
from neon.callbacks.callbacks import Callbacks
from neon.data import DataIterator, load_mnist
from neon.initializers import Gaussian
from neon.layers import GeneralizedCost, Affine
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary, Misclassification
from neon.util.argparser import NeonArgparser
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
logger = logging.getLogger()
logger.setLevel(args.log_thresh)
# hyperparameters
batch_size = 128
num_epochs = args.epochs
# setup backend
be = gen_backend(backend=args.backend,
batch_size=batch_size,
rng_seed=args.rng_seed,
device_id=args.device_id,
default_dtype=args.datatype,
stochastic_round=False)
# load up the mnist data set
# split into train and tests sets
(X_train, y_train), (X_test, y_test), nclass = load_mnist(path=args.data_dir)
# setup a training set iterator
train_set = DataIterator(X_train, y_train, nclass=nclass)
# setup a validation data set iterator
valid_set = DataIterator(X_test, y_test, nclass=nclass)
# setup weight initialization function
init_norm = Gaussian(loc=0.0, scale=0.01)
# set up model layers
layers = []
layers.append(Affine(nout=100, init=init_norm, activation=Rectlin()))
layers.append(Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True)))
# setup cost function as CrossEntropy
cost = GeneralizedCost(costfunc=CrossEntropyBinary())
# setup optimizer
optimizer = GradientDescentMomentum(0.1, momentum_coef=0.9, stochastic_round=args.rounding)
# initialize model object
mlp = Model(layers=layers)
if args.model_file:
assert os.path.exists(args.model_file), '%s not found' % args.model_file
logger.info('loading initial model state from %s' % args.model_file)
mlp.load_weights(args.model_file)
# setup standard fit callbacks
callbacks = Callbacks(mlp, train_set, output_file=args.output_file,
progress_bar=args.progress_bar)
# add a callback to compute validation metrics at the requested frequency
if args.validation_freq:
# setup validation trial callbacks
callbacks.add_validation_callback(valid_set, args.validation_freq)
if args.serialize > 0:
# add callback for saving checkpoint file
# every args.serialize epochs
checkpoint_schedule = args.serialize
checkpoint_model_path = args.save_path
callbacks.add_serialize_callback(checkpoint_schedule, checkpoint_model_path)
# run fit
mlp.fit(train_set, optimizer=optimizer, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
print('Misclassification error = %.1f%%' % (mlp.eval(valid_set, metric=Misclassification())*100))
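# A minimal sketch of resuming from a saved checkpoint, assuming training was
# run with "--serialize 1 -s checkpoint.pkl" as in the docstring above (the
# file name is the docstring's example, not a guaranteed output path):
#
#   mlp = Model(layers=layers)
#   mlp.load_weights("checkpoint.pkl")
#   print('Misclassification error = %.1f%%'
#         % (mlp.eval(valid_set, metric=Misclassification()) * 100))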
| shyamalschandra/neon | examples/mnist_mlp.py | Python | apache-2.0 | 4,538 | ["Gaussian"] | 3b6c367076d50761142473ac6a670102d71e20aa8475b9003d9b8d636585bb97 |
#--------------------------------------------------------------------------
# Software: InVesalius - Software for 3D Reconstruction of Medical Images
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License, as published by the
# Free Software Foundation, under version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#--------------------------------------------------------------------------
import sys
import vtk
import wx
from wx.lib.pubsub import pub as Publisher
import invesalius.constants as const
from invesalius.gui.dialogs import ProgressDialog
# If you are frightened by the code below, or think it must have been the result of
# an indentation error, look at:
# Closures in Python (pt)
# http://devlog.waltercruz.com/closures
# http://montegasppa.blogspot.com/2007/01/tampinhas.html
# Closures not only in Python (en)
# http://en.wikipedia.org/wiki/Closure_%28computer_science%29
# http://www.ibm.com/developerworks/library/l-prog2.html
# http://jjinux.blogspot.com/2006/10/python-modifying-counter-in-closure.html
def ShowProgress(number_of_filters = 1,
dialog_type="GaugeProgress"):
"""
To use this closure, do something like this:
UpdateProgress = ShowProgress(NUM_FILTERS)
UpdateProgress(vtkObject)
"""
progress = [0]
last_obj_progress = [0]
if (dialog_type == "ProgressDialog"):
dlg = ProgressDialog(100)
# when the pipeline has more than one filter, each filter contributes only
# a fraction of the overall progress
ratio = (100.0 / number_of_filters)
def UpdateProgress(obj, label=""):
"""
Show progress on GUI according to pipeline execution.
"""
# object progress is cumulative and lies between 0.0 and 1.0
# we also need to handle progress reported as a plain number (e.g. from
# multiprocessing) instead of a VTK object
if isinstance(obj, float) or isinstance(obj, int):
obj_progress = obj
else:
obj_progress = obj.GetProgress()
# as it is cumulative, we need to compute the difference to be
# added to the value shown in the interface
if obj_progress < last_obj_progress[0]: # current obj != previous obj
difference = obj_progress # 0
else: # current obj == previous obj
difference = obj_progress - last_obj_progress[0]
last_obj_progress[0] = obj_progress
# final progress status value
progress[0] = progress[0] + ratio*difference
# Tell GUI to update progress status value
if (dialog_type == "GaugeProgress"):
Publisher.sendMessage('Update status in GUI',
(progress[0], label))
else:
if (progress[0] >= 99.999):
progress[0] = 100
if not(dlg.Update(progress[0],label)):
dlg.Close()
return progress[0]
return UpdateProgress
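# A minimal usage sketch for the closure above, assuming a two-filter VTK
# pipeline; "smoother" and "decimator" are hypothetical vtkAlgorithm instances:
#
#   UpdateProgress = ShowProgress(2)
#   smoother.AddObserver("ProgressEvent",
#                        lambda obj, evt: UpdateProgress(obj, "Smoothing ..."))
#   decimator.AddObserver("ProgressEvent",
#                         lambda obj, evt: UpdateProgress(obj, "Decimating ..."))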
class Text(object):
def __init__(self):
property = vtk.vtkTextProperty()
property.SetFontSize(const.TEXT_SIZE)
property.SetFontFamilyToArial()
property.BoldOff()
property.ItalicOff()
property.ShadowOn()
property.SetJustificationToLeft()
property.SetVerticalJustificationToTop()
property.SetColor(const.TEXT_COLOUR)
self.property = property
mapper = vtk.vtkTextMapper()
mapper.SetTextProperty(property)
self.mapper = mapper
actor = vtk.vtkActor2D()
actor.SetMapper(mapper)
actor.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
actor.PickableOff()
self.actor = actor
self.SetPosition(const.TEXT_POS_LEFT_UP)
def SetColour(self, colour):
self.property.SetColor(colour)
def ShadowOff(self):
self.property.ShadowOff()
def SetSize(self, size):
self.property.SetFontSize(size)
def SetValue(self, value):
if isinstance(value, int) or isinstance(value, float):
value = str(value)
if sys.platform == 'win32':
value += "" # Otherwise 0 is not shown under win32
# Some DICOM fields (like name) contain non-ASCII characters that can raise
# a UnicodeEncodeError depending on the encoding. To avoid that we encode
# in utf-8.
if sys.platform == 'win32':
self.mapper.SetInput(value.encode("utf-8"))
else:
try:
self.mapper.SetInput(value.encode("latin-1"))
except(UnicodeEncodeError):
self.mapper.SetInput(value.encode("utf-8"))
def SetPosition(self, position):
self.actor.GetPositionCoordinate().SetValue(position[0],
position[1])
def GetPosition(self):
return self.actor.GetPositionCoordinate().GetValue()
def SetJustificationToRight(self):
self.property.SetJustificationToRight()
def SetJustificationToCentered(self):
self.property.SetJustificationToCentered()
def SetVerticalJustificationToBottom(self):
self.property.SetVerticalJustificationToBottom()
def SetVerticalJustificationToCentered(self):
self.property.SetVerticalJustificationToCentered()
def Show(self, value=1):
if value:
self.actor.VisibilityOn()
else:
self.actor.VisibilityOff()
def Hide(self):
self.actor.VisibilityOff()
class TextZero(object):
def __init__(self):
property = vtk.vtkTextProperty()
property.SetFontSize(const.TEXT_SIZE_LARGE)
property.SetFontFamilyToArial()
property.BoldOn()
property.ItalicOff()
#property.ShadowOn()
property.SetJustificationToLeft()
property.SetVerticalJustificationToTop()
property.SetColor(const.TEXT_COLOUR)
self.property = property
actor = vtk.vtkTextActor()
actor.GetTextProperty().ShallowCopy(property)
actor.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
actor.PickableOff()
self.actor = actor
self.text = ''
self.position = (0, 0)
self.symbolic_syze = wx.FONTSIZE_MEDIUM
def SetColour(self, colour):
self.property.SetColor(colour)
def ShadowOff(self):
self.property.ShadowOff()
def SetSize(self, size):
self.property.SetFontSize(size)
def SetSymbolicSize(self, size):
self.symbolic_syze = size
def SetValue(self, value):
if isinstance(value, int) or isinstance(value, float):
value = str(value)
if sys.platform == 'win32':
value += "" # Otherwise 0 is not shown under win32
# Some DICOM fields (like name) contain non-ASCII characters that can raise
# a UnicodeEncodeError depending on the encoding, so the value is encoded
# explicitly here.
self.actor.SetInput(value.encode("cp1252"))
self.text = value
def SetPosition(self, position):
self.position = position
self.actor.GetPositionCoordinate().SetValue(position[0],
position[1])
def GetPosition(self):
return self.actor.GetPositionCoordinate().GetValue()
def SetJustificationToRight(self):
self.property.SetJustificationToRight()
def SetJustificationToCentered(self):
self.property.SetJustificationToCentered()
def SetVerticalJustificationToBottom(self):
self.property.SetVerticalJustificationToBottom()
def SetVerticalJustificationToCentered(self):
self.property.SetVerticalJustificationToCentered()
def Show(self, value=1):
if value:
self.actor.VisibilityOn()
else:
self.actor.VisibilityOff()
def Hide(self):
self.actor.VisibilityOff()
def draw_to_canvas(self, gc, canvas):
coord = vtk.vtkCoordinate()
coord.SetCoordinateSystemToNormalizedDisplay()
coord.SetValue(*self.position)
x, y = coord.GetComputedDisplayValue(canvas.evt_renderer)
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
# font.SetWeight(wx.FONTWEIGHT_BOLD)
font.SetSymbolicSize(self.symbolic_syze)
canvas.draw_text(self.text, (x, y), font=font)
| fabio-otsuka/invesalius3 | invesalius/data/vtk_utils.py | Python | gpl-2.0 | 8,929 | ["VTK"] | 12f1c80561270343a8ea7fde0ca449914813686ccc476d299e72b8ca52d91ac9 |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import erfc
import pandas as pd
#
# Diffusion-only test
#
# Read MOOSE simulation data
data = pd.read_csv("../../../../../../test/tests/dispersion/gold/diff01_out_xmass_0021.csv")
# The analytical solution is erfc(u) where u is a similarity variable
x = np.linspace(0,10,100)
t = 20
d = 1
tau = 0.1
D = d*tau
u = x/(2*np.sqrt(D*t))
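# u = x / (2*sqrt(D*t)) is the Boltzmann similarity variable; erfc(u) is the
# 1-D diffusion profile for a semi-infinite domain held at concentration 1 at x = 0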
plt.figure(1)
plt.plot(x, erfc(u), label = 'Analytical')
plt.plot(data.x, data.massfrac0, 'o', label = 'MOOSE')
plt.xlabel('x (m)')
plt.ylabel('Mass fraction (-)')
plt.legend()
plt.title('Mass fraction (t = 50 s)')
plt.ylim([-0.05,1])
plt.savefig("diffusion_fig.png")
#
# Dispersion tests
#
def expected(x,t):
porosity = 0.3
alphal = 0.2
v = 1.05e-3 / porosity
D = alphal * v
return 0.5 * erfc((x - v * t)/(2 *np.sqrt(D * t))) + np.sqrt(v * v * t/(np.pi * D)) * \
np.exp(- (x - v * t)**2/(4 * D * t)) - 0.5 * (1 + v * x / D + v * v * t / D) * np.exp(v * x / D) *\
erfc((x+v*t)/(2*np.sqrt(D*t)))
# Read MOOSE simulation data
data = pd.read_csv("../../../../../../test/tests/dispersion/gold/disp01_out_xmass_0029.csv")
data_heavy = pd.read_csv("../../../../../../test/tests/dispersion/gold/disp01_heavy_out_xmass_0105.csv")
plt.figure(2)
plt.plot(data.x, data.massfrac0, 'b.', label = 'MOOSE')
plt.plot(data_heavy.x, data_heavy.massfrac0, 'g+', label = 'MOOSE (heavy)')
plt.plot(x, expected(x, 1e3), 'k-', label = 'Analytical')
plt.xlabel('x (m)')
plt.ylabel('Mass fraction (-)')
plt.legend()
plt.title('Mass fraction (t = 1000 s)')
plt.ylim([-0.05,1])
plt.savefig("dispersion_fig.png")
sys.exit(0)
| nuclear-wizard/moose | modules/porous_flow/doc/content/modules/porous_flow/tests/dispersion/dispersion.py | Python | lgpl-2.1 | 1,987 | ["MOOSE"] | a1a45dbc49b36f353b62310f27510bb6a3f2663aa82315747355dff59b9bce15 |
import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
test.support.import_module('threading')
from test.script_helper import assert_python_ok
import sys
import threading
import time
import unittest
import weakref
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
from concurrent.futures.process import BrokenProcessPool
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
return x * y
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
class MyObject(object):
def my_method(self):
pass
class ExecutorMixin:
worker_count = 5
def setUp(self):
self.t1 = time.time()
try:
self.executor = self.executor_type(max_workers=self.worker_count)
except NotImplementedError as e:
self.skipTest(str(e))
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
dt = time.time() - self.t1
if test.support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 60, "synchronization issue: test lasted too long")
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
class ExecutorShutdownTest:
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
t = {executor_type}(5)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, unittest.TestCase):
def _prime_executor(self):
pass
def test_threads_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._threads), 3)
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest, unittest.TestCase):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
del executor
queue_management_thread.join()
for p in processes.values():
p.join()
class WaitTests:
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, unittest.TestCase):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, unittest.TestCase):
pass
class AsCompletedTests:
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
future1 = self.executor.submit(time.sleep, 2)
completed = [f for f in futures.as_completed([future1,future1])]
self.assertEqual(len(completed), 1)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
@test.support.cpython_only
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
my_object = MyObject()
my_object_collected = threading.Event()
my_object_callback = weakref.ref(
my_object, lambda obj: my_object_collected.set())
# Deliberately discarding the future.
self.executor.submit(my_object.my_method)
del my_object
collected = my_object_collected.wait(timeout=5.0)
self.assertTrue(collected,
"Stale reference not collected within timeout.")
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, unittest.TestCase):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest, unittest.TestCase):
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
class FutureTests(unittest.TestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with test.support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
def test_result_with_cancel(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
@test.support.reap_threads
def test_main():
try:
test.support.run_unittest(__name__)
finally:
test.support.reap_children()
if __name__ == "__main__":
test_main()
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, unittest.TestCase):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, unittest.TestCase):
pass
class AsCompletedTests:
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
future1 = self.executor.submit(time.sleep, 2)
        completed = [f for f in futures.as_completed([future1, future1])]
self.assertEqual(len(completed), 1)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
@test.support.cpython_only
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
my_object = MyObject()
my_object_collected = threading.Event()
my_object_callback = weakref.ref(
my_object, lambda obj: my_object_collected.set())
# Deliberately discarding the future.
self.executor.submit(my_object.my_method)
del my_object
collected = my_object_collected.wait(timeout=5.0)
self.assertTrue(collected,
"Stale reference not collected within timeout.")
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, unittest.TestCase):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest, unittest.TestCase):
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
class FutureTests(unittest.TestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with test.support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
def test_result_with_cancel(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
@test.support.reap_threads
def test_main():
try:
test.support.run_unittest(__name__)
finally:
test.support.reap_children()
if __name__ == "__main__":
test_main()
|
ArcherSys/ArcherSys
|
Lib/test/test_concurrent_futures.py
|
Python
|
mit
| 71,624
|
[
"Brian"
] |
906675db18f8b43a85f3f7788ca9a91467268535d562470728325d0346bb571b
|
# proxy module
from __future__ import absolute_import
from mayavi.preferences.preferences_helpers import *
|
enthought/etsproxy
|
enthought/mayavi/preferences/preferences_helpers.py
|
Python
|
bsd-3-clause
| 107
|
[
"Mayavi"
] |
45f12e3791e2abb33e77611f12a9ab6880bffaf41283d07b49c9b50a42386a6a
|
import numpy as np
def simple_mask(im_rgb, bandwidth=2, bgnd_std=2.5, tissue_std=30,
min_peak_width=10, max_peak_width=25,
fraction=0.10, min_tissue_prob=0.05):
"""Performs segmentation of the foreground (tissue)
Uses a simple two-component Gaussian mixture model to mask tissue areas
from background in brightfield H&E images. Kernel-density estimation is
used to create a smoothed image histogram, and then this histogram is
analyzed to identify modes corresponding to tissue and background. The
mode peaks are then analyzed to estimate their width, and a constrained
optimization is performed to fit gaussians directly to the histogram
(instead of using expectation-maximization directly on the data which
is more prone to local minima effects). A maximum-likelihood threshold
is then derived and used to mask the tissue area in a binarized image.
Parameters
----------
im_rgb : array_like
An RGB image of type unsigned char.
bandwidth : double, optional
Bandwidth for kernel density estimation - used for smoothing the
grayscale histogram. Default value = 2.
bgnd_std : double, optional
Standard deviation of background gaussian to be used if
estimation fails. Default value = 2.5.
tissue_std: double, optional
Standard deviation of tissue gaussian to be used if estimation fails.
Default value = 30.
min_peak_width: double, optional
Minimum peak width for finding peaks in KDE histogram. Used to
initialize curve fitting process. Default value = 10.
max_peak_width: double, optional
Maximum peak width for finding peaks in KDE histogram. Used to
initialize curve fitting process. Default value = 25.
fraction: double, optional
Fraction of pixels to sample for building foreground/background
model. Default value = 0.10.
min_tissue_prob : double, optional
Minimum probability to qualify as tissue pixel. Default value = 0.05.
Returns
-------
im_mask : array_like
        A binarized version of `im_rgb` where foreground (tissue) has value '1'.
See Also
--------
histomicstk.utils.sample_pixels
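    Examples
    --------
    A minimal usage sketch; the file name is hypothetical and ``im`` is any
    8-bit RGB brightfield image (the calls are not executed here):
    >>> from skimage import io                   # doctest: +SKIP
    >>> im = io.imread('tissue_tile.png')        # doctest: +SKIP
    >>> mask = simple_mask(im)                   # doctest: +SKIP
    >>> mask.shape == im.shape[:2]               # doctest: +SKIP
    True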
"""
from scipy import signal
from scipy.optimize import fmin_slsqp
from scipy.stats import norm
from skimage import color
from sklearn.neighbors import KernelDensity
# convert image to grayscale, flatten and sample
im_rgb = 255 * color.rgb2gray(im_rgb)
im_rgb = im_rgb.astype(np.uint8)
    num_samples = int(fraction * im_rgb.size)
sI = np.random.choice(im_rgb.flatten(), num_samples)[:, np.newaxis]
# kernel-density smoothed histogram
KDE = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(sI)
xHist = np.linspace(0, 255, 256)[:, np.newaxis]
yHist = np.exp(KDE.score_samples(xHist))[:, np.newaxis]
yHist = yHist / sum(yHist)
# flip smoothed y-histogram so that background mode is on the left side
yHist = np.flipud(yHist)
# identify initial mean parameters for gaussian mixture distribution
    # the first peak found is taken as background; the highest of the
    # remaining peaks is taken as tissue
Peaks = signal.find_peaks_cwt(yHist.flatten(),
np.arange(min_peak_width, max_peak_width))
BGPeak = Peaks[0]
if len(Peaks) > 1:
TissuePeak = Peaks[yHist[Peaks[1:]].argmax() + 1]
else: # no peak found - take initial guess at 2/3 distance from origin
TissuePeak = xHist[int(np.round(0.66*xHist.size))].item()
# analyze background peak to estimate variance parameter via FWHM
BGScale = estimate_variance(xHist, yHist, BGPeak)
if BGScale == -1:
BGScale = bgnd_std
# analyze tissue peak to estimate variance parameter via FWHM
TissueScale = estimate_variance(xHist, yHist, TissuePeak)
if TissueScale == -1:
TissueScale = tissue_std
# solve for mixing parameter
Mix = yHist[BGPeak] * (BGScale * (2 * np.pi)**0.5)
# flatten kernel-smoothed histogram and corresponding x values for
# optimization
xHist = xHist.flatten()
yHist = yHist.flatten()
# define gaussian mixture model
def gaussian_mixture(x, mu1, mu2, sigma1, sigma2, p):
rv1 = norm(loc=mu1, scale=sigma1)
rv2 = norm(loc=mu2, scale=sigma2)
return p * rv1.pdf(x) + (1 - p) * rv2.pdf(x)
# define gaussian mixture model residuals
def gaussian_residuals(Parameters, y, x):
mu1, mu2, sigma1, sigma2, p = Parameters
yhat = gaussian_mixture(x, mu1, mu2, sigma1, sigma2, p)
return sum((y - yhat) ** 2)
# fit Gaussian mixture model and unpack results
Parameters = fmin_slsqp(gaussian_residuals,
[BGPeak, TissuePeak, BGScale, TissueScale, Mix],
args=(yHist, xHist),
bounds=[(0, 255), (0, 255),
(np.spacing(1), 10),
(np.spacing(1), 50), (0, 1)],
iprint=0)
muBackground = Parameters[0]
muTissue = Parameters[1]
sigmaBackground = Parameters[2]
sigmaTissue = Parameters[3]
p = Parameters[4]
# create mask based on Gaussian mixture model
Background = norm(loc=muBackground, scale=sigmaBackground)
Tissue = norm(loc=muTissue, scale=sigmaTissue)
pBackground = p * Background.pdf(xHist)
pTissue = (1 - p) * Tissue.pdf(xHist)
# identify maximum likelihood threshold
Difference = pTissue - pBackground
Candidates = np.nonzero(Difference >= 0)[0]
Filtered = np.nonzero(xHist[Candidates] > muBackground)
ML = xHist[Candidates[Filtered[0]][0]]
# identify limits for tissue model (MinProb, 1-MinProb)
Endpoints = np.asarray(Tissue.interval(1 - min_tissue_prob / 2))
# invert threshold and tissue mean
ML = 255 - ML
muTissue = 255 - muTissue
Endpoints = np.sort(255 - Endpoints)
# generate mask
im_mask = (im_rgb <= ML) & (im_rgb >= Endpoints[0]) & \
(im_rgb <= Endpoints[1])
im_mask = im_mask.astype(np.uint8)
return im_mask
def estimate_variance(x, y, peak):
"""Estimates variance of a peak in a histogram using the FWHM of an
approximate normal distribution.
Starting from a user-supplied peak and histogram, this method traces down
each side of the peak to estimate the full-width-half-maximum (FWHM) and
variance of the peak. If tracing fails on either side, the FWHM is
estimated as twice the HWHM.
Parameters
----------
x : array_like
vector of x-histogram locations.
y : array_like
vector of y-histogram locations.
peak : double
index of peak in y to estimate variance of
Returns
-------
scale : double
Standard deviation of normal distribution approximating peak. Value is
-1 if fitting process fails.
See Also
--------
    simple_mask
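    Notes
    -----
    For a normal distribution the full width at half maximum (FWHM) and the
    standard deviation are related by FWHM = 2 * sqrt(2 * ln 2) * sigma, i.e.
    sigma ~= FWHM / 2.355, which is where the constant 2.355 used in the
    implementation comes from.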
"""
# analyze peak to estimate variance parameter via FWHM
Left = peak
while y[Left] > y[peak] / 2 and Left >= 0:
Left -= 1
if Left == -1:
break
Right = peak
while y[Right] > y[peak] / 2 and Right < y.size:
Right += 1
if Right == y.size:
break
if Left != -1 and Right != y.size:
        LeftSlope = (y[Left + 1] - y[Left]) / (x[Left + 1] - x[Left])
Left = (y[peak] / 2 - y[Left]) / LeftSlope + x[Left]
        RightSlope = (y[Right] - y[Right - 1]) / (x[Right] - x[Right - 1])
Right = (y[peak] / 2 - y[Right]) / RightSlope + x[Right]
scale = (Right - Left) / 2.355
if Left == -1:
if Right == y.size:
scale = -1
else:
            RightSlope = (y[Right] - y[Right - 1]) / (x[Right] - x[Right - 1])
Right = (y[peak] / 2 - y[Right]) / RightSlope + x[Right]
scale = 2 * (Right - x[peak]) / 2.355
if Right == y.size:
if Left == -1:
scale = -1
else:
            LeftSlope = (y[Left + 1] - y[Left]) / (x[Left + 1] - x[Left])
Left = (y[peak] / 2 - y[Left]) / LeftSlope + x[Left]
scale = 2 * (x[peak] - Left) / 2.355
return scale
|
DigitalSlideArchive/HistomicsTK
|
histomicstk/utils/simple_mask.py
|
Python
|
apache-2.0
| 8,216
|
[
"Gaussian"
] |
f4205d33ac8c69177c190a96ae22573ebe07b8edcc0e44ec4991bfdc49c5b5ff
|
#
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2020, Mostapha Sadeghipour Roudsari <mostapha@ladybug.tools>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
EnergyPlus Shadow Parameters
-
Provided by Honeybee 0.0.66
Args:
timestep_: It is the number of times simulation will be performed in an hour.
-
        You can choose any number from 1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30, and 60 as the timestep. Generally speaking, the shorter the time step, the longer it takes to finish the simulation. The largest timestep is 60 minutes (a timestep_ input of 1), which means that the simulation will run once for every hour of the year.
-
        It is advisable to use the 60-minute timestep only when no HVAC system is envisaged, accuracy is not a primary concern, and simulation run time is critical.
-
        In summary, shorter timesteps improve how the calculation models for surface temperature and zone air temperature are coupled together, and are therefore recommended. On the other hand, longer timesteps introduce more lag in the temperature distribution and, as a result, offer less dynamic behavior.
-
The default is set to 6 timesteps per hour, which means that the energy balance calculation is run every 10 minutes of the year. This is a recommended default for simulations with HVAC.
-
        Another suggested default is 4 for non-HVAC simulations. Simulating green roofs requires more timesteps per hour.
shadowCalcPar_: An optional set of shadow calculation parameters from the "Honeybee_ShadowPar" component.
solarDistribution_: An optional text string or integer that sets the solar distribution calculation. Choose from the following options:
0 = "MinimalShadowing" - In this case, exterior shadowing is only computed for windows and not for other opaque surfaces that might have their surface temperature affected by the sun. All beam solar radiation entering the zone is assumed to fall on the floor. A simple window view factor calculation will be used to distribute incoming diffuse solar energy between interior surfaces.
1 = "FullExterior" - The simulation will perform the solar calculation in a manner that only accounts for direct sun and whether it is blocked by surrounding context geometry. For the inside of the building, all beam solar radiation entering the zone is assumed to fall on the floor. A simple window view factor calculation will be used to distribute incoming diffuse solar energy between interior surfaces.
2 = "FullInteriorAndExterior" - The simulation will perform the solar calculation in a manner that models the direct sun (and wheter it is blocked by outdoor context goemetry. It will also ray trace the direct sun on the interior of zones to distribute it correctly between interior surfaces. Any indirect sun or sun bouncing off of objects will not be modled.
3 = "FullExteriorWithReflections" - The simulation will perform the solar calculation in a manner that accounts for both direct sun and the light bouncing off outdoor surrounding context. For the inside of the building, all beam solar radiation entering the zone is assumed to fall on the floor. A simple window view factor calculation will be used to distribute incoming diffuse solar energy between interior surfaces.
4 = "FullInteriorAndExteriorWithReflections" - The simulation will perform the solar calculation in a manner that accounts for light bounces that happen both outside and inside the zones. This is the most accurate method and is the one assigned by default. Note that, if you use this method, EnergyPlus will give Severe warnings if your zones have concave geometry (or are "L"-shaped). Such geometries mess up this solar distribution calculation and it is recommeded that you either break up your zones in this case or not use this solar distribution method.
holidays_: A list of integers, each between 1 and 365, that represent the days of the year on which a holiday occurs. Alternatively, this can be a list of text strings (example: DEC 25). Finally, this input can accept the "holidays" output from the "Honeybee_Convert EnergyPlus Schedule to Values" component.
    startDayOfWeek_: An integer or text descriptor to set the simulation start day of the week. The default is set to 0 - sun - sunday.
-
Choose from one of the following:
0 - sun - sunday
1 - mon - monday
2 - tue - tuesday
3 - wed - wednesday
4 - thu - thursday
5 - fri - friday
6 - sat - saturday
simulationControls_: An optional set of simulation controls from the "Honeybee_Simulation Control" component.
ddyFile_: An optional file path to a .ddy file on your system. This ddy file will be used to size the HVAC system before running the simulation.
    heatingSizingFactor_: An optional number that represents the 'safety factor' to which the heating system will be sized. A sizing factor of 1 means that the system is sized to perfectly meet the design day conditions. The default is set to 1.25 as it is usually appropriate to oversize the system slightly to ensure that there are no unmet hours. Specifying a factor here that is below 1.25 can result in more hours that do not meet the heating setpoint.
    coolingSizingFactor_: An optional number that represents the 'safety factor' to which the cooling system will be sized. A sizing factor of 1 means that the system is sized to perfectly meet the design day conditions. The default is set to 1.15 as it is usually appropriate to oversize the system slightly to ensure that there are no unmet hours. Specifying a factor here that is below 1.15 can result in more hours that do not meet the cooling setpoint.
    terrain_: An optional integer or text string to set the surrounding terrain of the building, which will be used to determine how wind speed around the building changes with height. If no value is input here, the default is set to "City." Choose from the following options:
0 = City: large city centres, 50% of buildings above 21m over a distance of at least 2000m upwind.
1 = Suburbs: suburbs, wooded areas.
2 = Country: open, with scattered objects generally less than 10m high.
3 = Ocean: Flat, unobstructed areas exposed to wind flowing over a large water body (no more than 500m inland).
    monthlyGrndTemps_: An optional list of 12 monthly ground temperatures to be used by those surfaces in contact with the ground in the simulation. Please note that the EPW values out of the Import Ground Temp component are usually too extreme for a conditioned building. If no values are input here, the model will attempt to estimate a reasonable starting base temperature from these results by using a value of 18C in cases of monthly ground temperatures below 18C, 24C in cases of monthly ground temperatures above 24C, and the actual ground temperature if the monthly average falls in between 18C and 24C. Usually, ground temperatures will be about 2C lower than the average indoor air temperature for a given month.
Returns:
energySimPar: Energy simulation parameters that can be plugged into the "Honeybee_ Run Energy Simulation" component.
"""
ghenv.Component.Name = "Honeybee_Energy Simulation Par"
ghenv.Component.NickName = 'EnergySimPar'
ghenv.Component.Message = 'VER 0.0.66\nJUL_07_2020'
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "HB-Legacy"
ghenv.Component.SubCategory = "10 | Energy | Energy"
#compatibleHBVersion = VER 0.0.56\nAPR_11_2015
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "3"
except: pass
import Grasshopper.Kernel as gh
def main(timestep, shadowCalcPar, solarDistribution, simulationControls, ddyFile, terrain, monthlyGrndTemps, holidays, startDayOfWeek, heatingSizingFactor, coolingSizingFactor):
solarDist = {
"0" : "MinimalShadowing",
"1" : "FullExterior",
"2" : "FullInteriorAndExterior",
"3" : "FullExteriorWithReflections",
"4" : "FullInteriorAndExteriorWithReflections",
"MinimalShadowing" : "MinimalShadowing",
"FullExterior" : "FullExterior",
"FullInteriorAndExterior" : "FullInteriorAndExterior",
"FullExteriorWithReflections" : "FullExteriorWithReflections",
"FullInteriorAndExteriorWithReflections" : "FullInteriorAndExteriorWithReflections"
}
terrainDict = {
"0" : "City",
"1" : "Suburbs",
"2" : "Country",
"3" : "Ocean",
"City" : "City",
"Suburbs" : "Suburbs",
"Country" : "Country",
"Ocean" : "Ocean"
}
# Check the start day of the week.
daysOfWeek = {'0':'Sunday', '1':'Monday', '2':'Tuesday', '3':'Wednesday', '4':'Thursday', '5':'Friday', '6':'Saturday',
'sun':'Sunday', 'mon':'Monday', 'tue':'Tuesday', 'wed':'Wednesday', 'thu':'Thursday', 'fri':'Friday', 'sat':'Saturday',
                  'sunday':'Sunday', 'monday':'Monday', 'tuesday':'Tuesday', 'wednesday':'Wednesday', 'thursday':'Thursday', 'friday':'Friday', 'saturday':'Saturday'}
if startDayOfWeek != None:
try:
startDayOfWeek = daysOfWeek[startDayOfWeek.lower()]
except:
warning = 'Input for startDayOfWeek_ is not valid.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
return -1
if timestep == None: timestep = 6
if shadowCalcPar == []:
shadowCalcPar = ["AverageOverDaysInFrequency", 30, 3000]
assert len(shadowCalcPar) == 3, "Wrong input for shadow calculation parameters."
if solarDistribution == None:
solarDistribution = solarDist["4"]
else:
solarDistribution = solarDist[solarDistribution]
if simulationControls == []: simulationControls= [True, True, True, False, True, 25, 6]
if terrain == None:
terrain = terrainDict["0"]
else:
terrain = terrainDict[terrain]
if heatingSizingFactor == None:
heatingSizingFactor = 1.25
if coolingSizingFactor == None:
coolingSizingFactor = 1.15
finalHoliday = []
for holiday in holidays:
if holiday == "" or holiday == None:
pass
else:
finalHoliday.append(holiday)
if (monthlyGrndTemps == [] or len(monthlyGrndTemps) == 12):
return [timestep] + shadowCalcPar + [solarDistribution] + simulationControls + [ddyFile] + [terrain] + [monthlyGrndTemps] + [finalHoliday] + [startDayOfWeek] + [heatingSizingFactor] + [coolingSizingFactor]
else:
if monthlyGrndTemps != [] and len(monthlyGrndTemps) != 12:
warning = 'monthlyGrndTemps_ must either be left blank or contain 12 values representing the average ground temperature for each month.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
return None
energySimPar = main(timestep_, shadowCalcPar_, solarDistribution_, simulationControls_,
ddyFile_, terrain_, monthlyGrndTemps_, holidays_, startDayOfWeek_,
heatingSizingFactor_, coolingSizingFactor_)
|
mostaphaRoudsari/Honeybee
|
src/Honeybee_Energy Simulation Par.py
|
Python
|
gpl-3.0
| 12,336
|
[
"EPW"
] |
0e5246c6e08f2e8eeff50644faa5586aef7e3d57d29fce5663897c4a4cd88cec
|
from octopus.core import app
from octopus.modules.jper import models
from octopus.lib import http, dates
import json
class JPERException(Exception):
pass
class JPERConnectionException(JPERException):
pass
class JPERAuthException(JPERException):
pass
class ValidationException(JPERException):
pass
class JPER(object):
# FilesAndJATS = "http://router.jisc.ac.uk/packages/FilesAndJATS"
FilesAndJATS = "https://pubrouter.jisc.ac.uk/FilesAndJATS"
def __init__(self, api_key=None, base_url=None):
self.api_key = api_key if api_key is not None else app.config.get("JPER_API_KEY")
self.base_url = base_url if base_url is not None else app.config.get("JPER_BASE_URL")
if self.base_url.endswith("/"):
self.base_url = self.base_url[:-1]
def _url(self, endpoint=None, id=None, auth=True, params=None, url=None):
if url is None:
url = self.base_url
if url.endswith("/"):
            url = url[:-1]
if endpoint is not None:
url += "/" + endpoint
if id is not None:
url += "/" + http.quote(id)
if auth:
if params is None:
params = {}
if self.api_key is not None and self.api_key != "":
params["api_key"] = self.api_key
args = []
for k, v in params.iteritems():
args.append(k + "=" + http.quote(unicode(v)))
if len(args) > 0:
if "?" not in url:
url += "?"
else:
url += "&"
qs = "&".join(args)
url += qs
return url
def validate(self, notification, file_handle=None):
# turn the notification into a json string
data = None
if isinstance(notification, models.IncomingNotification):
data = notification.json()
else:
data = json.dumps(notification)
# get the url that we are going to send to
url = self._url("validate")
resp = None
if file_handle is None:
# if there is no file handle supplied, send the metadata-only notification
resp = http.post(url, data=data, headers={"Content-Type" : "application/json"})
else:
# otherwise send both parts as a multipart message
files = [
("metadata", ("metadata.json", data, "application/json")),
("content", ("content.zip", file_handle, "application/zip"))
]
resp = http.post(url, files=files)
if resp is None:
raise JPERConnectionException("Unable to communicate with the JPER API")
if resp.status_code == 401:
raise JPERAuthException("Could not authenticate with JPER with your API key")
if resp.status_code == 400:
raise ValidationException(resp.json().get("error"))
return True
def create_notification(self, notification, file_handle=None):
# turn the notification into a json string
data = None
if isinstance(notification, models.IncomingNotification):
data = notification.json()
else:
data = json.dumps(notification)
# get the url that we are going to send to
url = self._url("notification")
resp = None
if file_handle is None:
# if there is no file handle supplied, send the metadata-only notification
resp = http.post(url, data=data, headers={"Content-Type" : "application/json"})
else:
# otherwise send both parts as a multipart message
files = [
("metadata", ("metadata.json", data, "application/json")),
("content", ("content.zip", file_handle, "application/zip"))
]
resp = http.post(url, files=files)
if resp is None:
raise JPERConnectionException("Unable to communicate with the JPER API")
if resp.status_code == 401:
raise JPERAuthException("Could not authenticate with JPER with your API key")
if resp.status_code == 400:
raise ValidationException(resp.json().get("error"))
# extract the useful information from the acceptance response
acc = resp.json()
id = acc.get("id")
loc = acc.get("location")
return id, loc
def get_notification(self, notification_id=None, location=None):
# get the url that we are going to send to
if notification_id is not None:
url = self._url("notification", id=notification_id)
elif location is not None:
url = location
else:
raise JPERException("You must supply either the notification_id or the location")
# get the response object
resp = http.get(url)
if resp is None:
raise JPERConnectionException("Unable to communicate with the JPER API")
if resp.status_code == 404:
return None
if resp.status_code != 200:
raise JPERException("Received unexpected status code from {y}: {x}".format(x=resp.status_code, y=url))
j = resp.json()
if "provider" in j:
return models.ProviderOutgoingNotification(j)
else:
return models.OutgoingNotification(j)
def get_content(self, url, chunk_size=8096):
# just sort out the api_key
url = self._url(url=url)
# get the response object
resp, content, downloaded_bytes = http.get_stream(url, read_stream=False)
# check for errors or problems with the response
if resp is None:
raise JPERConnectionException("Unable to communicate with the JPER API")
if resp.status_code == 401:
raise JPERAuthException("Could not authenticate with JPER with your API key")
if resp.status_code != 200:
raise JPERException("Received unexpected status code from {y}: {x}".format(x=resp.status_code, y=url))
# return the response object, in case the caller wants access to headers, etc.
return resp.iter_content(chunk_size=chunk_size), resp.headers
def list_notifications(self, since, page=None, page_size=None, repository_id=None):
# check that the since date is valid, and get it into the right format
if not hasattr(since, "strftime"):
since = dates.parse(since)
since = since.strftime("%Y-%m-%dT%H:%M:%SZ")
# make the url params into an object
params = {"since" : since}
if page is not None:
try:
params["page"] = str(page)
except:
raise JPERException("Unable to convert page argument to string")
if page_size is not None:
try:
params["pageSize"] = str(page_size)
except:
raise JPERException("Unable to convert page_size argument to string")
# get the url, which may contain the repository id if it is not None
url = self._url("routed", id=repository_id, params=params)
# get the response object
resp = http.get(url)
# check for errors or problems with the response
if resp is None:
raise JPERConnectionException("Unable to communicate with the JPER API")
if resp.status_code == 401:
raise JPERAuthException("Could not authenticate with JPER with your API key")
if resp.status_code == 400:
raise JPERException(resp.json().get("error"))
if resp.status_code != 200:
raise JPERException("Received unexpected status code from {y}: {x} ".format(x=resp.status_code, y=url))
# create the notification list object
j = resp.json()
return models.NotificationList(j)
def iterate_notifications(self, since, repository_id=None, page_size=100):
page = 1
while True:
nl = self.list_notifications(since, page=page, page_size=page_size, repository_id=repository_id)
if len(nl.notifications) == 0:
break
for n in nl.notifications:
yield n
if page * page_size >= nl.total:
break
page += 1
def record_retrieval(self, notification_id, content_id=None):
# FIXME: not yet implemented, while waiting to see how retrieval finally
# works
pass
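# Illustrative usage sketch (not executed; the API key and base URL below are
# placeholders - real values would normally come from the octopus app config):
#
#     client = JPER(api_key="...", base_url="https://router.example.org/api/v1")
#     for notification in client.iterate_notifications("2015-01-01T00:00:00Z",
#                                                      page_size=50):
#         pass  # each item is one routed notification returned by the JPER API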
|
JiscPER/magnificent-octopus
|
octopus/modules/jper/client.py
|
Python
|
apache-2.0
| 8,448
|
[
"Octopus"
] |
a2c388a5809ee54029d9d1133263ed0f05a5d6d85e2d333ecdf914a9b954bd74
|
from Firefly import logging
from Firefly.components.zwave.device_types.motion_sensor import ZwaveMotionSensor
from Firefly.const import DEVICE_TYPE_MOTION
TITLE = 'Zwave Motion Sensor'
DEVICE_TYPE = DEVICE_TYPE_MOTION
def Setup(firefly, package, **kwargs):
logging.message('Entering %s setup' % TITLE)
sensor = MotionSensor(firefly, package, **kwargs)
firefly.install_component(sensor)
return sensor.id
class MotionSensor(ZwaveMotionSensor):
def __init__(self, firefly, package, **kwargs):
super().__init__(firefly, package, TITLE, **kwargs)
|
Firefly-Automation/Firefly
|
Firefly/components/zwave/zwave_generic_devices/motion_sensor.py
|
Python
|
apache-2.0
| 561
|
[
"Firefly"
] |
42d62658cf0ef1107819aaf694a8a02cd0a39d0eecb9640c692a59e5e9902432
|
import json
import wget
import os.path
from distutils.dir_util import mkpath
from urllib.error import HTTPError
blocktopus_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'octopus', 'blocktopus')
)
resources_dir = os.path.join(blocktopus_dir, 'resources', 'cache')
resources_json = os.path.join(blocktopus_dir, 'templates', 'template-resources.json')
def fetch_resource(url, filename, allow_fail=False):
cache_file = os.path.join(resources_dir, filename)
cache_file_dir = os.path.dirname(cache_file)
mkpath(cache_file_dir)
if os.path.isfile(cache_file):
print(f"{filename} already downloaded")
return
print(f"Downloading {url}")
try:
downloaded_file = wget.download(
url = url,
out = cache_file
)
print("\n")
except HTTPError:
if allow_fail:
print(" [Not found]")
else:
raise
if __name__ == "__main__":
try:
os.mkdir(resources_dir)
except FileExistsError:
pass
print ("Downloading third-party resources")
with open(resources_json) as templates_file:
resources = {}
for template_items in json.load(templates_file).values():
resources.update(template_items)
extra_resources = {}
for cache_filename, resource_url in resources.items():
split_filename = cache_filename.split('.')
if len(split_filename) > 3 and split_filename[-2] == 'min' and split_filename[-1] in ('js', 'css'):
base_filename = os.path.splitext(cache_filename)[0]
base_url = os.path.splitext(resource_url)[0]
ext = '.map'
extra_resources[base_filename + ext] = base_url + ext
elif split_filename[-1] == 'ttf':
base_filename = os.path.splitext(cache_filename)[0]
base_url = os.path.splitext(resource_url)[0]
for ext in ('.eot', '.woff', '.woff2', '.svg'):
extra_resources[base_filename + ext] = base_url + ext
for cache_filename, resource_url in resources.items():
fetch_resource(resource_url, cache_filename)
for cache_filename, resource_url in extra_resources.items():
fetch_resource(resource_url, cache_filename, allow_fail = True)
|
richardingham/octopus
|
tools/build.py
|
Python
|
mit
| 2,379
|
[
"Octopus"
] |
2cab2842d700bc498b7ca4f8ec098ab0b7a0be7523ca19a4e5f97820c3e7d586
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for kernelized_utils.py."""
import functools
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.keras.utils import kernelized_utils
from tensorflow.python.platform import test
def _exact_gaussian(stddev):
return functools.partial(
kernelized_utils.exact_gaussian_kernel, stddev=stddev)
def _exact_laplacian(stddev):
return functools.partial(
kernelized_utils.exact_laplacian_kernel, stddev=stddev)
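# The helpers above wrap the exact kernels which, assuming the conventional
# definitions used by kernelized_utils, are
#   Gaussian:  K(x, y) = exp(-||x - y||_2**2 / (2 * stddev**2))
#   Laplacian: K(x, y) = exp(-||x - y||_1 / stddev)
# so K(x, x) == 1.0 is the "identity value" referred to in the tests below.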
class KernelizedUtilsTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('gaussian', _exact_gaussian(stddev=10.0), [[1.0]]),
('laplacian', _exact_laplacian(stddev=50.0), [[1.0]]))
def test_equal_vectors(self, exact_kernel_fn, expected_values):
"""Identical vectors give exactly the identity kernel value."""
x = constant_op.constant([0.5, -0.5, -0.5, 0.5])
y = constant_op.constant([0.5, -0.5, -0.5, 0.5])
exact_kernel = exact_kernel_fn(x, y)
shape = exact_kernel.shape.as_list()
self.assertLen(shape, 2)
# x and y are identical and therefore K(x, y) will be precisely equal to
# the identity value of the kernel.
self.assertAllClose(expected_values, exact_kernel, atol=1e-6)
@parameterized.named_parameters(
('gaussian', _exact_gaussian(stddev=10.0), [[1.0]]),
('laplacian', _exact_laplacian(stddev=50.0), [[1.0]]))
def test_almost_identical_vectors(self, exact_kernel_fn, expected_values):
"""Almost identical vectors give the identity kernel value."""
x = constant_op.constant([1.0, 0.4, -2.1, -1.1])
y = constant_op.constant([1.01, 0.39, -2.099, -1.101])
exact_kernel = exact_kernel_fn(x, y)
shape = exact_kernel.shape.as_list()
self.assertLen(shape, 2)
# x and y are almost identical and therefore K(x, y) will be almost equal to
# the identity value of the kernel.
self.assertAllClose(expected_values, exact_kernel, atol=1e-3)
@parameterized.named_parameters(
('gaussian', _exact_gaussian(stddev=1.0), [[0.99], [0.977]]),
('laplacian', _exact_laplacian(stddev=5.0), [[0.96], [0.94]]))
def test_similar_matrices(self, exact_kernel_fn, expected_values):
"""Pairwise "close" vectors give high kernel values (similarity scores)."""
x = constant_op.constant([1.0, 3.4, -2.1, 0.9, 3.3, -2.0], shape=[2, 3])
y = constant_op.constant([1.1, 3.35, -2.05])
exact_kernel = exact_kernel_fn(x, y)
shape = exact_kernel.shape.as_list()
self.assertLen(shape, 2)
# The 2 rows of x are close to y. The pairwise kernel values (similarity
# scores) are somewhat close to the identity value of the kernel.
self.assertAllClose(expected_values, exact_kernel, atol=1e-2)
@parameterized.named_parameters(
('gaussian', _exact_gaussian(stddev=2.0), [[.997, .279], [.251, 1.],
[.164, 0.019]]),
('laplacian', _exact_laplacian(stddev=2.0), [[.904, .128], [.116, 1.],
[.07, 0.027]]))
def test_matrices_varying_similarity(self, exact_kernel_fn, expected_values):
"""Test matrices with row vectors of varying pairwise similarity."""
x = constant_op.constant([1.0, 2., -2., 0.9, 3.3, -1.0], shape=[3, 2])
y = constant_op.constant([1.1, 2.1, -2., 0.9], shape=[2, 2])
exact_kernel = exact_kernel_fn(x, y)
shape = exact_kernel.shape.as_list()
self.assertLen(shape, 2)
self.assertAllClose(expected_values, exact_kernel, atol=1e-2)
@parameterized.named_parameters(
('gaussian', _exact_gaussian(stddev=1.0), [[0.0]]),
('laplacian', _exact_laplacian(stddev=1.0), [[0.0]]))
def test_completely_dissimilar_vectors(self, exact_kernel_fn,
expected_values):
"""Very dissimilar vectors give very low similarity scores."""
x = constant_op.constant([1.0, 3.4, -2.1, -5.1])
y = constant_op.constant([0.5, 2.1, 1.0, 3.0])
exact_kernel = exact_kernel_fn(x, y)
shape = exact_kernel.shape.as_list()
self.assertLen(shape, 2)
# x and y are very "far" from each other and so the corresponding kernel
# value will be very low.
self.assertAllClose(expected_values, exact_kernel, atol=1e-2)
if __name__ == '__main__':
test.main()
|
tensorflow/tensorflow
|
tensorflow/python/keras/utils/kernelized_utils_test.py
|
Python
|
apache-2.0
| 4,989
|
[
"Gaussian"
] |
0bcbe8c899200cd9027d025f3fa587b5c33b8179e25d9875d7d4bc9e9576f5b6
|
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
kPluginNodeName = "MitsubaSSSDipoleShader"
kPluginNodeClassify = "shader/volume"
kPluginNodeId = OpenMaya.MTypeId(0x87016)
class dipole(OpenMayaMPx.MPxNode):
def __init__(self):
OpenMayaMPx.MPxNode.__init__(self)
mMaterial = OpenMaya.MObject()
mUseSigmaSA = OpenMaya.MObject()
mSigmaS = OpenMaya.MObject()
mSigmaA = OpenMaya.MObject()
mUseSigmaTAlbedo = OpenMaya.MObject()
mSigmaT = OpenMaya.MObject()
mAlbedo = OpenMaya.MObject()
mScale = OpenMaya.MObject()
mInteriorMaterial = OpenMaya.MObject()
mIntIOR = OpenMaya.MObject()
mExteriorMaterial = OpenMaya.MObject()
mExtIOR = OpenMaya.MObject()
mIrrSamples = OpenMaya.MObject()
mOutColor = OpenMaya.MObject()
def compute(self, plug, block):
if plug == dipole.mOutColor:
resultColor = OpenMaya.MFloatVector(0.0,0.0,0.0)
outColorHandle = block.outputValue( dipole.mOutColor )
outColorHandle.setMFloatVector(resultColor)
outColorHandle.setClean()
else:
return OpenMaya.kUnknownParameter
def nodeCreator():
return dipole()
def nodeInitializer():
nAttr = OpenMaya.MFnNumericAttribute()
eAttr = OpenMaya.MFnEnumAttribute()
try:
dipole.mMaterial = eAttr.create("material", "mat")
eAttr.setKeyable(1)
eAttr.setStorable(1)
eAttr.setReadable(1)
eAttr.setWritable(1)
Materials = ["Apple",
"Cream",
"Skimmilk",
"Spectralon",
"Chicken1",
"Ketchup",
"Skin1",
"Wholemilk",
"Chicken2",
"Potato",
"Skin2",
"Lowfat Milk",
"Reduced Milk",
"Regular Milk",
"Espresso",
"Mint Mocha Coffee",
"Lowfat Soy Milk",
"Regular Soy Milk",
"Lowfat Chocolate Milk",
"Regular Chocolate Milk",
"Coke",
"Pepsi Sprite",
"Gatorade",
"Chardonnay",
"White Zinfandel",
"Merlot",
"Budweiser Beer",
"Coors Light Beer",
"Clorox",
"Apple Juice",
"Cranberry Juice",
"Grape Juice",
"Ruby Grapefruit Juice",
"White Grapefruit Juice",
"Shampoo",
"Strawberry Shampoo",
"Head & Shoulders Shampoo",
"Lemon Tea Powder",
"Orange Juice Powder",
"Pink Lemonade Powder",
"Cappuccino Powder",
"Salt Powder",
"Sugar Powder",
"Suisse Mocha"
]
for i in range(len(Materials)):
eAttr.addField(Materials[i], i)
# Default to Skin1
eAttr.setDefault(6)
dipole.mUseSigmaSA = nAttr.create("useSigmaSA","ussa", OpenMaya.MFnNumericData.kBoolean, False)
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
dipole.mSigmaS = nAttr.createColor("sigmaS", "ss")
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
nAttr.setDefault(0.0,0.0,0.0)
dipole.mSigmaA = nAttr.createColor("sigmaA", "sa")
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
nAttr.setDefault(0.0,0.0,0.0)
dipole.mUseSigmaTAlbedo = nAttr.create("useSigmaTAlbedo","usta", OpenMaya.MFnNumericData.kBoolean, False)
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
dipole.mSigmaT = nAttr.createColor("sigmaT", "st")
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
nAttr.setDefault(0.0,0.0,0.0)
dipole.mAlbedo = nAttr.createColor("albedo", "albedo")
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
nAttr.setDefault(0.0,0.0,0.0)
dipole.mScale = nAttr.create("scale","sc", OpenMaya.MFnNumericData.kFloat, 1000.0)
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
dipole.mInteriorMaterial = eAttr.create("interiorMaterial", "intmat")
eAttr.setKeyable(1)
eAttr.setStorable(1)
eAttr.setReadable(1)
eAttr.setWritable(1)
eAttr.addField("Use Value", 0)
eAttr.addField("Vacuum - 1.0", 1)
eAttr.addField("Helum - 1.00004", 2)
eAttr.addField("Hydrogen - 1.00013", 3)
eAttr.addField("Air - 1.00028", 4)
eAttr.addField("Carbon Dioxide - 1.00045", 5)
eAttr.addField("Water - 1.3330", 6)
eAttr.addField("Acetone - 1.36", 7)
eAttr.addField("Ethanol - 1.361", 8)
eAttr.addField("Carbon Tetrachloride - 1.461", 9)
eAttr.addField("Glycerol - 1.4729", 10)
eAttr.addField("Benzene - 1.501", 11)
eAttr.addField("Silicone Oil - 1.52045", 12)
eAttr.addField("Bromine - 1.661", 13)
eAttr.addField("Water Ice - 1.31", 14)
eAttr.addField("Fused Quartz - 1.458", 15)
eAttr.addField("Pyrex - 1.470", 16)
eAttr.addField("Acrylic Glass - 1.49", 17)
eAttr.addField("Polypropylene - 1.49", 18)
eAttr.addField("BK7 - 1.5046", 19)
eAttr.addField("Sodium Chloride - 1.544", 20)
eAttr.addField("Amber - 1.55", 21)
eAttr.addField("Pet - 1.575", 22)
eAttr.addField("Diamond - 2.419", 23)
        # Default to "Use Value"
eAttr.setDefault(0)
dipole.mIntIOR = nAttr.create("interiorIOR","intior", OpenMaya.MFnNumericData.kFloat, 1.3)
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
dipole.mExteriorMaterial = eAttr.create("exteriorMaterial", "extmat")
eAttr.setKeyable(1)
eAttr.setStorable(1)
eAttr.setReadable(1)
eAttr.setWritable(1)
eAttr.addField("Use Value", 0)
eAttr.addField("Vacuum - 1.0", 1)
eAttr.addField("Helum - 1.00004", 2)
eAttr.addField("Hydrogen - 1.00013", 3)
eAttr.addField("Air - 1.00028", 4)
eAttr.addField("Carbon Dioxide - 1.00045", 5)
eAttr.addField("Water - 1.3330", 6)
eAttr.addField("Acetone - 1.36", 7)
eAttr.addField("Ethanol - 1.361", 8)
eAttr.addField("Carbon Tetrachloride - 1.461", 9)
eAttr.addField("Glycerol - 1.4729", 10)
eAttr.addField("Benzene - 1.501", 11)
eAttr.addField("Silicone Oil - 1.52045", 12)
eAttr.addField("Bromine - 1.661", 13)
eAttr.addField("Water Ice - 1.31", 14)
eAttr.addField("Fused Quartz - 1.458", 15)
eAttr.addField("Pyrex - 1.470", 16)
eAttr.addField("Acrylic Glass - 1.49", 17)
eAttr.addField("Polypropylene - 1.49", 18)
eAttr.addField("BK7 - 1.5046", 19)
eAttr.addField("Sodium Chloride - 1.544", 20)
eAttr.addField("Amber - 1.55", 21)
eAttr.addField("Pet - 1.575", 22)
eAttr.addField("Diamond - 2.419", 23)
        # Default to "Use Value"
eAttr.setDefault(0)
dipole.mExtIOR = nAttr.create("exteriorIOR","extior", OpenMaya.MFnNumericData.kFloat, 1.0)
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
dipole.mIrrSamples = nAttr.create("irrSamples","irrs", OpenMaya.MFnNumericData.kInt, 16)
nAttr.setKeyable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setWritable(1)
dipole.mOutColor = nAttr.createColor("outColor", "oc")
nAttr.setStorable(0)
nAttr.setHidden(0)
nAttr.setReadable(1)
nAttr.setWritable(0)
except:
sys.stderr.write("Failed to create attributes\n")
raise
try:
dipole.addAttribute(dipole.mMaterial)
dipole.addAttribute(dipole.mUseSigmaSA)
dipole.addAttribute(dipole.mSigmaS)
dipole.addAttribute(dipole.mSigmaA)
dipole.addAttribute(dipole.mUseSigmaTAlbedo)
dipole.addAttribute(dipole.mSigmaT)
dipole.addAttribute(dipole.mAlbedo)
dipole.addAttribute(dipole.mScale)
dipole.addAttribute(dipole.mInteriorMaterial)
dipole.addAttribute(dipole.mIntIOR)
dipole.addAttribute(dipole.mExteriorMaterial)
dipole.addAttribute(dipole.mExtIOR)
dipole.addAttribute(dipole.mIrrSamples)
dipole.addAttribute(dipole.mOutColor)
except:
sys.stderr.write("Failed to add attributes\n")
raise
# initialize the script plug-in
def initializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.registerNode( kPluginNodeName, kPluginNodeId, nodeCreator,
nodeInitializer, OpenMayaMPx.MPxNode.kDependNode, kPluginNodeClassify )
except:
sys.stderr.write( "Failed to register node: %s" % kPluginNodeName )
raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.deregisterNode( kPluginNodeId )
except:
sys.stderr.write( "Failed to deregister node: %s" % kPluginNodeName )
raise
|
hpd/MitsubaForMaya
|
plug-ins/mitsuba/materials/dipole.py
|
Python
|
mit
| 9,561
|
[
"Amber",
"ESPResSo"
] |
fc2c6b9ece1a61ce937f4228fc56907607577675144cef05064fc1791ed04de5
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import espressomd
@utx.skipIfMissingFeatures(["P3M", "EXTERNAL_FORCES"])
class test_icc(ut.TestCase):
def runTest(self):
from espressomd.electrostatics import P3M
from espressomd.electrostatic_extensions import ICC
S = espressomd.System(box_l=[1.0, 1.0, 1.0])
S.seed = S.cell_system.get_state()['n_nodes'] * [1234]
# Parameters
box_l = 20.0
nicc = 10
q_test = 10.0
q_dist = 5.0
# System
S.box_l = [box_l, box_l, box_l + 5.0]
S.cell_system.skin = 0.4
S.time_step = 0.01
# ICC particles
nicc_per_electrode = nicc * nicc
nicc_tot = 2 * nicc_per_electrode
iccArea = box_l * box_l / nicc_per_electrode
iccNormals = []
iccAreas = []
iccSigmas = []
iccEpsilons = []
l = box_l / nicc
for xi in range(nicc):
for yi in range(nicc):
S.part.add(pos=[l * xi, l * yi, 0], q=-0.0001, fix=[1, 1, 1])
iccNormals.append([0, 0, 1])
for xi in range(nicc):
for yi in range(nicc):
S.part.add(pos=[l * xi, l * yi, box_l],
q=0.0001, fix=[1, 1, 1])
iccNormals.append([0, 0, -1])
iccAreas.extend([iccArea] * nicc_tot)
iccSigmas.extend([0] * nicc_tot)
iccEpsilons.extend([10000000] * nicc_tot)
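# Note (descriptive comment, assumption about intent): the two nicc x nicc grids
# built above form parallel-plate electrodes at z = 0 and z = box_l; the very
# large epsilons make them behave as nearly metallic surfaces for the ICC*
# induced-charge iteration configured below.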
# Test Dipole
b2 = box_l * 0.5
S.part.add(pos=[b2, b2, b2 - q_dist / 2], q=q_test, fix=[1, 1, 1])
S.part.add(pos=[b2, b2, b2 + q_dist / 2], q=-q_test, fix=[1, 1, 1])
# Actors
p3m = P3M(prefactor=1, mesh=32, cao=7, accuracy=1e-5)
icc = ICC(
n_icc=nicc_tot,
convergence=1e-6,
relaxation=0.75,
ext_field=[0, 0, 0],
max_iterations=100,
first_id=0,
eps_out=1,
normals=iccNormals,
areas=iccAreas,
sigmas=iccSigmas,
epsilons=iccEpsilons)
S.actors.add(p3m)
S.actors.add(icc)
# Run
S.integrator.run(0)
# Analyze
QL = sum(S.part[:nicc_per_electrode].q)
QR = sum(S.part[nicc_per_electrode:nicc_tot].q)
testcharge_dipole = q_test * q_dist
induced_dipole = 0.5 * (abs(QL) + abs(QR)) * box_l
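# Note (descriptive comment): for ideally conducting plates the induced surface
# charges mirror the test dipole, so induced_dipole (mean absolute electrode
# charge times the plate separation) should reproduce testcharge_dipole; the
# assertion below checks their ratio to four decimal places.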
# Result
self.assertAlmostEqual(1, induced_dipole / testcharge_dipole, places=4)
# Test applying changes
energy_pre_change = S.analysis.energy()['total']
pressure_pre_change = S.analysis.pressure()['total']
icc.set_params(sigmas=[2.0] * nicc_tot)
icc.set_params(epsilons=[20.0] * nicc_tot)
energy_post_change = S.analysis.energy()['total']
pressure_post_change = S.analysis.pressure()['total']
self.assertNotAlmostEqual(energy_pre_change, energy_post_change)
self.assertNotAlmostEqual(pressure_pre_change, pressure_post_change)
if __name__ == "__main__":
ut.main()
|
psci2195/espresso-ffans
|
testsuite/python/icc.py
|
Python
|
gpl-3.0
| 3,783
|
[
"ESPResSo"
] |
bdb4b6222e3beabad78991ebfcbdd1ff9501cd5cae6106c0923f051b9eb46567
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 30 08:57:31 2016
@author: nicholas
The Goal of this is to have a unified place to put the useful
python 3.5 functions or templates
how I got the fastq file
# seqtk sample -s 27 ~/GitHub/FA/pseudochromosome/data/20150803_Abram1/ \
reads/3123-1_1_trimmed.fastq .0005
bam file was from a riboseed mapping; md5: 939fbf2c282091aec0dfa278b05e94ec
mapped bam was made from bam file with the following command
samtools view -Bh -F 4 /home/nicholas/GitHub/FB/Ecoli_comparative_genomics/
scripts/riboSeed_pipeline/batch_coli_unpaired/map/
mapping_20160906_region_7_riboSnag/
test_smalt4_20160906_region_7_riboSnagS.bam >
~/GitHub/pyutilsnrw/tests/test_mapped.sam
md5: 27944249bf064ba54576be83053e82b0
"""
__version__ = "0.0.3"
import sys
import os
import unittest
import logging
import shutil
from pyutilsnrw.utils3_5 import make_output_prefix, check_installed_tools,\
copy_file, get_ave_read_len_from_fastq, get_number_mapped,\
extract_mapped_and_mappedmates, keep_only_first_contig, md5,\
combine_contigs, clean_temp_dir, get_genbank_record, get_fasta_lengths,\
file_len, multisplit, check_version_from_init, check_version_from_cmd
sys.dont_write_bytecode = True
logger = logging
@unittest.skipIf((sys.version_info[0] != 3) or (sys.version_info[1] < 5),
"Subprocess.call among otherthings wont run if you try this" +
" with less than python 3.5")
class utils3_5TestCase(unittest.TestCase):
""" Test the utils3_5 collection of functions
"""
def setUp(self):
self.genbank_filename = os.path.join(os.path.dirname(__file__),
str("references" + os.path.sep +
'n_equitans.gbk'))
self.multigenbank_filename = os.path.join(os.path.dirname(__file__),
str("references" +
os.path.sep +
'uams1_rs.gb'))
self.testdirname = os.path.join(os.path.dirname(__file__),
"output_utils3_5_tests")
self.test_fastq_file = os.path.join(os.path.dirname(__file__),
str("references" + os.path.sep +
'reads_reference.fastq'))
self.test_empty_file = os.path.join(os.path.dirname(__file__),
str("references" + os.path.sep +
'empty.fasta'))
self.test_bam_file = os.path.join(os.path.dirname(__file__),
str("references" + os.path.sep +
"mapping_reference.bam"))
self.test_sam_mapped_file = os.path.join(
os.path.dirname(__file__),
str("references" +
os.path.sep +
"mapping_reference_mapped.sam"))
self.test_multifasta = os.path.join(
os.path.dirname(__file__),
str("references" + os.path.sep +
"test_multiseqs_reference.fasta"))
self.test_singlefasta = os.path.join(
os.path.dirname(__file__),
str("references" + os.path.sep +
"test_only_first_reference.fasta"))
self.test_combined = os.path.join(
os.path.dirname(__file__),
str("references" + os.path.sep +
"combined_contigs_reference.fa"))
self.test_md5s_prefix = os.path.join(
os.path.dirname(__file__),
str("references" + os.path.sep + "md5"))
self.samtools_exe = "samtools"
def test_check_init(self):
"""this checks the version number in an init file
"""
initf = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"pyutilsnrw", "__init__.py")
print(initf)
self.assertTrue(isinstance(check_version_from_init(
init_file=initf, min_version="0.0.0"), str))
with self.assertRaises(FileNotFoundError):
check_version_from_init(
init_file="notheinitfile", min_version="0.0.0")
with self.assertRaises(ValueError):
check_version_from_init(init_file=initf, min_version="10.0.0")
def test_cmd_version(self):
"""This isnt a great test, as it has to be changed when I
"""
# samtools_verison = check_version_from_cmd(
# cmd='samtools',
# line=3,
# pattern=r"\s*Version: (?P<version>[^(]+)", where='stderr',
# min_version="0.0.0")
pip_version = check_version_from_cmd(
exe='pip',
cmd=' --version',
line=1,
pattern=r"pip (?P<version>[^from]+)",
logger=logger,
where='stdout',
min_version="0.0.0")
self.assertTrue(pip_version > '7.0.0')
def test_file_len(self):
""" test against file of known length
"""
self.assertEqual(file_len(self.test_combined), 32)
def test_make_testing_dir(self):
""" make a place for the temporary files
"""
if not os.path.exists(self.testdirname):
os.makedirs(self.testdirname)
self.assertTrue(os.path.exists(self.testdirname))
def test_clean_temp_dir(self):
""" I tried to do something like
@unittest.skipUnless(clean_temp, "temporary files were retained")
but couldn't get the variable to be passed through.
"""
if not os.path.exists(os.path.join(self.testdirname, "test_subdir")):
os.makedirs(os.path.join(self.testdirname, "test_subdir"))
clean_temp_dir(self.testdirname)
def test_make_output_prefix(self):
test_prefix = make_output_prefix(self.testdirname, "utils_3.5")
self.assertEqual(test_prefix,
"".join([self.testdirname, os.path.sep, "utils_3.5"]))
def test_check_installed_tools(self):
"""check against known install (python) and a non_executable
"""
check_installed_tools("python")
# test fails properly
nonex = "thisisnotapathtoanactualexecutable"
with self.assertRaises(SystemExit):
check_installed_tools(nonex)
self.assertFalse(check_installed_tools(nonex,
hard=False))
def test_md5_strings(self):
""" minimal md5 examples with strings
"""
self.assertEqual(md5("thisstringisidenticalto", string=True),
md5("thisstringisidenticalto", string=True))
self.assertNotEqual(md5("thisstringisntidenticalto", string=True),
md5("thisstringisnotidenticalto", string=True))
def test_references_md5(self):
""" is this paranoia, as well as bad testing?
"""
test_pairs = [["3ba332f8a3b5d935ea6c4e410ccdf44b",
"references/combined_contigs_reference.fa"],
["939fbf2c282091aec0dfa278b05e94ec",
"references/mapping_reference.bam"],
["27944249bf064ba54576be83053e82b0",
"references/mapping_reference_mapped.sam"],
["ac80c75f468011ba11e72ddee8560b33",
"references/md5_a.txt"],
["ac80c75f468011ba11e72ddee8560b33",
"references/md5_b.txt"],
["92fc8592819173343a75a40874d86144",
"references/md5_fail.txt"],
["d6b0e5b28d0b4de431f10a03042ff37b",
"references/reads_reference.fastq"],
["40ac496ec5b221636db81ce09e04c1d9",
"references/test_multiseqs_reference.fasta"],
["920b5c9dc69fb2a9fed50b18f3e92895",
"references/test_only_first_reference.fasta"]]
for i in test_pairs:
self.assertEqual(i[0],
md5(os.path.join(os.path.dirname(__file__),
i[1])))
def test_md5_files(self):
""" test file contests identiy
"""
md5_a = md5(str(self.test_md5s_prefix + "_a.txt"))
md5_b = md5(str(self.test_md5s_prefix + "_b.txt"))
md5_fail = md5(str(self.test_md5s_prefix + "_fail.txt"))
self.assertEqual(md5_a, md5_b)
self.assertNotEqual(md5_a, md5_fail)
def test_copy_file(self):
""" make sure copied files are the same
"""
if not os.path.exists(self.test_fastq_file):
raise FileNotFoundError("test file is gone! " +
"where is test_reads.fastq ?")
new_path = copy_file(current_file=self.test_fastq_file,
dest_dir=self.testdirname,
name="newname.fastq", overwrite=False)
# test path to copied file is constructed properly
self.assertEqual(new_path, os.path.join(self.testdirname,
"newname.fastq"))
# test identity of files
self.assertEqual(md5(new_path),
md5(os.path.join(self.testdirname, "newname.fastq")))
# test overwrite exit
with self.assertRaises(SystemExit):
new_path = copy_file(current_file=self.test_fastq_file,
dest_dir=self.testdirname,
name="newname.fastq", overwrite=False)
os.remove(new_path)
def test_average_read_len(self):
""" tests get_ave_read_len_from_fastq
this probably could/should be refined to have a better test
"""
mean_read_len = get_ave_read_len_from_fastq(self.test_fastq_file, N=5)
self.assertEqual(217.8, mean_read_len)
@unittest.skipIf(shutil.which("samtools") is None,
"samtools executable not found, skipping." +
"If this isnt an error from travis deployment, you " +
"probably should install it")
def test_get_number_mapped(self):
""" checks flagstat
"""
result = get_number_mapped(self.test_bam_file, self.samtools_exe)
reference = "151 + 0 mapped (0.56% : N/A)"
self.assertEqual(result, reference)
@unittest.skipIf(shutil.which("samtools") is None,
"samtools executable not found, skipping." +
"If this isnt an error from travis deployment, you " +
"probably should install it")
def test_extraction(self):
""" tests extract_mapped_and_mappedmates
dont trust this if make_output_prefix test fails
some help from PSS on SO:
http://stackoverflow.com/questions/16874598/
how-do-i-calculate-the-md5-checksum-of-a-file-in-python
"""
# copy files
test_bam_dup = copy_file(current_file=self.test_bam_file,
dest_dir=self.testdirname,
name="", overwrite=False)
self.assertTrue(os.path.exists(test_bam_dup))
test_mapped_dup = copy_file(current_file=self.test_sam_mapped_file,
dest_dir=self.testdirname,
name="", overwrite=False)
self.assertTrue(os.path.exists(test_mapped_dup))
ref_dir = os.path.join(self.testdirname)
prefix = make_output_prefix(output_dir=ref_dir,
name="mapping_reference")
extract_mapped_and_mappedmates(map_results_prefix=prefix,
fetch_mates=False,
keep_unmapped=False,
samtools_exe=self.samtools_exe)
# reference mapping md5
mapped_md5 = "27944249bf064ba54576be83053e82b0"
md5_returned = md5(str(prefix + "_mapped.sam"))
# Finally compare original MD5 with freshly calculated
self.assertEqual(mapped_md5, md5_returned)
# delete files created
files_created = ["_mapped.bam",
"_mapped.bam.bai",
"_mapped.sam",
".bam"]
if mapped_md5 == md5_returned:
for i in files_created:
os.remove(str(prefix + i))
def test_keep_only_first_contig(self):
"""copy_file
"""
# copy to test dir
copy_file(current_file=self.test_multifasta,
dest_dir=self.testdirname,
name='duplicated_multifasta.fasta', overwrite=False,
logger=None)
path_to_dup = os.path.join(self.testdirname,
"duplicated_multifasta.fasta")
keep_only_first_contig(path_to_dup, newname="contig1")
self.assertEqual(md5(path_to_dup), md5(self.test_singlefasta))
os.remove(path_to_dup)
def test_combine_contigs(self):
""" compine two files, compare lengths, check construction
"""
duplicated_multifasta = copy_file(current_file=self.test_multifasta,
dest_dir=self.testdirname,
name='multifasta_test_combine.fasta',
overwrite=False,
logger=None)
for_first_contig = copy_file(current_file=self.test_multifasta,
dest_dir=self.testdirname,
name='single_fasta_test_combine.fasta',
overwrite=False,
logger=None)
keep_only_first_contig(for_first_contig, newname="contig1")
combined_contigs = combine_contigs(self.testdirname,
pattern="*test_combine",
contigs_name="combined_contigs.fa",
ext=".fasta",
verbose=False)
self.assertEqual(md5(self.test_combined), md5(combined_contigs))
for i in [duplicated_multifasta, for_first_contig, combined_contigs]:
os.remove(i)
def test_get_genbank_record(self):
"""Reads records from a GenBank file.
"""
records = get_genbank_record(self.genbank_filename)
assert isinstance(records, list)
multirecords = get_genbank_record(self.multigenbank_filename,
first_only=False)
assert isinstance(multirecords, list)
def test_get_fasta_lengths(self):
""" get the lengths of the multifasta entries
"""
self.assertEqual(get_fasta_lengths(self.test_singlefasta), [169])
self.assertEqual(get_fasta_lengths(self.test_multifasta),
[169, 161, 159, 159, 151, 133, 128])
def test_multisplit(self):
""" split a string that has multiple delimiters
"""
test_string = "look_this+is+a locus_that_is+multi-delimited"
list_of_things = multisplit(["-", "_", "+", " "], test_string)
test_other_string = "look_this+is+a\faillocus_that_is+multi-delimited"
list_of_other_things = multisplit(["-", "_", "+", " "],
test_other_string)
self.assertEqual(list_of_things, ["look", "this", "is", "a", "locus",
"that", "is", "multi", "delimited"])
self.assertNotEqual(list_of_other_things, ["look", "this", "is", "a",
"locus", "that", "is",
"multi", "delimited"])
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
###
### below are the functions to still write tests for
###
# def run_quast(contigs, output, quast_exe, ref="", threads=1, logger=None):
# """Reference is optional. This is, honestly, a pretty dumb feature
# requires sys, subprocess, (system install of quast)
# """
# def setup_protein_blast(input_file, input_type="fasta", dbtype="prot",
# title="blastdb", out="blastdb",
# makeblastdb_exe='', logger=None):
# """
# This runs make blast db with the given parameters
# requires logging, os, subprocess, shutil
# """
# if makeblastdb_exe == '':
# makeblastdb_exe = shutil.which("makeblastdb")
# makedbcmd = str("{0} -in {1} -input_type {2} -dbtype {3} " +
# "-title {4} -out {5}").format(makeblastdb_exe,
# input_file,
# input_type,
# dbtype, title, out)
# if logger:
# logger.info("Making blast db: {0}".format(makedbcmd))
# try:
# subprocess.run(makedbcmd, shell=sys.platform != "win32",
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE, check=True)
# logging.debug("BLAST database '{0}' created here: {1}".format(
# title, out))
# return(0)
# except:
# if logger:
# logging.error("Something bad happened when trying to make " +
# "a blast database")
# sys.exit(1)
# def run_blastp(input_file, database_name, outfmt, blastp_exe='', logger=None):
# """
# requires logging subprocess, os, shutil
# """
# output_file = os.path.join(os.path.split(input_file)[0],
# str(os.path.splitext(
# os.path.basename(input_file))[0] +
# "_blast_hits.tab"))
# if blastp_exe == '':
# blastp_exe = shutil.which("blastp")
# blastpcmd = str("{0} -db {1} -query {2} -out {3} -outfmt " +
# "{4}").format(blastp_exe, database_name, input_file,
# output_file, outfmt)
# if logger:
# logger.info("Running blastp: {0}".format(blastpcmd))
# try:
# subprocess.run(blastpcmd, shell=sys.platform != "win32",
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE, check=True)
# if logger:
# logger.debug("Results from BLASTing {0} are here: {1}".format(
# input_file, output_file))
# return(0)
# except:
# if logger:
# logger.error("Something bad happened when running blast")
# sys.exit(1)
# #def merge_outfiles(filelist, outfile_name):
# def merge_blast_tab_outfiles(filelist, outfile_name, logger=None):
# """
# #TODO needs a test for headers
# for combining tab formated blast output format 6
# returns 0 if successful
# requires logging
# """
# # only grab .tab files, ie, the blast output
# # logger=logging.getLogger()
# filelist = [i for i in filelist if i.split(".")[-1:] == ['tab']]
# if len(filelist) == 1:
# if logger:
# logger.warning("only one file found! no merging needed")
# return(0)
# elif len(filelist) == 0:
# if logger:
# logger.error("filelist empt; cannot perform merge!")
# return(1)
# else:
# if logger:
# logger.info("merging all the blast results to %s" % outfile_name)
# nfiles = len(filelist)
# fout = open(outfile_name, "a")
# # first file:
# for line in open(filelist[0]):
# fout.write(line)
# # now the rest:
# for num in range(1, nfiles):
# f = open(filelist[num])
# for line in f:
# fout.write(line)
# f.close() # not really needed
# fout.close()
# return(0)
# def cleanup_output_to_csv(infile,
# accession_pattern='(?P<accession>[A-Z _\d]*\.\d*)',
# logger=None):
# """
# given .tab from merge_blast_tab_outfiles, assign pretty column names,
# """
# # if logger:
# # logger=logging.getLogger(name=None)
# print("cleaning up the csv output")
# colnames = ["query_id", "subject_id", "identity_perc", "alignment_length",
# "mismatches", "gap_opens", "q_start", "q_end", "s_start",
# "s_end", "evalue", "bit_score"]
# csv_results = pd.read_csv(open(infile), comment="#", sep="\t",
# names=colnames)
# #This default regex will probably break things eventually...
# # it looks for capital letter and numbers, dot, number, ie SHH11555JJ8.99
# csv_results["accession"] = csv_results.query_id.str.extract(accession_pattern)
# # write out results with new headers or with new headers and merged metadat from accessions.tab
# genes = open(genelist, "r")
# genedf = pd.read_csv(genes, sep=",")
# output_path_csv = str(os.path.splitext(infile)[0] + ".csv")
# results_annotated = pd.merge(csv_results, genedf, how="left",
# on="accession")
# results_annotated.to_csv(open(output_path_csv, "w"))
# print("wrote final csv to %s" % output_path_csv)
# #%%
# def check_single_scaffold(input_genome_path):
# """Test for single scaffold. from genbank
# """
# def get_genbank_seq(input_genome_path, first_only=False):
# """Get all sequences from genbank, return a list, unless first only
# get the sequence from the FIRST record only in a genbank file
# """
|
nickp60/pyutilsnrw
|
tests/test_utils3_5.py
|
Python
|
mit
| 21,944
|
[
"BLAST"
] |
4cd7dfefd12dcaf75aad3c72a65e6c7406687988dd1af2f650e70e94fa378eaf
|
#!env/python3
# coding: utf-8
import os
import sys
import datetime
import sqlalchemy
import subprocess
import reprlib
import time
from common import *
from progress.bar import Bar
from sqlalchemy import Table, Column, Integer, String, Boolean, ForeignKey, Sequence, UniqueConstraint, Index, func, distinct
from sqlalchemy.orm import relationship, Session
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql.expression import ClauseElement
from sqlalchemy.ext.declarative import declarative_base
from pysam import VariantFile
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# MODEL
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class SampleVariant(SQLBase):
__tablename__ = '_7_sample_variant'
sample_id = Column(Integer, ForeignKey('_7_sample.id'), primary_key=True, nullable=False)
chr = Column(String, primary_key=True, nullable=False)
pos = Column(Integer, primary_key=True, nullable=False)
ref = Column(String, primary_key=True, nullable=False)
alt = Column(String, primary_key=True, nullable=False)
# genotype = Column(JSONB, nullable=True)
# infos = Column(Array(String, dimensions=2))
__table_args__ = (Index('_7_sample_variant_idx', 'sample_id', 'chr', 'pos', 'ref', 'alt', unique=True), UniqueConstraint('sample_id', 'chr', 'pos', 'ref', 'alt', name='_7_sample_variant_uc'), )
#variants = relationship("Variant", back_populates="samples")
#samples = relationship("Sample", back_populates="variants")
class Sample(SQLBase):
__tablename__ = '_7_sample'
id = Column(Integer, autoincrement=True, primary_key=True, nullable=False)
name = Column(String)
description = Column(String)
#variants = relationship("SampleVariant", back_populates="samples")
def __str__(self):
return "<Sample(name='%s')>" % (self.name)
class Variant(SQLBase):
__tablename__ = '_7_variant'
bin = Column(Integer)
chr = Column(String, primary_key=True, nullable=False)
pos = Column(Integer, primary_key=True, nullable=False)
ref = Column(String, primary_key=True, nullable=False)
alt = Column(String, primary_key=True, nullable=False)
is_transition = Column(Boolean)
#samples = relationship("SampleVariant", back_populates="variants")
__table_args__ = (Index('_7_variant_idx', 'chr', 'pos', 'ref', 'alt', unique=True), UniqueConstraint('chr', 'pos', 'ref', 'alt', name='_7_variant_uc'), )
def __str__(self):
return "<Variant(id='%s', chr='%s', pos='%s', ref='%s', alt='%s')>" % (self.id, self.chr, self.pos, self.ref, self.alt)
class Bench(PostgreBench):
def __init__(self, config):
super(Bench, self).__init__(config)
self.description = "Benchmark n°7\n - PySam\n - Model : Sample & Variant & SampleVariant (id on chr, pos, ref, alt)\n - SQLAlchemy using raw sql query\n - Parsing on main thread - SQL query exec on multithread"
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# IMPORT VCF Data
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def import_vcf(self, file, records_count:int, out_log:str, out_stat:str):
vcf_reader = VariantFile(file)
# get samples in the VCF
samples = {i : get_or_create(self.session, Sample, name=i)[0] for i in list((vcf_reader.header.samples))}
self.session.commit()
# parsing vcf file
log("\n\r\tsamples : (" + str(len(samples.keys())) + ") " + str(reprlib.repr([s for s in samples.keys()])))
bar = Bar('\tparsing : ', max=records_count, suffix='%(percent).1f%% - %(elapsed_td)s')
sql_head1 = "INSERT INTO _7_variant (chr, pos, ref, alt, is_transition) VALUES "
sql_head2 = "INSERT INTO _7_sample_variant (sample_id, chr, pos, ref, alt) VALUES "
sql_tail = " ON CONFLICT DO NOTHING"
sql_query1 = ""
sql_query2 = ""
count = 0
for r in vcf_reader:
bar.next()
chrm = normalize_chr(str(r.chrom))
for sn in r.samples:
s = r.samples.get(sn)
pos, ref, alt = normalize(r.pos, r.ref, s.alleles[0])
if alt != ref :
sql_query1 += "('%s', %s, '%s', '%s', %s)," % (chrm, str(pos), ref, alt, is_transition(ref, alt))
sql_query2 += "(%s, '%s', %s, '%s', '%s')," % (str(samples[sn].id), chrm, str(pos), ref, alt)
count += 1
pos, ref, alt = normalize(r.pos, r.ref, s.alleles[1])
if alt != ref :
sql_query1 += "('%s', %s, '%s', '%s', %s)," % (chrm, str(pos), ref, alt, is_transition(ref, alt))
sql_query2 += "(%s, '%s', %s, '%s', '%s')," % (str(samples[sn].id), chrm, str(pos), ref, alt)
count += 1
# manage split big request to avoid sql out of memory transaction
if count >= 1000000:
count = 0
transaction1 = sql_head1 + sql_query1[:-1] + sql_tail
transaction2 = sql_head2 + sql_query2[:-1] + sql_tail
threading.Thread(target=self.exec_sql_query, args=(transaction1, )).start()
threading.Thread(target=self.exec_sql_query, args=(transaction2, )).start()
sql_query1 = ""
sql_query2 = ""
bar.finish()
transaction1 = sql_head1 + sql_query1[:-1] + sql_tail
transaction2 = sql_head2 + sql_query2[:-1] + sql_tail
self.connection.execute(transaction1)
self.connection.execute(transaction2)
while self.job_in_progress > 0:
pass
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# TEST Requests
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def test_req(self, out_log:str, out_stat:str):
# count sample
with Timer() as t:
result = self.connection.execute("SELECT COUNT(*) FROM _7_sample")
print("Sample total : " , result.first()[0], " (", t, ")")
#count sample_variant
with Timer() as t:
result = self.connection.execute("SELECT COUNT(*) FROM _7_sample_variant")
print("Variant total : " , result.first()[0], " (", t, ")")
#count variant
with Timer() as t:
result = self.connection.execute("SELECT COUNT(*) FROM _7_variant")
print("Distinct Variant total : " , result.first()[0], " (", t, ")")
#count variant / sample
with Timer() as t:
result = self.connection.execute("SELECT sample_id, COUNT(*) FROM _7_sample_variant GROUP BY sample_id")
print("\nCount variant by sample (", t, ")")
for r in result:
print ("\tSample n°", r[0], " : ", r[1], " variants")
# Get all variant with REF=A on chr5 for the sample 1
with Timer() as t:
result = self.connection.execute("SELECT pos, alt FROM _7_sample_variant WHERE sample_id = 1 AND chr = '5' AND ref = 'A'")
print("\nList variant of sample n°1, on chr5, with ref A : " , result.rowcount, " results (", t, ")")
print ("\t", reprlib.repr((result)))
# Test group by same table :
with Timer() as t:
result = self.connection.execute("SELECT chr, pos, ref, alt, count(sample_id ) as \"used\" FROM _7_sample_variant GROUP BY chr, pos, ref, alt ORDER BY \"used\" DESC")
print("\nCount how many variant are common by sample : " , result.rowcount, " results (", t, ")")
t = 0
c = 0
print(" sample : variant count")
for r in result:
if t > int(r[4]):
print("\t", t, " : ", c)
c = 0
t = int(r[4])
c += 1
with Timer() as t:
result = self.connection.execute("SELECT s.sample_id, v.is_transition, count(*) FROM _7_variant v INNER JOIN _7_sample_variant s ON v.chr = s.chr AND v.pos = s.pos AND v.ref = s.ref AND v.alt = s.alt GROUP BY v.is_transition, s.sample_id ORDER BY s.sample_id, v.is_transition")
print("\nCheck Sequencing integrity : " , result.rowcount, " results (", t, ")")
print(" sample : transition / transversion")
s = 0
tv = 0
for r in result:
if int(r[0]) > s :
print("\tSample n°", s, " : ", r[2], "/", tv, " ", round(tv / r[2],2))
c = 0
s = int(r[0])
tv = int(r[2])
|
REGOVAR/Sandbox
|
benchs/db/scripts/poc_007.py
|
Python
|
agpl-3.0
| 7,951
|
[
"pysam"
] |
a5ef2e0dee10ac79815541975a3f5ee603b8ab96ca382afdff3828c8483e6a78
|
import itertools
import pddl_types
import f_expression
import tasks
def parse_condition(alist):
condition = parse_condition_aux(alist, False)
return condition
def parse_condition_aux(alist, negated):
"""Parse a PDDL condition. The condition is translated into NNF on the fly."""
tag = alist[0]
if is_function_comparison(alist):
args = [f_expression.parse_expression(arg) for arg in alist[1:]]
assert len(args) == 2, args
if negated:
return NegatedFunctionComparison(tag, args, True)
else:
return FunctionComparison(tag, args, True)
elif tag in ("and", "or", "not", "imply"):
args = alist[1:]
if tag == "imply":
assert len(args) == 2
if tag == "not":
assert len(args) == 1
return parse_condition_aux(args[0], not negated)
elif tag in ("forall", "exists"):
parameters = pddl_types.parse_typed_list(alist[1])
args = alist[2:]
assert len(args) == 1
elif tag == "[":
assert not negated
assert alist[-1] == "]"
alist = alist[1:-1] # strip [ and ]
# where do the type checks go here? parameter should match the module...
# somewhere check the "nestedness" as we want them at the top only?
# Perhaps complain about this later in translate
return ModuleCall(alist[0], [parse_term(term) for term in alist[1:]])
elif negated:
return NegatedAtom(alist[0], [parse_term(term) for term in alist[1:]])
else:
return Atom(alist[0],[parse_term(term) for term in alist[1:]])
if tag == "imply":
parts = [parse_condition_aux(args[0], not negated),
parse_condition_aux(args[1], negated)]
tag = "or"
else:
parts = [parse_condition_aux(part, negated) for part in args]
if tag == "and" and not negated or tag == "or" and negated:
return Conjunction(parts)
elif tag == "or" and not negated or tag == "and" and negated:
return Disjunction(parts)
elif tag == "forall" and not negated or tag == "exists" and negated:
return UniversalCondition(parameters, parts)
elif tag == "exists" and not negated or tag == "forall" and negated:
return ExistentialCondition(parameters, parts)
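# Illustrative example (hypothetical input, comment only): the NNF translation
# pushes negation inward while parsing, e.g.
#   parse_condition(["not", ["and", ["on", "?x", "?y"], ["clear", "?x"]]])
# returns Disjunction([NegatedAtom("on", ...), NegatedAtom("clear", ...)]).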
def parse_durative_condition(alist):
"""Parse a durative PDDL condition. i
The condition is translated into NNF on the fly.
Returns triple [start condition, over all condition, end condition]"""
if len(alist)==0:
return [Truth(), Truth(),Truth()]
tag = alist[0]
if tag == "and":
args = alist[1:]
parts = [parse_durative_condition(part) for part in args]
parts_begin = [part[0] for part in parts]
parts_end = [part[1] for part in parts]
parts_all = [part[2] for part in parts]
return [Conjunction(parts_begin),Conjunction(parts_end),Conjunction(parts_all)]
elif tag == "forall":
parameters = pddl_types.parse_typed_list(alist[1])
args = alist[2:]
assert len(args) == 1
parts = [parse_durative_condition(part) for part in args]
parts_begin = [part[0] for part in parts]
parts_end = [part[1] for part in parts]
parts_all = [part[2] for part in parts]
return [UniversalCondition(parameters, parts_begin),
UniversalCondition(parameters, parts_end),
UniversalCondition(parameters, parts_all)]
elif tag == "at":
assert len(alist) == 3
assert alist[1] in ("start", "end")
condition = parse_condition_aux(alist[2], False)
if alist[1] == "start":
return [condition, Truth(), Truth()]
else:
return [Truth(), Truth(), condition]
elif tag == "over":
assert alist[1] == "all"
assert len(alist) == 3
condition = parse_condition_aux(alist[2], False)
return [Truth(), condition, Truth()]
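# Illustrative example (hypothetical input, comment only):
#   parse_durative_condition(["at", "start", ["on", "?x", "?y"]])
# returns [Atom("on", ...), Truth(), Truth()], i.e. the literal is only
# required to hold at the start of the durative action.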
def is_function_comparison(alist):
tag = alist[0]
if tag in (">","<",">=","<="):
return True
if not tag == "=":
return False
# tag is '='
symbol = alist[1]
if isinstance(symbol,list):
if symbol[0] in ("+","/","*","-"):
return True
symbol = symbol[0]
if (tasks.Task.FUNCTION_SYMBOLS.get(symbol,"object")=="number" or
symbol.replace(".","").isdigit()):
return True
return False
def parse_literal(alist):
if alist[0] == "not":
assert len(alist) == 2
alist = alist[1]
return NegatedAtom(alist[0], [parse_term(term) for term in alist[1:]])
else:
return Atom(alist[0], [parse_term(term) for term in alist[1:]])
def parse_term(term):
if isinstance(term, list):
return FunctionTerm(term[0],[parse_term(t) for t in term[1:]])
elif term.startswith("?"):
return Variable(term)
elif term in tasks.Task.FUNCTION_SYMBOLS:
return FunctionTerm(term,[])
else:
return ObjectTerm(term)
def dump_temporal_condition(condition,indent=" "):
assert len(condition)==3
if not isinstance(condition[0],Truth):
print "%sat start:" % indent
condition[0].dump(indent+" ")
if not isinstance(condition[1],Truth):
print "%sover all:" % indent
condition[1].dump(indent+" ")
if not isinstance(condition[2],Truth):
print "%sat end:" % indent
condition[2].dump(indent+" ")
# Conditions (of any type) are immutable, because they need to
# be hashed occasionally. Immutability also allows more efficient comparison
# based on a precomputed hash value.
#
# Careful: Most other classes (e.g. Effects, Axioms, Actions) are not!
class Condition(object):
def __init__(self, parts):
self.parts = tuple(parts)
self.hash = hash((self.__class__, self.parts))
def __hash__(self):
return self.hash
def __ne__(self, other):
return not self == other
def dump(self, indent=" "):
print "%s%s" % (indent, self._dump())
for part in self.parts:
part.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def _postorder_visit(self, method_name, *args):
part_results = [part._postorder_visit(method_name, *args)
for part in self.parts]
method = getattr(self, method_name, self._propagate)
return method(part_results, *args)
def _propagate(self, parts, *args):
return self.change_parts(parts)
def simplified(self):
return self._postorder_visit("_simplified")
def relaxed(self):
return self._postorder_visit("_relaxed")
def untyped(self):
return self._postorder_visit("_untyped")
def uniquify_variables(self, type_map, renamings={}):
# Cannot use _postorder_visit because this requires preorder
# for quantified effects.
if not self.parts:
return self
else:
return self.__class__([part.uniquify_variables(type_map, renamings)
for part in self.parts])
def to_untyped_strips(self):
raise ValueError("Not a STRIPS condition: %s" % self.__class__.__name__)
def instantiate(self, var_mapping, init_facts, fluent_facts, init_function_vals,
fluent_functions, task, new_axiom, new_modules, result):
raise ValueError("Cannot instantiate condition: not normalized")
def free_variables(self):
result = set()
for part in self.parts:
result |= part.free_variables()
return result
def has_disjunction(self):
for part in self.parts:
if part.has_disjunction():
return True
return False
def has_existential_part(self):
for part in self.parts:
if part.has_existential_part():
return True
return False
def has_universal_part(self):
for part in self.parts:
if part.has_universal_part():
return True
return False
class ConstantCondition(Condition):
parts = ()
def __init__(self):
self.hash = hash(self.__class__)
def change_parts(self, parts):
return self
def __eq__(self, other):
return self.__class__ is other.__class__
class Impossible(Exception):
pass
class Falsity(ConstantCondition):
def instantiate(self, var_mapping, init_facts, fluent_facts, init_function_vals,
fluent_functions, task, new_axiom, new_modules, result):
raise Impossible()
def negate(self):
return Truth()
class Truth(ConstantCondition):
def to_untyped_strips(self):
return []
def instantiate(self, var_mapping, init_facts, fluent_facts, init_function_vals,
fluent_functions, task, new_axiom, new_modules, result):
pass
def negate(self):
return Falsity()
class JunctorCondition(Condition):
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
self.__class__ is other.__class__ and
self.parts == other.parts)
def change_parts(self, parts):
return self.__class__(parts)
class Conjunction(JunctorCondition):
def _simplified(self, parts):
result_parts = []
for part in parts:
if isinstance(part, Conjunction):
result_parts += part.parts
elif isinstance(part, Falsity):
return Falsity()
elif not isinstance(part, Truth):
result_parts.append(part)
if not result_parts:
return Truth()
if len(result_parts) == 1:
return result_parts[0]
return Conjunction(result_parts)
def to_untyped_strips(self):
result = []
for part in self.parts:
result += part.to_untyped_strips()
return result
def instantiate(self, var_mapping, init_facts, fluent_facts, init_function_vals,
fluent_functions, task, new_axiom, new_modules, result):
assert not result, "Condition not simplified"
for part in self.parts:
part.instantiate(var_mapping, init_facts, fluent_facts, init_function_vals,
fluent_functions, task, new_axiom, new_modules, result)
def negate(self):
return Disjunction([p.negate() for p in self.parts])
class Disjunction(JunctorCondition):
def _simplified(self, parts):
result_parts = []
for part in parts:
if isinstance(part, Disjunction):
result_parts += part.parts
elif isinstance(part, Truth):
return Truth()
elif not isinstance(part, Falsity):
result_parts.append(part)
if not result_parts:
return Falsity()
if len(result_parts) == 1:
return result_parts[0]
return Disjunction(result_parts)
def negate(self):
return Conjunction([p.negate() for p in self.parts])
def has_disjunction(self):
return True
class QuantifiedCondition(Condition):
def __init__(self, parameters, parts):
self.parameters = tuple(parameters)
self.parts = tuple(parts)
self.hash = hash((self.__class__, self.parameters, self.parts))
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
self.__class__ is other.__class__ and
self.parameters == other.parameters and
self.parts == other.parts)
def _dump(self, indent=" "):
arglist = ", ".join(map(str, self.parameters))
return "%s %s" % (self.__class__.__name__, arglist)
def _simplified(self, parts):
if isinstance(parts[0], ConstantCondition):
return parts[0]
else:
return self._propagate(parts)
def uniquify_variables(self, type_map, renamings={}):
renamings = dict(renamings) # Create a copy.
new_parameters = [par.uniquify_name(type_map, renamings)
for par in self.parameters]
new_parts = (self.parts[0].uniquify_variables(type_map, renamings),)
return self.__class__(new_parameters, new_parts)
def free_variables(self):
result = Condition.free_variables(self)
for par in self.parameters:
result.discard(par.name)
return result
def change_parts(self, parts):
return self.__class__(self.parameters, parts)
class UniversalCondition(QuantifiedCondition):
# def _untyped(self, parts):
# type_literals = [NegatedAtom(par.type, [par.name]) for par in self.parameters]
# return UniversalCondition(self.parameters,
# [Disjunction(type_literals + parts)])
def negate(self):
return ExistentialCondition(self.parameters, [p.negate() for p in self.parts])
def has_universal_part(self):
return True
class ExistentialCondition(QuantifiedCondition):
# def _untyped(self, parts):
# type_literals = [Atom(par.type, [par.name]) for par in self.parameters]
# return ExistentialCondition(self.parameters,
# [Conjunction(type_literals + parts)])
def negate(self):
return UniversalCondition(self.parameters, [p.negate() for p in self.parts])
def instantiate(self, var_mapping, init_facts, fluent_facts, init_function_vals,
fluent_functions, task, new_axiom, new_modules, result):
assert not result, "Condition not simplified"
self.parts[0].instantiate(var_mapping, init_facts, fluent_facts, init_function_vals,
fluent_functions, task, new_axiom, new_modules, result)
def has_existential_part(self):
return True
class Literal(Condition):
parts = []
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
self.__class__ is other.__class__ and
self.predicate == other.predicate and
self.args == other.args)
def __init__(self, predicate, args):
self.predicate = predicate
self.args = tuple(args)
self.hash = hash((self.__class__, self.predicate, self.args))
def __str__(self):
return "%s %s(%s)" % (self.__class__.__name__, self.predicate,
", ".join(map(str, self.args)))
def dump(self, indent=" "):
print "%s%s %s" % (indent, self._dump(), self.predicate)
for arg in self.args:
arg.dump(indent + " ")
def change_parts(self, parts):
return self
def uniquify_variables(self, type_map, renamings={}):
if not self.args:
return self
else:
return self.__class__(self.predicate,[arg.uniquify_variables(type_map, renamings)
for arg in self.args])
def rename_variables(self, renamings):
new_args = [arg.rename_variables(renamings) for arg in self.args]
return self.__class__(self.predicate, new_args)
def free_variables(self):
result = set()
for arg in self.args:
result |= arg.free_variables()
return result
class Atom(Literal):
negated = False
def to_untyped_strips(self):
return [self]
def instantiate(self, var_mapping, init_facts, fluent_facts, init_function_vals,
fluent_functions, task, new_axiom, new_modules, result):
args = [var_mapping.get(arg, arg) for arg in self.args]
atom = Atom(self.predicate, args)
if atom in fluent_facts:
result.append(atom)
elif atom not in init_facts:
raise Impossible()
def negate(self):
return NegatedAtom(self.predicate, self.args)
def positive(self):
return self
class NegatedAtom(Literal):
negated = True
def _relaxed(self, parts):
return Truth()
def instantiate(self, var_mapping, init_facts, fluent_facts, init_function_vals,
fluent_functions, task, new_axiom, new_modules, result):
args = [var_mapping.get(arg, arg) for arg in self.args]
atom = Atom(self.predicate, args)
if atom in fluent_facts:
result.append(NegatedAtom(self.predicate, args))
elif atom in init_facts:
raise Impossible()
def negate(self):
return Atom(self.predicate, self.args)
positive = negate
class ModuleCall(Condition):
# Can we use _relaxed() = Truth() if necessary
negated = False
parts = []
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
self.__class__ is other.__class__ and
self.name == other.name and
self.args == other.args)
def __init__(self, name, args):
self.name = name
self.args = tuple(args)
self.hash = hash((self.__class__, self.name, self.args))
def change_parts(self, parts):
return self
def rename_variables(self, renamings):
new_args = [arg.rename_variables(renamings) for arg in self.args]
return self.__class__(self.name, new_args)
def free_variables(self):
result = set()
for arg in self.args:
result |= arg.free_variables()
return result
def instantiate(self, var_mapping, init_facts, fluent_facts, init_function_vals,
fluent_functions, task, new_axiom, new_modules, result):
args = [var_mapping.get(arg, arg) for arg in self.args]
mc = ModuleCall(self.name, args)
result.append(mc)
# we also instantiate the modules here
my_module = None
for module in task.modules:
if module.name == self.name:
my_module = module
break
assert my_module, "No matching module for call found"
if my_module:
# we need to rename all args, s.th. the module's
# fluents args are the same as in the modulecall.
# [count ?to] and
# (count ?target - location (stepped-on ?target) effect cout@libBla.so)
# -> (count ?to - location (stepped-on ?to) effect cout@libBla.so)
assert len(my_module.parameters) == len(self.args)
renamings = {}
for param, arg in zip(my_module.parameters, self.args):
pVar = Variable(param.name)
renamings[pVar] = arg
new_module = my_module.rename_variables(renamings)
new_module.instantiate(var_mapping, new_modules)
def __str__(self):
return "%s %s(%s)" % (self.__class__.__name__, self.name,
", ".join(map(str, self.args)))
def __repr__(self):
return self.__str__()
def dump(self, indent=" "):
print "%s%s %s" % (indent, self._dump(), self.name)
for arg in self.args:
arg.dump(indent + " ")
class FunctionComparison(Condition): # comparing numerical functions
negated = False
def _relaxed(self, parts):
return Truth()
def __init__(self, comparator, parts, compare_to_zero = False):
self.comparator = comparator
assert len(parts) == 2
if compare_to_zero:
self.parts = (f_expression.Difference(parts), f_expression.NumericConstant(0))
else:
self.parts = tuple(parts)
self.hash = hash((self.__class__, self.comparator, self.parts))
def _dump(self, indent=" "):
return "%s %s" % (self.__class__.__name__, self.comparator)
def __eq__(self, other):
# Compare hash first for speed reasons.
return (self.hash == other.hash and
self.__class__ is other.__class__ and
self.comparator == other.comparator and
self.parts == other.parts)
def __str__(self):
return "%s (%s %s)" % (self.__class__.__name__, self.comparator,
", ".join(map(str, self.parts)))
def uniquify_variables(self, type_map, renamings={}):
return self.__class__(self.comparator,[part.rename_variables(renamings)
for part in self.parts])
def has_disjunction(self):
return False
def has_universal_part(self):
return False
def has_existential_part(self):
return False
def negate(self):
return NegatedFunctionComparison(self.comparator, self.parts)
def change_parts(self, parts):
return self.__class__(self.comparator,parts)
def primitive_numeric_expressions(self):
result = set()
for part in self.parts:
result |= part.primitive_numeric_expressions()
return result
def instantiate(self, var_mapping, init_facts, fluent_facts, init_function_vals,
fluent_functions, task, new_axiom, new_modules, result):
instantiated_parts = [part.instantiate(var_mapping, fluent_functions,
init_function_vals, task, new_axiom)
for part in self.parts]
# future work: eliminate non-fluent functions
result.append(self.__class__(self.comparator,instantiated_parts))
def positive(self):
return self
class NegatedFunctionComparison(FunctionComparison):
negated = True
def negate(self):
return FunctionComparison(self.comparator, self.parts)
positive = negate
class Term(object):
def dump(self, indent=" "):
print "%s%s %s" % (indent, self._dump(), self.name)
for arg in self.args:
arg.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.args == other.args)
def uniquify_variables(self, type_map, renamings={}):
if not self.args:
return self
else:
return self.__class__(self.name,[arg.uniquify_variables(type_map, renamings)
for arg in self.args])
def compile_objectfunctions_aux(self, used_variables, recurse_object_terms=True):
return ([],[],self)
def rename_variables(self, renamings):
new_args = [renamings.get(arg, arg) for arg in self.args]
return self.__class__(self.name, new_args)
def free_variables(self):
result = set()
for arg in self.args:
result |= arg.free_variables()
return result
class FunctionTerm(Term):
def __init__(self, name, args=[]):
self.name = name
self.args = args
def __str__(self):
return "%s(%s)" % (self.name, ", ".join(map(str, self.args)))
def compile_objectfunctions_aux(self, used_variables, recurse_object_terms=True):
# could be done by postorder visit
typed_vars = []
conjunction_parts = []
new_args = []
for arg in self.args:
if recurse_object_terms:
typed,parts,new_term = arg.compile_objectfunctions_aux(used_variables)
typed_vars += typed
conjunction_parts += parts
new_args.append(new_term)
for counter in itertools.count(1):
new_var_name = "?v" + str(counter)
if new_var_name not in used_variables:
used_variables.append(new_var_name)
typed_vars.append(pddl_types.TypedObject(new_var_name, tasks.Task.FUNCTION_SYMBOLS[self.name]))
new_var = Variable(new_var_name)
break
if recurse_object_terms:
pred_name = function_predicate_name(self.name)
new_args.append(new_var)
atom = Atom(pred_name,new_args)
conjunction_parts = [atom] + conjunction_parts
else:
conjunction_parts = [self]
return (typed_vars, conjunction_parts, new_var)
class Variable(Term):
args = []
def __init__(self, name):
self.name = name
self.hash = hash((self.__class__,self.name))
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.name == other.name)
def __cmp__(self,other):
return cmp(self.name,other.name)
def __hash__(self):
return self.hash
def __str__(self):
return "<%s>" % self.name
def uniquify_variables(self, type_map, renamings={}):
return self.rename_variables(renamings)
def rename_variables(self, renamings):
return self.__class__(renamings.get(self.name,self.name))
def free_variables(self):
return set((self.name,))
class ObjectTerm(Term):
args = []
def __init__(self, name):
self.name = name
self.hash = hash((self.__class__,self.name))
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.name == other.name)
def __cmp__(self,other):
return cmp(self.name,other.name)
def __str__(self):
return "<%s>" % self.name
def __hash__(self):
return self.hash
def free_variables(self):
return set()
def rename_variables(self, renamings):
return self
def function_predicate_name(functionname):
return "%s!val" % functionname
|
GKIFreiburg/gki_symbolic_planning
|
tfd_modules/downward/translate/pddl/conditions.py
|
Python
|
bsd-3-clause
| 25,491
|
[
"VisIt"
] |
ecf986f6acc3e02f0db7e83b7c0d9bbc8469636ae4777a870c1d4fe4624c0e71
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Base topology reader classes --- :mod:`MDAnalysis.topology.base`
================================================================
Derive topology reader classes from the base class in this module. All
topology readers raise :exc:`IOError` upon failing to read a topology
file and :exc:`ValueError` upon failing to make sense of the read data.
Classes
-------
.. autoclass:: TopologyReaderBase
:members:
:inherited-members:
"""
from __future__ import absolute_import
import six
from six.moves import zip
# While reduce is a built-in in python 2, it is not in python 3
from functools import reduce
import itertools
import numpy as np
import warnings
from .. import _PARSERS
from ..coordinates.base import IOBase
from ..lib import util
class _Topologymeta(type):
def __init__(cls, name, bases, classdict):
type.__init__(type, name, bases, classdict)
try:
fmt = util.asiterable(classdict['format'])
except KeyError:
pass
else:
for f in fmt:
f = f.upper()
_PARSERS[f] = cls
class TopologyReaderBase(six.with_metaclass(_Topologymeta, IOBase)):
"""Base class for topology readers
Parameters
----------
filename : str
name of the topology file
universe : Universe, optional
Supply a Universe to the Parser. This then passes it to the
atom instances that are created within parsers.
kwargs : optional
Other keyword arguments that can vary with the specific format.
These are stored as self.kwargs
All topology readers must define a `parse` method which
returns a Topology object
Raises
------
* :exc:`IOError` upon failing to read a topology file
* :exc:`ValueError` upon failing to make sense of the read data
.. versionadded:: 0.9.0
.. versionchanged:: 0.9.2
Added keyword 'universe' to pass to Atom creation.
"""
def __init__(self, filename, **kwargs):
self.filename = filename
self.kwargs = kwargs
def parse(self): # pragma: no cover
raise NotImplementedError("Override this in each subclass")
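# Minimal sketch (illustrative; class and format name are hypothetical) of how a
# concrete reader plugs into the registry kept by _Topologymeta: the ``format``
# class attribute becomes the key under which the class is stored in _PARSERS,
# and ``parse`` must return a Topology.
#
# class MyTopReader(TopologyReaderBase):
#     format = 'MYTOP'
#
#     def parse(self):
#         ...  # read self.filename and build a Topology object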
def squash_by(child_parent_ids, *attributes):
"""Squash a child-parent relationship
Arguments
---------
child_parent_ids - array of ids (unique values that identify the parent)
*attributes - other arrays that need to follow the sorting of ids
Returns
-------
child_parents_idx - an array of len(child) which points to the index of
parent
parent_ids - len(parent) of the ids
*parent_attrs - len(parent) of the other attributes
"""
unique_resids, sort_mask, atom_idx = np.unique(
child_parent_ids, return_index=True, return_inverse=True)
return atom_idx, unique_resids, [attr[sort_mask] for attr in attributes]
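# Illustrative example (hypothetical data, comment only):
#   resids = np.array([3, 3, 7, 7, 7])
#   names = np.array(['ALA', 'ALA', 'GLY', 'GLY', 'GLY'])
#   residx, unique_resids, (squashed_names,) = squash_by(resids, names)
#   # residx         -> [0, 0, 1, 1, 1]   per-atom index of its parent residue
#   # unique_resids  -> [3, 7]
#   # squashed_names -> ['ALA', 'GLY']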
def change_squash(criteria, to_squash):
"""Squash per atom data to per residue according to changes in resid
Parameters
----------
criteria : list of numpy ndarray
Arrays which when changing indicate a new residue
to_squash : list of numpy arrays
Arrays which get squashed according to the criteria arrays
Returns
-------
residx : numpy array
The Residue *index* that each Atom gets assigned to. [len(resids)]
squashed : numpy array
The to_squash arrays reduced down to per Residue values
Example
-------
resids = np.array([2, 2, 3, 3, 2, 2])
resnames = np.array(['RsA', 'RsA', 'RsB', 'RsB', 'RsC', 'RsC'])
segids = np.array(['A', 'A', 'A', 'A', 'B', 'B'])
residx, (new_resids, new_resnames, new_segids) = resid_change_squash(
(resids,), (resids, resnames, segids))
# Per atom res index
residx: [0, 0, 1, 1, 2, 2]
# Per residue record of each attribute
new_resids: [2, 3, 2]
new_resnames: ['RsA', 'RsB', 'RsC']
new_segids: ['A', 'A', 'B']
"""
def get_borders(*arrays):
"""Generator of indices to slice arrays when they change"""
borders = np.nonzero(reduce(np.logical_or,
(a[:-1] != a[1:] for a in arrays)))
# Add Nones so we can slice from start to end
return [None] + list(borders[0] + 1) + [None]
l0 = len(criteria[0])
if not all(len(other) == l0
for other in itertools.chain(criteria[1:], to_squash)):
raise ValueError("All arrays must be equally sized")
# 1) Detect where resids change
borders = get_borders(*criteria)
# Number of groups = number of changes + 1
# 2 `None`s have been added, so -1
nres = len(borders) - 1
# 2) Allocate new arrays
# Per atom record of what residue they belong to
residx = np.zeros_like(criteria[0], dtype=np.int)
# Per residue record of various attributes
new_others = [np.zeros(nres, dtype=o.dtype) for o in to_squash]
# 3) Slice through resids and others to find values
for i, (x, y) in enumerate(zip(borders[:-1], borders[1:])):
residx[x:y] = i # atoms between x & y are in the i'th residue
for old, new in zip(to_squash, new_others):
new[i] = old[x:y][0] # TODO: Check that x:y is the same
# Should be the same for self consistency...
return residx, new_others
|
kain88-de/mdanalysis
|
package/MDAnalysis/topology/base.py
|
Python
|
gpl-2.0
| 6,373
|
[
"MDAnalysis"
] |
cd86b22928a48390fa1a537287c05facc87d2dcff7e15898465683c2a101d55b
|
import numpy as np
from scipy.stats import norm
def lerp(val, low, high):
"""Linear interpolation"""
return low + (high - low) * val
def lerp_gaussian(val, low, high):
"""Linear interpolation with gaussian CDF"""
low_gau = norm.cdf(low)
high_gau = norm.cdf(high)
lerped_gau = lerp(val, low_gau, high_gau)
return norm.ppf(lerped_gau)
def slerp(val, low, high):
"""Spherical interpolation. val has a range of 0 to 1."""
if val <= 0:
return low
elif val >= 1:
return high
elif np.allclose(low, high):
return low
omega = np.arccos(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)))
so = np.sin(omega)
return np.sin((1.0-val)*omega) / so * low + np.sin(val*omega)/so * high
def slerp_gaussian(val, low, high):
"""Spherical interpolation with gaussian CDF (generally not useful)"""
offset = norm.cdf(np.zeros_like(low)) # offset is just [0.5, 0.5, ...]
low_gau_shifted = norm.cdf(low) - offset
high_gau_shifted = norm.cdf(high) - offset
circle_lerped_gau = slerp(val, low_gau_shifted, high_gau_shifted)
epsilon = 0.001
clipped_sum = np.clip(circle_lerped_gau + offset, epsilon, 1.0 - epsilon)
result = norm.ppf(clipped_sum)
return result
def get_interpfn(spherical, gaussian):
"""Returns an interpolation function"""
if spherical and gaussian:
return slerp_gaussian
elif spherical:
return slerp
elif gaussian:
return lerp_gaussian
else:
return lerp
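# Illustrative usage (a sketch, not a documented entry point): choose an
# interpolation scheme and blend two latent vectors drawn from a Gaussian.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    a = rng.normal(size=8)
    b = rng.normal(size=8)
    interpfn = get_interpfn(spherical=True, gaussian=False)  # selects slerp
    print(interpfn(0.5, a, b))  # spherical midpoint between a and b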
|
dribnet/plat
|
plat/interpolate.py
|
Python
|
mit
| 1,527
|
[
"Gaussian"
] |
422ecb961d67387320af15cd2dee534fabf4e9de3359d1fcb6c59aa369b92500
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=C0103
# We disable C0103 because the unittest prevailing style is camelCase
"""tools.py
This module provides helper functions for tests.
(c) The James Hutton Institute 2017-2019
Author: Leighton Pritchard
Contact:
leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD2 5DA,
Scotland,
UK
The MIT License
Copyright (c) 2017-2019 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import copy
import json
import os
import re
import shlex
import shutil
import subprocess
import unittest
from diagnostic_primers import blast, nucmer
class PDPFileEqualityTests(unittest.TestCase):
"""Tests for equality of filetypes used in PDP.
Each test defines a comparison for a specific filetype, with contents that are
expected to be equal in some way.
"""
def assertJsonEqual(self, json1, json2):
"""Assert that two passed JSON files are equal.
:param json1: path to reference JSON file
:param json2: path to comparator JSON file
As we can't always be sure that JSON elements are in the same order in otherwise
equal files, we compare ordered components.
"""
with open(json1, "r") as fh1:
with open(json2, "r") as fh2:
self.assertEqual(
ordered(json.load(fh1)),
ordered(json.load(fh2)),
msg="JSON files {} and {} do not contain equal contents".format(
json1, json2
),
)
def assertEprimer3Equal(self, fname1, fname2):
"""Assert that two passed ePrimer3 output files are equal.
:param fname1: path to reference ePrimer3 file
:param fname2: path to comparator ePrimer3 file
This is a standard file comparison, skipping the first line.
"""
with open(fname1, "r") as fh1:
with open(fname2, "r") as fh2:
fdata1 = fh1.readlines()[1:]
fdata2 = fh2.readlines()[1:]
self.assertEqual(
fdata1,
fdata2,
msg="ePrimer3 files {} and {} are not equivalent".format(
fname1, fname2
),
)
def assertNucmerEqual(self, fname1, fname2):
"""Assert that two passed nucmer output files are equal.
:param fname1: path to reference .delta/.filter file
:param fname2: path to comparator .delta/.filter file
This is a standard file comparison, skipping the first line.
"""
with open(fname1, "r") as fh1:
with open(fname2, "r") as fh2:
fdata1 = nucmer.DeltaData("fh1", fh1)
fdata2 = nucmer.DeltaData("fh2", fh2)
self.assertEqual(
fdata1,
fdata2,
msg="Nucmer files {} and {} are not equivalent".format(
fname1, fname2
),
)
def assertBlasttabEqual(self, fname1, fname2):
"""Assert that two passed BLAST+ .tab output files contain the same data.
This is not a simple comparison, as we can't rely on the same ordering,
so we parse the files and compare objects.
"""
with open(fname1, "r") as fh1:
with open(fname2, "r") as fh2:
data1 = blast.parse_blasttab(fh1)
data2 = blast.parse_blasttab(fh2)
for line1, line2 in zip(data1, data2):
self.assertEqual(line1, line2)
def assertFilesEqual(self, fname1, fname2):
"""Assert that the two passed files have the same contents."""
with open(fname1, "r") as fh1:
with open(fname2, "r") as fh2:
self.assertEqual(
fh1.read(),
fh2.read(),
msg="Files {} and {} do not have the same contents".format(
fname1, fname2
),
)
class PDPTestCase(PDPFileEqualityTests, unittest.TestCase):
"""Specific PDP unit tests."""
def assertDirsEqual(self, dir1, dir2, filt=None):
"""Assert that two passed directories have the same contents.
:param dir1: Reference directory for comparison
        :param dir2: Comparator directory for comparison
        Files to be compared can be restricted using the filt argument. For
        instance:
        assertDirsEqual(d1, d2, filt=".tab") will only compare files with
        the .tab extension.
Directories are compared recursively.
"""
# List directories and skip hidden files
dir1files = ordered([_ for _ in os.listdir(dir1) if not _.startswith(".")])
dir2files = ordered([_ for _ in os.listdir(dir2) if not _.startswith(".")])
self.assertEqual(
dir1files,
dir2files,
msg="{} and {} do not have same file listings".format(dir1, dir2),
)
# Compare contents of directories; descend through directories, but
# filter file extensions if needed
if filt is not None:
dir1files = [
_
for _ in dir1files
if (os.path.isdir(_) is False) and (os.path.splitext(_)[-1] == filt)
]
for fpath in dir1files:
            if os.path.isdir(os.path.join(dir1, fpath)):  # Recurse into subdirectories
self.assertDirsEqual(
os.path.join(dir1, fpath), os.path.join(dir2, fpath)
)
else: # Compare files
ext = os.path.splitext(fpath)[-1]
fname1 = os.path.join(dir1, fpath)
fname2 = os.path.join(dir2, fpath)
if ext.lower() == ".json": # Compare JSON files
self.assertJsonEqual(fname1, fname2)
elif ext.lower() == ".blasttab": # Compare BLAST+ .tab output
self.assertBlasttabEqual(fname1, fname2)
elif ext.lower() == ".eprimer3": # Compare ePrimer3 output
self.assertEprimer3Equal(fname1, fname2)
elif ext.lower() in (
".delta",
".filter",
): # Compare nucmer/delta-filter output
self.assertNucmerEqual(fname1, fname2)
else: # Compare standard files
self.assertFilesEqual(fname1, fname2)
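# Hedged usage sketch (editor's addition, not part of the original helpers): a concrete
# test module would subclass PDPTestCase and lean on the assertions above. The class
# name and paths below are illustrative only.
#
#   class ExampleOutputsTest(PDPTestCase):
#       def test_run_matches_reference(self):
#           self.assertDirsEqual("tests/fixtures/expected", "tests/outputs", filt=".json")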
# Define primer3 version as global variable
def get_primer3_version():
"""Return primer3_core version as a tuple of ints.
Assumes primer3_core is in the $PATH
"""
primer3_core_path = shutil.which("primer3_core")
output = subprocess.run(
[shlex.quote(_) for _ in [primer3_core_path, "--help"]],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
version_line = [
_ for _ in output.stderr.split(b"\n") if _.startswith(b"This is primer3")
][0].decode("utf-8")
return tuple(
[
int(_)
for _ in re.search("(?<=release ).*(?=\\))", version_line)
.group()
.split(".")
]
)
def ordered(obj):
"""Return ordered version of the passed object
    Dictionaries are not ordered in all Python versions, and the
    implementation of sort_keys() in the JSON library seems
    erratic in terms of effect
"""
if isinstance(obj, dict):
return sorted((k, ordered(v)) for k, v in obj.items())
if isinstance(obj, list):
try:
return sorted(ordered(x) for x in obj)
except TypeError: # list contains non-comparable types
return obj
else:
return obj
def modify_namespace(namespace, args):
"""Modify the specified arguments in the passed Namespace.
namespace argparse.Namespace object
args dict of argument: value pairs
For most command-line tests, we define a base argparse.Namespace object, then
change a few arguments. This function takes the base namespace and a dictionary
of argument: value pairs, and returns the modified namespace.
"""
new_namespace = copy.deepcopy(namespace)
for argname, argval in args.items():
setattr(new_namespace, argname, argval)
return new_namespace
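
# Hedged self-check (editor's addition): a tiny runnable demonstration of the two
# module-level helpers above; the attribute names are illustrative only.
if __name__ == "__main__":
    from argparse import Namespace

    base = Namespace(outdir="out", verbose=False)
    per_test = modify_namespace(base, {"outdir": "out_pdp_test"})
    assert base.outdir == "out" and per_test.outdir == "out_pdp_test"
    # ordered() makes JSON-style structures comparable regardless of element order
    assert ordered({"a": [2, 1], "b": 0}) == ordered({"b": 0, "a": [1, 2]})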
|
widdowquinn/find_differential_primers
|
tests/tools.py
|
Python
|
mit
| 9,477
|
[
"BLAST"
] |
78e0eecb27637ca3f895f35cc0a9d3d4c2ed97c325061fdbd8556f5d247ad389
|
#!/usr/bin/env python
# Copyright 2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import copy
import tempfile
from pyscf import lib, gto, scf
from pyscf.tools import cubegen
mol = gto.Mole()
mol.atom = '''
O 0.00000000, 0.000000, 0.119748
H 0.00000000, 0.761561, -0.478993
H 0.00000000, -0.761561, -0.478993 '''
mol.verbose = 0
mol.build()
mf = scf.RHF(mol).run()
def tearDownModule():
global mol, mf
del mol, mf
class KnownValues(unittest.TestCase):
def test_mep(self):
ftmp = tempfile.NamedTemporaryFile()
mep = cubegen.mep(mol, ftmp.name, mf.make_rdm1(),
nx=10, ny=10, nz=10)
self.assertEqual(mep.shape, (10,10,10))
self.assertAlmostEqual(lib.finger(mep), -0.3198103636180436, 9)
mep = cubegen.mep(mol, ftmp.name, mf.make_rdm1(),
nx=10, ny=10, nz=10, resolution=0.5)
self.assertEqual(mep.shape, (12,18,15))
self.assertAlmostEqual(lib.finger(mep), -4.653995909548524, 9)
def test_orb(self):
ftmp = tempfile.NamedTemporaryFile()
orb = cubegen.orbital(mol, ftmp.name, mf.mo_coeff[:,0],
nx=10, ny=10, nz=10)
self.assertEqual(orb.shape, (10,10,10))
self.assertAlmostEqual(lib.finger(orb), -0.11804191128016768, 9)
orb = cubegen.orbital(mol, ftmp.name, mf.mo_coeff[:,0],
nx=10, ny=10, nz=10, resolution=0.5)
self.assertEqual(orb.shape, (12,18,15))
self.assertAlmostEqual(lib.finger(orb), -0.8591778390706646, 9)
orb = cubegen.orbital(mol, ftmp.name, mf.mo_coeff[:,0],
nx=10, ny=1, nz=1)
self.assertEqual(orb.shape, (10,1,1))
self.assertAlmostEqual(lib.finger(orb), 6.921008881822988e-09, 9)
def test_rho(self):
ftmp = tempfile.NamedTemporaryFile()
rho = cubegen.density(mol, ftmp.name, mf.make_rdm1(),
nx=10, ny=10, nz=10)
self.assertEqual(rho.shape, (10,10,10))
self.assertAlmostEqual(lib.finger(rho), -0.3740462814001553, 9)
rho = cubegen.density(mol, ftmp.name, mf.make_rdm1(),
nx=10, ny=10, nz=10, resolution=0.5)
self.assertEqual(rho.shape, (12,18,15))
self.assertAlmostEqual(lib.finger(rho), -1.007950007160415, 9)
if __name__ == "__main__":
print("Full Tests for molden")
unittest.main()
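# Hedged usage note (editor's addition, not part of the PySCF test suite): outside the
# tests, the same helpers write Gaussian-cube files to disk for visualization, e.g.
#   cubegen.density(mol, 'h2o_den.cube', mf.make_rdm1())
#   cubegen.orbital(mol, 'h2o_homo.cube', mf.mo_coeff[:, mol.nelectron // 2 - 1])
# The grid arguments (nx, ny, nz or resolution) follow the signatures exercised above.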
|
sunqm/pyscf
|
pyscf/tools/test/test_cubegen.py
|
Python
|
apache-2.0
| 2,994
|
[
"PySCF"
] |
18df8e480fec78bd67c25ddc806dc5c24c6a497050123ff7da64454eea66acd2
|
"""
This is the interface to DIRAC PilotAgentsDB.
"""
__RCSID__ = "$Id$"
from DIRAC import gConfig, S_OK, S_ERROR
import DIRAC.Core.Utilities.Time as Time
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getUsernameForDN
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
from DIRAC.WorkloadManagementSystem.DB.PilotsLoggingDB import PilotsLoggingDB
from DIRAC.WorkloadManagementSystem.Service.WMSUtilities import getPilotLoggingInfo,\
getGridJobOutput, killPilotsInQueues
# This is a global instance of the database classes
pilotDB = None
pilotsLoggingDB = None
enablePilotsLogging = False
FINAL_STATES = ['Done', 'Aborted', 'Cleared', 'Deleted', 'Stalled']
def initializePilotManagerHandler(serviceInfo):
""" PilotManagerHandler initialization
"""
global pilotDB
global pilotsLoggingDB
global enablePilotsLogging
# there is a problem with accessing CS with shorter paths, so full path is extracted from serviceInfo dict
enablePilotsLogging = gConfig.getValue(
serviceInfo['serviceSectionPath'].replace(
'Pilots',
'PilotsLogging') + '/Enable',
'False').lower() in (
'yes',
'true')
pilotDB = PilotAgentsDB()
if enablePilotsLogging:
pilotsLoggingDB = PilotsLoggingDB()
return S_OK()
class PilotManagerHandler(RequestHandler):
##############################################################################
types_getCurrentPilotCounters = [dict]
@classmethod
def export_getCurrentPilotCounters(cls, attrDict={}):
""" Get pilot counters per Status with attrDict selection. Final statuses are given for
the last day.
"""
result = pilotDB.getCounters('PilotAgents', ['Status'], attrDict, timeStamp='LastUpdateTime')
if not result['OK']:
return result
last_update = Time.dateTime() - Time.day
resultDay = pilotDB.getCounters('PilotAgents', ['Status'], attrDict, newer=last_update,
timeStamp='LastUpdateTime')
if not resultDay['OK']:
return resultDay
resultDict = {}
for statusDict, count in result['Value']:
status = statusDict['Status']
resultDict[status] = count
if status in FINAL_STATES:
resultDict[status] = 0
for statusDayDict, ccount in resultDay['Value']:
if status == statusDayDict['Status']:
resultDict[status] = ccount
break
return S_OK(resultDict)
##########################################################################################
types_addPilotTQReference = [list, (int, long), basestring, basestring]
@classmethod
def export_addPilotTQReference(cls, pilotRef, taskQueueID, ownerDN, ownerGroup, broker='Unknown',
gridType='DIRAC', pilotStampDict={}):
""" Add a new pilot job reference """
return pilotDB.addPilotTQReference(pilotRef, taskQueueID,
ownerDN, ownerGroup,
broker, gridType, pilotStampDict)
##############################################################################
types_getPilotOutput = [basestring]
def export_getPilotOutput(self, pilotReference):
""" Get the pilot job standard output and standard error files for the Grid
job reference
"""
return getGridJobOutput(pilotReference)
##############################################################################
types_getPilotInfo = [(list, basestring)]
@classmethod
def export_getPilotInfo(cls, pilotReference):
""" Get the info about a given pilot job reference
"""
return pilotDB.getPilotInfo(pilotReference)
##############################################################################
types_selectPilots = [dict]
@classmethod
def export_selectPilots(cls, condDict):
""" Select pilots given the selection conditions
"""
return pilotDB.selectPilots(condDict)
##############################################################################
types_storePilotOutput = [basestring, basestring, basestring]
@classmethod
def export_storePilotOutput(cls, pilotReference, output, error):
""" Store the pilot output and error
"""
return pilotDB.storePilotOutput(pilotReference, output, error)
##############################################################################
types_getPilotLoggingInfo = [basestring]
@classmethod
def export_getPilotLoggingInfo(cls, pilotReference):
""" Get the pilot logging info for the Grid job reference
"""
result = pilotDB.getPilotInfo(pilotReference)
if not result['OK'] or not result['Value']:
return S_ERROR('Failed to determine owner for pilot ' + pilotReference)
pilotDict = result['Value'][pilotReference]
owner = pilotDict['OwnerDN']
group = pilotDict['OwnerGroup']
gridType = pilotDict['GridType']
return getPilotLoggingInfo(gridType, pilotReference, # pylint: disable=unexpected-keyword-arg
proxyUserDN=owner, proxyUserGroup=group)
##############################################################################
types_getPilotSummary = []
@classmethod
def export_getPilotSummary(cls, startdate='', enddate=''):
""" Get summary of the status of the LCG Pilot Jobs
"""
result = pilotDB.getPilotSummary(startdate, enddate)
return result
##############################################################################
types_getPilotMonitorWeb = [dict, list, (int, long), [int, long]]
@classmethod
def export_getPilotMonitorWeb(cls, selectDict, sortList, startItem, maxItems):
""" Get the summary of the pilot information for a given page in the
pilot monitor in a generic format
"""
result = pilotDB.getPilotMonitorWeb(selectDict, sortList, startItem, maxItems)
return result
##############################################################################
types_getPilotMonitorSelectors = []
@classmethod
def export_getPilotMonitorSelectors(cls):
""" Get all the distinct selector values for the Pilot Monitor web portal page
"""
result = pilotDB.getPilotMonitorSelectors()
return result
##############################################################################
types_getPilotSummaryWeb = [dict, list, (int, long), [int, long]]
@classmethod
def export_getPilotSummaryWeb(cls, selectDict, sortList, startItem, maxItems):
""" Get the summary of the pilot information for a given page in the
pilot monitor in a generic format
"""
result = pilotDB.getPilotSummaryWeb(selectDict, sortList, startItem, maxItems)
return result
##############################################################################
types_getPilots = [(basestring, int, long)]
@classmethod
def export_getPilots(cls, jobID):
""" Get pilot references and their states for :
- those pilots submitted for the TQ where job is sitting
- (or) the pilots executing/having executed the Job
"""
pilots = []
result = pilotDB.getPilotsForJobID(int(jobID))
if not result['OK']:
if result['Message'].find('not found') == -1:
return S_ERROR('Failed to get pilot: ' + result['Message'])
else:
pilots += result['Value']
if not pilots:
# Pilots were not found try to look in the Task Queue
taskQueueID = 0
result = TaskQueueDB().getTaskQueueForJob(int(jobID))
if result['OK'] and result['Value']:
taskQueueID = result['Value']
if taskQueueID:
result = pilotDB.getPilotsForTaskQueue(taskQueueID, limit=10)
if not result['OK']:
return S_ERROR('Failed to get pilot: ' + result['Message'])
pilots += result['Value']
if not pilots:
return S_ERROR('Failed to get pilot for Job %d' % int(jobID))
return pilotDB.getPilotInfo(pilotID=pilots)
##############################################################################
types_killPilot = [(basestring, list)]
@classmethod
def export_killPilot(cls, pilotRefList):
""" Kill the specified pilots
"""
# Make a list if it is not yet
pilotRefs = list(pilotRefList)
if isinstance(pilotRefList, basestring):
pilotRefs = [pilotRefList]
# Regroup pilots per site and per owner
pilotRefDict = {}
for pilotReference in pilotRefs:
result = pilotDB.getPilotInfo(pilotReference)
if not result['OK'] or not result['Value']:
return S_ERROR('Failed to get info for pilot ' + pilotReference)
pilotDict = result['Value'][pilotReference]
owner = pilotDict['OwnerDN']
group = pilotDict['OwnerGroup']
queue = '@@@'.join([owner, group, pilotDict['GridSite'], pilotDict['DestinationSite'], pilotDict['Queue']])
gridType = pilotDict['GridType']
pilotRefDict.setdefault(queue, {})
pilotRefDict[queue].setdefault('PilotList', [])
pilotRefDict[queue]['PilotList'].append(pilotReference)
pilotRefDict[queue]['GridType'] = gridType
failed = killPilotsInQueues(pilotRefDict)
if failed:
return S_ERROR('Failed to kill at least some pilots')
return S_OK()
##############################################################################
types_setJobForPilot = [(basestring, int, long), basestring]
@classmethod
def export_setJobForPilot(cls, jobID, pilotRef, destination=None):
""" Report the DIRAC job ID which is executed by the given pilot job
"""
result = pilotDB.setJobForPilot(int(jobID), pilotRef)
if not result['OK']:
return result
result = pilotDB.setCurrentJobID(pilotRef, int(jobID))
if not result['OK']:
return result
if destination:
result = pilotDB.setPilotDestinationSite(pilotRef, destination)
return result
##########################################################################################
types_setPilotBenchmark = [basestring, float]
@classmethod
def export_setPilotBenchmark(cls, pilotRef, mark):
""" Set the pilot agent benchmark
"""
return pilotDB.setPilotBenchmark(pilotRef, mark)
##########################################################################################
types_setAccountingFlag = [basestring]
@classmethod
def export_setAccountingFlag(cls, pilotRef, mark='True'):
""" Set the pilot AccountingSent flag
"""
return pilotDB.setAccountingFlag(pilotRef, mark)
##########################################################################################
types_setPilotStatus = [basestring, basestring]
def export_setPilotStatus(self, pilotRef, status, destination=None, reason=None, gridSite=None, queue=None):
""" Set the pilot agent status
"""
return pilotDB.setPilotStatus(pilotRef, status, destination=destination,
statusReason=reason, gridSite=gridSite, queue=queue)
##########################################################################################
types_countPilots = [dict]
@classmethod
def export_countPilots(cls, condDict, older=None, newer=None, timeStamp='SubmissionTime'):
""" Set the pilot agent status
"""
return pilotDB.countPilots(condDict, older, newer, timeStamp)
##########################################################################################
types_getCounters = [basestring, list, dict]
@classmethod
def export_getCounters(cls, table, keys, condDict, newer=None, timeStamp='SubmissionTime'):
""" Set the pilot agent status
"""
return pilotDB.getCounters(table, keys, condDict, newer=newer, timeStamp=timeStamp)
##############################################################################
types_getPilotStatistics = [basestring, dict]
@staticmethod
def export_getPilotStatistics(attribute, selectDict):
""" Get pilot statistics distribution per attribute value with a given selection
"""
startDate = selectDict.get('FromDate', None)
if startDate:
del selectDict['FromDate']
if startDate is None:
startDate = selectDict.get('LastUpdate', None)
if startDate:
del selectDict['LastUpdate']
endDate = selectDict.get('ToDate', None)
if endDate:
del selectDict['ToDate']
result = pilotDB.getCounters('PilotAgents', [attribute], selectDict,
newer=startDate,
older=endDate,
timeStamp='LastUpdateTime')
statistics = {}
if result['OK']:
for status, count in result['Value']:
if "OwnerDN" in status:
userName = getUsernameForDN(status['OwnerDN'])
if userName['OK']:
status['OwnerDN'] = userName['Value']
statistics[status['OwnerDN']] = count
else:
statistics[status[attribute]] = count
return S_OK(statistics)
##############################################################################
types_deletePilots = [(list, int, long, basestring)]
def export_deletePilots(self, pilotIDs):
if isinstance(pilotIDs, basestring):
return pilotDB.deletePilot(pilotIDs)
if isinstance(pilotIDs, (int, long)):
pilotIDs = [pilotIDs, ]
result = pilotDB.deletePilots(pilotIDs)
if not result['OK']:
return result
if enablePilotsLogging:
pilotIDs = result['Value']
pilots = pilotDB.getPilotInfo(pilotID=pilotIDs)
if not pilots['OK']:
return pilots
pilotRefs = []
      for pilot in pilots['Value'].values():  # iterate over the pilot info records, not the S_OK wrapper
        pilotRefs.append(pilot['PilotJobReference'])
result = pilotsLoggingDB.deletePilotsLogging(pilotRefs)
if not result['OK']:
return result
return S_OK()
##############################################################################
types_clearPilots = [(int, long), (int, long)]
def export_clearPilots(self, interval=30, aborted_interval=7):
result = pilotDB.clearPilots(interval, aborted_interval)
if not result['OK']:
return result
if enablePilotsLogging:
pilotIDs = result['Value']
pilots = pilotDB.getPilotInfo(pilotID=pilotIDs)
if not pilots['OK']:
return pilots
pilotRefs = []
      for pilot in pilots['Value'].values():  # iterate over the pilot info records, not the S_OK wrapper
        pilotRefs.append(pilot['PilotJobReference'])
result = pilotsLoggingDB.deletePilotsLogging(pilotRefs)
if not result['OK']:
return result
return S_OK()
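##############################################################################
# Hedged client-side sketch (editor's addition, not part of the service code).
# The exported methods above are reached through a DISET RPC client; the service
# path below is an assumption and may differ between installations.
#
#   from DIRAC.Core.DISET.RPCClient import RPCClient
#   pilotManager = RPCClient('WorkloadManagement/PilotManager')  # assumed path
#   result = pilotManager.getCurrentPilotCounters({'GridType': 'DIRAC'})
#   if result['OK']:
#     print(result['Value'])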
|
fstagni/DIRAC
|
WorkloadManagementSystem/Service/PilotManagerHandler.py
|
Python
|
gpl-3.0
| 14,504
|
[
"DIRAC"
] |
5e228bb6ebcf1d4301a1d618f101846471e165a3096ae09a82421b34cd9d03cc
|
#
# AtHomePowerlineServer - networked server for CM11/CM11A/XTB-232 X10 controllers
# Copyright (C) 2014 Dave Hocker
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# See the LICENSE file for more details.
#
import logging
import logging.handlers
import Configuration
########################################################################
# Enable logging for the AtHomePowerlineServer application
# TODO In order to get dual logging to work, we'll need to create
# a logger instance in every module that logs. We can configure that
# instance here. In the meantime, we'll use logging to file.
def EnableServerLogging():
# Default overrides
logformat = '%(asctime)s, %(module)s, %(levelname)s, %(message)s'
logdateformat = '%Y-%m-%d %H:%M:%S'
# Logging level override
log_level_override = Configuration.Configuration.LogLevel().lower()
if log_level_override == "debug":
loglevel = logging.DEBUG
elif log_level_override == "info":
loglevel = logging.INFO
elif log_level_override == "warn":
loglevel = logging.WARNING
elif log_level_override == "error":
loglevel = logging.ERROR
else:
loglevel = logging.DEBUG
# Configure the root logger to cover all loggers
logger = logging.getLogger()
logger.setLevel(loglevel)
formatter = logging.Formatter(logformat, datefmt=logdateformat)
# Do we log to console?
if Configuration.Configuration.Logconsole():
# Covers the server and pyHS100 package
ch = logging.StreamHandler()
ch.setLevel(loglevel)
ch.setFormatter(formatter)
# logger.addHandler(ch)
logger.addHandler(ch)
# Do we log to a file?
logfile = Configuration.Configuration.Logfile()
if logfile != "":
# To file
fh = logging.handlers.TimedRotatingFileHandler(logfile, when='midnight', backupCount=3)
fh.setLevel(loglevel)
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.debug("Logging to file: %s", logfile)
# Controlled logging shutdown
def Shutdown():
logging.shutdown()
print("Logging shutdown")
|
dhocker/athomepowerlineserver
|
Logging.py
|
Python
|
gpl-3.0
| 2,362
|
[
"xTB"
] |
4a15569124bf74e9daf3442a3d21c75f3f1a5df68eead68a35d2120f44cb84dd
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
from __future__ import print_function
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
#from scipy import stats
import vtk
import os
import argparse
import timeit
import pickle as pickle
import random
from imblearn.over_sampling import SMOTE
#import matplotlib.pyplot as plt
import pprint
import inputData
#from sklearn.decomposition import PCA
import math
import inputData
import glob
import numpy as np
import collections
from sklearn import svm
from sklearn.metrics import accuracy_score
#from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.svm import LinearSVC
from sklearn.metrics import confusion_matrix, roc_curve, auc
import itertools
from sklearn import preprocessing
# #############################################################################
# Generate data
parser = argparse.ArgumentParser(description='Shape Variation Analyzer', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
#parser.add_argument('--model', type=str, help='pickle file with the pca decomposition', required=True)
#parser.add_argument('--shapeDir', type=str, help='Directory with vtk files .vtk', required=True)
parser.add_argument('--picklefile',dest='picklefile',help='picklefile with the dataset',required=True)
#parser.add_argument('--dataPathtrain', action='store', dest='dirwithSubtrain', help='folder with subclasses', required=True)
#parser.add_argument('--dataPathtest', action='store', dest='dirwithSubtest', help='folder with subclasses', required=True)
#parser.add_argument('--train_size', help='train ratio', type=float, default=0.8)
#parser.add_argument('--validation_size', help='validation ratio from test data', default=0.5, type=float)
#parser.add_argument('--feature_names', help='Extract the following features from the polydatas', nargs='+', default=["Normals", "Mean_Curvature", "distanceGroup"], type=str)
#parser.add_argument('--out', dest="pickle_file_new", help='Pickle file output', default="new_dataset.pickle", type=str)
#parser.add_argument('-outputdataPath', action='store', dest='dirwithSubGenerated', help='folder with subclasses after generation of data', required=True)
#parser.add_argument('--outputGenerated', help='output folder for shapes', default='./out')
#parser.add_argument('--num_shapes', type=int, help='number shapes to be generated', default=10)
#parser.add_argument('--meanShape',help='mean shape', required=True)
def readData(shapedir):
#Read data from vtk files
print("loading data ......")
print("+++++++Read the surface shape data+++++++")
vtkdirshapes = os.listdir(shapedir)
y_design = []
numpoints = -1
nshape = 0
firstshapedata = 0
for vtkfilename in vtkdirshapes:
if vtkfilename.endswith((".vtk")):
print("Reading", vtkfilename)
reader = vtk.vtkPolyDataReader()
reader.SetFileName(os.path.join(shapedir, vtkfilename))
reader.Update()
shapedata = reader.GetOutput()
shapedatapoints = shapedata.GetPoints()
if firstshapedata == 0:
firstshapedata = shapedata
y_design.append([])
if numpoints == -1:
numpoints = shapedatapoints.GetNumberOfPoints()
if numpoints != shapedatapoints.GetNumberOfPoints():
print("WARNING! The number of points is not the same for the shape:", vtkfilename)
for i in range(shapedatapoints.GetNumberOfPoints()):
p = shapedatapoints.GetPoint(i)
y_design[nshape].append(p)
nshape+=1
y_design = np.array(y_design)
return y_design.reshape(y_design.shape[0], -1), firstshapedata
def writeData(data_for_training,outputdataPath):
#write data in a vtk file
vtkdirshapes = os.listdir(outputdataPath)
for vtkfilename in vtkdirshapes:
if vtkfilename.endswith((".vtk")):
print("Writing", vtkfilename)
writer = vtk.vtkPolyDataWriter()
            writer.SetInput(data_for_training)  # note: VTK 6+ renamed this to SetInputData()
            writer.SetFileName(os.path.join(outputdataPath, vtkfilename))
writer.Write()
def get_labels(pickle_file):
#get labels of a dataset and returns the labels array and the dataset with features
#num_classes=len(pickle_file)
#num_shapes = 268 #should be changed!!
labels = []
shape =[]
dataset_concatenated =[]
for label, pickle_file in enumerate(pickle_file):
try:
with open(pickle_file,'rb') as f:
dataset=pickle.load(f)
shape_dataset = np.shape(dataset)
num_shapes_per_group = shape_dataset[0]
print('num shapes per group',label,num_shapes_per_group)
l=[label]*num_shapes_per_group
labels.extend(l)
dataset_concatenated.extend(dataset)
except Exception as e:
print('Unable to process', pickle_file,':',e)
raise
features=np.array(dataset_concatenated)
shape_features=np.shape(features)
return features.reshape(-1,shape_features[1]*shape_features[2]), np.array(labels)
def generate_data(pca_model):
#generate data thanks to pca decomposition (not used)
print("Generating data ...")
pca = pca_model["pca"]
X_ = pca_model["X_"]
X_pca_ = pca_model["X_pca_"]
X_pca_var = pca_model["X_pca_var"]
print('Variance',X_pca_var)
print('Mean',X_pca_)
#between -1 and 1
alpha = 2.0*(np.random.random_sample(np.size(X_pca_))) - 1.0
print('alpha', alpha)
data_compressed = 1.5*X_pca_var * alpha + X_pca_
print('data compressed',data_compressed)
data_generated = pca.inverse_transform(data_compressed) + X_
return data_generated
def generate_with_SMOTE(dataset,labels):
    # Generate data with the SMOTE algorithm; it balances the different groups
sm=SMOTE(random_state=42,kind='borderline1')
print('shape dataset',dataset.shape)
print('shape labels',labels.shape)
dataset_res, labels_res = sm.fit_sample(dataset,labels)
print('shape dataset resampled',np.shape(dataset_res),'shape lables resampled',np.shape(labels_res))
return dataset_res,labels_res
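# Hedged stand-alone sketch (editor's addition): what generate_with_SMOTE() does on a
# tiny imbalanced toy set, written against the same older imblearn API used above
# (fit_sample(); newer releases call it fit_resample()). Not called anywhere in the script.
def _smote_demo():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.normal(0, 1, (40, 3)), rng.normal(3, 1, (8, 3))])
    y = np.array([0] * 40 + [1] * 8)
    X_res, y_res = SMOTE(random_state=42).fit_sample(X, y)
    print('before:', np.bincount(y), 'after:', np.bincount(y_res))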
# def PCA_plot(dataset,labels,dataset_res,labels_res):
# #plot original dat and data resampled after a PCA decomposition
# pca = PCA(n_components=200)
# pca.fit(dataset)
# dataset_pca=pca.transform(dataset)
# print('original shape: ',dataset.shape)
# print('transformed shape:',dataset_pca.shape)
# #print('Ratio variance',pca.explained_variance_ratio_)
# #plt.scatter(dataset[:,0],dataset[:,1],alpha=0.2)
# #dataset_new = pca.inverse_transform(dataset_pca)
# plt.figure(2)
# plt.subplot(121)
# plt.scatter(dataset_pca[:,0],dataset_pca[:,1],edgecolor='none',alpha=0.5,c=labels,cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(labels))[0]))
# plt.title('Original data with pca (' + str(dataset.shape[0]) + ' samples)')
# #pca.fit(dataset_res)
# dataset_res_pca=pca.transform(dataset_res)
# plt.subplot(122)
# plt.scatter(dataset_res_pca[:,0],dataset_res_pca[:,1],edgecolor='none',alpha=0.5,c=labels_res,cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(labels_res))[0]))
# plt.title('Resampled data with pca (' + str(dataset_res_pca.shape[0]) + ' samples)')
# for i in range(1,3):
# plt.subplot(1,2,i)
# plt.xlabel('component 1')
# plt.ylabel('component 2')
# plt.colorbar()
# cumsum = np.cumsum(pca.explained_variance_ratio_)
# plt.figure(1)
# plt.plot(cumsum)
# plt.xlabel('nb of components')
# plt.ylabel('cumulative explained variance')
# plt.axhline(y=0.95, linestyle=':', label='.95 explained', color="#f23e3e")
# numcomponents = len(np.where(cumsum < 0.95)[0])
# plt.axvline(x=numcomponents, linestyle=':', label=(str(numcomponents) + ' components'), color="#31f9ad")
# plt.legend(loc=0)
# histo = np.bincount(labels)
# histo_range = np.array(range(histo.shape[0]))
# plt.figure(3)
# plt.bar(histo_range, histo)
# plt.xlabel('Groups')
# plt.ylabel('Number of samples')
# for xy in zip(histo_range, histo):
# plt.annotate(xy[1], xy=xy, ha="center", color="#4286f4")
# plt.show()
# def plot_confusion_matrix(cm, classes,
# normalize=False,
# title='Confusion matrix',
# cmap=plt.cm.Blues):
# """
# This function prints and plots the confusion matrix.
# Normalization can be applied by setting `normalize=True`.
# """
# if normalize:
# cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# print("Normalized confusion matrix")
# else:
# print('Confusion matrix, without normalization')
# print(cm)
# plt.figure()
# plt.imshow(cm, interpolation='nearest', cmap=cmap)
# plt.title(title)
# plt.colorbar()
# tick_marks = np.arange(len(classes))
# plt.xticks(tick_marks, classes, rotation=45)
# plt.yticks(tick_marks, classes)
# fmt = '.2f' if normalize else 'd'
# thresh = cm.max() / 2.
# for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i, format(cm[i, j], fmt),
# horizontalalignment="center",
# color="white" if cm[i, j] > thresh else "black")
# #plt.tight_layout()
# plt.ylabel('True label')
# plt.xlabel('Predicted label')
def training_acc(X_train,y_train,X_test,y_test,classifiers):
tab_score=[]
for clf in classifiers:
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
print('score',score)
tab_score.append(score)
print('TAB score',tab_score)
return tab_score
def SVM_classification(X_dataset,y_labels,dataset_test,labels_test):
# model = svm.SVC(decision_function_shape='ovr',kernel='rbf',C=100,gamma=10)
# model.fit(X_dataset,y_labels)
# model.score(X_dataset,y_labels)
print('data shape',X_dataset.shape)
# predicted_labels = model.predict(test_dataset)
# acc = accuracy_score(test_labels,predicted_labels,normalize=True)
# print('accuracy',acc)
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
"Decision Tree", "Random Forest", "MLP Classifier", "AdaBoost",
"Naive Bayes", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0)),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
#linearly_separable = (X_dataset,y_labels)
#datasets = [linearly_separable]
i=1
#standardizing features
#X_stand = StandardScaler().fit_transform(X)
X_train,X_test, y_train, y_test = train_test_split(X_dataset,y_labels, test_size =0.4)
# just plot the dataset first
cm = plt.cm.RdBu
#cm_bright = ListedColormap(['#FF0000', '#0000FF','#48FF00'])
#fig1,ax1=plt.subplots(3,4)
#fig2,ax2=plt.subplots(3,4)
#if ds_cnt == 0:
# ax.set_title("Input data")
# Plot the training points
#score=training_acc(X_train,y_train,X_test,y_test,classifiers)
# iterate over classifiers
#for name, clf in zip(names, classifiers):
# ax = plt.subplot(3, 4, i)
# clf.fit(X_train, y_train)
#score = clf.score(X_test[:,2:], y_test)
#print('score 2 features',score)
#make meshgrid
# x_min, x_max = X_train[:, 0].min()-1, X_train[:, 0].max()+1
# y_min, y_max = X_train[:, 1].min()-1, X_train[:, 1].max()+1
# xx, yy= np.meshgrid(np.arange(x_min, x_max,1) ,np.arange(y_min, y_max, 1))
# print('shape xx',xx.shape,'shape yy',yy.shape)
# print('shape ravel',np.c_[xx.ravel(),yy.ravel()].shape)
# if hasattr(clf, "decision_function"):
# Z = clf.decision_function(np.c_[xx.ravel(),yy.ravel()])
# print('xx shape',xx.shape,'yy shape',yy.shape,'Z shape',Z.shape)
# Z=Z[:,1]
# else:
# Z = clf.predict_proba(np.c_[xx.ravel(),yy.ravel()])[:,1]
# print('Z shape',Z.shape)
# Z = Z.reshape(xx.shape)
# ax.contourf(xx,yy,Z, cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(y_test))[0]), alpha=.4)
# Plot also the training points
# CS=ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(y_train))[0]))
# and testing points
# ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(y_test))[0]),edgecolors='k', alpha=0.6)
# ax.set_xticks(())
# ax.set_yticks(())
# ax.set_title(name)
#plt.suptitle(score,y=1.05,fontsize=18)
#a=0.98
#if name=="Current neural network":
# ax.text(240, -40, a,size=15, horizontalalignment='right')
#else:
#ax.text(240, -40, ('%.2f' % score).lstrip('0'),size=15, horizontalalignment='right')
#clf.fit(X_train, y_train)
#score_training = clf.score(X_train, y_train)
#print('score training',score_training)
#ax.text(x_max-0.5, y_min+0.5, ('%.2f' % score).lstrip('0'),size=15, horizontalalignment='right')
for name,clf in zip(names,classifiers):
clf.fit(X_train, y_train)
y_prediction = clf.predict(dataset_test)
print('y_prediction',y_prediction,'labels_test',labels_test)
test_score = accuracy_score(labels_test,y_prediction)
confusion = confusion_matrix(labels_test,y_prediction)
print('The accuracy of ',name,'is',test_score)
name_labels=["group0","group1","group2","group3","group4","group5"]
plot_confusion_matrix(confusion,name_labels,title=name)
if hasattr(clf, "decision_function"):
#binarize labels
lb = preprocessing.LabelBinarizer()
lb.fit([0,1,2,3,4,5])
print('y_test',y_test)
y_test_bin=lb.transform(y_test)
fpr=dict()
tpr=dict()
roc_auc=dict()
y_score = clf.fit(X_train,y_train).decision_function(X_test)
print('y_score',y_score)
print('y_score shape',y_score.shape,'y_test shape',y_test.shape)
#compute ROC curve and ROC area for each class
for j in range(6):
print(j)
fpr[j], tpr[j], _ = roc_curve(y_test_bin[:,j],y_score[:,j])
roc_auc[j]=auc(fpr[j],tpr[j])
plt.figure()
lw=2
plt.plot(fpr[2],tpr[2],color='darkorange',lw=lw,label='ROC curve (area = %0.2f)'%roc_auc[2])
            plt.show()
#ax.text(x_max-1,y_min+0.1,('%.3f' % score_training),size=10,horizontalalignment='right')
#score=trainin_acc(X_train,y_train,X_test,y_test,classifiers)
#ax.text(x_max-0.5,y_min+0.1,('%.3f' % score[i-1]),size=8,horizontalalignment='right')
print('score testing',test_score)
#plt.colorbar(CS)
i += 1
#printing our neural network accuracy
# ax=plt.subplot(3,4,11)
# ax.scatter(X_dataset[:, 0], X_dataset[:, 1], c=y_labels, cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(y_labels))[0]))
# ax.scatter(dataset_test[:, 0], dataset_test[:, 1], c=labels_test, cmap=plt.cm.get_cmap('nipy_spectral',np.shape(np.unique(labels_test))[0]),edgecolors='k', alpha=0.6)
# ax.set_xticks(())
# ax.set_yticks(())
# ax.set_title("5-layers neural network")
# score_fake=0.971
# maxi=X_dataset[:,0].max()
# mini=X_dataset[:,0].min()
# maxi1=X_dataset[:,1].max()
# mini1=X_dataset[:,1].min()
# print('x_max',maxi,'x_min',mini,'y_max',maxi1,'y_min',mini1)
# #plt.axis([0,1,-1,1])
# ax.text(maxi,mini1,('%.3f' % score_fake),size=8,horizontalalignment='right')
plt.tight_layout()
plt.show()
#def generate(args):
if __name__ == '__main__':
    np.set_printoptions(threshold=np.inf)  # 'nan' is rejected by newer numpy releases
args = parser.parse_args()
pickle_file = args.picklefile
#pickle_file_output= args.pickle_file_new
# Get the data from the folders with vtk files
inputdata = inputData.inputData()
fi = open(pickle_file,'rb')
dataset=pickle.load(fi)
test_labels =dataset["test_labels"]
train_labels =dataset["train_labels"]
valid_labels =dataset["valid_labels"]
test_dataset =dataset["test_dataset"]
train_dataset =dataset["train_dataset"]
valid_dataset =dataset["valid_dataset"]
print('counter',collections.Counter(train_labels))
#data_folders_train = inputdata.get_folder_classes_list(dataPathtrain)
#data_folders_test = inputdata.get_folder_classes_list(dataPathtest)
#pickled_datasets_train,vtklisttrain = inputdata.maybe_pickle(data_folders_train, 6, feature_points=args.feature_names)
#pickled_datasets_test,vtklisttest = inputdata.maybe_pickle(data_folders_test, 0, feature_points=args.feature_names)
#Create the labels, i.e., enumerate the groups
#dataset_train,labels_train = get_labels(pickled_datasets_train)
#print('pickled_datasets_train',pickled_datasets_train,'pickled_datasets_test',pickled_datasets_test)
#dataset_test,labels_test = get_labels(pickled_datasets_test)
# Compute the total number of shapes and train/test size
total_number_shapes_train=train_dataset.shape[0]
total_number_shapes_test=test_dataset.shape[0]
print('total number of shapes train',np.shape(train_dataset))
print('total number of shapes test', np.shape(test_dataset))
print('labels to train',train_labels,'labels to test',test_labels)
#num_train = int(args.train_size*total_number_shapes_train)
#num_valid = int((total_number_shapes_train - num_train)*args.validation_size)
# Randomize the original dataset
#print('shape before randomize',dataset_train.shape)
shuffled_dataset, shuffled_labels = inputdata.randomize(train_dataset, train_labels)
#print('shape after randomize',shuffled_dataset.shape)
#shuffled_dataset_test,shuffled_labels_test = inputdata.randomize(dataset_test,labels_test)
shuffled_dataset = np.reshape(shuffled_dataset, (total_number_shapes_train, -1))
#print('shape after reshape',shuffled_dataset.shape)
#shuffled_dataset_test = np .reshape(shuffled_dataset_test,(total_number_shapes_test,-1))
    # Generate SMOTE samples without including the valid/test samples. In some cases this may
    # raise an error when the number of samples in one class is less than 5 and SMOTE cannot continue; just run it again.
dataset_res,labels_res=generate_with_SMOTE(np.nan_to_num(shuffled_dataset),shuffled_labels)
# SANITY CHECKS
print('dataset train',np.shape(train_dataset))
print('labels train',np.shape(train_labels))
#print('dataset_res',np.shape(dataset_res))
#print('labels_res',np.shape(labels_res))
#print('num_train', num_train)
#print('num_valid', num_valid)
print('number of labels',np.shape(np.unique(train_labels)))
#print('number of labels resampled',np.shape(np.unique(labels_res)))
#print('Labels resampled',np.unique(labels_res).tolist())
print('test labels', test_labels)
print('counter after SMOTE',collections.Counter(labels_res))
#SVM_classification(dataset_res,labels_res,dataset_test,labels_test)
#clf=LinearSVC(random_state=0)
#clf=GaussianProcessClassifier(1.0 * RBF(1.0))
#clf.fit(dataset_res,labels_res)
#prediction = clf.predict(dataset_test)
#for i in range(0,total_number_shapes_test):
# head,tail = os.path.split(vtklisttest[i])
# print(tail,prediction[i])
#PCA_plot(dataset,labels,dataset_res,labels_res)
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': dataset_res,
'train_labels': labels_res,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
|
pdedumast/ShapeVariationAnalyzer
|
ShapeVariationAnalyzer/Resources/Classifier/generation_shapes.py
|
Python
|
apache-2.0
| 20,161
|
[
"Gaussian",
"VTK"
] |
4a7b700733f4898001c6e0262b963d4adedf4467b88228961d95e45c0237a3cb
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
## Contact: Nokia Corporation (qt-info@nokia.com)
##
## This file is part of the test suite of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## No Commercial Usage
## This file contains pre-release code and may not be distributed.
## You may use this file in accordance with the terms and conditions
## contained in the Technology Preview License Agreement accompanying
## this package.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Nokia gives you certain additional
## rights. These rights are described in the Nokia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## If you have questions regarding the use of this file, please contact
## Nokia at qt-info@nokia.com.
##
##
##
##
##
##
##
##
## $QT_END_LICENSE$
##
#############################################################################
import os, sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
def createGroupBox(parent, attributes = None, fill = False, fake = False):
background = CustomWidget(parent, fake)
backgroundLayout = QVBoxLayout()
backgroundLayout.setMargin(4)
background.setLayout(backgroundLayout)
groupBox = QGroupBox("&Options")
layout = QGridLayout()
groupBox.setLayout(layout)
layout.addWidget(QCheckBox("C&ase sensitive"), 0, 0)
layout.addWidget(QCheckBox("W&hole words"), 0, 1)
checkedBox = QCheckBox("Search &forwards")
checkedBox.setChecked(True)
layout.addWidget(checkedBox, 1, 0)
layout.addWidget(QCheckBox("From &start of text"), 1, 1)
backgroundLayout.addWidget(groupBox)
if attributes:
for attr in attributes:
groupBox.setAttribute(attr, True)
if not fake:
background.setAttribute(attr, True)
groupBox.setAutoFillBackground(fill)
background.setAutoFillBackground(fill)
return background
class CustomWidget(QWidget):
def __init__(self, parent, fake = False):
QWidget.__init__(self, parent)
self.fake = fake
self.fakeBrush = QBrush(Qt.red, Qt.DiagCrossPattern)
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
if self.fake:
painter.fillRect(event.rect(), QBrush(Qt.white))
painter.fillRect(event.rect(), self.fakeBrush)
painter.end()
if __name__ == "__main__":
try:
qt = sys.argv[1]
except IndexError:
qt = "4.1"
if qt != "4.0" and qt != "4.1":
sys.stderr.write("Usage: %s [4.0|4.1]\n" % sys.argv[0])
sys.exit(1)
app = QApplication(sys.argv)
exec_dir = os.path.split(os.path.abspath(sys.argv[0]))[0]
label = QLabel()
label.setPixmap(QPixmap(os.path.join(exec_dir, "lightbackground.png")))
layout = QGridLayout()
label.setLayout(layout)
if qt == "4.0":
layout.addWidget(createGroupBox(label), 0, 0, Qt.AlignCenter)
caption = QLabel("Opaque (Default)", label)
caption.setMargin(2)
layout.addWidget(caption, 1, 0, Qt.AlignCenter | Qt.AlignTop)
elif qt == "4.1":
layout.addWidget(createGroupBox(label), 0, 0, Qt.AlignCenter)
caption = QLabel("Contents Propagated (Default)", label)
caption.setAutoFillBackground(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 0, Qt.AlignCenter | Qt.AlignTop)
if qt == "4.0":
contentsWidget = createGroupBox(label)
contentsWidget.setAttribute(Qt.WA_ContentsPropagated, True)
layout.addWidget(contentsWidget, 0, 1, Qt.AlignCenter)
caption = QLabel("With WA_ContentsPropagated set", label)
caption.setMargin(2)
layout.addWidget(caption, 1, 1, Qt.AlignCenter | Qt.AlignTop)
elif qt == "4.1":
autoFillWidget = createGroupBox(label, fill = True)
layout.addWidget(autoFillWidget, 0, 1, Qt.AlignCenter)
caption = QLabel("With autoFillBackground set", label)
caption.setAutoFillBackground(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 1, Qt.AlignCenter | Qt.AlignTop)
# if qt == "4.0":
# noBackgroundWidget = createGroupBox(
# label, attributes = [Qt.WA_NoBackground], fake = True)
# layout.addWidget(noBackgroundWidget, 2, 0, Qt.AlignCenter)
# caption = QLabel("With WA_NoBackground set", label)
# caption.setWordWrap(True)
# caption.setMargin(2)
# layout.addWidget(caption, 3, 0, Qt.AlignCenter | Qt.AlignTop)
# elif qt == "4.1":
# opaqueWidget = createGroupBox(
# label, attributes = [Qt.WA_OpaquePaintEvent], fake = True)
# layout.addWidget(opaqueWidget, 2, 0, Qt.AlignCenter)
# caption = QLabel("With WA_OpaquePaintEvent set", label)
# caption.setAutoFillBackground(True)
# caption.setMargin(2)
# layout.addWidget(caption, 3, 0, Qt.AlignCenter | Qt.AlignTop)
#
# if qt == "4.0":
# contentsNoBackgroundWidget = createGroupBox(
# label, attributes = [Qt.WA_ContentsPropagated, Qt.WA_NoBackground],
# fake = True)
# layout.addWidget(contentsNoBackgroundWidget, 2, 1, Qt.AlignCenter)
# caption = QLabel("With WA_ContentsPropagated and WA_NoBackground set", label)
# caption.setMargin(2)
# layout.addWidget(caption, 3, 1, Qt.AlignCenter | Qt.AlignTop)
# elif qt == "4.1":
# opaqueAutoFillWidget = createGroupBox(
# label, attributes = [Qt.WA_OpaquePaintEvent], fill = True, fake = True)
# layout.addWidget(opaqueAutoFillWidget, 2, 1, Qt.AlignCenter)
# caption = QLabel("With WA_OpaquePaintEvent and autoFillBackground set", label)
# caption.setWordWrap(True)
# caption.setAutoFillBackground(True)
# caption.setMargin(2)
# layout.addWidget(caption, 3, 1, Qt.AlignCenter | Qt.AlignTop)
if qt == "4.0":
label.setWindowTitle("Qt 4.0: Painting Standard Qt Widgets")
elif qt == "4.1":
label.setWindowTitle("Qt 4.1: Painting Standard Qt Widgets")
label.resize(480, 140)
label.show()
sys.exit(app.exec_())
|
radekp/qt
|
doc/src/diagrams/contentspropagation/standardwidgets.py
|
Python
|
lgpl-2.1
| 6,840
|
[
"ASE"
] |
ef79bcd2a2a15dd63b146a97f1c0b9153f967664007fdd6958f90484ca5c6ea9
|
"""
Generalized linear models currently supports estimation using the one-parameter
exponential families
References
----------
Gill, Jeff. 2000. Generalized Linear Models: A Unified Approach.
SAGE QASS Series.
Green, PJ. 1984. "Iteratively reweighted least squares for maximum
likelihood estimation, and some robust and resistant alternatives."
Journal of the Royal Statistical Society, Series B, 46, 149-192.
Hardin, J.W. and Hilbe, J.M. 2007. "Generalized Linear Models and
Extensions." 2nd ed. Stata Press, College Station, TX.
McCullagh, P. and Nelder, J.A. 1989. "Generalized Linear Models." 2nd ed.
Chapman & Hall, Boca Rotan.
"""
import numpy as np
import families
from statsmodels.tools.tools import rank
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
import statsmodels.base.model as base
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
from statsmodels.tools.sm_exceptions import PerfectSeparationError
__all__ = ['GLM']
def _check_convergence(criterion, iteration, tol, maxiter):
return not ((np.fabs(criterion[iteration] - criterion[iteration-1]) > tol)
and iteration <= maxiter)
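# Hedged worked example (editor's addition): with tol=1e-8 the helper above reports
# convergence once successive deviance values differ by at most tol, or once iteration
# exceeds maxiter.
#
#   >>> crit = [np.inf, 100.0, 52.0, 51.999999999]
#   >>> _check_convergence(crit, iteration=2, tol=1e-8, maxiter=100)
#   False
#   >>> _check_convergence(crit, iteration=3, tol=1e-8, maxiter=100)
#   True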
class GLM(base.LikelihoodModel):
__doc__ = '''
Generalized Linear Models class
GLM inherits from statsmodels.LikelihoodModel
Parameters
-----------
endog : array-like
1d array of endogenous response variable. This array can be 1d or 2d.
Binomial family models accept a 2d array with two columns. If
supplied, each observation is expected to be [success, failure].
exog : array-like
A nobs x k array where `nobs` is the number of observations and `k`
        is the number of regressors. An intercept is not included by default
and should be added by the user. See `statsmodels.tools.add_constant`.
family : family class instance
The default is Gaussian. To specify the binomial distribution
family = sm.family.Binomial()
Each family can take a link instance as an argument. See
statsmodels.family.family for more information.
%(extra_params)s
Attributes
-----------
df_model : float
`p` - 1, where `p` is the number of regressors including the intercept.
df_resid : float
The number of observation `n` minus the number of regressors `p`.
endog : array
See Parameters.
exog : array
See Parameters.
family : family class instance
A pointer to the distribution family of the model.
mu : array
The estimated mean response of the transformed variable.
normalized_cov_params : array
`p` x `p` normalized covariance of the design / exogenous data.
pinv_wexog : array
For GLM this is just the pseudo inverse of the original design.
scale : float
The estimate of the scale / dispersion. Available after fit is called.
scaletype : str
The scaling used for fitting the model. Available after fit is called.
weights : array
The value of the weights after the last iteration of fit.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.scotland.load()
>>> data.exog = sm.add_constant(data.exog)
Instantiate a gamma family model with the default link function.
>>> gamma_model = sm.GLM(data.endog, data.exog,
... family=sm.families.Gamma())
>>> gamma_results = gamma_model.fit()
>>> gamma_results.params
array([-0.01776527, 0.00004962, 0.00203442, -0.00007181, 0.00011185,
-0.00000015, -0.00051868, -0.00000243])
>>> gamma_results.scale
0.0035842831734919055
>>> gamma_results.deviance
0.087388516416999198
>>> gamma_results.pearson_chi2
0.086022796163805704
>>> gamma_results.llf
-83.017202161073527
See also
--------
statsmodels.families.*
Notes
-----
Only the following combinations make sense for family and link ::
+ ident log logit probit cloglog pow opow nbinom loglog logc
Gaussian | x x x
inv Gaussian | x x x
binomial | x x x x x x x x x
    Poisson | x x x
neg binomial | x x x x
gamma | x x x
Not all of these link functions are currently available.
Endog and exog are references so that if the data they refer to are already
arrays and these arrays are changed, endog and exog will change.
**Attributes**
df_model : float
Model degrees of freedom is equal to p - 1, where p is the number
of regressors. Note that the intercept is not reported as a
degree of freedom.
df_resid : float
Residual degrees of freedom is equal to the number of observation n
minus the number of regressors p.
endog : array
See above. Note that endog is a reference to the data so that if
data is already an array and it is changed, then `endog` changes
as well.
exposure : array-like
Include ln(exposure) in model with coefficient constrained to 1.
exog : array
See above. Note that endog is a reference to the data so that if
data is already an array and it is changed, then `endog` changes
as well.
iteration : int
The number of iterations that fit has run. Initialized at 0.
family : family class instance
The distribution family of the model. Can be any family in
statsmodels.families. Default is Gaussian.
mu : array
The mean response of the transformed variable. `mu` is the value of
the inverse of the link function at eta, where eta is the linear
predicted value of the WLS fit of the transformed variable. `mu` is
only available after fit is called. See
statsmodels.families.family.fitted of the distribution family for more
information.
normalized_cov_params : array
The p x p normalized covariance of the design / exogenous data.
This is approximately equal to (X.T X)^(-1)
offset : array-like
Include offset in model with coefficient constrained to 1.
pinv_wexog : array
The pseudoinverse of the design / exogenous data array. Note that
GLM has no whiten method, so this is just the pseudo inverse of the
design.
The pseudoinverse is approximately equal to (X.T X)^(-1)X.T
scale : float
The estimate of the scale / dispersion of the model fit. Only
available after fit is called. See GLM.fit and GLM.estimate_scale
for more information.
scaletype : str
The scaling used for fitting the model. This is only available after
fit is called. The default is None. See GLM.fit for more information.
weights : array
The value of the weights after the last iteration of fit. Only
available after fit is called. See statsmodels.families.family for
the specific distribution weighting functions.
''' % {'extra_params' : base._missing_param_doc}
def __init__(self, endog, exog, family=None, offset=None, exposure=None,
missing='none'):
self._check_inputs(family, offset, exposure, endog)
super(GLM, self).__init__(endog, exog, missing=missing,
offset=self.offset, exposure=self.exposure)
if offset is None:
delattr(self, 'offset')
if exposure is None:
delattr(self, 'exposure')
#things to remove_data
self._data_attr.extend(['weights', 'pinv_wexog', 'mu', 'data_weights',
])
def initialize(self):
"""
Initialize a generalized linear model.
"""
#TODO: intended for public use?
self.history = {'fittedvalues' : [],
'params' : [np.inf],
'deviance' : [np.inf]}
self.pinv_wexog = np.linalg.pinv(self.exog)
self.normalized_cov_params = np.dot(self.pinv_wexog,
np.transpose(self.pinv_wexog))
self.df_model = rank(self.exog)-1
self.df_resid = self.exog.shape[0] - rank(self.exog)
def _check_inputs(self, family, offset, exposure, endog):
if family is None:
family = families.Gaussian()
self.family = family
if offset is not None:
offset = np.asarray(offset)
if offset.shape[0] != endog.shape[0]:
raise ValueError("offset is not the same length as endog")
self.offset = offset
if exposure is not None:
exposure = np.log(exposure)
if exposure.shape[0] != endog.shape[0]:
raise ValueError("exposure is not the same length as endog")
self.exposure = exposure
def score(self, params):
"""
        Score matrix. Not yet implemented.
"""
raise NotImplementedError
def loglike(self, *args):
"""
Loglikelihood function.
Each distribution family has its own loglikelihood function.
See statsmodels.families.family
"""
return self.family.loglike(*args)
def information(self, params):
"""
Fisher information matrix. Not yet implemented.
"""
raise NotImplementedError
def _update_history(self, tmp_result, mu, history):
"""
Helper method to update history during iterative fit.
"""
history['params'].append(tmp_result.params)
history['deviance'].append(self.family.deviance(self.endog, mu))
return history
def estimate_scale(self, mu):
"""
Estimates the dispersion/scale.
        The type of scale can be chosen in the fit method.
Parameters
----------
mu : array
mu is the mean response estimate
Returns
-------
Estimate of scale
Notes
-----
The default scale for Binomial and Poisson families is 1. The default
for the other families is Pearson's Chi-Square estimate.
See also
--------
statsmodels.glm.fit for more information
"""
if not self.scaletype:
if isinstance(self.family, (families.Binomial, families.Poisson)):
return 1.
else:
resid = self.endog - mu
return ((np.power(resid, 2) / self.family.variance(mu)).sum() \
/ self.df_resid)
if isinstance(self.scaletype, float):
return np.array(self.scaletype)
if isinstance(self.scaletype, str):
if self.scaletype.lower() == 'x2':
resid = self.endog - mu
return ((np.power(resid, 2) / self.family.variance(mu)).sum() \
/ self.df_resid)
elif self.scaletype.lower() == 'dev':
return self.family.deviance(self.endog, mu)/self.df_resid
else:
raise ValueError("Scale %s with type %s not understood" %\
(self.scaletype,type(self.scaletype)))
else:
raise ValueError("Scale %s with type %s not understood" %\
(self.scaletype, type(self.scaletype)))
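    # Hedged illustration (not part of the original source) of what the scale options above
    # compute, writing resid = endog - mu and v = family.variance(mu):
    #   scale='X2'  -> sum(resid**2 / v) / df_resid      (Pearson chi^2 over residual df)
    #   scale='dev' -> family.deviance(endog, mu) / df_resid
    #   scale=1.23  -> any float is returned unchanged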
def predict(self, params, exog=None, linear=False):
"""
Return predicted values for a design matrix
Parameters
----------
params : array-like
Parameters / coefficients of a GLM.
exog : array-like, optional
            Design / exogenous data. If exog is None, model exog is used.
linear : bool
If True, returns the linear predicted values. If False,
returns the value of the inverse of the model's link function at
the linear predicted values.
Returns
-------
An array of fitted values
"""
offset = getattr(self, 'offset', 0)
exposure = getattr(self, 'exposure', 0)
if exog is None:
exog = self.exog
if linear:
return np.dot(exog, params) + offset + exposure
else:
return self.family.fitted(np.dot(exog, params) + exposure + \
offset)
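    # Minimal usage sketch (hypothetical data), assuming the public statsmodels.api interface;
    # `linear=True` returns the linear predictor, otherwise the mean response:
    #   import numpy as np
    #   import statsmodels.api as sm
    #   x = sm.add_constant(np.random.rand(100))
    #   y = np.random.poisson(lam=2.0, size=100)
    #   mod = sm.GLM(y, x, family=sm.families.Poisson())
    #   res = mod.fit()
    #   mu_hat = mod.predict(res.params, x[:5])                 # inverse link of X*beta
    #   eta_hat = mod.predict(res.params, x[:5], linear=True)   # X*beta itself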
def fit(self, maxiter=100, method='IRLS', tol=1e-8, scale=None):
'''
Fits a generalized linear model for a given family.
        Parameters
----------
maxiter : int, optional
Default is 100.
method : string
Default is 'IRLS' for iteratively reweighted least squares. This
is currently the only method available for GLM fit.
scale : string or float, optional
`scale` can be 'X2', 'dev', or a float
The default value is None, which uses `X2` for Gamma, Gaussian,
and Inverse Gaussian.
`X2` is Pearson's chi-square divided by `df_resid`.
The default is 1 for the Binomial and Poisson families.
`dev` is the deviance divided by df_resid
tol : float
Convergence tolerance. Default is 1e-8.
'''
endog = self.endog
if endog.ndim > 1 and endog.shape[1] == 2:
data_weights = endog.sum(1) # weights are total trials
else:
data_weights = np.ones((endog.shape[0]))
self.data_weights = data_weights
if np.shape(self.data_weights) == () and self.data_weights>1:
self.data_weights = self.data_weights *\
np.ones((endog.shape[0]))
self.scaletype = scale
if isinstance(self.family, families.Binomial):
# this checks what kind of data is given for Binomial.
# family will need a reference to endog if this is to be removed from
# preprocessing
self.endog = self.family.initialize(self.endog)
if hasattr(self, 'offset'):
offset = self.offset
elif hasattr(self, 'exposure'):
offset = self.exposure
else:
offset = 0
        #TODO: would there ever be both an exposure and an offset?
mu = self.family.starting_mu(self.endog)
wlsexog = self.exog
eta = self.family.predict(mu)
dev = self.family.deviance(self.endog, mu)
if np.isnan(dev):
raise ValueError("The first guess on the deviance function "
"returned a nan. This could be a boundary "
" problem and should be reported.")
# first guess on the deviance is assumed to be scaled by 1.
# params are none to start, so they line up with the deviance
history = dict(params = [None, None], deviance=[np.inf,dev])
iteration = 0
converged = 0
criterion = history['deviance']
while not converged:
self.weights = data_weights*self.family.weights(mu)
wlsendog = eta + self.family.link.deriv(mu) * (self.endog-mu) \
- offset
wls_results = lm.WLS(wlsendog, wlsexog, self.weights).fit()
eta = np.dot(self.exog, wls_results.params) + offset
mu = self.family.fitted(eta)
history = self._update_history(wls_results, mu, history)
self.scale = self.estimate_scale(mu)
iteration += 1
if endog.squeeze().ndim == 1 and np.allclose(mu - endog, 0):
msg = "Perfect separation detected, results not available"
raise PerfectSeparationError(msg)
converged = _check_convergence(criterion, iteration, tol,
maxiter)
self.mu = mu
glm_results = GLMResults(self, wls_results.params,
wls_results.normalized_cov_params,
self.scale)
history['iteration'] = iteration
glm_results.fit_history = history
return GLMResultsWrapper(glm_results)
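# Rough sketch (illustrative only) of one IRLS step performed in GLM.fit above:
#   w    = data_weights * family.weights(mu)
#   z    = eta + family.link.deriv(mu) * (endog - mu) - offset   # working response
#   beta = lm.WLS(z, exog, w).fit().params
#   eta  = exog @ beta + offset
#   mu   = family.fitted(eta)
# The loop stops when _check_convergence judges the deviance history converged within `tol`
# or when `maxiter` iterations are reached.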
class GLMResults(base.LikelihoodModelResults):
'''
Class to contain GLM results.
GLMResults inherits from statsmodels.LikelihoodModelResults
Parameters
----------
    See statsmodels.LikelihoodModelResults
Returns
-------
**Attributes**
aic : float
Akaike Information Criterion
-2 * `llf` + 2*(`df_model` + 1)
bic : float
Bayes Information Criterion
`deviance` - `df_resid` * log(`nobs`)
deviance : float
See statsmodels.families.family for the distribution-specific deviance
functions.
df_model : float
See GLM.df_model
df_resid : float
See GLM.df_resid
fit_history : dict
        Contains information about the iterations. Its keys are `iteration`,
`deviance` and `params`.
fittedvalues : array
Linear predicted values for the fitted model.
dot(exog, params)
llf : float
        Value of the loglikelihood function evaluated at params.
See statsmodels.families.family for distribution-specific loglikelihoods.
model : class instance
Pointer to GLM model instance that called fit.
mu : array
See GLM docstring.
nobs : float
The number of observations n.
normalized_cov_params : array
See GLM docstring
null_deviance : float
The value of the deviance function for the model fit with a constant
as the only regressor.
params : array
The coefficients of the fitted model. Note that interpretation
of the coefficients often depends on the distribution family and the
data.
pearson_chi2 : array
Pearson's Chi-Squared statistic is defined as the sum of the squares
of the Pearson residuals.
pinv_wexog : array
See GLM docstring.
pvalues : array
The two-tailed p-values for the parameters.
resid_anscombe : array
Anscombe residuals. See statsmodels.families.family for distribution-
specific Anscombe residuals.
resid_deviance : array
Deviance residuals. See statsmodels.families.family for distribution-
specific deviance residuals.
resid_pearson : array
Pearson residuals. The Pearson residuals are defined as
(`endog` - `mu`)/sqrt(VAR(`mu`)) where VAR is the distribution
specific variance function. See statsmodels.families.family and
statsmodels.families.varfuncs for more information.
resid_response : array
        Response residuals. The response residuals are defined as
`endog` - `fittedvalues`
resid_working : array
Working residuals. The working residuals are defined as
`resid_response`/link'(`mu`). See statsmodels.family.links for the
derivatives of the link functions. They are defined analytically.
scale : float
The estimate of the scale / dispersion for the model fit.
See GLM.fit and GLM.estimate_scale for more information.
stand_errors : array
The standard errors of the fitted GLM. #TODO still named bse
See Also
--------
statsmodels.LikelihoodModelResults
'''
def __init__(self, model, params, normalized_cov_params, scale):
super(GLMResults, self).__init__(model, params,
normalized_cov_params=normalized_cov_params, scale=scale)
self.family = model.family
self._endog = model.endog
self.nobs = model.endog.shape[0]
self.mu = model.mu
self._data_weights = model.data_weights
self.df_resid = model.df_resid
self.df_model = model.df_model
self.pinv_wexog = model.pinv_wexog
self._cache = resettable_cache()
# are these intermediate results needed or can we just
# call the model's attributes?
@cache_readonly
def resid_response(self):
return self._data_weights * (self._endog-self.mu)
@cache_readonly
def resid_pearson(self):
return np.sqrt(self._data_weights) * (self._endog-self.mu)/\
np.sqrt(self.family.variance(self.mu))
@cache_readonly
def resid_working(self):
val = (self.resid_response / self.family.link.deriv(self.mu))
val *= self._data_weights
return val
@cache_readonly
def resid_anscombe(self):
return self.family.resid_anscombe(self._endog, self.mu)
@cache_readonly
def resid_deviance(self):
return self.family.resid_dev(self._endog, self.mu)
@cache_readonly
def pearson_chi2(self):
chisq = (self._endog- self.mu)**2 / self.family.variance(self.mu)
chisq *= self._data_weights
chisqsum = np.sum(chisq)
return chisqsum
@cache_readonly
def fittedvalues(self):
return self.mu
@cache_readonly
def null(self):
endog = self._endog
model = self.model
exog = np.ones((len(endog), 1))
if hasattr(model, 'offset'):
return GLM(endog, exog, offset=model.offset,
family=self.family).fit().mu
elif hasattr(model, 'exposure'):
return GLM(endog, exog, exposure=model.exposure,
family=self.family).fit().mu
else:
wls_model = lm.WLS(endog, exog, weights=self._data_weights)
return wls_model.fit().fittedvalues
@cache_readonly
def deviance(self):
return self.family.deviance(self._endog, self.mu)
@cache_readonly
def null_deviance(self):
return self.family.deviance(self._endog, self.null)
@cache_readonly
def llf(self):
_modelfamily = self.family
if isinstance(_modelfamily, families.NegativeBinomial):
val = _modelfamily.loglike(self.model.endog,
fittedvalues = np.dot(self.model.exog,self.params))
else:
val = _modelfamily.loglike(self._endog, self.mu,
scale=self.scale)
return val
@cache_readonly
def aic(self):
return -2 * self.llf + 2*(self.df_model+1)
@cache_readonly
def bic(self):
return self.deviance - self.df_resid*np.log(self.nobs)
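    # Worked illustration of the two formulas above, using the beetle example whose Stata
    # output is reproduced near the bottom of this file (llf = -76.9456, df_model = 3,
    # deviance = 73.765, df_resid = 20, nobs = 24):
    #   aic = -2 * (-76.9456) + 2 * (3 + 1)  = 161.891   (Stata reports aic / nobs ~= 6.745)
    #   bic = 73.765 - 20 * log(24)         ~= 10.204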
def remove_data(self):
#GLM has alias/reference in result instance
self._data_attr.extend([i for i in self.model._data_attr
if not '_data.' in i])
super(self.__class__, self).remove_data()
#TODO: what are these in results?
self._endog = None
self._data_weights = None
remove_data.__doc__ = base.LikelihoodModelResults.remove_data.__doc__
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
-----------
yname : string, optional
Default is `y`
xname : list of strings, optional
            Default is `var_##` for each of the p regressors
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Model Family:', [self.family.__class__.__name__]),
('Link Function:', [self.family.link.__class__.__name__]),
('Method:', ['IRLS']),
('Date:', None),
('Time:', None),
('No. Iterations:', ["%d" % self.fit_history['iteration']]),
]
top_right = [('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None),
('Scale:', [self.scale]),
('Log-Likelihood:', None),
('Deviance:', ["%#8.5g" % self.deviance]),
('Pearson chi2:', ["%#6.3g" % self.pearson_chi2])
]
if title is None:
title = "Generalized Linear Model Regression Results"
#create summary tables
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right, #[],
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=True)
#diagnostic table is not used yet:
#smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
# yname=yname, xname=xname,
# title="")
return smry
def summary2(self, yname=None, xname=None, title=None, alpha=.05,
float_format="%.4f"):
"""Experimental summary for regression Results
Parameters
-----------
yname : string
Name of the dependent variable (optional)
xname : List of strings of length equal to the number of parameters
Names of the independent variables (optional)
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format: string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
self.method = 'IRLS'
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
return smry
class GLMResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {
'resid_anscombe' : 'rows',
'resid_deviance' : 'rows',
'resid_pearson' : 'rows',
'resid_response' : 'rows',
'resid_working' : 'rows'
}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(GLMResultsWrapper, GLMResults)
if __name__ == "__main__":
import statsmodels.api as sm
import numpy as np
data = sm.datasets.longley.load()
#data.exog = add_constant(data.exog)
GLMmod = GLM(data.endog, data.exog).fit()
    GLMT = GLMmod.summary()  # the summary() defined above takes no `returns` keyword
## GLMT[0].extend_right(GLMT[1])
## print(GLMT[0])
## print(GLMT[2])
GLMTp = GLMmod.summary(title='Test GLM')
"""
From Stata
. webuse beetle
. glm r i.beetle ldose, family(binomial n) link(cloglog)
Iteration 0: log likelihood = -79.012269
Iteration 1: log likelihood = -76.94951
Iteration 2: log likelihood = -76.945645
Iteration 3: log likelihood = -76.945645
Generalized linear models No. of obs = 24
Optimization : ML Residual df = 20
Scale parameter = 1
Deviance = 73.76505595 (1/df) Deviance = 3.688253
Pearson = 71.8901173 (1/df) Pearson = 3.594506
Variance function: V(u) = u*(1-u/n) [Binomial]
Link function : g(u) = ln(-ln(1-u/n)) [Complementary log-log]
AIC = 6.74547
Log likelihood = -76.94564525 BIC = 10.20398
------------------------------------------------------------------------------
| OIM
r | Coef. Std. Err. z P>|z| [95% Conf. Interval]
-------------+----------------------------------------------------------------
beetle |
2 | -.0910396 .1076132 -0.85 0.398 -.3019576 .1198783
3 | -1.836058 .1307125 -14.05 0.000 -2.09225 -1.579867
|
ldose | 19.41558 .9954265 19.50 0.000 17.46458 21.36658
_cons | -34.84602 1.79333 -19.43 0.000 -38.36089 -31.33116
------------------------------------------------------------------------------
"""
#NOTE: wfs dataset has been removed due to a licensing issue
# example of using offset
#data = sm.datasets.wfs.load()
# get offset
#offset = np.log(data.exog[:,-1])
#exog = data.exog[:,:-1]
# convert dur to dummy
#exog = sm.tools.categorical(exog, col=0, drop=True)
# drop reference category
# convert res to dummy
#exog = sm.tools.categorical(exog, col=0, drop=True)
# convert edu to dummy
#exog = sm.tools.categorical(exog, col=0, drop=True)
# drop reference categories and add intercept
#exog = sm.add_constant(exog[:,[1,2,3,4,5,7,8,10,11,12]])
#endog = np.round(data.endog)
#mod = sm.GLM(endog, exog, family=sm.families.Poisson()).fit()
#res1 = GLM(endog, exog, family=sm.families.Poisson(),
# offset=offset).fit(tol=1e-12, maxiter=250)
#exposuremod = GLM(endog, exog, family=sm.families.Poisson(),
# exposure = data.exog[:,-1]).fit(tol=1e-12,
# maxiter=250)
#assert(np.all(res1.params == exposuremod.params))
|
bavardage/statsmodels
|
statsmodels/genmod/generalized_linear_model.py
|
Python
|
bsd-3-clause
| 30,165
|
[
"Gaussian"
] |
09180ae627ef6b18e25ca2d71d1879853c24e873c7166216b7fb9017cde86445
|
import sys
import random
from . import wildcardFind, element, loadModel, ChemCompt, exists, Annotator, Pool, ZombiePool,PoolBase,CplxEnzBase,Function,ZombieFunction
import numpy as np
#Todo : To be written
# --Notes
# --StimulusTable
def writeKkit( modelpath, filename,sceneitems=None):
global NA
NA = 6.0221415e23
global xmin,xmax,ymin,ymax
global cord
global multi
xmin = ymin = 0
xmax = ymax = 1
multi = 50
cord = {}
compt = wildcardFind(modelpath+'/##[ISA=ChemCompt]')
maxVol = estimateDefaultVol(compt)
f = open(filename, 'w')
writeHeader (f,maxVol)
    if len(compt) > 0:
if sceneitems == None:
            #if sceneitems is None (loaded from a script) then check whether x,y coordinates exist
xmin,ymin,xmax,ymax,positionInfoExist = getCor(modelpath,sceneitems)
if not positionInfoExist:
                #in case of SBML, cspace or python the Annotator is not populated, so positionInfoExist = False
                #print(" x and y coordinates don't exist, so auto coordinates")
                print(" auto co-ordinates need to be applied")
pass
else:
            #This is when it comes from the GUI where the objects are already laid out on the scene,
            # so those co-ordinates are used
xmin,ymin,xmax,ymax,positionInfoExist = getCor(modelpath,sceneitems)
gtId_vol = writeCompartment(modelpath,compt,f)
writePool(modelpath,f,gtId_vol)
reacList = writeReac(modelpath,f)
enzList = writeEnz(modelpath,f)
writeSumtotal(modelpath,f)
storeReacMsg(reacList,f)
storeEnzMsg(enzList,f)
writeGui(f)
tgraphs = wildcardFind(modelpath+'/##[ISA=Table2]')
if tgraphs:
writeplot(tgraphs,f)
storePlotMsgs(tgraphs,f)
writeFooter(f)
return True
else:
print("Warning: writeKkit:: No model found on " , modelpath)
return False
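# Minimal usage sketch (paths are hypothetical), assuming a chemical model has already been
# loaded with moose.loadModel as done in the __main__ block at the bottom of this file and
# that this module is importable as moose.writekkit:
#   import moose
#   moose.loadModel('mymodel.g', '/mymodel', 'gsl')
#   from moose.writekkit import writeKkit
#   writeKkit('/mymodel', 'mymodel_out.g')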
def storeCplxEnzMsgs( enz, f ):
for sub in enz.neighbors["subOut"]:
s = "addmsg /kinetics/" + trimPath( sub ) + " /kinetics/" + trimPath(enz) + " SUBSTRATE n \n";
s = s+ "addmsg /kinetics/" + trimPath( enz ) + " /kinetics/" + trimPath( sub ) + " REAC sA B \n";
f.write(s)
for prd in enz.neighbors["prd"]:
s = "addmsg /kinetics/" + trimPath( enz ) + " /kinetics/" + trimPath(prd) + " MM_PRD pA\n";
f.write( s )
for enzOut in enz.neighbors["enzOut"]:
s = "addmsg /kinetics/" + trimPath( enzOut ) + " /kinetics/" + trimPath(enz) + " ENZYME n\n";
s = s+ "addmsg /kinetics/" + trimPath( enz ) + " /kinetics/" + trimPath(enzOut) + " REAC eA B\n";
f.write( s )
def storeMMenzMsgs( enz, f):
subList = enz.neighbors["subOut"]
prdList = enz.neighbors["prd"]
enzDestList = enz.neighbors["enzDest"]
for esub in subList:
es = "addmsg /kinetics/" + trimPath(element(esub)) + " /kinetics/" + trimPath(enz) + " SUBSTRATE n \n";
es = es+"addmsg /kinetics/" + trimPath(enz) + " /kinetics/" + trimPath(element(esub)) + " REAC sA B \n";
f.write(es)
for eprd in prdList:
es = "addmsg /kinetics/" + trimPath( enz ) + " /kinetics/" + trimPath( element(eprd)) + " MM_PRD pA \n";
f.write(es)
for eenzDest in enzDestList:
enzDest = "addmsg /kinetics/" + trimPath( element(eenzDest)) + " /kinetics/" + trimPath( enz ) + " ENZYME n \n";
f.write(enzDest)
def storeEnzMsg( enzList, f):
for enz in enzList:
enzClass = enz.className
if (enzClass == "ZombieMMenz" or enzClass == "MMenz"):
storeMMenzMsgs(enz, f)
else:
storeCplxEnzMsgs( enz, f )
def writeEnz( modelpath,f):
enzList = wildcardFind(modelpath+'/##[ISA=EnzBase]')
for enz in enzList:
x = random.randrange(0,10)
y = random.randrange(0,10)
textcolor = "green"
color = "red"
k1 = 0;
k2 = 0;
k3 = 0;
nInit = 0;
concInit = 0;
n = 0;
conc = 0;
enzParent = enz.parent
        if not (isinstance(enzParent, PoolBase) or isinstance(enzParent, ZombiePool)):
            print(" raise exception: enz doesn't have a pool as parent")
return False
else:
vol = enzParent.volume * NA * 1e-3;
isMichaelisMenten = 0;
enzClass = enz.className
if (enzClass == "ZombieMMenz" or enzClass == "MMenz"):
k1 = enz.numKm
k3 = enz.kcat
k2 = 4.0*k3;
k1 = (k2 + k3) / k1;
isMichaelisMenten = 1;
elif (enzClass == "ZombieEnz" or enzClass == "Enz"):
k1 = enz.k1
k2 = enz.k2
k3 = enz.k3
cplx = enz.neighbors['cplx'][0]
nInit = cplx.nInit[0];
xe = cord[enz]['x']
ye = cord[enz]['y']
x = ((xe-xmin)/(xmax-xmin))*multi
y = ((ye-ymin)/(ymax-ymin))*multi
#y = ((ymax-ye)/(ymax-ymin))*multi
einfo = enz.path+'/info'
if exists(einfo):
color = Annotator(einfo).getField('color')
textcolor = Annotator(einfo).getField('textColor')
f.write("simundump kenz /kinetics/" + trimPath(enz) + " " + str(0)+ " " +
str(concInit) + " " +
str(conc) + " " +
str(nInit) + " " +
str(n) + " " +
str(vol) + " " +
str(k1) + " " +
str(k2) + " " +
str(k3) + " " +
str(0) + " " +
str(isMichaelisMenten) + " " +
"\"\"" + " " +
str(color) + " " + str(textcolor) + " \"\"" +
" " + str(x) + " " + str(y) + " "+str(0)+"\n")
return enzList
def storeReacMsg(reacList,f):
for reac in reacList:
reacPath = trimPath( reac);
sublist = reac.neighbors["subOut"]
prdlist = reac.neighbors["prd"]
for sub in sublist:
s = "addmsg /kinetics/" + trimPath( sub ) + " /kinetics/" + reacPath + " SUBSTRATE n \n";
s = s + "addmsg /kinetics/" + reacPath + " /kinetics/" + trimPath( sub ) + " REAC A B \n";
f.write(s)
for prd in prdlist:
s = "addmsg /kinetics/" + trimPath( prd ) + " /kinetics/" + reacPath + " PRODUCT n \n";
s = s + "addmsg /kinetics/" + reacPath + " /kinetics/" + trimPath( prd ) + " REAC B A\n";
f.write( s)
def writeReac(modelpath,f):
reacList = wildcardFind(modelpath+'/##[ISA=ReacBase]')
for reac in reacList :
color = "blue"
textcolor = "red"
kf = reac.numKf
kb = reac.numKb
xr = cord[reac]['x']
yr = cord[reac]['y']
x = ((xr-xmin)/(xmax-xmin))*multi
y = ((yr-ymin)/(ymax-ymin))*multi
#y = ((ymax-yr)/(ymax-ymin))*multi
rinfo = reac.path+'/info'
if exists(rinfo):
color = Annotator(rinfo).getField('color')
textcolor = Annotator(rinfo).getField('textColor')
f.write("simundump kreac /kinetics/" + trimPath(reac) + " " +str(0) +" "+ str(kf) + " " + str(kb) + " \"\" " +
str(color) + " " + str(textcolor) + " " + str(x) + " " + str(y) + " 0\n")
return reacList
def trimPath(mobj):
original = mobj
mobj = element(mobj)
found = False
while not isinstance(mobj,ChemCompt) and mobj.path != "/":
mobj = element(mobj.parent)
found = True
if mobj.path == "/":
print("compartment is not found with the given path and the path has reached root ",original)
return
    #other than the kinetics compartment, all the other names are converted to groups in Genesis, which are placed under /kinetics
# Any moose object comes under /kinetics then one level down the path is taken.
# e.g /group/poolObject or /Reac
if found:
if mobj.name != "kinetics":
splitpath = original.path[(original.path.find(mobj.name)):len(original.path)]
else:
pos = original.path.find(mobj.name)
slash = original.path.find('/',pos+1)
splitpath = original.path[slash+1:len(original.path)]
return splitpath
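# Hedged examples of the intended behaviour of trimPath (paths are hypothetical):
#   trimPath(element('/model/kinetics/group1/poolA'))  ->  'group1/poolA'
#   trimPath(element('/model/comptB/reacX'))            ->  'comptB/reacX'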
def writeSumtotal( modelpath,f):
funclist = wildcardFind(modelpath+'/##[ISA=Function]')
for func in funclist:
funcInputs = element(func.path+'/x[0]')
s = ""
for funcInput in funcInputs.neighbors["input"]:
s = s+ "addmsg /kinetics/" + trimPath(funcInput)+ " /kinetics/" + trimPath(element(func.parent)) + " SUMTOTAL n nInit\n"
f.write(s)
def storePlotMsgs( tgraphs,f):
s = ""
if tgraphs:
for graph in tgraphs:
slash = graph.path.find('graphs')
if not slash > -1:
slash = graph.path.find('graph_0')
if slash > -1:
conc = graph.path.find('conc')
if conc > -1 :
tabPath = graph.path[slash:len(graph.path)]
else:
slash1 = graph.path.find('/',slash)
tabPath = "graphs/conc1" +graph.path[slash1:len(graph.path)]
if len(element(graph).msgOut):
poolPath = (element(graph).msgOut)[0].e2.path
poolEle = element(poolPath)
poolName = poolEle.name
bgPath = (poolEle.path+'/info')
bg = Annotator(bgPath).color
s = s+"addmsg /kinetics/" + trimPath( poolEle ) + " /" + tabPath + \
" PLOT Co *" + poolName + " *" + bg +"\n";
f.write(s)
def writeplot( tgraphs,f ):
if tgraphs:
for graphs in tgraphs:
slash = graphs.path.find('graphs')
if not slash > -1:
slash = graphs.path.find('graph_0')
if slash > -1:
conc = graphs.path.find('conc')
if conc > -1 :
tabPath = graphs.path[slash:len(graphs.path)]
else:
slash1 = graphs.path.find('/',slash)
tabPath = "graphs/conc1" +graphs.path[slash1:len(graphs.path)]
if len(element(graphs).msgOut):
poolPath = (element(graphs).msgOut)[0].e2.path
poolEle = element(poolPath)
poolAnno = (poolEle.path+'/info')
fg = Annotator(poolAnno).textColor
f.write("simundump xplot " + tabPath + " 3 524288 \\\n" + "\"delete_plot.w <s> <d>; edit_plot.D <w>\" " + fg + " 0 0 1\n")
def writePool(modelpath,f,volIndex ):
for p in wildcardFind(modelpath+'/##[ISA=PoolBase]'):
slave_enable = 0
if (p.className == "BufPool" or p.className == "ZombieBufPool"):
pool_children = p.children
            if len(pool_children) == 0:
slave_enable = 4
else:
for pchild in pool_children:
if not(pchild.className == "ZombieFunction") and not(pchild.className == "Function"):
slave_enable = 4
else:
slave_enable = 0
break
xp = cord[p]['x']
yp = cord[p]['y']
x = ((xp-xmin)/(xmax-xmin))*multi
y = ((yp-ymin)/(ymax-ymin))*multi
#y = ((ymax-yp)/(ymax-ymin))*multi
pinfo = p.path+'/info'
if exists(pinfo):
color = Annotator(pinfo).getField('color')
textcolor = Annotator(pinfo).getField('textColor')
geometryName = volIndex[p.volume]
volume = p.volume * NA * 1e-3
f.write("simundump kpool /kinetics/" + trimPath(p) + " 0 " +
str(p.diffConst) + " " +
str(0) + " " +
str(0) + " " +
str(0) + " " +
str(p.nInit) + " " +
str(0) + " " + str(0) + " " +
str(volume)+ " " +
str(slave_enable) +
" /kinetics"+ geometryName + " " +
str(color) +" " + str(textcolor) + " " + str(x) + " " + str(y) + " "+ str(0)+"\n")
def getxyCord(xcord,ycord,list1,sceneitems):
for item in list1:
if not ( isinstance(item,Function) and isinstance(item,ZombieFunction) ):
if sceneitems == None:
objInfo = item.path+'/info'
xpos = xyPosition(objInfo,'x')
ypos = xyPosition(objInfo,'y')
else:
co = sceneitems[item]
xpos = co.scenePos().x()
ypos =-co.scenePos().y()
cord[item] ={ 'x': xpos,'y':ypos}
xcord.append(xpos)
ycord.append(ypos)
def xyPosition(objInfo,xory):
try:
return(float(element(objInfo).getField(xory)))
except ValueError:
return (float(0))
def getCor(modelRoot,sceneitems):
xmin = ymin = 0.0
xmax = ymax = 1.0
positionInfoExist = False
    xcord, ycord = [], []
    mollist, realist, enzlist, cplxlist, tablist, funclist = [], [], [], [], [], []
meshEntryWildcard = '/##[ISA=ChemCompt]'
if modelRoot != '/':
meshEntryWildcard = modelRoot+meshEntryWildcard
for meshEnt in wildcardFind(meshEntryWildcard):
mol_cpl = wildcardFind(meshEnt.path+'/##[ISA=PoolBase]')
realist = wildcardFind(meshEnt.path+'/##[ISA=ReacBase]')
enzlist = wildcardFind(meshEnt.path+'/##[ISA=EnzBase]')
funclist = wildcardFind(meshEnt.path+'/##[ISA=Function]')
tablist = wildcardFind(meshEnt.path+'/##[ISA=StimulusTable]')
if mol_cpl or funclist or enzlist or realist or tablist:
for m in mol_cpl:
if isinstance(element(m.parent),CplxEnzBase):
cplxlist.append(m)
objInfo = m.parent.path+'/info'
elif isinstance(element(m),PoolBase):
mollist.append(m)
objInfo =m.path+'/info'
if sceneitems == None:
xx = xyPosition(objInfo,'x')
yy = xyPosition(objInfo,'y')
else:
c = sceneitems[m]
xx = c.scenePos().x()
yy =-c.scenePos().y()
cord[m] ={ 'x': xx,'y':yy}
xcord.append(xx)
ycord.append(yy)
getxyCord(xcord,ycord,realist,sceneitems)
getxyCord(xcord,ycord,enzlist,sceneitems)
getxyCord(xcord,ycord,funclist,sceneitems)
getxyCord(xcord,ycord,tablist,sceneitems)
xmin = min(xcord)
xmax = max(xcord)
ymin = min(ycord)
ymax = max(ycord)
positionInfoExist = not(len(np.nonzero(xcord)[0]) == 0 \
and len(np.nonzero(ycord)[0]) == 0)
return(xmin,ymin,xmax,ymax,positionInfoExist)
def writeCompartment(modelpath,compts,f):
index = 0
volIndex = {}
for compt in compts:
if compt.name != "kinetics":
xgrp = xmax -random.randrange(1,10)
ygrp = ymin +random.randrange(1,10)
x = ((xgrp-xmin)/(xmax-xmin))*multi
#y = ((ymax-ygrp)/(ymax-ymin))*multi
y = ((ygrp-ymin)/(ymax-ymin))*multi
f.write("simundump group /kinetics/" + compt.name + " 0 " + "blue" + " " + "green" + " x 0 0 \"\" defaultfile \\\n" )
f.write( " defaultfile.g 0 0 0 " + str(x) + " " + str(y) + " 0\n")
i = 0
l = len(compts)
geometry = ""
for compt in compts:
size = compt.volume
ndim = compt.numDimensions
vecIndex = l-i-1
#print vecIndex
i = i+1
xgeo = xmax -random.randrange(1,10)
ygeo = ymin +random.randrange(1,10)
x = ((xgeo-xmin)/(xmax-xmin))*multi
#y = ((ymax-ygeo)/(ymax-ymin))*multi
y = ((ygeo-ymin)/(ymax-ymin))*multi
if vecIndex > 0:
geometry = geometry+"simundump geometry /kinetics" + "/geometry[" + str(vecIndex) +"] 0 " + str(size) + " " + str(ndim) + " sphere " +" \"\" white black "+ str(x) + " " +str(y) +" 0\n";
volIndex[size] = "/geometry["+str(vecIndex)+"]"
else:
geometry = geometry+"simundump geometry /kinetics" + "/geometry 0 " + str(size) + " " + str(ndim) + " sphere " +" \"\" white black " + str(x) + " "+str(y)+ " 0\n";
volIndex[size] = "/geometry"
f.write(geometry)
writeGroup(modelpath,f,xmax,ymax)
return volIndex
def writeGroup(modelpath,f,xmax,ymax):
ignore = ["graphs","moregraphs","geometry","groups","conc1","conc2","conc3","conc4"]
for g in wildcardFind(modelpath+'/##[TYPE=Neutral]'):
if not g.name in ignore:
if trimPath(g) != None:
xgrp1 = xmax - random.randrange(1,10)
ygrp1 = ymin + random.randrange(1,10)
x = ((xgrp1-xmin)/(xmax-xmin))*multi
#y = ((ymax-ygrp1)/(ymax-ymin))*multi
y = ((ygrp1-ymin)/(ymax-ymin))*multi
f.write("simundump group /kinetics/" + trimPath(g) + " 0 " + "blue" + " " + "green" + " x 0 0 \"\" defaultfile \\\n")
f.write(" defaultfile.g 0 0 0 " + str(x) + " " + str(y) + " 0\n")
def writeHeader(f,maxVol):
simdt = 0.001
plotdt = 0.1
rawtime = 100
maxtime = 100
defaultVol = maxVol
f.write("//genesis\n"
"// kkit Version 11 flat dumpfile\n\n"
"// Saved on " + str(rawtime)+"\n"
"include kkit {argv 1}\n"
"FASTDT = " + str(simdt)+"\n"
"SIMDT = " +str(simdt)+"\n"
"CONTROLDT = " +str(plotdt)+"\n"
"PLOTDT = " +str(plotdt)+"\n"
"MAXTIME = " +str(maxtime)+"\n"
"TRANSIENT_TIME = 2"+"\n"
"VARIABLE_DT_FLAG = 0"+"\n"
"DEFAULT_VOL = " +str(defaultVol)+"\n"
"VERSION = 11.0 \n"
"setfield /file/modpath value ~/scripts/modules\n"
"kparms\n\n"
)
f.write( "//genesis\n"
"initdump -version 3 -ignoreorphans 1\n"
"simobjdump table input output alloced step_mode stepsize x y z\n"
"simobjdump xtree path script namemode sizescale\n"
"simobjdump xcoredraw xmin xmax ymin ymax\n"
"simobjdump xtext editable\n"
"simobjdump xgraph xmin xmax ymin ymax overlay\n"
"simobjdump xplot pixflags script fg ysquish do_slope wy\n"
"simobjdump group xtree_fg_req xtree_textfg_req plotfield expanded movealone \\\n"
" link savename file version md5sum mod_save_flag x y z\n"
"simobjdump geometry size dim shape outside xtree_fg_req xtree_textfg_req x y z\n"
"simobjdump kpool DiffConst CoInit Co n nInit mwt nMin vol slave_enable \\\n"
" geomname xtree_fg_req xtree_textfg_req x y z\n"
"simobjdump kreac kf kb notes xtree_fg_req xtree_textfg_req x y z\n"
"simobjdump kenz CoComplexInit CoComplex nComplexInit nComplex vol k1 k2 k3 \\\n"
" keepconc usecomplex notes xtree_fg_req xtree_textfg_req link x y z\n"
"simobjdump stim level1 width1 delay1 level2 width2 delay2 baselevel trig_time \\\n"
" trig_mode notes xtree_fg_req xtree_textfg_req is_running x y z\n"
"simobjdump xtab input output alloced step_mode stepsize notes editfunc \\\n"
" xtree_fg_req xtree_textfg_req baselevel last_x last_y is_running x y z\n"
"simobjdump kchan perm gmax Vm is_active use_nernst notewriteReacs xtree_fg_req \\\n"
" xtree_textfg_req x y z\n"
"simobjdump transport input output alloced step_mode stepsize dt delay clock \\\n"
" kf xtree_fg_req xtree_textfg_req x y z\n"
"simobjdump proto x y z\n"
)
def estimateDefaultVol(compts):
maxVol = 0
vol = []
for compt in compts:
vol.append(compt.volume)
if len(vol) > 0:
return max(vol)
return maxVol
def writeGui( f ):
f.write("simundump xgraph /graphs/conc1 0 0 99 0.001 0.999 0\n"
"simundump xgraph /graphs/conc2 0 0 100 0 1 0\n"
"simundump xgraph /moregraphs/conc3 0 0 100 0 1 0\n"
"simundump xgraph /moregraphs/conc4 0 0 100 0 1 0\n"
"simundump xcoredraw /edit/draw 0 -6 4 -2 6\n"
"simundump xtree /edit/draw/tree 0 \\\n"
" /kinetics/#[],/kinetics/#[]/#[],/kinetics/#[]/#[]/#[][TYPE!=proto],/kinetics/#[]/#[]/#[][TYPE!=linkinfo]/##[] \"edit_elm.D <v>; drag_from_edit.w <d> <S> <x> <y> <z>\" auto 0.6\n"
"simundump xtext /file/notes 0 1\n")
def writeFooter( f ):
f.write( "\nenddump\n" +
"complete_loading\n")
if __name__ == "__main__":
import sys
filename = sys.argv[1]
modelpath = filename[0:filename.find('.')]
loadModel('/home/harsha/genesis_files/gfile/'+filename,'/'+modelpath,"gsl")
output = '/home/harsha/Desktop/moose2genesis/moosefolder_cmd__sep2_'+filename
written = writeKkit('/'+modelpath,output)
if written:
print(" file written to ",output)
else:
print(" could be written to kkit format")
|
rahulgayatri23/moose-core
|
python/moose/writekkit.py
|
Python
|
gpl-3.0
| 25,726
|
[
"MOOSE"
] |
e07cd135d679f666dfdac76f5ce5403d7a971e9af3a8ef7ce8a789521f426505
|
""" Proxy Renewal agent is the key element of the Proxy Repository
which maintains the user proxies alive
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN MyProxyRenewalAgent
:end-before: ##END
:dedent: 2
:caption: MyProxyRenewalAgent options
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import concurrent.futures
from DIRAC import gLogger, S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.FrameworkSystem.DB.ProxyDB import ProxyDB
DEFAULT_MAIL_FROM = "proxymanager@diracgrid.org"
class MyProxyRenewalAgent(AgentModule):
def initialize(self):
requiredLifeTime = self.am_getOption("MinimumLifeTime", 3600)
renewedLifeTime = self.am_getOption("RenewedLifeTime", 54000)
mailFrom = self.am_getOption("MailFrom", DEFAULT_MAIL_FROM)
self.proxyDB = ProxyDB(useMyProxy=True, mailFrom=mailFrom)
gLogger.info("Minimum Life time : %s" % requiredLifeTime)
gLogger.info("Life time on renew : %s" % renewedLifeTime)
gLogger.info("MyProxy server : %s" % self.proxyDB.getMyProxyServer())
gLogger.info("MyProxy max proxy time : %s" % self.proxyDB.getMyProxyMaxLifeTime())
return S_OK()
def __renewProxyForCredentials(self, userDN, userGroup):
lifeTime = self.am_getOption("RenewedLifeTime", 54000)
gLogger.info("Renewing for %s@%s %s secs" % (userDN, userGroup, lifeTime))
retVal = self.proxyDB.renewFromMyProxy(userDN, userGroup, lifeTime=lifeTime)
if not retVal["OK"]:
gLogger.error("Failed to renew proxy", "for %s@%s : %s" % (userDN, userGroup, retVal["Message"]))
else:
gLogger.info("Renewed proxy for %s@%s" % (userDN, userGroup))
def __treatRenewalCallback(self, oTJ, exceptionList):
gLogger.exception(lException=exceptionList)
def execute(self):
"""The main agent execution method"""
self.proxyDB.purgeLogs()
gLogger.info("Purging expired requests")
retVal = self.proxyDB.purgeExpiredRequests()
if retVal["OK"]:
gLogger.info(" purged %s requests" % retVal["Value"])
gLogger.info("Purging expired proxies")
retVal = self.proxyDB.purgeExpiredProxies()
if retVal["OK"]:
gLogger.info(" purged %s proxies" % retVal["Value"])
retVal = self.proxyDB.getCredentialsAboutToExpire(self.am_getOption("MinimumLifeTime", 3600))
if not retVal["OK"]:
return retVal
data = retVal["Value"]
gLogger.info("Renewing %s proxies..." % len(data))
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
futures = []
for record in data:
userDN = record[0]
userGroup = record[1]
futures.append(executor.submit(self.__renewProxyForCredentials, userDN, userGroup))
return S_OK()
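# Rough configuration sketch (option names taken from the am_getOption calls above; the
# values shown are the coded defaults, and the exact CS section layout is an assumption):
#   MyProxyRenewalAgent
#   {
#     MinimumLifeTime = 3600
#     RenewedLifeTime = 54000
#     MailFrom = proxymanager@diracgrid.org
#   }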
|
ic-hep/DIRAC
|
src/DIRAC/FrameworkSystem/Agent/MyProxyRenewalAgent.py
|
Python
|
gpl-3.0
| 3,024
|
[
"DIRAC"
] |
e9afbf577cdcf87280dc260154b38500d507a814154e35b484f926b68d2128a9
|
import sys
from pyneuroml import pynml
####################################################################
# Choose a LEMS/NeuroML2 file and run it with jNeuroML
example_lems_file = 'LEMS_NML2_Ex5_DetCell.xml'
results1 = pynml.run_lems_with_jneuroml(example_lems_file, nogui=True, load_saved_data=True)
####################################################################
# Convert LEMS/NeuroML2 file to NEURON with jNeuroML & run
if not '-noneuron' in sys.argv: # To allow skipping of this for ease of testing
results2 = pynml.run_lems_with_jneuroml_neuron(example_lems_file, nogui=True, load_saved_data=True)
####################################################################
# Reload & plot results
if not '-nogui' in sys.argv:
from matplotlib import pyplot as plt
for key in results1.keys():
plt.xlabel('Time (ms)')
plt.ylabel('...')
plt.grid('on')
if key != 't':
plt.plot(results1['t'],results1[key], label="jNeuroML: "+key)
if not '-noneuron' in sys.argv:
plt.plot(results2['t'],results2[key], label="jNeuroML_NEURON: "+key)
plt.show()
|
34383c/pyNeuroML
|
examples/run_jneuroml_plot_matplotlib.py
|
Python
|
lgpl-3.0
| 1,165
|
[
"NEURON"
] |
2a7cf3405cc3bfd1977bee1d330658c32351aceed4be027a2a6bc0b07eaa98dc
|
import time
from pathlib import Path
import cv2
import numpy as np
from pydub import AudioSegment
from pydub.playback import play
import cplexus as plexus
VIDEO_FILE = 'videos/lower3.mp4'
Path("videos/output/original").mkdir(parents=True, exist_ok=True)
Path("videos/output/training").mkdir(parents=True, exist_ok=True)
Path("videos/output/evaluation").mkdir(parents=True, exist_ok=True)
audio = AudioSegment.from_file(VIDEO_FILE, "mp4")
audio_samples = audio.get_array_of_samples()
print('Normalizing audio...')
max_hz = max(audio_samples)
min_hz = min(audio_samples)
#audio_samples = [x + (abs(min_hz)) for x in audio_samples]
#audio_samples = [x / (max_hz + abs(min_hz)) for x in audio_samples]
audio_samples = np.array(audio_samples)
audio_samples = audio_samples + (abs(min_hz))
audio_samples = np.true_divide(audio_samples, (max_hz + abs(min_hz)))
cap = cv2.VideoCapture(VIDEO_FILE)
frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
buf = np.empty((frameHeight, frameWidth, 3), np.dtype('uint8'))
ret = True
chunk_size = int((1) / frameCount * len(audio_samples)) - int(0 / frameCount * len(audio_samples))
SIZE = chunk_size + frameWidth * frameHeight * 3 + 2048
INPUT_SIZE = chunk_size
OUTPUT_SIZE = frameWidth * frameHeight * 3
CONNECTIVITY = 16 / SIZE
PRECISION = 3
TRAINING_DURATION = 0.01
RANDOMLY_FIRE = False
DYNAMIC_OUTPUT = False
VISUALIZATION = False
net = plexus.Network(
SIZE,
INPUT_SIZE,
OUTPUT_SIZE,
CONNECTIVITY,
PRECISION,
RANDOMLY_FIRE,
DYNAMIC_OUTPUT,
VISUALIZATION
)
print("\n*** LEARNING ***")
for n in range(1):
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
print("Doing iteration {0}.".format(str(n + 1)))
fc = 0
while (fc < frameCount and ret):
print(fc)
(i, j) = ([fc * chunk_size, (fc + 1) * chunk_size])
chunk = audio_samples[i:j]
ret, buf = cap.read()
buf_normalized = np.true_divide(buf, 255).flatten()
# Load data into network
net.load(chunk, buf_normalized)
cv2.namedWindow('video')
cv2.imshow('video', buf)
output = net.output
output = np.array(output) * 255
learn = output.reshape((frameHeight, frameWidth, 3))
learn = learn.astype(np.uint8)
cv2.namedWindow('learn')
cv2.imshow('learn', learn)
cv2.imwrite("videos/output/original/{0}.png".format(str(fc)), buf)
cv2.imwrite("videos/output/training/{0}.png".format(str(fc)), learn)
cv2.waitKey(int(1000 * TRAINING_DURATION))
fc += 1
net.load(chunk)
#cap.release()
print("\n\n*** TESTING ***")
fc = 0
while (fc < frameCount):
print(fc)
(i, j) = ([fc * chunk_size, (fc + 1) * chunk_size])
chunk = audio_samples[i:j]
# Wait for the data to propagate and get the output
net.load(chunk)
output = net.output
output = np.array(output) * 255
buf = output.reshape((frameHeight, frameWidth, 3))
buf = buf.astype(np.uint8)
cv2.namedWindow('output')
cv2.imshow('output', buf)
cv2.imwrite("videos/output/evaluation/{0}.png".format(str(fc)), buf)
cv2.waitKey(int(1000 * TRAINING_DURATION))
fc += 1
net.freeze()
print("\n{0} waves are executed throughout the network".format(
str(net.wave_counter)
))
print("\nIn total: {0} times a random non-sensory neuron is fired\n".format(
str(net.fire_counter)
))
print("Exit the program")
|
mertyildiran/Plexus
|
examples/audio_visual.py
|
Python
|
mit
| 3,496
|
[
"NEURON"
] |
094a50ea65ae6a42ffd70c9d0be144a0b9b0fdedeb000c2b5925134dc93adafc
|
# $Id$
"""
This is a comment
"""
from __future__ import print_function
__RCSID__ = "$Revision: 1.16 $"
# $Source: /tmp/libdirac/tmp.stZoy15380/dirac/DIRAC3/DIRAC/Core/Workflow/test/WFSamples.py,v $
from DIRAC.Core.Workflow.Parameter import *
from DIRAC.Core.Workflow.Module import *
from DIRAC.Core.Workflow.Step import *
from DIRAC.Core.Workflow.Workflow import *
from DIRAC.Core.Workflow.WorkflowReader import *
import time
""" Collection of objects for the testing"""
body1 = """class PrintOutput(object):
# Warning!!
    # class name MUST be the same as a Type field
# and MUST have method execute
class_var = 0 # static class parameter
def __init__(self):
# constructor code
self.enable = 1 #local class parameter
self.version = 0
self.debug = False
self.message = 'empty message'
def execute(self):
# main execution function
if self.enable :
if self.debug:
print 'Executing Module = ',str(type(self))
print str(type(self.message)), self.message
else:
print 'Type1 - pass'
def __del__(self):
pass
"""
body2 = """class Summ(object):
# Warning!!
    # class name MUST be the same as a Type field
# and MUST have method execute
class_var = 0 # static class parameter
def __init__(self):
# constructor code
self.enable = 1 #local class parameter
self.version = 0
self.debug = False
self.input1 = 0
self.input2 = 0
self.result = 0
def execute(self):
# main execution function
if self.enable :
if self.debug:
print 'Executing Module = ',str(type(self))
self.result=self.input1+self.input2
if self.debug:
print 'inputs are', self.input1, self.input2
print 'Result is',self.result
else:
print str(type(self)), 'pass'
def __del__(self):
pass
"""
body3 = "from calendar import Calendar\n"
op1 = Parameter("enable","True","bool","","",True, False, "if False execution disabled")
op2 = Parameter("version","1.25","float","","",False, True, "we can get version of the module")
#op3 = Parameter("message","\'this is ugly module\'","string","","",False,False,"message for the printing")
op3 = Parameter("message","@{inparam4}","string","","",False,False,"message for the printing")
op4 = Parameter("debug", "False", "bool", "", "", True, False, "allows to print additional information")
op5 = Parameter("input1","2","int","","",True,False,"argument for addition")
op6 = Parameter("input2","5","int","","",True,False,"argument for addition")
op7 = Parameter("result","0","int","","",False,True,"argument for addition")
md1 = ModuleDefinition('PrintOutput')
md1.addParameter( op1 )
md1.addParameter( op2 )
md1.addParameter( op3 )
md1.addParameter( op4 )
md1.setDescription('Module to print input message')
md1.setBody(body1)
md2 = ModuleDefinition('Summ')
md2.setBody(body2)
md2.addParameter( op1 )
md2.addParameter( op2 )
md2.addParameter( op4 )
md2.addParameter( op5 )
md2.addParameter( op6 )
md2.addParameter( op7 )
md3 = ModuleDefinition('PrintOutput')
md3.addParameter( op1 )
md3.addParameter( op2 )
md3.addParameter( op3 )
md3.addParameter( op4 )
md3.setDescription('Module to print input message')
md3.setBody(body1)
sd1 = StepDefinition('TotalSumm')
sd1.addModule(md3)
sd1.addModule(md1)
sd1.addModule(md2)
sd1.addParameter(Parameter("enable_inst1","True","bool","","",True, True, "enabling instance 1"))
sd1.addParameter(Parameter("enable_inst2","True","bool","","",True, True, "enabling instance 2"))
sd1.addParameter(Parameter("enable_inst3","True","bool","","",True, True, "enabling instance 3"))
sd1.addParameter(Parameter("debug","True","bool","","",True, True, "enabling additional printing"))
sd1.addParameter(Parameter("input1","3.8","float","","",True, False, "input slot"))
sd1.addParameter(Parameter("input2","8.2","float","","",True, False, "input slot"))
sd1.addParameter(Parameter("input3","2.0","float","","",True, False, "input slot"))
sd1.addParameter(Parameter("result","0.0","float","","",False, True, "output"))
#sd1.append(Parameter("message","empty message","string","","",True, False, "output"))
sd1.addParameter(Parameter("message","@{inparam4}","string","","",True, False, "output"))
mi1 = sd1.createModuleInstance('Summ', 'mi1')
mi2 = sd1.createModuleInstance('PrintOutput', 'mi2')
mi3 = sd1.createModuleInstance('Summ', 'mi3')
mi4 = sd1.createModuleInstance('PrintOutput', 'mi4')
mi5 = sd1.createModuleInstance('Summ', 'mi5')
mi6 = sd1.createModuleInstance('PrintOutput', 'mi6')
mi7 = sd1.createModuleInstance('PrintOutput', 'mi7')
mi8 = sd1.createModuleInstance('PrintOutput', 'mi8')
mi9 = sd1.createModuleInstance('PrintOutput', 'mi9')
mi10 = sd1.createModuleInstance('PrintOutput', 'mi10')
mi11 = sd1.createModuleInstance('PrintOutput', 'mi11')
mi12 = sd1.createModuleInstance('PrintOutput', 'mi12')
mi13 = sd1.createModuleInstance('PrintOutput', 'mi13')
mi1.findParameter('enable').link('self','enable_inst1')
mi1.findParameter('debug').link('self','debug')
mi1.findParameter('input1').link('self','input1')
mi1.findParameter('input2').link('self','input2')
mi2.findParameter('enable').link('mi1','enable')
mi2.findParameter('debug').link('self','debug')
#mi2.findParameter('message').link('mi1','result') # taken from the level of step
mi3.findParameter('enable').link('self','enable_inst2')
mi3.findParameter('debug').link('self','debug')
mi3.findParameter('input1').link('self','input2')
mi3.findParameter('input2').link('self','input3')
mi4.findParameter('enable').link('mi3','enable')
mi4.findParameter('debug').link('self','debug')
#mi4.findParameter('message').link('mi3','result') # taken from the previous instance
mi4.findParameter('message').link('mi3','result') # taken from the previous instance
mi5.findParameter('enable').link('self','enable_inst3')
mi5.findParameter('debug').link('self','debug')
mi5.findParameter('input1').link('mi1','result')
mi5.findParameter('input2').link('mi3','result')
mi6.findParameter('enable').link('mi5','enable')
mi6.findParameter('debug').link('self','debug')
#mi6.findParameter('message').link('mi5','result') # taken from the previous instance (chain propagation)
mdouble = 2.3567
mi6.findParameter('message').setValue(mdouble, 'float')
mlist = ['file1', 'file2', 'file3']
mi7.findParameter('message').setValue(mlist, 'list')
mdict = {'jack': 4098, 'sape': 4139}
mi8.findParameter('message').setValue(mdict, 'dict')
mtuple = (1, 2, 3, 4, 5)
mi9.findParameter('message').setValue(mtuple, 'tuple')
mstring = """\"Clever string of mine; WR.Output = \"Collection=\'EVTTAGS/TagCreator/1\' ADDRESS=\'/Event\' DATAFILE=\""""
#mstring = "Clever string of mine;"
mi10.findParameter('message').setValue(mstring, 'string')
mbool = False
mi11.findParameter('message').setValue(mbool, 'bool')
mint = 12672
mi12.findParameter('message').setValue(mint, 'int')
mlistdict = [{'SORTIE_@{inparam4}': 4098, 'sape_@{inpar2}': 4139},{"@{inparam4}jj@{inpar2}":234}]
mi12.findParameter('message').setValue(mlistdict, 'list')
sd1.findParameter('result').link('mi5','result')
#sd1.findParameter('message').link('self','inparam4') # taken from the level of step
w1 = Workflow(name='main')
w1.setOrigin('/home/user/blablabla')
w1.setDescription("Pretty long description\n several lines of text")
w1.setDescrShort("Oooooo short description")
w1.addStep(sd1)
w1.addParameter(Parameter("final","0.0","float","","",False, True, "Final result"))
w1.addParameter(Parameter("debug","False","bool","","",True, False, "Debug switch"))
w1.addParameter(Parameter("message","vv@{inparam4}jj@{inpar2}ge","string","","",True, False, ""))
w1.addParameter(Parameter("inparam4","VER","string","","",True, False, ""))
w1.addParameter(Parameter("inpar2","SORTIE@{inparam4}","string","","",True, False, ""))
si1 = w1.createStepInstance('TotalSumm', 'si1')
si2 = w1.createStepInstance('TotalSumm', 'si2')
si1.findParameter('debug').link('self','debug')
si2.findParameter('debug').link('self','debug')
si2.findParameter('input1').link('si1','result') # linking the results
w1.findParameter('final').link('si2','result')
#============================================================================
# test section
#============================================================================
w1.toXMLFile('/afs/cern.ch/user/g/gkuznets/test1.xml')
w2 = fromXMLFile("/afs/cern.ch/user/g/gkuznets/test1.xml")
w2.toXMLFile('/afs/cern.ch/user/g/gkuznets/test2.xml')
w4 = fromXMLFile("/afs/cern.ch/user/g/gkuznets/test2.xml")
print(w4.createCode())
eval(compile(w4.createCode(),'<string>','exec'))
print("===================================================================")
w4.execute()
|
fstagni/DIRAC
|
Core/Workflow/test/WFSamples.py
|
Python
|
gpl-3.0
| 8,629
|
[
"DIRAC"
] |
da5a6980e881da770b121b55efd7cd4727f4e0a82ad2c80d9db2b2e4c94fbd26
|
#!/usr/bin/env python
# from distutils.core import setup
from setuptools import setup # , find_packages
import glob
setup(name='cargo-port',
version='1.0',
description='Python Distribution Utilities',
author='Intergalactic Utilities Commission',
maintainer='Eric Rasche',
url='https://github.com/erasche/community-package-cache',
packages=['cargoport'],
scripts=list(glob.glob("bin/galaxy*")),
install_requires=['click'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics'
],
)
|
gregvonkuster/cargo-port
|
setup.py
|
Python
|
mit
| 743
|
[
"Galaxy"
] |
eab1c3605b28060619d2d371fe6b8ad0a57d21e36018386d5a319c2aa7f896cb
|
import numpy as np
import math
from espressomd.shapes import Rhomboid
from espressomd.shapes import Cylinder
small_epsilon = 0.000000001
large_number = 10000000.0
output_precision = 14
def custom_str(realn):
return str('{:.{prec}f}'.format(realn, prec = output_precision))
def get_triangle_normal(a, b, c):
"""
Returns the normal vector of a triangle given by points a,b,c.
Parameters
----------
a : list of :obj:`float`
vector with 3 components, point a
b : list of :obj:`float`
vector with 3 components, point b
c : list of :obj:`float`
vector with 3 components, point c
"""
n = [0.0, 0.0, 0.0]
n[0] = (b[1] - a[1]) * (c[2] - a[2]) - (b[2] - a[2]) * (c[1] - a[1])
n[1] = (b[2] - a[2]) * (c[0] - a[0]) - (b[0] - a[0]) * (c[2] - a[2])
n[2] = (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])
return np.array(n)
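# Equivalent formulation (illustrative): the normal computed above is the cross product of
# the two edge vectors, i.e. np.cross(np.array(b) - np.array(a), np.array(c) - np.array(a)).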
def norm(vect):
"""
Returns the norm of a vector.
Parameters
----------
vect : list of :obj:`float`
vector with 3 components
"""
v = np.array(vect)
return np.sqrt(np.dot(v,v))
def vec_distance(a, b):
"""
Returns the length of vector between points a and b.
Parameters
----------
a : list of :obj:`float`
vector with 3 components, point a
b : list of :obj:`float`
vector with 3 components, point b
"""
return norm(np.array(a) - np.array(b))
def area_triangle(a, b, c):
"""
Returns the area of a triangle given by points a,b,c.
Parameters
----------
a : list of :obj:`float`
vector with 3 components, point a
b : list of :obj:`float`
vector with 3 components, point b
c : list of :obj:`float`
vector with 3 components, point c
"""
n = get_triangle_normal(a, b, c)
area = 0.5 * norm(n)
return area
def angle_btw_triangles(P1, P2, P3, P4):
"""
Returns the size of an angle between triangles given by points P2, P1, P3 and P2, P3, P4.
Parameters
----------
P1 : list of :obj:`float`
vector with 3 components, point P1
P2 : list of :obj:`float`
vector with 3 components, point P2
P3 : list of :obj:`float`
vector with 3 components, point P3
P4 : list of :obj:`float`
vector with 3 components, point P4
"""
n1 = get_triangle_normal(P2, P1, P3)
n2 = get_triangle_normal(P2, P3, P4)
tmp11 = np.dot(n1, n2)
tmp11 = tmp11 * abs(tmp11)
tmp22 = np.dot(n1, n1)
tmp33 = np.dot(n2, n2)
tmp11 /= (tmp22 * tmp33)
if tmp11 > 0:
tmp11 = np.sqrt(tmp11)
else:
tmp11 = - np.sqrt(- tmp11)
if tmp11 >= 1.0:
tmp11 = 0.0
elif tmp11 <= -1.:
tmp11 = np.pi
phi = np.pi - math.acos(tmp11)
if (np.dot(n1, np.array(P4)) - np.dot(n1, np.array(P1))) < 0:
phi = 2.0 * np.pi - phi
return phi
def discard_epsilon(x):
"""
Returns zero if the argument is too small.
Parameters
----------
x : :obj:`float`
real number
"""
if (x > -small_epsilon and x < small_epsilon):
res = 0.0
else:
res = x
return res
def oif_neo_hookean_nonlin(lambd):
"""
Defines NeoHookean nonlinearity.
Parameters
----------
lambd : :obj:`float`
real number
"""
# Defined by (19) from Dupin2007
res = (pow(lambd, 0.5) + pow(lambd, -2.5)) / (lambd + pow(lambd, -3.))
return res
def oif_calc_stretching_force(ks, pA, pB, dist0, dist):
"""
Calculates nonlinear stretching forces between two points on an edge.
Parameters
----------
ks : :obj:`float`
coefficient of the stretching, spring stiffness
pA : list of :obj:`float`
position of the first particle
pB : list of :obj:`float`
position of the second particle
dist0 : :obj:`float`
relaxed distance btw particles
dist : :obj:`float`
current distance btw particles
"""
# this has to correspond to the calculation in oif_local_forces.hpp: calc_oif_local
# as of now, corresponds to git commit f156f9b44dcfd3cef9dd5537a1adfc903ac4772a
dr = dist - dist0
# nonlinear stretching:
lambd = 1.0 * dist / dist0
fac = ks * oif_neo_hookean_nonlin(lambd) * dr
# no negative sign here! different from C implementation
# due to reverse order of vector subtraction
f = fac * (np.array(pB) - np.array(pA)) / dist
return f
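# Hedged numerical illustration (made-up inputs) of the function above:
#   f = oif_calc_stretching_force(ks=1.0, pA=[0., 0., 0.], pB=[1.1, 0., 0.], dist0=1.0, dist=1.1)
# Here dr = 0.1 and lambd = 1.1, so f = ks * oif_neo_hookean_nonlin(1.1) * 0.1 * (pB - pA) / 1.1,
# a vector pointing from pA towards pB along the stretched edge.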
def oif_calc_linear_stretching_force(ks, pA, pB, dist0, dist):
"""
Calculates linear stretching forces between two points on an edge.
Parameters
----------
ks : :obj:`float`
coefficient of the stretching, spring stiffness
pA : list of :obj:`float`
position of the first particle
pB : list of :obj:`float`
position of the second particle
dist0 : :obj:`float`
relaxed distance btw particles
dist : :obj:`float`
current distance btw particles
"""
dr = dist - dist0
fac = ks * dr
# no negative sign here! different from C implementation due to
# reverse order of vector subtraction
f = fac * (np.array(pB) - np.array(pA)) / dist
return f
def oif_calc_bending_force(kb, pA, pB, pC, pD, phi0, phi):
"""
Calculates bending forces for four points on two adjacent triangles.
Parameters
----------
kb : :obj:`float`
coefficient of the stretching, spring stiffness
pA : list of :obj:`float`
position of the first particle
pB : list of :obj:`float`
position of the second particle
pC : list of :obj:`float`
position of the third particle
pD : list of :obj:`float`
position of the fourth particle
phi0 : :obj:`float`
relaxed angle btw two triangles
phi : :obj:`float`
current angle btw two triangles
"""
# this has to correspond to the calculation in oif_local_forces.hpp: calc_oif_local
# as of now, corresponds to git commit f156f9b44dcfd3cef9dd5537a1adfc903ac4772a
n1 = get_triangle_normal(pB, pA, pC)
n2 = get_triangle_normal(pB, pC, pD)
angles = (phi - phi0) / phi0
fac = kb * angles
f1 = fac * np.array(n1) / norm(n1)
f2 = fac * np.array(n2) / norm(n2)
f = [f1[0], f1[1], f1[2], f2[0], f2[1], f2[2]]
return f
def oif_calc_local_area_force(kal, pA, pB, pC, A0, A):
"""
Calculates local area forces between three points in one triangle.
Parameters
----------
kal : :obj:`float`
coefficient of the stretching, spring stiffness
pA : list of :obj:`float`
position of the first particle
pB : list of :obj:`float`
position of the second particle
pC : list of :obj:`float`
position of the third particle
A0 : :obj:`float`
relaxed area of the triangle
A : :obj:`float`
current area of the triangle
"""
# this has to correspond to the calculation in oif_local_forces.hpp: calc_oif_local
# except for division by 3 - each triangle enters this calculation once, while each triangle enters the
# calc_oif_local three times
# as of now, corresponds to git commit f156f9b44dcfd3cef9dd5537a1adfc903ac4772a
centroid = np.array((pA + pB + pC) / 3.0)
delta_area = A - A0
ta = centroid - pA
ta_norm = norm(ta)
tb = centroid - pB
tb_norm = norm(tb)
tc = centroid - pC
tc_norm = norm(tc)
common_factor = kal * delta_area / (ta_norm * ta_norm + tb_norm * tb_norm + tc_norm * tc_norm)
# local area force for first node
f1 = common_factor * ta
# local area force for second node
f2 = common_factor * tb
# local area force for third node
f3 = common_factor * tc
f = [f1[0], f1[1], f1[2], f2[0], f2[1], f2[2], f3[0], f3[1], f3[2]]
return f
def oif_calc_global_area_force(kag, pA, pB, pC, Ag0, Ag):
"""
Calculates global area forces between three points in a triangle.
Parameters
----------
kag : :obj:`float`
coefficient of the stretching, spring stiffness
pA : list of :obj:`float`
position of the first particle
pB : list of :obj:`float`
position of the second particle
pC : list of :obj:`float`
position of the third particle
Ag0 : :obj:`float`
relaxed surface area of the cell
Ag : :obj:`float`
current surface area of the cell
"""
# this has to correspond to the calculation in oif_global_forces.hpp: add_oif_global_forces
# as of now, corresponds to git commit f156f9b44dcfd3cef9dd5537a1adfc903ac4772a
centroid = np.array((pA + pB + pC) / 3.0)
delta = Ag - Ag0
ta = centroid - pA
ta_norm = norm(ta)
tb = centroid - pB
tb_norm = norm(tb)
tc = centroid - pC
tc_norm = norm(tc)
A = area_triangle(pA, pB, pC)
common_factor = kag * A * delta / (ta_norm * ta_norm + tb_norm * tb_norm + tc_norm * tc_norm)
# global area force for first node
f1 = common_factor * ta
# global area force for second node
f2 = common_factor * tb
# global area force for third node
f3 = common_factor * tc
f = [f1[0], f1[1], f1[2], f2[0], f2[1], f2[2], f3[0], f3[1], f3[2]]
return f
def oif_calc_volume_force(kv, pA, pB, pC, V0, V):
"""
Calculates volume forces for three points in a triangle.
Parameters
----------
kv : :obj:`float`
coefficient of the stretching, spring stiffness
pA : list of :obj:`float`
position of the first particle
pB : list of :obj:`float`
position of the second particle
pC : list of :obj:`float`
position of the third particle
V0 : :obj:`float`
relaxed volume of the cell
V : :obj:`float`
current volume of the cell
"""
# this has to correspond to the calculation in oif_global_forces.hpp: add_oif_global_forces
# as of now, corresponds to git commit f156f9b44dcfd3cef9dd5537a1adfc903ac4772a
n = get_triangle_normal(pA, pB, pC)
dn = norm(n)
vv = (V - V0) / V0
A = area_triangle(pA, pB, pC)
f = kv * vv * A * np.array(n) / (dn * 3.0)
return f
def output_vtk_rhomboid(rhom_shape, out_file):
"""
Outputs the VTK files for visualisation of a rhomboid in e.g. Paraview.
Parameters
----------
rhom_shape : :obj:`shape`
rhomboid shape
out_file : :obj:`str`
filename for the output
"""
corner = rhom_shape.corner
a = rhom_shape.a
b = rhom_shape.b
c = rhom_shape.c
output_file = open(out_file, "w")
output_file.write("# vtk DataFile Version 3.0\n")
output_file.write("Data\n")
output_file.write("ASCII\n")
output_file.write("DATASET POLYDATA\n")
output_file.write("POINTS 8 float\n")
output_file.write(str(corner[0]) + " " + str(corner[1]) + " " + str(corner[2]) + "\n")
output_file.write(str(corner[0] + a[0]) + " " + str(corner[1] + a[1]) + " " + str(corner[2] + a[2]) + "\n")
output_file.write(str(corner[0] + a[0] + b[0]) + " " + str(corner[1] + a[1] + b[1]) + " " +
str(corner[2] + a[2] + b[2]) + "\n")
output_file.write(str(corner[0] + b[0]) + " " + str(corner[1] + b[1]) + " " + str(corner[2] + b[2]) + "\n")
output_file.write(str(corner[0] + c[0]) + " " + str(corner[1] + c[1]) + " " + str(corner[2] + c[2]) + "\n")
output_file.write(str(corner[0] + a[0] + c[0]) + " " + str(corner[1] + a[1] + c[1]) + " " +
str(corner[2] + a[2] + c[2]) + "\n")
output_file.write(str(corner[0] + a[0] + b[0] + c[0]) + " " + str(corner[1] + a[1] + b[1] + c[1]) + " " +
str(corner[2] + a[2] + b[2] + c[2]) + "\n")
output_file.write(str(corner[0] + b[0] + c[0]) + " " + str(corner[1] + b[1] + c[1]) + " " +
str(corner[2] + b[2] + c[2]) + "\n")
output_file.write("POLYGONS 6 30\n")
output_file.write("4 0 1 2 3\n")
output_file.write("4 4 5 6 7\n")
output_file.write("4 0 1 5 4\n")
output_file.write("4 2 3 7 6\n")
output_file.write("4 0 4 7 3\n")
output_file.write("4 1 2 6 5")
output_file.close()
return 0
def output_vtk_cylinder(cyl_shape, n, out_file):
"""
Outputs the VTK files for visualisation of a cylinder in e.g. Paraview.
Parameters
----------
cyl_shape : :obj:`shape`
cylindrical shape
n : :obj:`int`
number of discretization sections
out_file : :obj:`str`
filename for the output
"""
# length is the full height of the cylinder (note: used to be just half in the previous versions)
# only vertical cylinders are supported for now, i.e. with axis (0.0, 0.0, 1.0)
axis = cyl_shape.axis
length = cyl_shape.length
radius = cyl_shape.radius
center = cyl_shape.center
check_axis = True
if axis[0] != 0.0:
check_axis = False
if axis[1] != 0.0:
check_axis = False
if axis[2] == 0.0:
check_axis = False
if check_axis is False:
raise Exception("output_vtk_cylinder: Output for this type of cylinder is not supported yet.")
axisZ = 1.0
# setting points on perimeter
alpha = 2 * np.pi / n
points = 2 * n
# shift center to the bottom circle
p1 = center - length * np.array(axis) / 2.0
output_file = open(out_file, "w")
output_file.write("# vtk DataFile Version 3.0\n")
output_file.write("Data\n")
output_file.write("ASCII\n")
output_file.write("DATASET POLYDATA\n")
output_file.write("POINTS " + str(points) + " float\n")
for i in range(0, n):
output_file.write(
str(p1[0] + radius*np.cos(i*alpha)) + " " + str(p1[1] + radius*np.sin(i*alpha)) + " " +
str(p1[2]) + "\n")
for i in range(0, n):
output_file.write(
str(p1[0] + radius*np.cos(i*alpha)) + " " + str(p1[1] + radius*np.sin(i*alpha)) + " " +
str(p1[2] + length*axisZ) + "\n")
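# n + 2 polygons follow: two n-gon caps plus n side rectangles; the size field
# counts every integer in the connectivity lists, i.e. (n + 1) per cap and 5
# per rectangle, giving 5*n + (n + 1)*2.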
output_file.write("POLYGONS " + str(n+2) + " " + str(5*n + (n + 1)*2) + "\n")
# writing bottom "circle"
output_file.write(str(n) + " ")
for i in range(0, n - 1):
output_file.write(str(i) + " ")
output_file.write(str(n - 1) + "\n")
# writing top "circle"
output_file.write(str(n) + " ")
for i in range(0, n - 1):
output_file.write(str(i + n) + " ")
output_file.write(str(2 * n - 1) + "\n")
# writing sides - rectangles
for i in range(0, n - 1):
output_file.write("4 " + str(i) + " " + str(i + 1) + " " + str(i + n + 1) + " " + str(i + n) + "\n")
output_file.write("4 " + str(n - 1) + " " + str(0) + " " + str(n) + " " + str(2 * n - 1) + "\n")
output_file.close()
return 0
def output_vtk_lines(lines, out_file):
"""
Outputs the VTK files for visualisation of lines in e.g. Paraview.
Parameters
----------
lines : list of :obj:`float`
lines is a list of pairs of points p1, p2
each pair represents a line segment to output to vtk
each line in lines contains 6 floats: p1x, p1y, p1z, p2x, p2y, p2z
out_file : :obj:`str`
filename for the output
"""
n_lines = len(lines)
output_file = open(out_file, "w")
output_file.write("# vtk DataFile Version 3.0\n")
output_file.write("Data\n")
output_file.write("ASCII\n")
output_file.write("DATASET POLYDATA\n")
output_file.write("POINTS " + str(2*n_lines) + " float\n")
for i in range(0, n_lines):
one_line = lines[i]
output_file.write(str(one_line[0]) + " " + str(one_line[1]) + " " + str(one_line[2]) + "\n")
output_file.write(str(one_line[3]) + " " + str(one_line[4]) + " " + str(one_line[5]) + "\n")
output_file.write("LINES " + str(n_lines) + " " + str(3 * n_lines) + "\n")
for i in range(0, n_lines):
output_file.write(str(2) + " " + str(2*i) + " " + str(2*i+1) + "\n")
output_file.close()
return 0
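# A minimal usage sketch: two line segments, each given as the six floats
# p1x, p1y, p1z, p2x, p2y, p2z (coordinates and filename are illustrative only).
def _example_output_vtk_lines():
    segments = [[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 1.0, 0.0]]
    output_vtk_lines(segments, "example_lines.vtk")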
def output_vtk_pore(axis, length, outer_rad_left, outer_rad_right, pos, rad_left, rad_right,
smoothing_radius, m, out_file):
"""
Outputs the VTK files for visualisation of a pore in e.g. Paraview.
Parameters
----------
axis : list of :obj:`float`
3 floats specifying the axis
length : :obj:`float`
length of pore
outer_rad_left : :obj:`float`
outer left radius of pore
outer_rad_right : :obj:`float`
outer right radius of pore
rad_left : :obj:`float`
inner left radius of pore
rad_right : :obj:`float`
inner right radius of pore
smoothing_radius : :obj:`float`
smoothing radius for surface connecting outer and inner radii of the pore
pos : list of :obj:`float`
3 floats specifying position of the center of the pore
m : :obj:`int`
number of discretization sections
out_file : :obj:`str`
filename for the output
"""
# length is the length of the pore without the smoothing part
# for now, only axis=(1,0,0) is supported
# should implement rotation
# m = 10 is usually sufficient
if ".vtk" not in out_file:
print("output_vtk_pore warning: A file in VTK format will be written without the .vtk extension.")
# n must be even therefore:
n = 2 * m
# setting points on perimeter
alpha = 2 * np.pi / n
beta = 2 * np.pi / n
number_of_points = 2 * n * (n // 2 + 1)
output_file = open(out_file, "w")
output_file.write("# vtk DataFile Version 3.0\n")
output_file.write("Data\n")
output_file.write("ASCII\n")
output_file.write("DATASET POLYDATA\n")
output_file.write("POINTS " + str(number_of_points) + " float\n")
# shift center to the left half torus
p1 = pos - length / 2 * np.array(axis)
# points on the left half torus
for j in range(0, n // 2 + 1):
for i in range(0, n):
output_file.write(str(p1[0] - np.sin(j * beta)) + " " +
str(p1[1] + (rad_left + smoothing_radius - np.cos(j * beta)) * np.cos(i * alpha)) + " " +
str(p1[2] + (rad_left + smoothing_radius - np.cos(j * beta)) * np.sin(i * alpha)) + "\n")
n_points_left = n * (n // 2 + 1)
# shift center to the right half torus
p1 = pos + length / 2 * np.array(axis)
# points on the right half torus
for j in range(0, n // 2 + 1):
for i in range(0, n):
output_file.write(str(p1[0] + np.sin(j * beta)) + " " +
str(p1[1] + (rad_right + smoothing_radius - np.cos(j * beta)) * np.cos(i * alpha)) + " " +
str(p1[2] + (rad_right + smoothing_radius - np.cos(j * beta)) * np.sin(i * alpha)) + "\n")
number_of_rectangles = n * n + 2 * n
output_file.write("POLYGONS " + str(number_of_rectangles) + " " + str(5 * number_of_rectangles) + "\n")
# writing inner side rectangles
for i in range(0, n - 1):
output_file.write("4 " + str(i) + " " +
str(i + 1) + " " +
str(i + n_points_left + 1) + " " +
str(i + n_points_left) + "\n")
output_file.write("4 " + str(n - 1) + " " +
str(0) + " " +
str(n_points_left) + " " +
str(n_points_left + n - 1) + "\n")
# writing outer side rectangles
for i in range(0, n - 1):
output_file.write("4 " + str(n_points_left - n + i) + " " +
str(n_points_left - n + i + 1) + " " +
str(n_points_left - n + i + n_points_left + 1) + " " +
str(n_points_left - n + i + n_points_left) + "\n")
output_file.write("4 " + str(n_points_left - n + n - 1) + " " +
str(n_points_left - n) + " " +
str(n_points_left - n + n_points_left) + " " +
str(n_points_left - n + n_points_left + n - 1) + "\n")
# writing rectangles on the left half of the torus
for j in range(0, n // 2):
for i in range(0, n - 1):
output_file.write("4 " + str(n * j + i) + " " +
str(n * j + i + 1) + " " +
str(n * j + i + n + 1) + " " +
str(n * j + i + n) + "\n")
output_file.write("4 " + str(n * j + n - 1) + " " +
str(n * j) + " " +
str(n * j + n) + " " +
str(n * j + 2 * n - 1) + "\n")
# writing rectangles on the right half of the torus
for j in range(0, n // 2):
for i in range(0, n - 1):
output_file.write("4 " + str(n_points_left + n * j + i) + " " +
str(n_points_left + n * j + i + 1) + " " +
str(n_points_left + n * j + i + n + 1) + " " +
str(n_points_left + n * j + i + n) + "\n")
output_file.write("4 " + str(n_points_left + n * j + n - 1) + " " +
str(n_points_left + n * j) + " " +
str(n_points_left + n * j + n) + " " +
str(n_points_left + n * j + 2 * n - 1) + "\n")
output_file.close()
return 0
|
KonradBreitsprecher/espresso
|
src/python/object_in_fluid/oif_utils.py
|
Python
|
gpl-3.0
| 21,450
|
[
"ParaView",
"VTK"
] |
5a5afd0f4569fa32966a310ab87e95682e6dc5969623a2ae1c6018b9fe183aa1
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from torch.utils.data import Dataset, DataLoader
import torch
import numpy as np
from unittest import TestCase
import pytest
import tempfile
from bigdl.chronos.autots.model.auto_tcn import AutoTCN
from bigdl.orca.automl import hp
input_feature_dim = 10
output_feature_dim = 2
past_seq_len = 5
future_seq_len = 1
def get_x_y(size):
x = np.random.randn(size, past_seq_len, input_feature_dim)
y = np.random.randn(size, future_seq_len, output_feature_dim)
return x.astype(np.float32), y.astype(np.float32)
class RandomDataset(Dataset):
def __init__(self, size=1000):
x, y = get_x_y(size)
self.x = torch.from_numpy(x).float()
self.y = torch.from_numpy(y).float()
def __len__(self):
return self.x.shape[0]
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def train_dataloader_creator(config):
return DataLoader(RandomDataset(size=1000),
batch_size=config["batch_size"],
shuffle=True)
def valid_dataloader_creator(config):
return DataLoader(RandomDataset(size=400),
batch_size=config["batch_size"],
shuffle=True)
def get_auto_estimator():
auto_tcn = AutoTCN(input_feature_num=input_feature_dim,
output_target_num=output_feature_dim,
past_seq_len=past_seq_len,
future_seq_len=future_seq_len,
optimizer='Adam',
loss=torch.nn.MSELoss(),
metric="mse",
hidden_units=8,
levels=hp.randint(1, 3),
kernel_size=hp.choice([2, 3]),
lr=hp.choice([0.001, 0.003, 0.01]),
dropout=hp.uniform(0.1, 0.2),
logs_dir="/tmp/auto_tcn",
cpus_per_trial=2,
name="auto_tcn")
return auto_tcn
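# The hp.* arguments above define the search space that is sampled once per
# trial; a resolved config could, for example, look like
# {"levels": 2, "kernel_size": 3, "lr": 0.003, "dropout": 0.15, "batch_size": 64}
# (values are illustrative only).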
class TestAutoTCN(TestCase):
def setUp(self) -> None:
from bigdl.orca import init_orca_context
init_orca_context(cores=8, init_ray_on_spark=True)
def tearDown(self) -> None:
from bigdl.orca import stop_orca_context
stop_orca_context()
def test_fit_np(self):
auto_tcn = get_auto_estimator()
auto_tcn.fit(data=get_x_y(size=1000),
epochs=1,
batch_size=hp.choice([32, 64]),
validation_data=get_x_y(size=400),
n_sampling=1,
)
assert auto_tcn.get_best_model()
best_config = auto_tcn.get_best_config()
assert 0.1 <= best_config['dropout'] <= 0.2
assert best_config['batch_size'] in (32, 64)
assert 1 <= best_config['levels'] < 3
def test_fit_loader(self):
auto_tcn = get_auto_estimator()
auto_tcn.fit(data=train_dataloader_creator(config={"batch_size": 64}),
epochs=1,
validation_data=valid_dataloader_creator(config={"batch_size": 64}),
n_sampling=1,
)
assert auto_tcn.get_best_model()
best_config = auto_tcn.get_best_config()
assert 0.1 <= best_config['dropout'] <= 0.2
assert 1 <= best_config['levels'] < 3
def test_fit_data_creator(self):
auto_tcn = get_auto_estimator()
auto_tcn.fit(data=train_dataloader_creator,
epochs=1,
batch_size=hp.choice([32, 64]),
validation_data=valid_dataloader_creator,
n_sampling=1,
)
assert auto_tcn.get_best_model()
best_config = auto_tcn.get_best_config()
assert 0.1 <= best_config['dropout'] <= 0.2
assert best_config['batch_size'] in (32, 64)
assert 1 <= best_config['levels'] < 3
def test_num_channels(self):
auto_tcn = AutoTCN(input_feature_num=input_feature_dim,
output_target_num=output_feature_dim,
past_seq_len=past_seq_len,
future_seq_len=future_seq_len,
optimizer='Adam',
loss=torch.nn.MSELoss(),
metric="mse",
hidden_units=4,
levels=hp.randint(1, 3),
num_channels=[8] * 2,
kernel_size=hp.choice([2, 3]),
lr=hp.choice([0.001, 0.003, 0.01]),
dropout=hp.uniform(0.1, 0.2),
logs_dir="/tmp/auto_tcn",
cpus_per_trial=2,
name="auto_tcn")
auto_tcn.fit(data=train_dataloader_creator,
epochs=1,
batch_size=hp.choice([32, 64]),
validation_data=valid_dataloader_creator,
n_sampling=1,
)
assert auto_tcn.get_best_model()
best_config = auto_tcn.get_best_config()
assert best_config['num_channels'] == [8]*2
def test_predict_evaluation(self):
auto_tcn = get_auto_estimator()
auto_tcn.fit(data=train_dataloader_creator(config={"batch_size": 64}),
epochs=1,
validation_data=valid_dataloader_creator(config={"batch_size": 64}),
n_sampling=1)
test_data_x, test_data_y = get_x_y(size=100)
auto_tcn.predict(test_data_x)
auto_tcn.evaluate((test_data_x, test_data_y))
def test_onnx_methods(self):
auto_tcn = get_auto_estimator()
auto_tcn.fit(data=train_dataloader_creator(config={"batch_size": 64}),
epochs=1,
validation_data=valid_dataloader_creator(config={"batch_size": 64}),
n_sampling=1)
test_data_x, test_data_y = get_x_y(size=100)
pred = auto_tcn.predict(test_data_x)
eval_res = auto_tcn.evaluate((test_data_x, test_data_y))
try:
import onnx
import onnxruntime
pred_onnx = auto_tcn.predict_with_onnx(test_data_x)
eval_res_onnx = auto_tcn.evaluate_with_onnx((test_data_x, test_data_y))
np.testing.assert_almost_equal(pred, pred_onnx, decimal=5)
np.testing.assert_almost_equal(eval_res, eval_res_onnx, decimal=5)
except ImportError:
pass
def test_save_load(self):
auto_tcn = get_auto_estimator()
auto_tcn.fit(data=train_dataloader_creator(config={"batch_size": 64}),
epochs=1,
validation_data=valid_dataloader_creator(config={"batch_size": 64}),
n_sampling=1)
with tempfile.TemporaryDirectory() as tmp_dir_name:
auto_tcn.save(tmp_dir_name)
auto_tcn.load(tmp_dir_name)
test_data_x, test_data_y = get_x_y(size=100)
pred = auto_tcn.predict(test_data_x)
eval_res = auto_tcn.evaluate((test_data_x, test_data_y))
try:
import onnx
import onnxruntime
pred_onnx = auto_tcn.predict_with_onnx(test_data_x)
eval_res_onnx = auto_tcn.evaluate_with_onnx((test_data_x, test_data_y))
np.testing.assert_almost_equal(pred, pred_onnx, decimal=5)
np.testing.assert_almost_equal(eval_res, eval_res_onnx, decimal=5)
except ImportError:
pass
if __name__ == "__main__":
pytest.main([__file__])
|
intel-analytics/BigDL
|
python/chronos/test/bigdl/chronos/autots/model/test_auto_tcn.py
|
Python
|
apache-2.0
| 8,177
|
[
"ORCA"
] |
2d87f61ba6b3b474537a810ecd151c7d383088c01599d09d578041caa3e48067
|
import pytest
from gavl import parse
from gavl.parser import nodes
from gavl.parser.nodes import (VarNode, BinaryOpNode, RelationNode, ApplyNode,
AssignNode, IntNode, BarOpNode, BoolLiteral,
BoolExprNode)
from gavl.parser.visitors import GavlToRelAlg
from gavl import relalg
from gavl.constants import OpCodes, JoinTypes, JoinSides
def test_simple_filter():
test = "foo | True"
expected = BarOpNode(VarNode('foo', None), BoolLiteral(True))
assert parse(test) == expected
def test_equal_filter():
test = "foo | True == True"
expected = BarOpNode(
VarNode('foo', None),
BoolExprNode(OpCodes.EQ,
BoolLiteral(True),
BoolLiteral(True)
)
)
assert parse(test) == expected
def test_var_filter():
test = "foo | foo.bar < baz"
expected = BarOpNode(
VarNode('foo', None),
BoolExprNode(OpCodes.LT,
VarNode('bar', RelationNode('foo')),
VarNode('baz', None)
)
)
assert parse(test) == expected
def test_multiple_filter():
test = "foo | foo.bar < baz | False"
expected = BarOpNode(
BarOpNode(
VarNode('foo', None),
BoolExprNode(OpCodes.LT,
VarNode('bar', RelationNode('foo')),
VarNode('baz', None)
)
),
BoolLiteral(False),
)
assert parse(test) == expected
def test_simple_relalg_filter():
test = BarOpNode(VarNode('foo', None), BoolLiteral(True))
expected = relalg.SelectNode(relalg.RelationNode('foo'),
relalg.BoolConstantNode(True))
assert GavlToRelAlg().visit(test) == expected
def test_expr_relalg_filter():
test = BarOpNode(
VarNode('foo', None),
BoolExprNode(OpCodes.LT,
VarNode('bar', RelationNode('foo')),
VarNode('baz', None)
)
)
expected = relalg.SelectNode(
relalg.RelationNode('foo'),
relalg.BoolOpNode(
OpCodes.LT,
relalg.ProjectNode(
relalg.RelationNode('foo'),
['bar']
),
relalg.RelationNode('baz')
)
)
assert GavlToRelAlg().visit(test) == expected
def test_pushdown_relalg_filter():
test = BarOpNode(
BinaryOpNode(
OpCodes.MULT,
VarNode('foo', None),
VarNode('buzz', None),
),
BoolExprNode(OpCodes.LT,
VarNode('bar', RelationNode('foo')),
BoolLiteral(True)
)
)
expected = relalg.JoinNode(
relalg.SelectNode(
relalg.RelationNode('foo'),
relalg.BoolOpNode(
OpCodes.LT,
relalg.ProjectNode(
relalg.RelationNode('foo'),
['bar']
),
relalg.BoolConstantNode(True)
)
),
relalg.RelationNode('buzz'),
JoinTypes.INNER,
JoinSides.FULL,
)
assert GavlToRelAlg().visit(test) == expected
@pytest.mark.skip(reason="Not Yet Implemented")
def test_and_pushdown_relalg_filter():
test = BarOpNode(
BinaryOpNode(
OpCodes.ADD,
VarNode('foo', None),
VarNode('buzz', None),
),
BoolExprNode(
OpCodes.AND,
BoolExprNode(
OpCodes.EQ,
VarNode('bar', RelationNode('foo')),
BoolLiteral(True)),
BoolExprNode(
OpCodes.EQ,
VarNode('baz', RelationNode('buzz')),
BoolLiteral(True))
)
)
expected = relalg.JoinNode(
relalg.SelectNode(
relalg.RelationNode('foo'),
relalg.BoolOpNode(
OpCodes.EQ,
relalg.ProjectNode(
relalg.RelationNode('foo'),
['bar']
),
relalg.BoolConstantNode(True)
)
),
relalg.SelectNode(
relalg.RelationNode('buzz'),
relalg.BoolOpNode(
OpCodes.EQ,
relalg.ProjectNode(
relalg.RelationNode('buzz'),
['baz']
),
relalg.BoolConstantNode(True)
)
),
JoinTypes.INNER,
JoinSides.FULL,
)
assert GavlToRelAlg().visit(test) == expected
|
enderlabs/gavl
|
gavl/parser/tests/test_filter.py
|
Python
|
apache-2.0
| 4,642
|
[
"VisIt"
] |
42ca1ad1f857b9eeb5233e0469755046f1291008a20b2a0af45abe2c2242c038
|
# Copyright 2002 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from Bio.config.DBRegistry import CGIDB, DBGroup
from _support import *
from Martel import *
not_header_expr = AssertNot(Str("HEADER"))
pdb_rcsb_cgi = CGIDB(
name="pdb-rcsb-cgi",
cgi="http://www.rcsb.org/pdb/cgi/export.cgi",
url="XXX PLEASE FILL THIS IN XXX",
delay=5.0,
params=[("format", "PDB"),
("compression", "None")
],
key="pdbId",
# failure cases for file not found are making retrieval freeze up
# while Martel checks for them, for some reason I can't figure
# so we go with checking to make sure results look like PDB
# failure_cases=[(has_str("File not found"), "ID does not exist")],
failure_cases=[(not_header_expr, "results do not look like PDB format")]
)
pdb_ebi_cgi = CGIDB(
name="pdb-ebi-cgi",
cgi="http://www.ebi.ac.uk/cgi-bin/dbfetch",
url="http://www.ebi.ac.uk/cgi-bin/dbfetch",
delay=5.0,
params=[("db", "PDB"),
("format", "default"), # also Fasta, bsml, agave available
("style", "raw"),
],
key="id",
failure_cases=[(not_header_expr, "results do not look like PDB format")]
)
pdb = DBGroup(
name="pdb",
behavior="serial"
)
pdb.add(pdb_rcsb_cgi)
pdb.add(pdb_ebi_cgi)
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/dbdefs/pdb.py
|
Python
|
apache-2.0
| 1,473
|
[
"Biopython"
] |
a8fcb10a01487eb14a5f6ea7f9f12d72bfe81399b34a111e835a57f7d6ac072a
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from scipy.fftpack import fft
fs = 100 # Sample frequency (Hz)
L = 10 # 10 seconds
N = fs*L # Number of samples
t = np.linspace(0, L, num=N, endpoint=False)
noise = np.random.normal(scale=0.5, size=t.shape[0])
sig1 = 1.3*np.sin(2*np.pi*15*t) # 15 Hz
sig2 = 1.7*np.sin(2*np.pi*40*(t-2)) # 40 Hz, phase shifted
y = sig1 + sig2 + noise
plt.subplot(311)
plt.title("15 Hz + 40 Hz")
plt.xlabel("Time (seconds)")
plt.plot(t, sig1+sig2)
plt.subplot(312)
plt.title("15 Hz + 40 Hz + Gaussian noise")
plt.xlabel("Time (seconds)")
plt.plot(t,y)
plt.subplot(313)
yf = fft(y)
plt.title("FFT, 15 Hz + 40 Hz + Gaussian noise")
xf = np.linspace(0.0, fs/2, num=N//2)
plt.plot(xf, np.abs(yf[0:N//2]))
plt.xlabel("Frequency (Hz)")
plt.ylabel("Magnitude (absolute value)")
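# Note: the plotted magnitudes are the unscaled single-sided spectrum; scaling
# them by 2.0/N would recover the sinusoid amplitudes (roughly 1.3 and 1.7).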
plt.tight_layout()
plt.show()
|
sbobovyc/LabNotes
|
DSP/gaussian_noise.py
|
Python
|
gpl-3.0
| 863
|
[
"Gaussian"
] |
c93187daebdac0103d882dbfa2485f7b5255d5f09d44140239aaca5423179ba2
|
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "f 10 0.033, s, f 20 0.033, s, f 30 0.033, s, f 30 0.033, s, q"
tags = "particles, Explosion"
import pyglet
import summa
from summa.director import director
from summa.actions import *
from summa.layer import *
from summa.particle_systems import *
class L(Layer):
def __init__(self):
super( L, self).__init__()
# p = Fireworks()
p = Explosion()
# p = Fire()
# p = Flower()
# p = Sun()
# p = Spiral()
# p = Meteor()
# p = Galaxy()
p.auto_remove_on_finish = True
p.position = (320,240)
self.add( p )
def main():
director.init( resizable=True )
main_scene = summa.scene.Scene()
main_scene.add( L() )
director.run( main_scene )
if __name__ == '__main__':
main()
|
shackra/thomas-aquinas
|
tests/test_particle_explosion.py
|
Python
|
bsd-3-clause
| 954
|
[
"Galaxy"
] |
7f386bcb6a2c6a97855d5fcc8c9f68fb3acd8e996abb562ee4aa19e1e423ca59
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Python -*-
"""
DoCoMoUnderstandingSrv.py
Service Server for DoCoMo SentenceUnderstanding API
The project is hosted on GitHub where you can fork the project or report
issues. Visit https://github.com/roboworks/
:copyright: (c) 2015 by Hiroyuki Okada, All rights reserved.
:license: MIT License (MIT), http://www.opensource.org/licenses/MIT
"""
__author__ = 'Hiroyuki Okada'
__version__ = '0.1'
import sys
import time
sys.path.append(".")
import urllib2
import urllib
import json
import rospy
from std_msgs.msg import String
from trcp_chat.msg import DoCoMoUnderstandingReq
from trcp_chat.msg import DoCoMoUnderstandingRes
from trcp_chat.msg import DoCoMoUnderstandingSlotStatus
from trcp_chat.msg import DoCoMoUnderstandingEtractedWords
from okada.srv import DoCoMoUnderstanding
from okada.srv import DoCoMoUnderstandingResponse
json_data={
"projectKey": "OSU",
"appInfo": {
"appName": "hoge_app",
"appKey": "hoge_app01"
},
"clientVer": "1.0.0",
"dialogMode": "off",
"language": "ja",
"userId": "12 123456 123456 0",
"location": {
"lat": "139.766084",
"lon": "35.681382"
},
"userUtterance": {
"utteranceText": "",
}
}
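# Note: this module-level dict is used as a request template and is mutated in
# place by the service handler below before each API call.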
class DoCoMoSentenceUnderstandingSrv(object):
""" DoCoMoSentenceUnderstandingSrv class """
def __init__(self):
""" Initializer """
def run(self):
""" run ros node """
# initialize ros node
rospy.init_node('DocomoSentenceUnderstandingSrv')
rospy.loginfo("start DoCoMoSentenceUnderstandingSrv node")
service_server = rospy.Service('docomo_sentenceunderstanding',DoCoMoUnderstanding,self.SentenceUnderstanding_handler)
rospy.loginfo("start DoCoMoSentenceUnderstanding service server")
self.APIKEY = rospy.get_param("~APIKEY", "xxxx")
self.url = rospy.get_param("~sentence_url","https://api.apigw.smt.docomo.ne.jp/sentenceUnderstanding/v1/task?" )
rospy.spin()
def SentenceUnderstanding_handler(self, query):
""" query sentence understanding """
""" DoCoMoSentenceUnderstandingReq.msg """
rospy.loginfo("DoCoMoSentenceUnderstandingSrv Query:%s", query)
req = query.request
if req.utteranceText == '':
return DoCoMoUnderstandingResponse(success=False)
if not req.projectKey:
json_data['projectKey'] = "OSU"
else:
json_data['projectKey'] = req.projectKey
if not req.appName:
json_data['appName'] = ""
else:
json_data['appName'] = req.appName
if not req.appKey:
json_data['appKey'] = "hoge_app01"
else:
json_data['appKey'] = req.appKey
json_data['clientVer'] = "1.0.0"
json_data['dialogMode'] = "off"
if not req.language:
json_data['language']="ja"
else:
json_data['language']=req.language
if not req.userId:
json_data['userId']="12 123456 123456 0"
else:
json_data['userId']=req.userId
if not req.lat:
json_data['lat']="139.766084"
else:
json_data['lat']=req.lat
if not req.lon:
json_data['lon']="35.681382"
else:
json_data['lon']=req.lon
(json_data['userUtterance'])['utteranceText'] = req.utteranceText
# Request body
body={}
body['APIKEY'] = self.APIKEY
url_value = urllib.urlencode(body)
req = urllib2.Request(self.url+url_value)
req.add_header('Content-Type', 'application/json')
try:
response = urllib2.urlopen(req,json.dumps(json_data))
except Exception as e:
print e
return DoCoMoUnderstandingResponse(success=False)
the_page=json.load(response)
# Response body
""" """
res=DoCoMoUnderstandingRes()
res.projectKey = the_page['projectKey']
res.appName = (the_page['appInfo'])['appName']
res.appKey = (the_page['appInfo'])['appKey']
res.clientVer = the_page['clientVer']
res.dialogMode = the_page['dialogMode']
res.language = the_page['language']
res.userId = the_page['userId']
res.utteranceText = (the_page['userUtterance'])['utteranceText']
res.utteranceRevised = (the_page['userUtterance'])['utteranceRevised']
for wd in (the_page['userUtterance'])['utteranceWord']:
res.utteranceWord.append(wd)
for tsk in the_page['taskIdList']:
res.taskIdList.append(tsk)
res.commandId = ((the_page['dialogStatus'])['command'])['commandId']
res.commandName = ((the_page['dialogStatus'])['command'])['commandName']
res.serverSendTime = the_page['serverSendTime']
""" """
rospy.loginfo("DoCoMoSentenceUnderstanding:%s",res.utteranceText)
rospy.loginfo("DoCoMoSentenceUnderstanding:%s",res.commandName)
if res.commandId == "BC00101":
"""雑談"""
setExtractedWords(self,the_page,res)
elif res.commandId == "BK00101":
"""知識検索"""
setExtractedWords(self,the_page,res)
elif res.commandId == "BT00101":
"""乗換案内"""
#stationTo, stationFrom
setSlotStatus(self,the_page,res)
setExtractedWords(self,the_page,res)
elif res.commandId == "BT00201":
"""地図"""
#searchArea,hereArround,facilityName
setSlotStatus(self,the_page,res)
setExtractedWords(self,the_page,res)
elif res.commandId == "BT00301":
"""天気"""
#searchArea,hereArround,daten
setSlotStatus(self,the_page,res)
setExtractedWords(self,the_page,res)
elif res.commandId == "BT00401":
"""グルメ検索"""
#gourmetGenre,searchArea,hereArround
setSlotStatus(self,the_page,res)
setExtractedWords(self,the_page,res)
elif res.commandId == "BT00501":
"""ブラウザ"""
#browser,website
setSlotStatus(self,the_page,res)
setExtractedWords(self,the_page,res)
elif res.commandId == "BT00601":
"""観光案内"""
#searchArea,hereArround,sightseeing
setSlotStatus(self,the_page,res)
setExtractedWords(self,the_page,res)
elif res.commandId == "BT00701":
"""カメラ"""
#
setExtractedWords(self,the_page,res)
elif res.commandId == "BT00801":
"""ギャラリー"""
#
setExtractedWords(self,the_page,res)
elif res.commandId == "BT00901":
"""通話"""
#phoneTo
setContent(self, the_page, res)
setSlotStatus(self,the_page,res)
setExtractedWords(self,the_page,res)
elif res.commandId == "BT01001":
"""メール"""
#mailTo,mailBody
setContent(self,the_page, res)
setSlotStatus(self,the_page,res)
setExtractedWords(self,the_page,res)
elif res.commandId == "BT01101":
"""メモ登録"""
#memoBody
setSlotStatus(self,the_page,res)
setExtractedWords(self,the_page,res)
elif res.commandId == "BT01102":
"""メモ参照"""
#memoBody
setSlotStatus(self,the_page,res)
setExtractedWords(self,the_page,res)
elif res.commandId == "BT01201":
"""アラーム"""
#time
setSlotStatus(self,the_page,res)
setExtractedWords(self,the_page,res)
elif res.commandId == "BT01301":
"""スケジュール登録"""
#date,time,scheduleBody
setSlotStatus(self,the_page,res)
setExtractedWords(self,the_page,res)
elif res.commandId == "BT01302":
"""スケジュール参照"""
#date,time
setSlotStatus(self,the_page,res)
setExtractedWords(self,the_page,res)
elif res.commandId == "BT01501":
"""端末設定"""
#setting
elif res.commandId == "BT01601":
"""SNS投稿"""
#snsSource,snsBody
elif res.commandId == "BT90101":
"""キャンセル"""
#
elif res.commandId == "BM00101":
"""地図乗換"""
#searchArea
setSlotStatus(self,the_page,res)
setExtractedWords(self,the_page,res)
elif res.commandId == "BM00201":
"""通話メール"""
#phoneTo
setContent(self, the_page, res)
setSlotStatus(self,the_page,res)
setExtractedWords(self,the_page,res)
elif res.commandId == "EBC00101":
""" Chatting """
pass
elif res.commandId == "EBT02501":
""" Information """
pass
elif res.commandId == "EBT01101":
"""Weather"""
setSlotStatus(self,the_page,res)
elif res.commandId == "EBT01401":
""" Travel"""
setSlotStatus(self,the_page,res)
elif res.commandId == "EBT00101":
"""Transportation"""
setSlotStatus(self,the_page,res)
elif res.commandId == "EBT00601":
"""News"""
setSlotStatus(self,the_page,res)
elif res.commandId == "EBT00301":
"""Call"""
setSlotStatus(self,the_page,res)
elif res.commandId == "EBT01201":
"""Restaurant"""
setSlotStatus(self,the_page,res)
"""error"""
elif res.commandId == "SE00101":
"""判定不能"""
pass
elif res.commandId == "SE00201":
"""サーバエラー1"""
return DoCoMoUnderstandingResponse(success=False, response=res)
elif res.commandId == "SE00202":
"""サーバエラー2"""
return DoCoMoUnderstandingResponse(success=False, response=res)
elif res.commandId == "SE00301":
"""ライブラリエラー"""
return DoCoMoUnderstandingResponse(success=False, response=res)
else:
res.commandId = "SE00101"
"""判定不能"""
"""Undeterminable"""
return DoCoMoUnderstandingResponse(success=True, response=res)
def setContent(self, the_page, res):
res.contentSource=(the_page['content'])['contentSource']
res.contentType=(the_page['content'])['contentType']
res.contentValue=(the_page['content'])['contentValue']
return True
def setSlotStatus(self,the_page,res):
self.slotStatus = (the_page['dialogStatus'])['slotStatus']
for slot in self.slotStatus:
st = DoCoMoUnderstandingSlotStatus()
st.slotName = slot['slotName']
st.slotValue = slot['slotValue']
try:
st.ValueType = slot['valueType']
except:
pass
res.slotStatus.append(st)
return True
def setExtractedWords(self,the_page,res):
self.extractedWords = the_page['extractedWords']
for words in self.extractedWords:
wd = DoCoMoUnderstandingEtractedWords()
wd.wordsValue = words['wordsValue']
for wt in words['wordsType']:
wd.wordsType.append(wt)
res.extractedWords.append(wd)
return True
if __name__ == '__main__':
try:
node = DoCoMoSentenceUnderstandingSrv()
node.run()
except rospy.ROSInterruptException:
pass
|
okadahiroyuki/trcp
|
trcp_chat/nodes/DoCoMoUnderstandingSrv.py
|
Python
|
mit
| 12,054
|
[
"VisIt"
] |
77c5797f2b0b3067082499e8c165ec31bef5f1e72622e02c28d239f730952c80
|
"""
The B{0install remove-feed} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
syntax = "[INTERFACE] FEED"
from zeroinstall import SafeException, _
from zeroinstall.injector import model, writer
from zeroinstall.cmd import add_feed, UsageError
add_options = add_feed.add_options
def handle(config, options, args):
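# Two invocation forms: "INTERFACE FEED" removes FEED from INTERFACE directly;
# a single FEED argument is delegated to the add_feed handler in remove mode.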
if len(args) == 2:
iface = config.iface_cache.get_interface(model.canonical_iface_uri(args[0]))
feed_url = model.canonical_iface_uri(args[1])
feed_import = add_feed.find_feed_import(iface, feed_url)
if not feed_import:
raise SafeException(_('Interface %(interface)s has no feed %(feed)s') %
{'interface': iface.uri, 'feed': feed_url})
iface.extra_feeds.remove(feed_import)
writer.save_interface(iface)
elif len(args) == 1:
add_feed.handle(config, options, args, add_ok = False, remove_ok = True)
else:
raise UsageError()
|
dabrahams/zeroinstall
|
zeroinstall/cmd/remove_feed.py
|
Python
|
lgpl-2.1
| 952
|
[
"VisIt"
] |
bdef460f84970c220c0174152892693d804125fa12bb7f74da953462986ad496
|
#!/usr/bin/env python
"""
Convert from vasprun.xml in DIR direcotry
to erg.ref, frc.ref, and pos files.
Note that reading a lot of vasprun.xml takes a lot of time.
Usage:
vasprun2fp.py [options] DIR [DIR...]
Options:
-h,--help Show this message and exit.
--specorder=SPECORDER
Specify the order of species needed to convert POSCAR to pos. [default: None]
--index=INDEX
Convert a snapshot of INDEX. Comma separated indices can be specified.
If three integers are separated by colons like 0:1000:10, a slice with
initial (0), final (1000) and step (10) indices is specified. [default: -1]
--sequence
Extract all the sequence of MD or relaxation stored in vasprun.xml.
If the index is specified as a list of indices, this option will be omitted.
--keep-constraints
Keep constraints originally set to the system.
Otherwise all the constraints are removed. [default: False]
"""
from __future__ import print_function
import os
from ase.io import read,write
from docopt import docopt
__author__ = "Ryo KOBAYASHI"
__version__ = "190522"
_kb2gpa = 160.2176487
def get_tag(symbol,atom_id,specorder):
sid= specorder.index(symbol)+1
tag= float(sid) +0.1 +atom_id*1e-14
return '{0:17.14f}'.format(tag)
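# Example: with specorder = ['Si', 'O'], get_tag('O', 12, specorder) encodes
# species id 2, the 0.1 marker and the atom id in the last digits,
# i.e. 2.1 + 12e-14 formatted with 14 decimal places.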
def write_pos(atoms,fname="pos",specorder=None):
if not specorder:
raise ValueError('Specorder must be specified explicitly.')
cell= atoms.cell
pos= atoms.get_scaled_positions()
with open(fname,'w') as f:
f.write('!\n')
f.write('! specorder: ')
for s in specorder:
f.write(' {0:<3s}'.format(s))
f.write('\n')
f.write('!\n')
f.write(' 1.000 \n')
f.write(' {0:22.14e} {1:22.14e} {2:22.14e}\n'.format(cell[0,0],cell[0,1],cell[0,2]))
f.write(' {0:22.14e} {1:22.14e} {2:22.14e}\n'.format(cell[1,0],cell[1,1],cell[1,2]))
f.write(' {0:22.14e} {1:22.14e} {2:22.14e}\n'.format(cell[2,0],cell[2,1],cell[2,2]))
f.write(' 0.00000000 0.00000000 0.00000000\n')
f.write(' 0.00000000 0.00000000 0.00000000\n')
f.write(' 0.00000000 0.00000000 0.00000000\n')
f.write(' {0:10d}\n'.format(len(atoms)))
for i in range(len(atoms)):
atom= atoms[i]
f.write(' {0:s}'.format(get_tag(atom.symbol,i+1,specorder)))
f.write(' {0:12.8f} {1:12.8f} {2:12.8f}'.format(pos[i,0],pos[i,1],pos[i,2]))
f.write(' 0.0 0.0 0.0 ')
f.write(' 0.0 0.0 '
+' 0.0 0.0 0.0 0.0 0.0 0.0\n')
def output_for_fitpot(atoms,keep_const,dirname='./',specorder=[]):
if not keep_const:
try:
del atoms.constraints
except:
print('del atoms.constraints for ',type(atoms),' failed.')
#write(dirname+'/POSCAR',images=atoms,format='vasp',direct=True,vasp5=True)
try:
epot = atoms.get_potential_energy()
except:
print(' Failed to get_potential_energy(), so skip it.')
return None
with open(dirname+'/erg.ref','w') as f:
f.write("{0:12.7f}\n".format(epot))
with open(dirname+'/frc.ref','w') as f:
f.write("{0:6d}\n".format(len(atoms)))
frcs= atoms.get_forces()
for frc in frcs:
f.write("{0:12.7f} {1:12.7f} {2:12.7f}\n".format(frc[0],frc[1],frc[2]))
write_pos(atoms,fname=dirname+'/pos',specorder=specorder)
if not os.path.exists(dirname+'/POSCAR'):
write(dirname+'/POSCAR',images=atoms,format='vasp',
direct=True,vasp5=True,sort=False)
try:
strs = atoms.get_stress()
except:
with open(dirname+'/WARNING','w') as f:
f.write(' Failed to get the stress tensor, so 0.0s are written to the strs.ref file.\n')
strs = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
with open(dirname+'/strs.ref','w') as f:
for s in strs:
f.write(" {0:15.7f}".format(s*_kb2gpa)) # converting from kBar to GPa
f.write('\n')
return None
def main():
args=docopt(__doc__)
dirs= args['DIR']
specorder= args['--specorder'].split(',')
sequence = args['--sequence']
keep_const = args['--keep-constraints']
if specorder == 'None':
raise ValueError('specorder must be specified.')
print(' specorder = ',specorder)
index= args['--index']
if ',' in index:
index = [ int(x) for x in index.split(',') ]
elif ':' in index:
index = [ int(x) for x in index.split(':') ]
index = slice(*index)
else:
index = int(index)
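# At this point index is a single int, a list of ints ("0,5,10") or a slice
# ("0:1000:10"); lists and slices extract multiple snapshots below.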
if type(index) is list:
print(' The following steps are to be extracted: ',end='')
for i in index:
print(i,end='')
print('')
ase_index = ':'
elif type(index) is slice:
print(' The sliced steps are to be extracted: ')
ase_index = index
elif sequence:
print(' All the sequence are to be extracted.')
ase_index = ':'
else:
ase_index = index
print(' index = ',ase_index)
if keep_const:
print(' Keep constraints originally set to the system.')
ndirs= len(dirs)
print(' Number of directories to be processed = ',ndirs)
cwd=os.getcwd()
for i,d in enumerate(dirs):
os.chdir(cwd)
print('{0:5d}/{1:d}: '.format(i+1,ndirs)+d)
os.chdir(d)
if not os.path.exists('vasprun.xml'):
print(' No vasprun.xml, so skip.')
continue
if os.path.exists('erg.ref') and \
os.stat('erg.ref').st_mtime > os.stat('vasprun.xml').st_mtime:
print(' Since there is a newer erg.ref, skip it.')
continue
try:
#...Since there is a bug in vasp, species "r" needs to be replaced by "Zr"
os.system("sed -i'' -e 's|<c>r </c>|<c>Zr</c>|g' vasprun.xml")
atoms= read('vasprun.xml',index=ase_index,format='vasp-xml')
except Exception as e:
print(' Failed to read vasprun.xml, so skip it.')
print(e)
continue
if type(index) is list:
print(' Extracting specified steps from ',len(atoms),' steps in total')
n = 0
for j,a in enumerate(atoms):
if j not in index:
continue
dirname = '{0:05d}/'.format(n)
print(' {0:s}'.format(dirname))
os.system('mkdir -p {0:s}'.format(dirname))
output_for_fitpot(a,keep_const,dirname=dirname,
specorder=specorder)
n += 1
elif sequence or type(index) is slice: # Whole MD sequence
print(' Extracting sequence of ',len(atoms),' steps')
for j,a in enumerate(atoms):
dirname = '{0:05d}/'.format(j)
print(' {0:s}'.format(dirname))
os.system('mkdir -p {0:s}'.format(dirname))
output_for_fitpot(a,keep_const,dirname=dirname,
specorder=specorder)
pass
else: # snapshot
dirname = './'
output_for_fitpot(atoms,keep_const,dirname=dirname,
specorder=specorder)
os.chdir(cwd)
return None
if __name__ == "__main__":
main()
|
ryokbys/nap
|
nappy/vasp/vasprun2fp.py
|
Python
|
mit
| 7,316
|
[
"ASE",
"VASP"
] |
f720430962a0b6745b3b5993c8ee33be8961a16da1d083e359bc9903297836f0
|
from numpy.random import RandomState
class GGPSampler(object):
r"""Sample from a Generalised Gaussian Process.
Outcome modelled via
.. math::
\mathbf y \sim \int \prod_i \mathrm{ExpFam}(y_i ~|~ g_i(z_i))
\mathcal N(\mathbf z ~|~ \mathbf m; \mathrm K) \mathrm d\mathbf z.
Parameters
----------
link : str
Likelihood name.
mean : function
Mean function.
cov : function
Covariance function.
Example
-------
.. doctest::
>>> from numpy.random import RandomState
>>>
>>> from glimix_core.example import offset_mean
>>> from glimix_core.example import linear_eye_cov
>>> from glimix_core.random import GGPSampler
>>> from glimix_core.lik import DeltaProdLik
>>>
>>> random = RandomState(1)
>>>
>>> mean = offset_mean()
>>> cov = linear_eye_cov()
>>>
>>> lik = DeltaProdLik()
>>>
>>> y = GGPSampler(lik, mean, cov).sample(random)
>>> print(y[:5]) # doctest: +FLOAT_CMP
[-2.42181498 0.50720447 -1.01053967 0.736624 1.64019063]
"""
def __init__(self, lik, mean, cov):
self._lik = lik
self._mean = mean
self._cov = cov
def sample(self, random_state=None):
r"""Sample from the specified distribution.
Parameters
----------
random_state : random_state
Set the initial random state.
Returns
-------
numpy.ndarray
Sample.
"""
from numpy_sugar import epsilon
from numpy_sugar.linalg import sum2diag
from numpy_sugar.random import multivariate_normal
if random_state is None:
random_state = RandomState()
m = self._mean.value()
K = self._cov.value().copy()
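# Add a small jitter to the diagonal of the covariance for numerical stability
# before drawing from the multivariate normal.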
sum2diag(K, +epsilon.small, out=K)
return self._lik.sample(multivariate_normal(m, K, random_state), random_state)
|
limix/glimix-core
|
glimix_core/random/_ggp.py
|
Python
|
mit
| 1,998
|
[
"Gaussian"
] |
aa44bfa9f6ace88fd1a665285806f59c4ace4a93033ed69400599522dcac193d
|
# -*- coding: utf-8 -*-
import time
from threading import Thread
import redis
import redisco
import unittest
from datetime import date
from redisco import models
from redisco.models.base import Mutex
from dateutil.tz import tzlocal
class Person(models.Model):
first_name = models.CharField()
last_name = models.CharField()
year_of_birth = models.IntegerField(indexed=True)
month_of_birth = models.Attribute(indexed=True)
def full_name(self):
return "%s %s" % (self.first_name, self.last_name,)
class Meta:
indices = ['full_name']
class RediscoTestCase(unittest.TestCase):
def setUp(self):
self.client = redisco.get_client()
self.client.flushall()
def tearDown(self):
self.client.flushdb()
class ModelTestCase(RediscoTestCase):
def test_key(self):
self.assertEqual('Person', Person._key)
def test_is_new(self):
p = Person(first_name="Darken", last_name="Rahl")
self.assertTrue(p.is_new())
def test_CharFields(self):
person = Person(first_name="Granny", last_name="Goose")
self.assertEqual("Granny", person.first_name)
self.assertEqual("Goose", person.last_name)
def test_save(self):
person1 = Person(first_name="Granny", last_name="Goose")
self.assertTrue(person1.save())
person2 = Person(first_name="Jejomar")
self.assertTrue(person2.save())
person3 = Person(first_name=2, last_name=3)
self.assertFalse(person3.save()) # Validation error
self.assertTrue(person3.save(validate_fields=False)) # Skips field validation
self.assertEqual('1', person1.id)
self.assertEqual('2', person2.id)
jejomar = Person.objects.get_by_id('2')
self.assertEqual(None, jejomar.last_name)
def test_unicode(self):
p = Person(first_name=u"Niña", last_name="Jose")
self.assert_(p.save())
g = Person.objects.create(first_name="Granny", last_name="Goose")
self.assert_(g)
p = Person.objects.filter(first_name=u"Niña").first()
self.assert_(p)
self.assert_(isinstance(p.full_name(), unicode))
self.assertEqual(u"Niña Jose", p.full_name())
def test_repr(self):
person1 = Person(first_name="Granny", last_name="Goose")
self.assertEqual("<Person {'first_name': 'Granny', 'last_name': 'Goose', 'year_of_birth': None, 'month_of_birth': None}>",
repr(person1))
self.assert_(person1.save())
self.assertEqual("<Person:1 {'first_name': 'Granny', 'last_name': 'Goose', 'year_of_birth': None, 'month_of_birth': None, 'id': '1'}>",
repr(person1))
def test_update(self):
person1 = Person(first_name="Granny", last_name="Goose")
person1.save()
p = Person.objects.get_by_id('1')
p.first_name = "Morgan"
p.last_name = None
assert p.save()
p = Person.objects.get_by_id(p.id)
self.assertEqual("Morgan", p.first_name)
self.assertEqual(None, p.last_name)
def test_default_CharField_val(self):
class User(models.Model):
views = models.IntegerField(default=199)
liked = models.BooleanField(default=True)
disliked = models.BooleanField(default=False)
u = User()
self.assertEqual(True, u.liked)
self.assertEqual(False, u.disliked)
self.assertEqual(199, u.views)
assert u.save()
u = User.objects.all()[0]
self.assertEqual(True, u.liked)
self.assertEqual(False, u.disliked)
self.assertEqual(199, u.views)
def test_callable_default_CharField_val(self):
class User(models.Model):
views = models.IntegerField(default=lambda: 199)
liked = models.BooleanField(default=lambda: True)
disliked = models.BooleanField(default=lambda: False)
u = User()
self.assertEqual(True, u.liked)
self.assertEqual(False, u.disliked)
self.assertEqual(199, u.views)
assert u.save()
u = User.objects.all()[0]
self.assertEqual(True, u.liked)
self.assertEqual(False, u.disliked)
self.assertEqual(199, u.views)
def test_getitem(self):
person1 = Person(first_name="Granny", last_name="Goose")
person1.save()
person2 = Person(first_name="Jejomar", last_name="Binay")
person2.save()
p1 = Person.objects.get_by_id(1)
p2 = Person.objects.get_by_id(2)
self.assertEqual('Jejomar', p2.first_name)
self.assertEqual('Binay', p2.last_name)
self.assertEqual('Granny', p1.first_name)
self.assertEqual('Goose', p1.last_name)
def test_manager_create(self):
Person.objects.create(first_name="Granny", last_name="Goose")
p1 = Person.objects.get_by_id(1)
self.assertEqual('Granny', p1.first_name)
self.assertEqual('Goose', p1.last_name)
def test_indices(self):
person = Person.objects.create(first_name="Granny", last_name="Goose")
db = person.db
key = person.key(att='_indices')
index = 'Person:first_name:%s' % "Granny"
self.assertTrue(index in db.smembers(key))
self.assertTrue("1" in db.smembers(index))
def test_delete(self):
Person.objects.create(first_name="Granny", last_name="Goose")
Person.objects.create(first_name="Clark", last_name="Kent")
Person.objects.create(first_name="Granny", last_name="Mommy")
Person.objects.create(first_name="Granny", last_name="Kent")
for person in Person.objects.all():
person.delete()
self.assertEqual(0, self.client.scard('Person:all'))
class Event(models.Model):
name = models.CharField(required=True)
created_on = models.DateField(required=True)
Event.objects.create(name="Event #1", created_on=date.today())
Event.objects.create(name="Event #2", created_on=date.today())
Event.objects.create(name="Event #3", created_on=date.today())
Event.objects.create(name="Event #4", created_on=date.today())
for event in Event.objects.all():
event.delete()
self.assertEqual(0, self.client.zcard("Event:created_on"))
def test_filter(self):
Person.objects.create(first_name="Granny", last_name="Goose")
Person.objects.create(first_name="Clark", last_name="Kent")
Person.objects.create(first_name="Granny", last_name="Mommy")
Person.objects.create(first_name="Granny", last_name="Kent")
persons = Person.objects.filter(first_name="Granny")
self.assertEqual(len(persons), 3)
self.assertEqual('1', persons[0].id)
self.assertEqual(3, len(persons))
persons = Person.objects.filter(first_name="Clark")
self.assertEqual(1, len(persons))
# by index
persons = Person.objects.filter(full_name="Granny Mommy")
self.assertEqual(1, len(persons))
self.assertEqual("Granny Mommy", persons[0].full_name())
def test_filter_different_db(self):
class DifferentPerson(models.Model):
first_name = models.CharField()
last_name = models.CharField()
def full_name(self):
return "%s %s" % (self.first_name, self.last_name,)
class Meta:
indices = ['full_name']
db = redis.Redis(db=8)
DifferentPerson.objects.create(first_name="Granny", last_name="Goose")
DifferentPerson.objects.create(first_name="Clark", last_name="Kent")
DifferentPerson.objects.create(first_name="Granny", last_name="Mommy")
DifferentPerson.objects.create(first_name="Granny", last_name="Kent")
persons = DifferentPerson.objects.filter(first_name="Granny")
self.assertEqual(len(persons), 3)
self.assertEqual('1', persons[0].id)
self.assertEqual(3, len(persons))
persons = DifferentPerson.objects.filter(first_name="Clark")
self.assertEqual(1, len(persons))
# by index
persons = DifferentPerson.objects.filter(full_name="Granny Mommy")
self.assertEqual(1, len(persons))
self.assertEqual("Granny Mommy", persons[0].full_name())
def test_exclude(self):
Person.objects.create(first_name="Granny", last_name="Goose")
Person.objects.create(first_name="Clark", last_name="Kent")
Person.objects.create(first_name="Granny", last_name="Mommy")
Person.objects.create(first_name="Granny", last_name="Kent")
persons = Person.objects.exclude(first_name="Granny")
self.assertEqual('2', persons[0].id)
self.assertEqual(1, len(persons))
persons = Person.objects.exclude(first_name="Clark")
self.assertEqual(3, len(persons))
# by index
persons = Person.objects.exclude(full_name="Granny Mommy")
self.assertEqual(3, len(persons))
self.assertEqual("Granny Goose", persons[0].full_name())
self.assertEqual("Clark Kent", persons[1].full_name())
self.assertEqual("Granny Kent", persons[2].full_name())
# mixed
Person.objects.create(first_name="Granny", last_name="Pacman")
persons = (Person.objects.filter(first_name="Granny")
.exclude(last_name="Mommy"))
self.assertEqual(3, len(persons))
def test_first(self):
Person.objects.create(first_name="Granny", last_name="Goose")
Person.objects.create(first_name="Clark", last_name="Kent")
Person.objects.create(first_name="Granny", last_name="Mommy")
Person.objects.create(first_name="Granny", last_name="Kent")
granny = Person.objects.filter(first_name="Granny").first()
self.assertEqual('1', granny.id)
lana = Person.objects.filter(first_name="Lana").first()
self.assertFalse(lana)
def test_iter(self):
Person.objects.create(first_name="Granny", last_name="Goose")
Person.objects.create(first_name="Clark", last_name="Kent")
Person.objects.create(first_name="Granny", last_name="Mommy")
Person.objects.create(first_name="Granny", last_name="Kent")
for person in Person.objects.all():
self.assertTrue(person.full_name() in
("Granny Goose", "Clark Kent", "Granny Mommy", "Granny Kent"))
def test_sort(self):
Person.objects.create(first_name="Zeddicus", last_name="Zorander")
Person.objects.create(first_name="Richard", last_name="Cypher")
Person.objects.create(first_name="Richard", last_name="Rahl")
Person.objects.create(first_name="Kahlan", last_name="Amnell")
res = Person.objects.order('first_name').all()
self.assertEqual("Kahlan", res[0].first_name)
self.assertEqual("Richard", res[1].first_name)
self.assertEqual("Richard", res[2].first_name)
self.assertEqual("Zeddicus Zorander", res[3].full_name())
res = Person.objects.order('-full_name').all()
self.assertEqual("Zeddicus Zorander", res[0].full_name())
self.assertEqual("Richard Rahl", res[1].full_name())
self.assertEqual("Richard Cypher", res[2].full_name())
self.assertEqual("Kahlan Amnell", res[3].full_name())
def test_all(self):
person1 = Person(first_name="Granny", last_name="Goose")
person1.save()
person2 = Person(first_name="Jejomar", last_name="Binay")
person2.save()
all = Person.objects.all()
self.assertEqual(list([person1, person2]), list(all))
def test_limit(self):
Person.objects.create(first_name="Zeddicus", last_name="Zorander")
Person.objects.create(first_name="Richard", last_name="Cypher")
Person.objects.create(first_name="Richard", last_name="Rahl")
Person.objects.create(first_name="Kahlan", last_name="Amnell")
res = Person.objects.order('first_name').all().limit(3)
self.assertEqual(3, len(res))
self.assertEqual("Kahlan", res[0].first_name)
self.assertEqual("Richard", res[1].first_name)
self.assertEqual("Richard", res[2].first_name)
res = Person.objects.order('first_name').limit(3, offset=1)
self.assertEqual(3, len(res))
self.assertEqual("Richard", res[0].first_name)
self.assertEqual("Richard", res[1].first_name)
self.assertEqual("Zeddicus", res[2].first_name)
def test_integer_field(self):
class Character(models.Model):
n = models.IntegerField()
m = models.CharField()
Character.objects.create(n=1998, m="A")
Character.objects.create(n=3100, m="b")
Character.objects.create(n=1, m="C")
chars = Character.objects.all()
self.assertEqual(3, len(chars))
self.assertEqual(1998, chars[0].n)
self.assertEqual("A", chars[0].m)
def test_sort_by_int(self):
class Exam(models.Model):
score = models.IntegerField()
total_score = models.IntegerField()
def percent(self):
return int((float(self.score) / self.total_score) * 100)
class Meta:
indices = ('percent',)
Exam.objects.create(score=9, total_score=100)
Exam.objects.create(score=99, total_score=100)
Exam.objects.create(score=75, total_score=100)
Exam.objects.create(score=33, total_score=100)
Exam.objects.create(score=95, total_score=100)
exams = Exam.objects.order('score')
self.assertEqual([9, 33, 75, 95, 99], [exam.score for exam in exams])
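# zfilter performs a range query on the numeric index of `score`; the bounds
# (10, 96) keep the three exams scoring 33, 75 and 95.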
filtered = Exam.objects.zfilter(score__in=(10, 96))
self.assertEqual(3, len(filtered))
def test_filter_date(self):
from datetime import datetime
class Post(models.Model):
name = models.CharField()
date = models.DateTimeField()
dates = (
datetime(2010, 1, 20, 1, 40, 0),
datetime(2010, 2, 20, 1, 40, 0),
datetime(2010, 1, 26, 1, 40, 0),
datetime(2009, 12, 21, 1, 40, 0),
datetime(2010, 1, 10, 1, 40, 0),
datetime(2010, 5, 20, 1, 40, 0),
)
i = 0
for date in dates:
Post.objects.create(name="Post#%d" % i, date=date)
i += 1
self.assertEqual([Post.objects.get_by_id(4)],
list(Post.objects.filter(date=
datetime(2009, 12, 21, 1, 40, 0))))
lt = (0, 2, 3, 4)
res = [Post.objects.get_by_id(l + 1) for l in lt]
self.assertEqual(set(res),
set(Post.objects.zfilter(
date__lt=datetime(2010, 1, 30))))
def test_validation(self):
class Person(models.Model):
name = models.CharField(required=True)
p = Person(name="Kokoy")
self.assertTrue(p.is_valid())
p = Person()
self.assertFalse(p.is_valid())
self.assertTrue(('name', 'required') in p.errors)
def test_errors(self):
class Person(models.Model):
name = models.CharField(required=True, unique=True)
p = Person.objects.create(name="Chuck")
self.assertFalse(p.errors)
p = Person(name="John")
self.assertFalse(p.errors)
p.name = "Chuck" # name should be unique
self.assertFalse(p.is_valid())
self.assertTrue(p.errors)
def test_custom_validation(self):
class Ninja(models.Model):
def validator(field_name, age):
if not age or age >= 10:
return ((field_name, 'must be below 10'),)
age = models.IntegerField(required=True, validator=validator)
nin1 = Ninja(age=9)
self.assertTrue(nin1.is_valid())
nin2 = Ninja(age=10)
self.assertFalse(nin2.is_valid())
self.assertTrue(('age', 'must be below 10') in nin2.errors)
def test_overriden_validation(self):
class Ninja(models.Model):
age = models.IntegerField(required=True)
def validate(self):
if self.age >= 10:
self._errors.append(('age', 'must be below 10'))
nin1 = Ninja(age=9)
self.assertTrue(nin1.is_valid())
nin2 = Ninja(age=10)
self.assertFalse(nin2.is_valid())
self.assertTrue(('age', 'must be below 10') in nin2.errors)
def test_falsy_value_type_validation(self):
class Person(models.Model):
age = models.IntegerField()
for val in ('', {}, []):
p = Person(age=val)
self.assertFalse(p.is_valid())
self.assertEqual([('age', 'bad type')], p.errors)
def test_load_object_from_key(self):
class Schedule(models.Model):
att = models.CharField()
class PaperType(models.Model):
att = models.CharField()
assert Schedule.objects.create(att="dinuguan")
assert Schedule.objects.create(att="chicharon")
assert Schedule.objects.create(att="Pizza")
assert Schedule.objects.create(att="Pasta")
assert Schedule.objects.create(att="Veggies")
assert PaperType.objects.create(att="glossy")
assert PaperType.objects.create(att="large")
assert PaperType.objects.create(att="huge")
assert PaperType.objects.create(att="A6")
assert PaperType.objects.create(att="A9")
o = models.from_key("Schedule:1")
assert o
self.assertEqual('1', o.id)
self.assertEqual(Schedule, type(o))
o = models.from_key("PaperType:1")
self.assertEqual('1', o.id)
self.assertEqual(PaperType, type(o))
o = models.from_key("Schedule:4")
self.assertEqual('4', o.id)
self.assertEqual(Schedule, type(o))
o = models.from_key("PaperType:5")
self.assertEqual('5', o.id)
self.assertEqual(PaperType, type(o))
o = models.from_key("PaperType:6")
self.assertTrue(o is None)
def boom():
models.from_key("some arbitrary key")
from redisco.models.exceptions import BadKeyError
self.assertRaises(BadKeyError, boom)
def test_uniqueness_validation(self):
class Student(models.Model):
student_id = models.CharField(unique=True)
student = Student.objects.create(student_id="042231")
self.assert_(student)
student = Student(student_id="042231")
self.assertFalse(student.is_valid())
self.assert_(('student_id', 'not unique') in student.errors)
student = Student()
self.assertTrue(student.is_valid())
def test_long_integers(self):
class Tweet(models.Model):
status_id = models.IntegerField()
t = Tweet(status_id=int(u'14782201061'))
self.assertTrue(t.is_valid())
t.save()
t = Tweet.objects.get_by_id(t.id)
self.assertEqual(14782201061, t.status_id)
def test_slicing(self):
Person.objects.create(first_name="Granny", last_name="Goose")
Person.objects.create(first_name="Clark", last_name="Kent")
Person.objects.create(first_name="Granny", last_name="Mommy")
Person.objects.create(first_name="Lois", last_name="Kent")
Person.objects.create(first_name="Jonathan", last_name="Kent")
Person.objects.create(first_name="Martha", last_name="Kent")
Person.objects.create(first_name="Lex", last_name="Luthor")
Person.objects.create(first_name="Lionel", last_name="Luthor")
# no slice
a = Person.objects.all()
self.assertEqual(8, len(a))
self.assertEqual(Person.objects.get_by_id('1'), a[0])
self.assertEqual("Lionel Luthor", a[7].full_name())
a = Person.objects.all()[3:]
self.assertEqual(5, len(a))
self.assertEqual(Person.objects.get_by_id('4'), a[0])
self.assertEqual("Lionel Luthor", a[4].full_name())
a = Person.objects.all()[:6]
self.assertEqual(6, len(a))
self.assertEqual(Person.objects.get_by_id('1'), a[0])
self.assertEqual("Martha Kent", a[5].full_name())
a = Person.objects.all()[2:6]
self.assertEqual(4, len(a))
self.assertEqual(Person.objects.get_by_id('3'), a[0])
self.assertEqual("Martha Kent", a[3].full_name())
def test_get_or_create(self):
Person.objects.create(first_name="Granny", last_name="Goose")
Person.objects.create(first_name="Clark", last_name="Kent")
Person.objects.create(first_name="Granny", last_name="Mommy")
Person.objects.create(first_name="Lois", last_name="Kent")
Person.objects.create(first_name="Jonathan", last_name="Kent")
Person.objects.create(first_name="Martha", last_name="Kent")
p = Person.objects.get_or_create(first_name="Lois",
last_name="Kent")
self.assertEqual('4', p.id)
p = Person.objects.get_or_create(first_name="Jonathan",
last_name="Weiss")
self.assertEqual('7', p.id)
def test_values(self):
Person.objects.create(first_name="Granny", last_name="Goose")
Person.objects.create(first_name="Clark", last_name="Kent")
Person.objects.create(first_name="Granny", last_name="Mommy")
Person.objects.create(first_name="Lois", last_name="Kent")
Person.objects.create(first_name="Jonathan", last_name="Kent")
Person.objects.create(first_name="Martha", last_name="Kent")
Person.objects.create(first_name="Lex", last_name="Luthor")
Person.objects.create(first_name="Lionel", last_name="Luthor")
persons_values = Person.objects.values().all()
persons = Person.objects.all()
for p, v in zip(persons, persons_values):
self.assertTrue(isinstance(p, Person))
self.assertTrue(isinstance(v, dict))
self.assertEqual(p.first_name, v["first_name"])
self.assertEqual(p.last_name, v["last_name"])
persons_values = Person.objects.values().all()[2:5]
persons = Person.objects.all()[2:5]
for p, v in zip(persons, persons_values):
self.assertTrue(isinstance(p, Person))
self.assertTrue(isinstance(v, dict))
self.assertEqual(p.first_name, v["first_name"])
self.assertEqual(p.last_name, v["last_name"])
p = Person.objects.get_by_id(1)
v = Person.objects.values().get_by_id(1)
self.assertTrue(isinstance(p, Person))
self.assertTrue(isinstance(v, dict))
self.assertEqual(p.first_name, v["first_name"])
self.assertEqual(p.last_name, v["last_name"])
p = Person.objects.all().first()
v = Person.objects.values().all().first()
self.assertTrue(isinstance(p, Person))
self.assertTrue(isinstance(v, dict))
self.assertEqual(p.first_name, v["first_name"])
self.assertEqual(p.last_name, v["last_name"])
p = Person.objects.all()[3]
v = Person.objects.values().all()[3]
self.assertTrue(isinstance(p, Person))
self.assertTrue(isinstance(v, dict))
self.assertEqual(p.first_name, v["first_name"])
self.assertEqual(p.last_name, v["last_name"])
def test_count(self):
self.assertEqual(Person.objects.count(), 0)
Person.objects.create(first_name="Granny", last_name="Goose")
Person.objects.create(first_name="Clark", last_name="Kent")
Person.objects.create(first_name="Granny", last_name="Mommy")
Person.objects.create(first_name="Lois", last_name="Kent")
Person.objects.create(first_name="Jonathan", last_name="Kent")
Person.objects.create(first_name="Martha", last_name="Kent")
Person.objects.create(first_name="Lex", last_name="Luthor")
Person.objects.create(first_name="Lionel", last_name="Luthor")
self.assertEqual(Person.objects.count(), 8)
def test_customizable_key(self):
class Person(models.Model):
name = models.CharField()
class Meta:
key = 'People'
p = Person(name="Clark Kent")
self.assert_(p.is_valid())
self.assert_(p.save())
self.assert_('1' in self.client.smembers('People:all'))
def test_indexed_values(self):
Person.objects.create(first_name="Granny", last_name="Goose", year_of_birth=1980)
Person.objects.create(first_name="Clark", last_name="Kent", year_of_birth=1980)
Person.objects.create(first_name="Granny", last_name="Mommy", year_of_birth=1979)
Person.objects.create(first_name="Lois", last_name="Kent", year_of_birth=1960)
Person.objects.create(first_name="Jonathan", last_name="Kent", year_of_birth=1944)
Person.objects.create(first_name="Martha", last_name="Kent", year_of_birth=1961)
Person.objects.create(first_name="Lex", last_name="Luthor", year_of_birth=1960)
Person.objects.create(first_name="Lionel", last_name="Luthor", year_of_birth=1980)
years = Person.objects.get_indexed_values("month_of_birth")
self.assertEqual(type(years), list)
self.assertEqual(len(years), 0)
years = Person.objects.get_indexed_values("year_of_birth")
self.assertEqual(type(years), list)
self.assertEqual(len(years), 5)
for year in [1980, 1979, 1960, 1944, 1961]:
self.assertTrue(year in years)
try:
Person.objects.get_indexed_values("first_name")
except Exception:
pass
else:
self.fail("get_indexed_values should reject attributes without a sorted-set index")
class Event(models.Model):
name = models.CharField(required=True)
date = models.DateField(required=True)
class DateFieldTestCase(RediscoTestCase):
def test_CharField(self):
event = Event(name="Legend of the Seeker Premiere",
date=date(2008, 11, 12))
self.assertEqual(date(2008, 11, 12), event.date)
def test_saved_CharField(self):
instance = Event.objects.create(name="Legend of the Seeker Premiere",
date=date(2008, 11, 12))
assert instance
event = Event.objects.get_by_id(instance.id)
assert event
self.assertEqual(date(2008, 11, 12), event.date)
def test_invalid_date(self):
event = Event(name="Event #1")
event.date = 1
self.assertFalse(event.is_valid())
self.assertTrue(('date', 'bad type') in event.errors)
def test_indexes(self):
d = date.today()
Event.objects.create(name="Event #1", date=d)
self.assertTrue('1' in self.client.smembers(Event._all_key))
# zfilter index
self.assertTrue(self.client.exists("Event:date"))
# other field indices
self.assertEqual(2, self.client.scard("Event:1:_indices"))
for index in self.client.smembers("Event:1:_indices"):
self.assertTrue(index.startswith("Event:date") or
index.startswith("Event:name"))
def test_auto_now(self):
class Report(models.Model):
title = models.CharField()
created_on = models.DateField(auto_now_add=True)
updated_on = models.DateField(auto_now=True)
r = Report(title="My Report")
assert r.save()
r = Report.objects.filter(title="My Report")[0]
self.assertTrue(isinstance(r.created_on, date))
self.assertTrue(isinstance(r.updated_on, date))
self.assertEqual(date.today(), r.created_on)
class CharFieldTestCase(RediscoTestCase):
def test_max_length(self):
class Person(models.Model):
name = models.CharField(max_length=20, required=True)
p = Person(name='The quick brown fox jumps over the lazy dog.')
self.assertFalse(p.is_valid())
self.assert_(('name', 'exceeds max length') in p.errors)
class Student(models.Model):
name = models.CharField(required=True)
average = models.FloatField(required=True)
class FloatFieldTestCase(RediscoTestCase):
def test_CharField(self):
s = Student(name="Richard Cypher", average=86.4)
self.assertEqual(86.4, s.average)
def test_saved_CharField(self):
s = Student.objects.create(name="Richard Cypher",
average=3.14159)
assert s
student = Student.objects.get_by_id(s.id)
assert student
self.assertEqual(3.14159, student.average)
def test_indexing(self):
Student.objects.create(name="Richard Cypher", average=3.14159)
Student.objects.create(name="Kahlan Amnell", average=92.45)
Student.objects.create(name="Zeddicus Zorander", average=99.99)
Student.objects.create(name="Cara", average=84.91)
good = Student.objects.zfilter(average__gt=50.0)
self.assertEqual(3, len(good))
self.assertTrue("Richard Cypher",
Student.objects.filter(average=3.14159)[0].name)
class Task(models.Model):
name = models.CharField()
done = models.BooleanField()
class BooleanFieldTestCase(RediscoTestCase):
def test_CharField(self):
t = Task(name="Cook dinner", done=False)
assert t.save()
self.assertFalse(t.done)
def test_saved_CharField(self):
t = Task(name="Cook dinner", done=False)
assert t.save()
t = Task.objects.all()[0]
self.assertFalse(t.done)
t.done = True
assert t.save()
t = Task.objects.all()[0]
self.assertTrue(t.done)
def test_indexing(self):
assert Task.objects.create(name="Study Lua", done=False)
assert Task.objects.create(name="Read News", done=True)
assert Task.objects.create(name="Buy Dinner", done=False)
assert Task.objects.create(name="Visit Sick Friend", done=False)
assert Task.objects.create(name="Play", done=True)
assert Task.objects.create(name="Sing a song", done=False)
assert Task.objects.create(name="Pass the Exam", done=True)
assert Task.objects.create(name="Dance", done=False)
assert Task.objects.create(name="Code", done=True)
done = Task.objects.filter(done=True)
unfin = Task.objects.filter(done=False)
self.assertEqual(4, len(done))
self.assertEqual(5, len(unfin))
class ListFieldTestCase(RediscoTestCase):
def test_basic(self):
class Cake(models.Model):
name = models.CharField()
ingredients = models.ListField(str)
sizes = models.ListField(int)
Cake.objects.create(name="StrCake",
ingredients=['strawberry', 'sugar', 'dough'],
sizes=[1, 2, 5])
Cake.objects.create(name="Normal Cake",
ingredients=['sugar', 'dough'],
sizes=[1, 3, 5])
Cake.objects.create(name="No Sugar Cake",
ingredients=['dough'],
sizes=[])
cake = Cake.objects.all()[0]
self.assertEqual(['strawberry', 'sugar', 'dough'],
cake.ingredients)
with_sugar = Cake.objects.filter(ingredients='sugar')
self.assertEqual(2, len(with_sugar))
self.assertEqual([1, 2, 5], with_sugar[0].sizes)
self.assertEqual([1, 3, 5], with_sugar[1].sizes)
size1 = Cake.objects.filter(sizes=str(2))
self.assertEqual(1, len(size1))
cake.sizes = None
cake.ingredients = None
assert cake.save()
cake = Cake.objects.get_by_id(cake.id)
self.assertEqual([], cake.sizes)
self.assertEqual([], cake.ingredients)
def test_list_of_reference_fields(self):
class Book(models.Model):
title = models.CharField(required=True)
date_published = models.DateField(required=True)
class Author(models.Model):
name = models.CharField(required=True)
books = models.ListField(Book)
book = Book.objects.create(title="University Physics With Modern Physics",
date_published=date(2007, 4, 2))
assert book
author1 = Author.objects.create(name="Hugh Young",
books=[book])
author2 = Author.objects.create(name="Roger Freedman",
books=[book])
assert author1
assert author2
author1 = Author.objects.get_by_id(1)
author2 = Author.objects.get_by_id(2)
self.assertTrue(book in author1.books)
self.assertTrue(book in author2.books)
book = Book.objects.create(title="University Physics With Modern Physics Paperback",
date_published=date(2007, 4, 2))
author1.books.append(book)
assert author1.save()
author1 = Author.objects.get_by_id(1)
self.assertEqual(2, len(author1.books))
def test_lazy_reference_field(self):
class User(models.Model):
name = models.CharField()
likes = models.ListField('Link')
def likes_link(self, link):
if self.likes is None:
self.likes = [link]
self.save()
else:
if link not in self.likes:
self.likes.append(link)
self.save()
class Link(models.Model):
url = models.CharField()
user = User.objects.create(name="Lion King")
assert Link.objects.create(url="http://google.com")
assert Link.objects.create(url="http://yahoo.com")
assert Link.objects.create(url="http://github.com")
assert Link.objects.create(url="http://bitbucket.org")
links = Link.objects.all().limit(3)
for link in links:
user.likes_link(link)
user = User.objects.get_by_id(1)
self.assertEqual("http://google.com", user.likes[0].url)
self.assertEqual("http://yahoo.com", user.likes[1].url)
self.assertEqual("http://github.com", user.likes[2].url)
self.assertEqual(3, len(user.likes))
class ReferenceFieldTestCase(RediscoTestCase):
def test_basic(self):
class Word(models.Model):
placeholder = models.CharField()
class Character(models.Model):
n = models.IntegerField()
m = models.CharField()
word = models.ReferenceField(Word)
Word.objects.create()
word = Word.objects.all()[0]
print "KEYS", self.client.keys("*")
Character.objects.create(n=32, m='a', word=word)
Character.objects.create(n=33, m='b', word=word)
Character.objects.create(n=34, m='c', word=word)
Character.objects.create(n=34, m='d')
for char in Character.objects.all():
if char.m != 'd':
self.assertEqual(word, char.word)
else:
self.assertEqual(None, char.word)
a, b, c, d = list(Character.objects.all())
self.assertTrue(a in word.character_set)
self.assertTrue(b in word.character_set)
self.assertTrue(c in word.character_set)
self.assertTrue(d not in word.character_set)
self.assertEqual(3, len(word.character_set))
def test_reference(self):
class Department(models.Model):
name = models.Attribute(required=True)
class Person(models.Model):
name = models.Attribute(required=True)
manager = models.ReferenceField('Person', related_name='underlings')
department = models.ReferenceField(Department)
d1 = Department.objects.create(name='Accounting')
d2 = Department.objects.create(name='Billing')
p1 = Person.objects.create(name='Joe', department=d1)
p2 = Person.objects.create(name='Jack', department=d2)
self.assertEqual(p1.department_id, p1.department.id)
self.assertEqual(p2.department_id, p2.department.id)
def test_lazy_reference_field(self):
class User(models.Model):
name = models.CharField()
address = models.ReferenceField('Address')
class Address(models.Model):
street_address = models.CharField()
city = models.CharField()
zipcode = models.CharField()
address = Address.objects.create(street_address="32/F Redisville",
city="NoSQL City", zipcode="1.3.18")
assert address
user = User.objects.create(name="Richard Cypher", address=address)
assert user
a = Address.objects.all()[0]
u = User.objects.all()[0]
self.assertTrue(u in a.user_set)
self.assertEqual("32/F Redisville", u.address.street_address)
self.assertEqual("NoSQL City", u.address.city)
self.assertEqual("1.3.18", u.address.zipcode)
class DateTimeFieldTestCase(RediscoTestCase):
def test_basic(self):
from datetime import datetime
n = datetime(2009, 12, 31).replace(tzinfo=tzlocal())
class Post(models.Model):
title = models.CharField()
date_posted = models.DateTimeField()
created_at = models.DateTimeField(auto_now_add=True)
post = Post(title="First!", date_posted=n)
assert post.save()
post = Post.objects.get_by_id(post.id)
self.assertEqual(n, post.date_posted)
assert post.created_at
class CounterFieldTestCase(RediscoTestCase):
def test_basic(self):
class Post(models.Model):
title = models.CharField()
body = models.CharField(indexed=False)
liked = models.Counter()
post = Post.objects.create(title="First!",
body="Lorem ipsum")
self.assert_(post)
post.incr('liked')
post.incr('liked', 2)
post = Post.objects.get_by_id(post.id)
self.assertEqual(3, post.liked)
post.decr('liked', 2)
post = Post.objects.get_by_id(post.id)
self.assertEqual(1, post.liked)
class MutexTestCase(RediscoTestCase):
def setUp(self):
super(MutexTestCase, self).setUp()
self.p1 = Person.objects.create(first_name="Dick")
self.p2 = Person.objects.get_by_id(self.p1.id)
def test_no_block(self):
with Mutex(self.p1):
self.assertTrue(1)
def test_double_acquire(self):
x = Mutex(self.p1)
y = Mutex(self.p1)
self.assertEquals(x._lock_key, y._lock_key)
self.assertEquals(x._lock_mutex, y._lock_mutex)
def test_instance_should_not_modify_locked(self):
time1, time2 = {}, {}
def f1(person, t):
with Mutex(person):
time.sleep(0.4)
t['time'] = time.time()
def f2(person, t):
with Mutex(person):
t['time'] = time.time()
t1 = Thread(target=f1, args=(self.p1, time1,))
t2 = Thread(target=f2, args=(self.p2, time2,))
t1.start()
time.sleep(0.1)
t2.start()
t1.join()
t2.join()
self.assert_(time2['time'] > time1['time'])
def test_lock_expired(self):
Mutex(self.p1).lock()
with self.assertRaises(RuntimeError):
with Mutex(self.p2, timeout=1):
self.assertTrue(1)
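The tests above exercise the core redisco model API end to end. Below is a minimal sketch of that pattern, assuming a running Redis server on the default port and the public `from redisco import models` import; the field values are illustrative and the model mirrors the Student model defined in this test module.

```python
# Minimal sketch of the model API exercised above (assumes a local Redis
# server and the redisco package; values are illustrative).
from redisco import models

class Student(models.Model):
    name = models.CharField(required=True)
    average = models.FloatField(required=True)

s = Student(name="Richard Cypher", average=86.4)
if s.is_valid():              # runs field, type and uniqueness validation
    s.save()

by_name = Student.objects.filter(name="Richard Cypher")   # set-based index lookup
passing = Student.objects.zfilter(average__gt=50.0)        # sorted-set range query
fetched = Student.objects.get_by_id(s.id)
```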
|
heckj/redisco
|
redisco/models/basetests.py
|
Python
|
mit
| 39,521
|
[
"VisIt"
] |
66ea78b1a7a0dccaf0dee9c8bfec0cc5e61ce9193f7a06bbddd76aa7eacd5e7e
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Differentially private version of Keras optimizer v2."""
import tensorflow as tf
from tensorflow_privacy.privacy.dp_query import gaussian_query
def make_keras_optimizer_class(cls):
"""Given a subclass of `tf.keras.optimizers.Optimizer`, returns a DP-SGD subclass of it.
Args:
cls: Class from which to derive a DP subclass. Should be a subclass of
`tf.keras.optimizers.Optimizer`.
Returns:
A DP-SGD subclass of `cls`.
"""
class DPOptimizerClass(cls): # pylint: disable=empty-docstring
__doc__ = """Differentially private subclass of class `{base_class}`.
You can use this as a differentially private replacement for
`{base_class}`. This optimizer implements DP-SGD using
the standard Gaussian mechanism.
When instantiating this optimizer, you need to supply several
DP-related arguments followed by the standard arguments for
`{short_base_class}`.
Examples:
```python
# Create optimizer.
opt = {dp_keras_class}(l2_norm_clip=1.0, noise_multiplier=0.5, num_microbatches=1,
<standard arguments>)
```
When using the optimizer, be sure to pass in the loss as a
rank-one tensor with one entry for each example.
The optimizer can be used directly via its `minimize` method, or
through a Keras `Model`.
```python
# Compute loss as a tensor by using tf.losses.Reduction.NONE.
# Compute vector of per-example loss rather than its mean over a minibatch.
loss = tf.keras.losses.CategoricalCrossentropy(
from_logits=True, reduction=tf.losses.Reduction.NONE)
# Use optimizer in a Keras model.
opt.minimize(loss, var_list=[var])
```
```python
# Compute loss as a tensor by using tf.losses.Reduction.NONE.
# Compute vector of per-example loss rather than its mean over a minibatch.
loss = tf.keras.losses.CategoricalCrossentropy(
from_logits=True, reduction=tf.losses.Reduction.NONE)
# Use optimizer in a Keras model.
model = tf.keras.Sequential(...)
model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])
model.fit(...)
```
In DP-SGD training, a larger batch size typically helps to achieve a better
privacy/utility tradeoff. However, there is typically a maximum batch size
imposed by hardware.
This optimizer can emulate large batch sizes on hardware with limited
memory by accumulating gradients for several steps before actually
applying them to update model weights.
Constructor argument `gradient_accumulation_steps` controls the number
of steps for which gradients are accumulated before updating
the model weights.
Below is an example which demonstrates how to use this feature:
```python
# Create an optimizer that accumulates gradients for 4 steps
# and then performs a single update of the model weights.
opt = {dp_keras_class}(l2_norm_clip=1.0,
noise_multiplier=0.5,
num_microbatches=1,
gradient_accumulation_steps=4,
<standard arguments>)
# Use optimizer in a regular way.
# First three calls to opt.minimize won't update model weights and will
# only accumulate gradients. Model weights will be updated on the fourth
# call to opt.minimize
opt.minimize(loss, var_list=[var])
```
Note that when using this feature the effective batch size is
`gradient_accumulation_steps * one_step_batch_size`, where
`one_step_batch_size` is the size of the batch passed to a single step of
the optimizer. For example, `gradient_accumulation_steps=4` with a per-step
batch size of 32 gives an effective batch size of 128. The user may therefore
have to adjust the learning rate, weight decay and possibly other training
hyperparameters accordingly.
""".format(
base_class='tf.keras.optimizers.' + cls.__name__,
short_base_class=cls.__name__,
dp_keras_class='DPKeras' + cls.__name__)
# The class tf.keras.optimizers.Optimizer has two methods to compute
# gradients, `_compute_gradients` and `get_gradients`. The first works
# with eager execution, while the second runs in graph mode and is used
# by canned estimators.
# Internally, DPOptimizerClass stores hyperparameters both individually
# and encapsulated in a `GaussianSumQuery` object for these two use cases.
# However, this should be invisible to users of this class.
def __init__(
self,
l2_norm_clip,
noise_multiplier,
num_microbatches=None,
gradient_accumulation_steps=1,
*args, # pylint: disable=keyword-arg-before-vararg, g-doc-args
**kwargs):
"""Initialize the DPOptimizerClass.
Args:
l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients).
noise_multiplier: Ratio of the standard deviation to the clipping norm.
num_microbatches: Number of microbatches into which each minibatch is
split. Default is `None`, which means that the number of microbatches
is equal to the batch size (i.e. each microbatch contains exactly one
example). If `gradient_accumulation_steps` is greater than 1 and
`num_microbatches` is not `None` then the effective number of
microbatches is equal to
`num_microbatches * gradient_accumulation_steps`.
gradient_accumulation_steps: If greater than 1 then optimizer will be
accumulating gradients for this number of optimizer steps before
applying them to update model weights. If this argument is set to 1
then updates will be applied on each optimizer step.
*args: These will be passed on to the base class `__init__` method.
**kwargs: These will be passed on to the base class `__init__` method.
"""
super().__init__(*args, **kwargs)
self.gradient_accumulation_steps = gradient_accumulation_steps
self._l2_norm_clip = l2_norm_clip
self._noise_multiplier = noise_multiplier
self._num_microbatches = num_microbatches
self._dp_sum_query = gaussian_query.GaussianSumQuery(
l2_norm_clip, l2_norm_clip * noise_multiplier)
self._global_state = None
self._was_dp_gradients_called = False
def _create_slots(self, var_list):
super()._create_slots(var_list) # pytype: disable=attribute-error
if self.gradient_accumulation_steps > 1:
for var in var_list:
self.add_slot(var, 'grad_acc')
def _prepare_local(self, var_device, var_dtype, apply_state):
super()._prepare_local(var_device, var_dtype, apply_state) # pytype: disable=attribute-error
if self.gradient_accumulation_steps > 1:
apply_update = tf.math.equal(
tf.math.floormod(self.iterations + 1,
self.gradient_accumulation_steps), 0)
grad_scaler = tf.cast(1. / self.gradient_accumulation_steps, var_dtype)
apply_state[(var_device, var_dtype)].update({
'apply_update': apply_update,
'grad_scaler': grad_scaler
})
def _resource_apply_dense(self, grad, var, apply_state=None):
if self.gradient_accumulation_steps > 1:
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
self._fallback_apply_state(var_device, var_dtype))
grad_acc = self.get_slot(var, 'grad_acc')
def _update_grad():
apply_grad_op = super(DPOptimizerClass, self)._resource_apply_dense(
grad_acc + grad * coefficients['grad_scaler'], var, apply_state) # pytype: disable=attribute-error
with tf.control_dependencies([apply_grad_op]):
return grad_acc.assign(
tf.zeros_like(grad_acc),
use_locking=self._use_locking,
read_value=False)
def _accumulate():
return grad_acc.assign_add(
grad * coefficients['grad_scaler'],
use_locking=self._use_locking,
read_value=False)
return tf.cond(coefficients['apply_update'], _update_grad, _accumulate)
else:
return super()._resource_apply_dense(grad, var, apply_state) # pytype: disable=attribute-error
def _resource_apply_sparse_duplicate_indices(self, *args, **kwargs):
if self.gradient_accumulation_steps > 1:
raise NotImplementedError(
'Sparse gradients are not supported with large batch emulation.')
else:
return super()._resource_apply_sparse_duplicate_indices(*args, **kwargs) # pytype: disable=attribute-error
def _resource_apply_sparse(self, *args, **kwargs):
if self.gradient_accumulation_steps > 1:
raise NotImplementedError(
'Sparse gradients are not supported with large batch emulation.')
else:
return super()._resource_apply_sparse(*args, **kwargs) # pytype: disable=attribute-error
def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):
"""DP-SGD version of base class method."""
self._was_dp_gradients_called = True
# Compute loss.
if not callable(loss) and tape is None:
raise ValueError('`tape` is required when a `Tensor` loss is passed.')
tape = tape if tape is not None else tf.GradientTape()
if callable(loss):
with tape:
if not callable(var_list):
tape.watch(var_list)
loss = loss()
if self._num_microbatches is None:
num_microbatches = tf.shape(input=loss)[0]
else:
num_microbatches = self._num_microbatches
microbatch_losses = tf.reduce_mean(
tf.reshape(loss, [num_microbatches, -1]), axis=1)
if callable(var_list):
var_list = var_list()
else:
with tape:
if self._num_microbatches is None:
num_microbatches = tf.shape(input=loss)[0]
else:
num_microbatches = self._num_microbatches
microbatch_losses = tf.reduce_mean(
tf.reshape(loss, [num_microbatches, -1]), axis=1)
var_list = tf.nest.flatten(var_list)
# Compute the per-microbatch losses using helpful jacobian method.
with tf.keras.backend.name_scope(self._name + '/gradients'):
jacobian = tape.jacobian(microbatch_losses, var_list)
# Clip gradients to given l2_norm_clip.
def clip_gradients(g):
return tf.clip_by_global_norm(g, self._l2_norm_clip)[0]
clipped_gradients = tf.map_fn(clip_gradients, jacobian)
def reduce_noise_normalize_batch(g):
# Sum gradients over all microbatches.
summed_gradient = tf.reduce_sum(g, axis=0)
# Add noise to summed gradients.
noise_stddev = self._l2_norm_clip * self._noise_multiplier
noise = tf.random.normal(
tf.shape(input=summed_gradient), stddev=noise_stddev)
noised_gradient = tf.add(summed_gradient, noise)
# Normalize by number of microbatches and return.
return tf.truediv(noised_gradient,
tf.cast(num_microbatches, tf.float32))
final_gradients = tf.nest.map_structure(reduce_noise_normalize_batch,
clipped_gradients)
return list(zip(final_gradients, var_list))
def get_gradients(self, loss, params):
"""DP-SGD version of base class method."""
self._was_dp_gradients_called = True
if self._global_state is None:
self._global_state = self._dp_sum_query.initial_global_state()
# This code mostly follows the logic in the original DPOptimizerClass
# in dp_optimizer.py, except that this returns only the gradients,
# not the gradients and variables.
microbatch_losses = tf.reshape(loss, [self._num_microbatches, -1])
sample_params = (
self._dp_sum_query.derive_sample_params(self._global_state))
def process_microbatch(i, sample_state):
"""Process one microbatch (record) with privacy helper."""
mean_loss = tf.reduce_mean(
input_tensor=tf.gather(microbatch_losses, [i]))
grads = tf.gradients(mean_loss, params)
sample_state = self._dp_sum_query.accumulate_record(
sample_params, sample_state, grads)
return sample_state
sample_state = self._dp_sum_query.initial_sample_state(params)
for idx in range(self._num_microbatches):
sample_state = process_microbatch(idx, sample_state)
grad_sums, self._global_state, _ = (
self._dp_sum_query.get_noised_result(sample_state,
self._global_state))
def normalize(v):
try:
return tf.truediv(v, tf.cast(self._num_microbatches, tf.float32))
except TypeError:
return None
final_grads = tf.nest.map_structure(normalize, grad_sums)
return final_grads
def get_config(self):
"""Returns the config of the optimizer.
An optimizer config is a Python dictionary (serializable)
containing the configuration of an optimizer.
The same optimizer can be reinstantiated later
(without any saved state) from this configuration.
Returns:
Python dictionary.
"""
config = super().get_config()
config.update({
'l2_norm_clip': self._l2_norm_clip,
'noise_multiplier': self._noise_multiplier,
'num_microbatches': self._num_microbatches,
})
return config
def apply_gradients(self, *args, **kwargs):
"""DP-SGD version of base class method."""
assert self._was_dp_gradients_called, (
'Neither _compute_gradients() nor get_gradients() on the '
'differentially private optimizer was called. This means the '
'training is not differentially private. It may be the case that '
'you need to upgrade to TF 2.4 or higher to use this particular '
'optimizer.')
return super().apply_gradients(*args, **kwargs)
return DPOptimizerClass
DPKerasAdagradOptimizer = make_keras_optimizer_class(
tf.keras.optimizers.Adagrad)
DPKerasAdamOptimizer = make_keras_optimizer_class(tf.keras.optimizers.Adam)
DPKerasSGDOptimizer = make_keras_optimizer_class(tf.keras.optimizers.SGD)
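A minimal end-to-end sketch of wiring one of these exported optimizers into a Keras model, consistent with the class docstring above; the network shape, batch size and DP hyperparameters are illustrative assumptions, not values taken from this module.

```python
# Sketch only: layer sizes, batch size and DP hyperparameters are illustrative.
import tensorflow as tf
from tensorflow_privacy.privacy.optimizers.dp_optimizer_keras import DPKerasSGDOptimizer

model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(10,)),
    tf.keras.layers.Dense(2),
])
optimizer = DPKerasSGDOptimizer(
    l2_norm_clip=1.0,
    noise_multiplier=1.1,
    num_microbatches=32,      # should evenly divide the batch size
    learning_rate=0.1)
# Per-example (vector) loss so each microbatch can be clipped independently.
loss = tf.keras.losses.CategoricalCrossentropy(
    from_logits=True, reduction=tf.losses.Reduction.NONE)
model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
# model.fit(x_train, y_train, batch_size=32, epochs=5)
```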
|
tensorflow/privacy
|
tensorflow_privacy/privacy/optimizers/dp_optimizer_keras.py
|
Python
|
apache-2.0
| 14,994
|
[
"Gaussian"
] |
4a602714d0c245e38189e83af5f02ad7cb877b716dadd43a362ca5eee744f95e
|
'''
sbclearn (c) University of Manchester 2018
sbclearn is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=invalid-name
# pylint: disable=unused-argument
import collections
from sklearn.base import BaseEstimator, TransformerMixin
from synbiochem.utils import seq_utils
import numpy as np
SEQ_IDX = 0
# KD Hydrophobicity, EIIP, Helix, Sheet, Turn
AA_PROPS = {
'A': [1.8, -0.0667, 32.9, -23.6, -41.6],
'R': [-4.5, 0.2674, 0, -6.2, -5.1],
'N': [-3.5, -0.2589, -24.8, -41.6, 44.5],
'D': [-3.5, 0.4408, 5.8, -41.6, 37.8],
'C': [2.5, 0.1933, -5.1, 6.8, 17.4],
'Q': [-3.5, 0.1545, 11.3, 0, -2],
'E': [-3.5, -0.2463, 36.5, -67.3, -30.1],
'G': [-0.4, -0.2509, -46.2, -13.9, 44.5],
'H': [-3.2, -0.1414, 11.3, -18.6, -5.1],
'I': [4.5, -0.2794, -1, 45.1, -75.5],
'L': [3.8, -0.2794, 26.2, 15.7, -52.8],
'K': [-3.9, -0.0679, 19.1, -31.5, 1],
'M': [1.9, 0.1899, 27.8, 1, -51.1],
'F': [2.8, 0.26, 10.4, 20.7, -51.1],
'P': [-1.6, -0.1665, -59.8, -47.8, 41.9],
'S': [-0.8, 0.1933, -32.9, -6.2, 35.8],
'T': [-0.7, 0.2572, -24.8, 28.5, -4.1],
'W': [-0.9, 0.0331, 3, 21.5, -4.1],
'Y': [-1.3, 0.0148, -31.5, 27, 13.1],
'V': [4.2, -0.2469, -3, 49.5, -69.3]
}
NUM_AA_PROPS = len(AA_PROPS['A'])
class OneHotTransformer(BaseEstimator, TransformerMixin):
'''Transformer class to perform one-hot encoding on sequences.'''
def __init__(self, nucl=True):
alphabet = seq_utils.NUCLEOTIDES if nucl \
else list(seq_utils.AA_CODES.values()) + ['-', 'X']
self.__alphabet = sorted(alphabet)
# Define a mapping of chars to integers:
self.__char_to_int = {c: i for i, c in enumerate(self.__alphabet)}
def fit(self, *unused):
'''fit.'''
return self
def transform(self, X, *unused):
'''transform.'''
return self.__one_hot_encode(X)
def __one_hot_encode(self, X):
'''One hot encode a DataFrame.'''
encoded = [self.__one_hot_encode_seq(seq) for seq in X[:, SEQ_IDX]]
return np.c_[X, encoded]
def __one_hot_encode_seq(self, seq):
'''One hot encode a seq.'''
int_encoded = [self.__char_to_int[char] for char in seq]
# One hot encode:
one_hot_encoded = []
for value in int_encoded:
letter = [0 for _ in range(len(self.__alphabet))]
letter[value] = 1
one_hot_encoded.extend(letter)
return one_hot_encoded
class AminoAcidTransformer(BaseEstimator, TransformerMixin):
'''Transformer class to perform amino acid property encoding on
sequences.'''
def __init__(self, scale=(0.1, 0.9)):
self.__scaled_aa_props = _scale(scale)
self.__mean_values = [(scale[1] - scale[0]) / 2.0] * \
len(self.__scaled_aa_props['A'])
def fit(self, *unused):
'''fit.'''
return self
def transform(self, X, *unused):
'''transform.'''
return self.__aa_encode(X)
def __aa_encode(self, X):
'''Amino acid property encode a DataFrame.'''
encoded = [self.__aa_encode_seq(seq) for seq in X[:, SEQ_IDX]]
return np.c_[X, encoded]
def __aa_encode_seq(self, seq):
'''Amino acid property encode a seq.'''
encoded = [self.__scaled_aa_props.get(am_acid, self.__mean_values)
for am_acid in seq]
return [val for sublist in encoded for val in sublist]
def _scale(scale):
'''Scale amino acid properties.'''
scaled = collections.defaultdict(list)
for i in range(NUM_AA_PROPS):
props = {key: value[i] for key, value in AA_PROPS.items()}
min_val, max_val = min(props.values()), max(props.values())
for key, value in props.items():
scaled[key].append(scale[0] + (scale[1] - scale[0]) *
(value - min_val) / (max_val - min_val))
return scaled
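A short usage sketch of the two transformers follows. The sequences are illustrative, the input is assumed to be a 2-D object array with the sequence string in column `SEQ_IDX`, and the import path is inferred from the file path recorded below, so treat all of these as assumptions.

```python
# Illustrative only: assumes the module is importable as shown and that the
# single-letter residue codes used below exist in the chosen alphabet.
import numpy as np
from gg_learn.utils.transformer import OneHotTransformer, AminoAcidTransformer

# Column 0 (SEQ_IDX) holds the sequences; remaining columns are passed through.
X = np.array([['MKT', 0.7],
              ['MRS', 0.3]], dtype=object)

aa = AminoAcidTransformer(scale=(0.1, 0.9))
X_aa = aa.fit_transform(X)               # appends 5 scaled property values per residue

onehot = OneHotTransformer(nucl=False)   # amino-acid alphabet instead of nucleotides
X_hot = onehot.fit_transform(X)          # appends a one-hot vector per residue
```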
|
synbiochem/synbiochem-learn
|
gg_learn/utils/transformer.py
|
Python
|
mit
| 4,012
|
[
"VisIt"
] |
86f12cb1a0074e32043f102746ba7aeaae9fd4c8a9819c08da118cd24308655b
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a core class LammpsData for generating/parsing
LAMMPS data file, and other bridging classes to build LammpsData from
molecules. This module also implements a subclass CombinedData for
merging LammpsData objects.
Only point particle styles are supported for now (atom_style in angle,
atomic, bond, charge, full and molecular only). See the pages below for
more info.
http://lammps.sandia.gov/doc/atom_style.html
http://lammps.sandia.gov/doc/read_data.html
"""
from collections import OrderedDict
from io import StringIO
import itertools
import re
import warnings
import numpy as np
import pandas as pd
from pathlib import Path
from monty.json import MSONable
from monty.dev import deprecated
from monty.serialization import loadfn
from ruamel.yaml import YAML
from pymatgen.util.io_utils import clean_lines
from pymatgen import Molecule, Element, Lattice, Structure, SymmOp
__author__ = "Kiran Mathew, Zhi Deng, Tingzheng Hou"
__copyright__ = "Copyright 2018, The Materials Virtual Lab"
__version__ = "1.0"
__maintainer__ = "Zhi Deng"
__email__ = "z4deng@eng.ucsd.edu"
__date__ = "Aug 1, 2018"
MODULE_DIR = Path(__file__).resolve().parent
SECTION_KEYWORDS = {"atom": ["Atoms", "Velocities", "Masses",
"Ellipsoids", "Lines", "Triangles", "Bodies"],
"topology": ["Bonds", "Angles", "Dihedrals", "Impropers"],
"ff": ["Pair Coeffs", "PairIJ Coeffs", "Bond Coeffs",
"Angle Coeffs", "Dihedral Coeffs",
"Improper Coeffs"],
"class2": ["BondBond Coeffs", "BondAngle Coeffs",
"MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs", "AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs", "AngleAngle Coeffs"]}
CLASS2_KEYWORDS = {"Angle Coeffs": ["BondBond Coeffs", "BondAngle Coeffs"],
"Dihedral Coeffs": ["MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs",
"AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs"],
"Improper Coeffs": ["AngleAngle Coeffs"]}
SECTION_HEADERS = {"Masses": ["mass"],
"Velocities": ["vx", "vy", "vz"],
"Bonds": ["type", "atom1", "atom2"],
"Angles": ["type", "atom1", "atom2", "atom3"],
"Dihedrals": ["type", "atom1", "atom2", "atom3", "atom4"],
"Impropers": ["type", "atom1", "atom2", "atom3", "atom4"]}
ATOMS_HEADERS = {"angle": ["molecule-ID", "type", "x", "y", "z"],
"atomic": ["type", "x", "y", "z"],
"bond": ["molecule-ID", "type", "x", "y", "z"],
"charge": ["type", "q", "x", "y", "z"],
"full": ["molecule-ID", "type", "q", "x", "y", "z"],
"molecular": ["molecule-ID", "type", "x", "y", "z"]}
class LammpsBox(MSONable):
"""
Object for representing a simulation box in LAMMPS settings.
"""
def __init__(self, bounds, tilt=None):
"""
Args:
bounds: A (3, 2) array/list of floats setting the
boundaries of simulation box.
tilt: A (3,) array/list of floats setting the tilt of
simulation box. Default to None, i.e., use an
orthogonal box.
"""
bounds_arr = np.array(bounds)
assert bounds_arr.shape == (3, 2), \
"Expecting a (3, 2) array for bounds," \
" got {}".format(bounds_arr.shape)
self.bounds = bounds_arr.tolist()
matrix = np.diag(bounds_arr[:, 1] - bounds_arr[:, 0])
self.tilt = None
if tilt is not None:
tilt_arr = np.array(tilt)
assert tilt_arr.shape == (3,), \
"Expecting a (3,) array for box_tilt," \
" got {}".format(tilt_arr.shape)
self.tilt = tilt_arr.tolist()
matrix[1, 0] = tilt_arr[0]
matrix[2, 0] = tilt_arr[1]
matrix[2, 1] = tilt_arr[2]
self._matrix = matrix
def __str__(self):
return self.get_string()
def __repr__(self):
return self.get_string()
@property
def volume(self):
"""
Volume of simulation box.
"""
m = self._matrix
return np.dot(np.cross(m[0], m[1]), m[2])
def get_string(self, significant_figures=6):
"""
Returns the string representation of simulation box in LAMMPS
data file format.
Args:
significant_figures (int): No. of significant figures to
output for box settings. Default to 6.
Returns:
String representation
"""
ph = "{:.%df}" % significant_figures
lines = []
for bound, d in zip(self.bounds, "xyz"):
fillers = bound + [d] * 2
bound_format = " ".join([ph] * 2 + [" {}lo {}hi"])
lines.append(bound_format.format(*fillers))
if self.tilt:
tilt_format = " ".join([ph] * 3 + [" xy xz yz"])
lines.append(tilt_format.format(*self.tilt))
return "\n".join(lines)
def get_box_shift(self, i):
"""
Calculates the coordinate shift due to PBC.
Args:
i: A (n, 3) integer array containing the labels for box
images of n entries.
Returns:
Coordinate shift array with the same shape as i
"""
return np.inner(i, self._matrix.T)
def to_lattice(self):
"""
Converts the simulation box to a more powerful Lattice backend.
Note that Lattice is always periodic in 3D space while a
simulation box is not necessarily periodic in all dimensions.
Returns:
Lattice
"""
return Lattice(self._matrix)
def lattice_2_lmpbox(lattice, origin=(0, 0, 0)):
"""
Converts a lattice object to LammpsBox, and calculates the symmetry
operation used.
Args:
lattice (Lattice): Input lattice.
origin: A (3,) array/list of floats setting lower bounds of
simulation box. Default to (0, 0, 0).
Returns:
LammpsBox, SymmOp
"""
a, b, c = lattice.abc
xlo, ylo, zlo = origin
xhi = a + xlo
m = lattice.matrix
xy = np.dot(m[1], m[0] / a)
yhi = np.sqrt(b ** 2 - xy ** 2) + ylo
xz = np.dot(m[2], m[0] / a)
yz = (np.dot(m[1], m[2]) - xy * xz) / (yhi - ylo)
zhi = np.sqrt(c ** 2 - xz ** 2 - yz ** 2) + zlo
tilt = None if lattice.is_orthogonal else [xy, xz, yz]
rot_matrix = np.linalg.solve([[xhi - xlo, 0, 0],
[xy, yhi - ylo, 0],
[xz, yz, zhi - zlo]], m)
bounds = [[xlo, xhi], [ylo, yhi], [zlo, zhi]]
symmop = SymmOp.from_rotation_and_translation(rot_matrix, origin)
return LammpsBox(bounds, tilt), symmop
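# Illustrative sketch (not part of the original module): how lattice_2_lmpbox
# is typically used to turn a pymatgen Lattice into a LAMMPS box plus the
# SymmOp needed to map Cartesian coordinates into the box frame. Wrapped in a
# function so nothing runs at import time; the 4.2 Angstrom cell edge is an
# arbitrary assumption.
def _example_lattice_to_lmpbox():
    latt = Lattice.cubic(4.2)
    box, symmop = lattice_2_lmpbox(latt)
    # An orthogonal cell yields no tilt factors in the box string.
    box_str = box.get_string()
    # Map a Cartesian point from the lattice frame into the LAMMPS box frame.
    shifted = symmop.operate([1.0, 1.0, 1.0])
    return box_str, shifted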
class LammpsData(MSONable):
"""
Object for representing the data in a LAMMPS data file.
"""
def __init__(self, box, masses, atoms, velocities=None, force_field=None,
topology=None, atom_style="full"):
"""
This is a low level constructor designed to work with parsed
data or other bridging objects (ForceField and Topology). Not
recommended to use directly.
Args:
box (LammpsBox): Simulation box.
masses (pandas.DataFrame): DataFrame with one column
["mass"] for Masses section.
atoms (pandas.DataFrame): DataFrame with multiple columns
for Atoms section. Column names vary with atom_style.
velocities (pandas.DataFrame): DataFrame with three columns
["vx", "vy", "vz"] for Velocities section. Optional
with default to None. If not None, its index should be
consistent with atoms.
force_field (dict): Data for force field sections. Optional
with default to None. Only keywords in force field and
class 2 force field are valid keys, and each value is a
DataFrame.
topology (dict): Data for topology sections. Optional with
default to None. Only keywords in topology are valid
keys, and each value is a DataFrame.
atom_style (str): Output atom_style. Default to "full".
"""
if velocities is not None:
assert len(velocities) == len(atoms), \
"Inconsistency found between atoms and velocities"
if force_field:
all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
force_field = {k: v for k, v in force_field.items()
if k in all_ff_kws}
if topology:
topology = {k: v for k, v in topology.items()
if k in SECTION_KEYWORDS["topology"]}
self.box = box
self.masses = masses
self.atoms = atoms
self.velocities = velocities
self.force_field = force_field
self.topology = topology
self.atom_style = atom_style
def __str__(self):
return self.get_string()
def __repr__(self):
return self.get_string()
@property
def structure(self):
"""
Exports a periodic structure object representing the simulation
box.
Return:
Structure
"""
masses = self.masses
atoms = self.atoms.copy()
if "nx" in atoms.columns:
atoms.drop(["nx", "ny", "nz"], axis=1, inplace=True)
atoms["molecule-ID"] = 1
ld_copy = self.__class__(self.box, masses, atoms)
topologies = ld_copy.disassemble()[-1]
molecule = topologies[0].sites
coords = molecule.cart_coords - np.array(self.box.bounds)[:, 0]
species = molecule.species
latt = self.box.to_lattice()
site_properties = {}
if "q" in atoms:
site_properties["charge"] = atoms["q"].values
if self.velocities is not None:
site_properties["velocities"] = self.velocities.values
return Structure(latt, species, coords, coords_are_cartesian=True,
site_properties=site_properties)
def get_string(self, distance=6, velocity=8, charge=4):
"""
Returns the string representation of LammpsData, essentially
the string to be written to a file. Hybrid-style coeffs are supported
for both reading and writing.
Args:
distance (int): No. of significant figures to output for
box settings (bounds and tilt) and atomic coordinates.
Default to 6.
velocity (int): No. of significant figures to output for
velocities. Default to 8.
charge (int): No. of significant figures to output for
charges. Default to 4.
Returns:
String representation
"""
file_template = """Generated by pymatgen.io.lammps.data.LammpsData
{stats}
{box}
{body}
"""
box = self.box.get_string(distance)
body_dict = OrderedDict()
body_dict["Masses"] = self.masses
types = OrderedDict()
types["atom"] = len(self.masses)
if self.force_field:
all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
ff_kws = [k for k in all_ff_kws if k in self.force_field]
for kw in ff_kws:
body_dict[kw] = self.force_field[kw]
if kw in SECTION_KEYWORDS["ff"][2:]:
types[kw.lower()[:-7]] = len(self.force_field[kw])
body_dict["Atoms"] = self.atoms
counts = OrderedDict()
counts["atoms"] = len(self.atoms)
if self.velocities is not None:
body_dict["Velocities"] = self.velocities
if self.topology:
for kw in SECTION_KEYWORDS["topology"]:
if kw in self.topology:
body_dict[kw] = self.topology[kw]
counts[kw.lower()] = len(self.topology[kw])
all_stats = list(counts.values()) + list(types.values())
stats_template = "{:>%d} {}" % len(str(max(all_stats)))
count_lines = [stats_template.format(v, k) for k, v in counts.items()]
type_lines = [stats_template.format(v, k + " types")
for k, v in types.items()]
stats = "\n".join(count_lines + [""] + type_lines)
def map_coords(q):
return ("{:.%df}" % distance).format(q)
def map_velos(q):
return ("{:.%df}" % velocity).format(q)
def map_charges(q):
return ("{:.%df}" % charge).format(q)
float_format = '{:.9f}'.format
float_format_2 = '{:.1f}'.format
int_format = '{:.0f}'.format
default_formatters = {"x": map_coords, "y": map_coords, "z": map_coords,
"vx": map_velos, "vy": map_velos, "vz": map_velos,
"q": map_charges}
coeffsdatatype = loadfn(str(MODULE_DIR / "CoeffsDataType.yaml"))
coeffs = {}
for style, types in coeffsdatatype.items():
coeffs[style] = {}
for type, formatter in types.items():
coeffs[style][type] = {}
for coeff, datatype in formatter.items():
if datatype == 'int_format':
coeffs[style][type][coeff] = int_format
elif datatype == 'float_format_2':
coeffs[style][type][coeff] = float_format_2
else:
coeffs[style][type][coeff] = float_format
section_template = "{kw}\n\n{df}\n"
parts = []
for k, v in body_dict.items():
index = True if k != "PairIJ Coeffs" else False
if k in ['Bond Coeffs', 'Angle Coeffs', 'Dihedral Coeffs', 'Improper Coeffs']:
listofdf = np.array_split(v, len(v.index))
df_string = ''
for i, df in enumerate(listofdf):
if isinstance(df.iloc[0]['coeff1'], str):
try:
formatters = {**default_formatters, **coeffs[k][df.iloc[0]['coeff1']]}
except KeyError:
formatters = default_formatters
line_string = \
df.to_string(header=False, formatters=formatters,
index_names=False, index=index, na_rep='')
else:
line_string = \
v.to_string(header=False, formatters=default_formatters,
index_names=False, index=index,
na_rep='').splitlines()[i]
df_string += line_string.replace('nan', '').rstrip() + '\n'
else:
df_string = v.to_string(header=False, formatters=default_formatters,
index_names=False, index=index, na_rep='')
parts.append(section_template.format(kw=k, df=df_string))
body = "\n".join(parts)
return file_template.format(stats=stats, box=box, body=body)
def write_file(self, filename, distance=6, velocity=8, charge=4):
"""
Writes LammpsData to file.
Args:
filename (str): Filename.
distance (int): No. of significant figures to output for
box settings (bounds and tilt) and atomic coordinates.
Default to 6.
velocity (int): No. of significant figures to output for
velocities. Default to 8.
charge (int): No. of significant figures to output for
charges. Default to 4.
"""
with open(filename, "w") as f:
f.write(self.get_string(distance=distance, velocity=velocity,
charge=charge))
def disassemble(self, atom_labels=None, guess_element=True,
ff_label="ff_map"):
"""
Breaks down LammpsData to building blocks
(LammpsBox, ForceField and a series of Topology).
RESTRICTIONS APPLIED:
1. No complex force field, i.e., coefficients must be defined on atom
types only, so that the same type or equivalent types of topology never
have more than one set of coefficients.
2. No intermolecular topologies (with atoms from different
molecule-IDs), since a Topology object includes data for ONE
molecule or structure only.
Args:
atom_labels ([str]): List of strings (must be different
from one another) for labelling each atom type found in
Masses section. Default to None, where the labels are
automatically added based on either element guess or
dummy specie assignment.
guess_element (bool): Whether to guess the element based on
its atomic mass. Default to True, otherwise dummy
species "Qa", "Qb", ... will be assigned to various
atom types. The guessed or assigned elements will be
reflected on atom labels if atom_labels is None, as
well as on the species of molecule in each Topology.
ff_label (str): Site property key for labeling atoms of
different types. Default to "ff_map".
Returns:
LammpsBox, ForceField, [Topology]
"""
atoms_df = self.atoms.copy()
if "nx" in atoms_df.columns:
atoms_df[["x", "y", "z"]] += \
self.box.get_box_shift(atoms_df[["nx", "ny", "nz"]].values)
atoms_df = pd.concat([atoms_df, self.velocities], axis=1)
mids = atoms_df.get("molecule-ID")
if mids is None:
unique_mids = [1]
data_by_mols = {1: {"Atoms": atoms_df}}
else:
unique_mids = np.unique(mids)
data_by_mols = {}
for k in unique_mids:
df = atoms_df[atoms_df["molecule-ID"] == k]
data_by_mols[k] = {"Atoms": df}
masses = self.masses.copy()
masses["label"] = atom_labels
unique_masses = np.unique(masses["mass"])
if guess_element:
ref_masses = [el.atomic_mass.real for el in Element]
diff = np.abs(np.array(ref_masses) - unique_masses[:, None])
atomic_numbers = np.argmin(diff, axis=1) + 1
symbols = [Element.from_Z(an).symbol for an in atomic_numbers]
else:
symbols = ["Q%s" % a for a in
map(chr, range(97, 97 + len(unique_masses)))]
for um, s in zip(unique_masses, symbols):
masses.loc[masses["mass"] == um, "element"] = s
if atom_labels is None: # add unique labels based on elements
for el, vc in masses["element"].value_counts().iteritems():
masses.loc[masses["element"] == el, "label"] = \
["%s%d" % (el, c) for c in range(1, vc + 1)]
assert masses["label"].nunique(dropna=False) == len(masses), \
"Expecting unique atom label for each type"
mass_info = [tuple([r["label"], r["mass"]])
for _, r in masses.iterrows()]
nonbond_coeffs, topo_coeffs = None, None
if self.force_field:
if "PairIJ Coeffs" in self.force_field:
nbc = self.force_field["PairIJ Coeffs"]
nbc = nbc.sort_values(["id1", "id2"]).drop(["id1", "id2"], axis=1)
nonbond_coeffs = [list(t) for t in nbc.itertuples(False, None)]
elif "Pair Coeffs" in self.force_field:
nbc = self.force_field["Pair Coeffs"].sort_index()
nonbond_coeffs = [list(t) for t in nbc.itertuples(False, None)]
topo_coeffs = {k: [] for k in SECTION_KEYWORDS["ff"][2:]
if k in self.force_field}
for kw in topo_coeffs.keys():
class2_coeffs = {k: list(v.itertuples(False, None))
for k, v in self.force_field.items()
if k in CLASS2_KEYWORDS.get(kw, [])}
ff_df = self.force_field[kw]
for t in ff_df.itertuples(True, None):
d = {"coeffs": list(t[1:]), "types": []}
if class2_coeffs:
d.update({k: list(v[t[0] - 1])
for k, v in class2_coeffs.items()})
topo_coeffs[kw].append(d)
if self.topology:
def label_topo(t):
return tuple(masses.loc[atoms_df.loc[t, "type"], "label"])
for k, v in self.topology.items():
ff_kw = k[:-1] + " Coeffs"
for topo in v.itertuples(False, None):
topo_idx = topo[0] - 1
indices = topo[1:]
mids = atoms_df.loc[indices, "molecule-ID"].unique()
assert len(mids) == 1, \
"Do not support intermolecular topology formed " \
"by atoms with different molecule-IDs"
label = label_topo(indices)
topo_coeffs[ff_kw][topo_idx]["types"].append(label)
if data_by_mols[mids[0]].get(k):
data_by_mols[mids[0]][k].append(indices)
else:
data_by_mols[mids[0]][k] = [indices]
if topo_coeffs:
for v in topo_coeffs.values():
for d in v:
d["types"] = list(set(d["types"]))
ff = ForceField(mass_info=mass_info, nonbond_coeffs=nonbond_coeffs,
topo_coeffs=topo_coeffs)
topo_list = []
for mid in unique_mids:
data = data_by_mols[mid]
atoms = data["Atoms"]
shift = min(atoms.index)
type_ids = atoms["type"]
species = masses.loc[type_ids, "element"]
labels = masses.loc[type_ids, "label"]
coords = atoms[["x", "y", "z"]]
m = Molecule(species.values, coords.values,
site_properties={ff_label: labels.values})
charges = atoms.get("q")
velocities = atoms[["vx", "vy", "vz"]] if "vx" in atoms.columns \
else None
topologies = {}
for kw in SECTION_KEYWORDS["topology"]:
if data.get(kw):
topologies[kw] = (np.array(data[kw]) - shift).tolist()
topologies = None if not topologies else topologies
topo_list.append(Topology(sites=m, ff_label=ff_label,
charges=charges, velocities=velocities,
topologies=topologies))
return self.box, ff, topo_list
@classmethod
def from_file(cls, filename, atom_style="full", sort_id=False):
"""
Constructor that parses a file.
Args:
filename (str): Filename to read.
atom_style (str): Associated atom_style. Default to "full".
sort_id (bool): Whether to sort each section by id. Default to
False.
"""
with open(filename) as f:
lines = f.readlines()
kw_pattern = r"|".join(itertools.chain(*SECTION_KEYWORDS.values()))
section_marks = [i for i, l in enumerate(lines)
if re.search(kw_pattern, l)]
parts = np.split(lines, section_marks)
float_group = r"([0-9eE.+-]+)"
header_pattern = dict()
header_pattern["counts"] = r"^\s*(\d+)\s+([a-zA-Z]+)$"
header_pattern["types"] = r"^\s*(\d+)\s+([a-zA-Z]+)\s+types$"
header_pattern["bounds"] = r"^\s*{}$".format(r"\s+".join(
[float_group] * 2 + [r"([xyz])lo \3hi"]))
header_pattern["tilt"] = r"^\s*{}$".format(r"\s+".join(
[float_group] * 3 + ["xy xz yz"]))
header = {"counts": {}, "types": {}}
bounds = {}
for l in clean_lines(parts[0][1:]): # skip the 1st line
match = None
for k, v in header_pattern.items():
match = re.match(v, l)
if match:
break
else:
continue
if match and k in ["counts", "types"]:
header[k][match.group(2)] = int(match.group(1))
elif match and k == "bounds":
g = match.groups()
bounds[g[2]] = [float(i) for i in g[:2]]
elif match and k == "tilt":
header["tilt"] = [float(i) for i in match.groups()]
header["bounds"] = [bounds.get(i, [-0.5, 0.5]) for i in "xyz"]
box = LammpsBox(header["bounds"], header.get("tilt"))
def parse_section(sec_lines):
title_info = sec_lines[0].split("#", 1)
kw = title_info[0].strip()
sio = StringIO("".join(sec_lines[2:])) # skip the 2nd line
if kw.endswith("Coeffs") and not kw.startswith("PairIJ"):
df_list = [pd.read_csv(StringIO(line), header=None, comment="#",
delim_whitespace=True) for line in sec_lines[2:] if line.strip()]
df = pd.concat(df_list, ignore_index=True)
names = ["id"] + ["coeff%d" % i
for i in range(1, df.shape[1])]
else:
df = pd.read_csv(sio, header=None, comment="#",
delim_whitespace=True)
if kw == "PairIJ Coeffs":
names = ["id1", "id2"] + ["coeff%d" % i
for i in range(1, df.shape[1] - 1)]
df.index.name = None
elif kw in SECTION_HEADERS:
names = ["id"] + SECTION_HEADERS[kw]
elif kw == "Atoms":
names = ["id"] + ATOMS_HEADERS[atom_style]
if df.shape[1] == len(names):
pass
elif df.shape[1] == len(names) + 3:
names += ["nx", "ny", "nz"]
else:
raise ValueError("Format in Atoms section inconsistent"
" with atom_style %s" % atom_style)
else:
raise NotImplementedError("Parser for %s section"
" not implemented" % kw)
df.columns = names
if sort_id:
sort_by = "id" if kw != "PairIJ Coeffs" else ["id1", "id2"]
df.sort_values(sort_by, inplace=True)
if "id" in df.columns:
df.set_index("id", drop=True, inplace=True)
df.index.name = None
return kw, df
err_msg = "Bad LAMMPS data format where "
body = {}
seen_atoms = False
for part in parts[1:]:
name, section = parse_section(part)
if name == "Atoms":
seen_atoms = True
if name in ["Velocities"] + SECTION_KEYWORDS["topology"] and \
not seen_atoms: # Atoms must appear earlier than these
raise RuntimeError(err_msg + "%s section appears before"
" Atoms section" % name)
body.update({name: section})
err_msg += "Nos. of {} do not match between header and {} section"
assert len(body["Masses"]) == header["types"]["atom"], \
err_msg.format("atom types", "Masses")
atom_sections = ["Atoms", "Velocities"] \
if "Velocities" in body else ["Atoms"]
for s in atom_sections:
assert len(body[s]) == header["counts"]["atoms"], \
err_msg.format("atoms", s)
for s in SECTION_KEYWORDS["topology"]:
if header["counts"].get(s.lower(), 0) > 0:
assert len(body[s]) == header["counts"][s.lower()], \
err_msg.format(s.lower(), s)
items = {k.lower(): body[k] for k in ["Masses", "Atoms"]}
items["velocities"] = body.get("Velocities")
ff_kws = [k for k in body if k
in SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]]
items["force_field"] = {k: body[k] for k in ff_kws} if ff_kws \
else None
topo_kws = [k for k in body if k in SECTION_KEYWORDS["topology"]]
items["topology"] = {k: body[k] for k in topo_kws} \
if topo_kws else None
items["atom_style"] = atom_style
items["box"] = box
return cls(**items)
@classmethod
def from_ff_and_topologies(cls, box, ff, topologies, atom_style="full"):
"""
Constructor building LammpsData from a ForceField object and a
list of Topology objects. Do not support intermolecular
topologies since a Topology object includes data for ONE
molecule or structure only.
Args:
box (LammpsBox): Simulation box.
ff (ForceField): ForceField object with data for Masses and
force field sections.
topologies ([Topology]): List of Topology objects with data
for Atoms, Velocities and topology sections.
atom_style (str): Output atom_style. Default to "full".
"""
atom_types = set.union(*[t.species for t in topologies])
assert atom_types.issubset(ff.maps["Atoms"].keys()), \
"Unknown atom type found in topologies"
items = dict(box=box, atom_style=atom_style, masses=ff.masses,
force_field=ff.force_field)
mol_ids, charges, coords, labels = [], [], [], []
v_collector = [] if topologies[0].velocities else None
topo_collector = {"Bonds": [], "Angles": [], "Dihedrals": [],
"Impropers": []}
topo_labels = {"Bonds": [], "Angles": [], "Dihedrals": [],
"Impropers": []}
for i, topo in enumerate(topologies):
if topo.topologies:
shift = len(labels)
for k, v in topo.topologies.items():
topo_collector[k].append(np.array(v) + shift + 1)
topo_labels[k].extend([tuple([topo.type_by_sites[j]
for j in t]) for t in v])
if isinstance(v_collector, list):
v_collector.append(topo.velocities)
mol_ids.extend([i + 1] * len(topo.sites))
labels.extend(topo.type_by_sites)
coords.append(topo.sites.cart_coords)
q = [0.0] * len(topo.sites) if not topo.charges else topo.charges
charges.extend(q)
atoms = pd.DataFrame(np.concatenate(coords), columns=["x", "y", "z"])
atoms["molecule-ID"] = mol_ids
atoms["q"] = charges
atoms["type"] = list(map(ff.maps["Atoms"].get, labels))
atoms.index += 1
atoms = atoms[ATOMS_HEADERS[atom_style]]
velocities = None
if v_collector:
velocities = pd.DataFrame(np.concatenate(v_collector),
columns=SECTION_HEADERS["Velocities"])
velocities.index += 1
topology = {k: None for k, v in topo_labels.items() if len(v) > 0}
for k in topology:
df = pd.DataFrame(np.concatenate(topo_collector[k]),
columns=SECTION_HEADERS[k][1:])
df["type"] = list(map(ff.maps[k].get, topo_labels[k]))
if any(pd.isnull(df["type"])): # Throw away undefined topologies
warnings.warn("Undefined %s detected and removed" % k.lower())
df.dropna(subset=["type"], inplace=True)
df.reset_index(drop=True, inplace=True)
df.index += 1
topology[k] = df[SECTION_HEADERS[k]]
topology = {k: v for k, v in topology.items() if not v.empty}
items.update({"atoms": atoms, "velocities": velocities,
"topology": topology})
return cls(**items)
@classmethod
def from_structure(cls, structure, ff_elements=None, atom_style="charge"):
"""
Simple constructor building LammpsData from a structure without
force field parameters and topologies.
Args:
structure (Structure): Input structure.
ff_elements ([str]): List of strings of elements that must
be present due to force field settings but not
necessarily in the structure. Default to None.
atom_style (str): Choose between "atomic" (neutral) and
"charge" (charged). Default to "charge".
"""
s = structure.get_sorted_structure()
box, symmop = lattice_2_lmpbox(s.lattice)
coords = symmop.operate_multi(s.cart_coords)
site_properties = s.site_properties
if "velocities" in site_properties:
velos = np.array(s.site_properties["velocities"])
rot = SymmOp.from_rotation_and_translation(symmop.rotation_matrix)
rot_velos = rot.operate_multi(velos)
site_properties.update({"velocities": rot_velos})
boxed_s = Structure(box.to_lattice(), s.species, coords,
site_properties=site_properties,
coords_are_cartesian=True)
symbols = list(s.symbol_set)
if ff_elements:
symbols.extend(ff_elements)
elements = sorted(Element(el) for el in set(symbols))
mass_info = [tuple([i.symbol] * 2) for i in elements]
ff = ForceField(mass_info)
topo = Topology(boxed_s)
return cls.from_ff_and_topologies(box=box, ff=ff, topologies=[topo],
atom_style=atom_style)
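# Illustrative sketch (not in the original source): building LammpsData for a
# simple bulk structure; Lattice and Structure are the standard pymatgen
# classes, and the numbers are made-up example values.
#
#     from pymatgen import Lattice, Structure
#     s = Structure(Lattice.cubic(3.62), ["Cu", "Cu"],
#                   [[0, 0, 0], [0.5, 0.5, 0.5]])
#     data = LammpsData.from_structure(s, atom_style="atomic")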
@classmethod
def from_dict(cls, d):
"""
Constructor that reads in a dictionary.
Args:
d (dict): Dictionary to read.
"""
def decode_df(s):
return pd.read_json(s, orient="split")
items = dict()
items["box"] = LammpsBox.from_dict(d["box"])
items["masses"] = decode_df(d["masses"])
items["atoms"] = decode_df(d["atoms"])
items["atom_style"] = d["atom_style"]
velocities = d["velocities"]
if velocities:
velocities = decode_df(velocities)
items["velocities"] = velocities
force_field = d["force_field"]
if force_field:
force_field = {k: decode_df(v) for k, v in force_field.items()}
items["force_field"] = force_field
topology = d["topology"]
if topology:
topology = {k: decode_df(v) for k, v in topology.items()}
items["topology"] = topology
return cls(**items)
def as_dict(self):
"""
Returns the LammpsData as a dict.
"""
def encode_df(df):
return df.to_json(orient="split")
d = dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["box"] = self.box.as_dict()
d["masses"] = encode_df(self.masses)
d["atoms"] = encode_df(self.atoms)
d["atom_style"] = self.atom_style
d["velocities"] = None if self.velocities is None \
else encode_df(self.velocities)
d["force_field"] = None if not self.force_field \
else {k: encode_df(v) for k, v in self.force_field.items()}
d["topology"] = None if not self.topology \
else {k: encode_df(v) for k, v in self.topology.items()}
return d
class Topology(MSONable):
"""
Class carrying most data in Atoms, Velocities and molecular
topology sections for ONE SINGLE Molecule or Structure
object, or a plain list of Sites.
"""
def __init__(self, sites, ff_label=None, charges=None, velocities=None,
topologies=None):
"""
Args:
sites ([Site] or SiteCollection): A group of sites in a
list or as a Molecule/Structure.
ff_label (str): Site property key for labeling atoms of
different types. Default to None, i.e., use the
specie symbol of each site.
charges ([q, ...]): Charge of each site in a (n,)
array/list, where n is the No. of sites. Default to
None, i.e., search site property for charges.
velocities ([[vx, vy, vz], ...]): Velocity of each site in
a (n, 3) array/list, where n is the No. of sites.
Default to None, i.e., search site property for
velocities.
topologies (dict): Bonds, angles, dihedrals and improper
dihedrals defined by site indices. Default to None,
i.e., no additional topology. All four valid keys
listed below are optional.
{
"Bonds": [[i, j], ...],
"Angles": [[i, j, k], ...],
"Dihedrals": [[i, j, k, l], ...],
"Impropers": [[i, j, k, l], ...]
}
"""
if not isinstance(sites, (Molecule, Structure)):
sites = Molecule.from_sites(sites)
if ff_label:
type_by_sites = sites.site_properties.get(ff_label)
else:
type_by_sites = [site.specie.symbol for site in sites]
# search for site property if not override
if charges is None:
charges = sites.site_properties.get("charge")
if velocities is None:
velocities = sites.site_properties.get("velocities")
# validate shape
if charges is not None:
charge_arr = np.array(charges)
assert charge_arr.shape == (len(sites),), \
"Wrong format for charges"
charges = charge_arr.tolist()
if velocities is not None:
velocities_arr = np.array(velocities)
assert velocities_arr.shape == (len(sites), 3), \
"Wrong format for velocities"
velocities = velocities_arr.tolist()
if topologies:
topologies = {k: v for k, v in topologies.items()
if k in SECTION_KEYWORDS["topology"]}
self.sites = sites
self.ff_label = ff_label
self.charges = charges
self.velocities = velocities
self.topologies = topologies
self.type_by_sites = type_by_sites
self.species = set(type_by_sites)
@classmethod
def from_bonding(cls, molecule, bond=True, angle=True, dihedral=True,
tol=0.1, **kwargs):
"""
Another constructor that creates an instance from a molecule.
Covalent bonds and other bond-based topologies (angles and
dihedrals) can be automatically determined. Cannot be used for
non bond-based topologies, e.g., improper dihedrals.
Args:
molecule (Molecule): Input molecule.
bond (bool): Whether to find bonds. If set to False, angle and
dihedral searching will be skipped. Default to True.
angle (bool): Whether to find angles. Default to True.
dihedral (bool): Whether to find dihedrals. Default to True.
tol (float): Bond distance tolerance. Default to 0.1.
Not recommended to alter.
**kwargs: Other kwargs supported by Topology.
"""
real_bonds = molecule.get_covalent_bonds(tol=tol)
bond_list = [list(map(molecule.index, [b.site1, b.site2]))
for b in real_bonds]
if not all((bond, bond_list)):
# do not search for others if not searching for bonds or no bonds
return cls(sites=molecule, **kwargs)
else:
angle_list, dihedral_list = [], []
dests, freq = np.unique(bond_list, return_counts=True)
hubs = dests[np.where(freq > 1)].tolist()
bond_arr = np.array(bond_list)
if len(hubs) > 0:
hub_spokes = {}
for hub in hubs:
ix = np.any(np.isin(bond_arr, hub), axis=1)
bonds = np.unique(bond_arr[ix]).tolist()
bonds.remove(hub)
hub_spokes[hub] = bonds
# skip angle or dihedral searching if too few bonds or hubs
dihedral = False if len(bond_list) < 3 or len(hubs) < 2 \
else dihedral
angle = False if len(bond_list) < 2 or len(hubs) < 1 else angle
if angle:
for k, v in hub_spokes.items():
angle_list.extend([[i, k, j] for i, j in
itertools.combinations(v, 2)])
if dihedral:
hub_cons = bond_arr[np.all(np.isin(bond_arr, hubs), axis=1)]
for i, j in hub_cons.tolist():
ks = [k for k in hub_spokes[i] if k != j]
ls = [l for l in hub_spokes[j] if l != i]
dihedral_list.extend([[k, i, j, l] for k, l in
itertools.product(ks, ls)
if k != l])
topologies = {k: v for k, v
in zip(SECTION_KEYWORDS["topology"][:3],
[bond_list, angle_list, dihedral_list])
if len(v) > 0}
topologies = None if len(topologies) == 0 else topologies
return cls(sites=molecule, topologies=topologies, **kwargs)
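# Illustrative sketch (not part of the original file): given a hypothetical
# pymatgen Molecule "mol" (e.g. read from an xyz file), a Topology with
# automatically detected bonds, angles and dihedrals could be built as:
#
#     mol = Molecule.from_file("molecule.xyz")
#     topo = Topology.from_bonding(mol)
#
# Improper dihedrals are never generated by from_bonding; they would have to
# be passed through the "topologies" argument of the plain constructor.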
class ForceField(MSONable):
"""
Class carrying most data in Masses and force field sections.
Attributes:
masses (pandas.DataFrame): DataFrame for Masses section.
force_field (dict): Force field section keywords (keys) and
data (values) as DataFrames.
maps (dict): Dict for labeling atoms and topologies.
"""
def _is_valid(self, df):
return not pd.isnull(df).values.any()
def __init__(self, mass_info, nonbond_coeffs=None, topo_coeffs=None):
"""
Args:
mass_info (list): List of atomic mass info. Elements,
strings (symbols) and floats are all acceptable for the
values, with the first two converted to the atomic mass
of an element. It is recommended to use
OrderedDict.items() to prevent key duplications.
[("C", 12.01), ("H", Element("H")), ("O", "O"), ...]
nonbond_coeffs [coeffs]: List of pair or pairij
coefficients, of which the sequence must be sorted
according to the species in mass_info. Pair or PairIJ
determined by the length of list. Optional with default
to None.
topo_coeffs (dict): Dict with force field coefficients for
molecular topologies. Optional with default
to None. All four valid keys listed below are optional.
Each value is a list of dicts with non optional keys
"coeffs" and "types", and related class2 force field
keywords as optional keys.
{
"Bond Coeffs":
[{"coeffs": [coeff],
"types": [("C", "C"), ...]}, ...],
"Angle Coeffs":
[{"coeffs": [coeff],
"BondBond Coeffs": [coeff],
"types": [("H", "C", "H"), ...]}, ...],
"Dihedral Coeffs":
[{"coeffs": [coeff],
"BondBond13 Coeffs": [coeff],
"types": [("H", "C", "C", "H"), ...]}, ...],
"Improper Coeffs":
[{"coeffs": [coeff],
"AngleAngle Coeffs": [coeff],
"types": [("H", "C", "C", "H"), ...]}, ...],
}
Topology of same type or equivalent types (e.g.,
("C", "H") and ("H", "C") bonds) are NOT ALLOWED to
be defined MORE THAN ONCE with DIFFERENT coefficients.
"""
def map_mass(v):
return v.atomic_mass.real if isinstance(v, Element) else Element(v).atomic_mass.real \
if isinstance(v, str) else v
index, masses, self.mass_info, atoms_map = [], [], [], {}
for i, m in enumerate(mass_info):
index.append(i + 1)
mass = map_mass(m[1])
masses.append(mass)
self.mass_info.append((m[0], mass))
atoms_map[m[0]] = i + 1
self.masses = pd.DataFrame({"mass": masses}, index=index)
self.maps = {"Atoms": atoms_map}
ff_dfs = {}
self.nonbond_coeffs = nonbond_coeffs
if self.nonbond_coeffs:
ff_dfs.update(self._process_nonbond())
self.topo_coeffs = topo_coeffs
if self.topo_coeffs:
self.topo_coeffs = {k: v for k, v in self.topo_coeffs.items()
if k in SECTION_KEYWORDS["ff"][2:]}
for k in self.topo_coeffs.keys():
coeffs, mapper = self._process_topo(k)
ff_dfs.update(coeffs)
self.maps.update(mapper)
self.force_field = None if len(ff_dfs) == 0 else ff_dfs
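# Illustrative sketch (not in the original source) of the expected input
# layout; the coefficients below are placeholders, not a real force field.
#
#     ff = ForceField(
#         mass_info=[("H", 1.00794), ("O", "O")],
#         nonbond_coeffs=[[0.0, 0.0], [0.1553, 3.166]],  # one row per type -> Pair Coeffs
#         topo_coeffs={"Bond Coeffs": [{"coeffs": [450.0, 0.9572],
#                                       "types": [("H", "O")]}]})
#     ff.maps["Atoms"]  # {"H": 1, "O": 2}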
def _process_nonbond(self):
pair_df = pd.DataFrame(self.nonbond_coeffs)
assert self._is_valid(pair_df), \
"Invalid nonbond coefficients with rows varying in length"
npair, ncoeff = pair_df.shape
pair_df.columns = ["coeff%d" % i for i in range(1, ncoeff + 1)]
nm = len(self.mass_info)
ncomb = int(nm * (nm + 1) / 2)
if npair == nm:
kw = "Pair Coeffs"
pair_df.index = range(1, nm + 1)
elif npair == ncomb:
kw = "PairIJ Coeffs"
ids = list(itertools.
combinations_with_replacement(range(1, nm + 1), 2))
id_df = pd.DataFrame(ids, columns=["id1", "id2"])
pair_df = pd.concat([id_df, pair_df], axis=1)
else:
raise ValueError("Expecting {} Pair Coeffs or "
"{} PairIJ Coeffs for {} atom types,"
" got {}".format(nm, ncomb, nm, npair))
return {kw: pair_df}
def _process_topo(self, kw):
def find_eq_types(label, section):
if section.startswith("Improper"):
label_arr = np.array(label)
seqs = [[0, 1, 2, 3], [0, 2, 1, 3],
[3, 1, 2, 0], [3, 2, 1, 0]]
return [tuple(label_arr[s]) for s in seqs]
else:
return [label] + [label[::-1]]
main_data, distinct_types = [], []
class2_data = {k: [] for k in self.topo_coeffs[kw][0].keys()
if k in CLASS2_KEYWORDS.get(kw, [])}
for i, d in enumerate(self.topo_coeffs[kw]):
main_data.append(d["coeffs"])
distinct_types.append(d["types"])
for k in class2_data.keys():
class2_data[k].append(d[k])
distinct_types = [set(itertools.
chain(*[find_eq_types(t, kw)
for t in dt])) for dt in distinct_types]
type_counts = sum([len(dt) for dt in distinct_types])
type_union = set.union(*distinct_types)
assert len(type_union) == type_counts, "Duplicated items found " \
"under different coefficients in %s" % kw
atoms = set(np.ravel(list(itertools.chain(*distinct_types))))
assert atoms.issubset(self.maps["Atoms"].keys()), \
"Undefined atom type found in %s" % kw
mapper = {}
for i, dt in enumerate(distinct_types):
for t in dt:
mapper[t] = i + 1
def process_data(data):
df = pd.DataFrame(data)
assert self._is_valid(df), \
"Invalid coefficients with rows varying in length"
n, c = df.shape
df.columns = ["coeff%d" % i for i in range(1, c + 1)]
df.index = range(1, n + 1)
return df
all_data = {kw: process_data(main_data)}
if class2_data:
all_data.update({k: process_data(v) for k, v
in class2_data.items()})
return all_data, {kw[:-7] + "s": mapper}
def to_file(self, filename):
"""
Saves object to a file in YAML format.
Args:
filename (str): Filename.
"""
d = {"mass_info": self.mass_info,
"nonbond_coeffs": self.nonbond_coeffs,
"topo_coeffs": self.topo_coeffs}
yaml = YAML(typ="safe")
with open(filename, "w") as f:
yaml.dump(d, f)
@classmethod
def from_file(cls, filename):
"""
Constructor that reads in a file in YAML format.
Args:
filename (str): Filename.
"""
yaml = YAML(typ="safe")
with open(filename, "r") as f:
d = yaml.load(f)
return cls.from_dict(d)
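# Illustrative round trip (not in the original source): any ForceField
# instance "ff" can be written to YAML and restored from it.
#
#     ff.to_file("ff.yaml")
#     ff_copy = ForceField.from_file("ff.yaml")
#     assert ff_copy.mass_info == ff.mass_info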
@classmethod
def from_dict(cls, d):
"""
Constructor that reads in a dictionary.
Args:
d (dict): Dictionary to read.
"""
d["mass_info"] = [tuple(m) for m in d["mass_info"]]
if d.get("topo_coeffs"):
for v in d["topo_coeffs"].values():
for c in v:
c["types"] = [tuple(t) for t in c["types"]]
return cls(d["mass_info"], d["nonbond_coeffs"], d["topo_coeffs"])
class CombinedData(LammpsData):
"""
Object for a collective set of data for a series of LAMMPS data files.
Velocities are not yet implemented.
"""
def __init__(self, list_of_molecules, list_of_names, list_of_numbers, coordinates, atom_style="full"):
"""
Args:
list_of_molecules: a list of LammpsData objects, one per cluster.
list_of_names: a list of names, one per cluster.
list_of_numbers: a list of integers giving the count of each molecule.
coordinates (pandas.DataFrame): DataFrame with four
columns ["atom", "x", "y", "z"] for coordinates of atoms.
atom_style (str): Output atom_style. Default to "full".
"""
self.box = list_of_molecules[0].box
self.atom_style = atom_style
self.n = sum(list_of_numbers)
self.names = list_of_names
self.mols = list_of_molecules
self.nums = list_of_numbers
self.masses = pd.concat([mol.masses.copy() for mol in self.mols], ignore_index=True)
self.masses.index += 1
all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
ff_kws = [k for k in all_ff_kws if k in self.mols[0].force_field]
self.force_field = {}
for kw in ff_kws:
self.force_field[kw] = pd.concat([mol.force_field[kw].copy() for mol in self.mols
if kw in mol.force_field], ignore_index=True)
self.force_field[kw].index += 1
self.atoms = pd.DataFrame()
mol_count = 0
type_count = 0
for i, mol in enumerate(self.mols):
atoms_df = mol.atoms.copy()
atoms_df['molecule-ID'] += mol_count
atoms_df['type'] += type_count
for j in range(self.nums[i]):
self.atoms = self.atoms.append(atoms_df, ignore_index=True)
atoms_df['molecule-ID'] += 1
type_count += len(mol.masses)
mol_count += self.nums[i]
self.atoms.index += 1
assert len(self.atoms) == len(coordinates), 'Wrong number of coordinates.'
self.atoms.update(coordinates)
self.velocities = None
assert self.mols[0].velocities is None, "Velocities not supported"
self.topology = {}
atom_count = 0
count = {"Bonds": 0, "Angles": 0, "Dihedrals": 0, "Impropers": 0}
for i, mol in enumerate(self.mols):
for kw in SECTION_KEYWORDS["topology"]:
if kw in mol.topology:
if kw not in self.topology:
self.topology[kw] = pd.DataFrame()
topo_df = mol.topology[kw].copy()
topo_df['type'] += count[kw]
for col in topo_df.columns[1:]:
topo_df[col] += atom_count
for j in range(self.nums[i]):
self.topology[kw] = self.topology[kw].append(topo_df, ignore_index=True)
for col in topo_df.columns[1:]:
topo_df[col] += len(mol.atoms)
count[kw] += len(mol.force_field[kw[:-1]+" Coeffs"])
atom_count += len(mol.atoms) * self.nums[i]
for kw in SECTION_KEYWORDS["topology"]:
if kw in self.topology:
self.topology[kw].index += 1
@classmethod
def parse_xyz(cls, filename):
"""
Load an xyz file generated by packmol (for those who find it hard to install openbabel).
Returns:
pandas.DataFrame
"""
with open(filename) as f:
lines = f.readlines()
sio = StringIO("".join(lines[2:])) # skip the first two lines (atom count and comment)
df = pd.read_csv(sio, header=None, comment="#", delim_whitespace=True, names=['atom', 'x', 'y', 'z'])
df.index += 1
return df
@classmethod
def from_files(cls, coordinate_file, list_of_numbers, *filenames):
"""
Constructor that parses a series of data files.
Args:
coordinate_file (str): The filename of xyz coordinates.
list_of_numbers (list): A list of numbers specifying counts for each
cluster parsed from the files.
filenames (str): A series of filenames in string format.
"""
names = []
mols = []
styles = []
coordinates = cls.parse_xyz(filename=coordinate_file)
for i, filename in enumerate(filenames):
cluster = LammpsData.from_file(filename)
names.append("cluster%d" % (i + 1))
mols.append(cluster)
styles.append(cluster.atom_style)
style = set(styles)
assert len(style) == 1, "Files have different atom styles."
return cls.from_lammpsdata(mols, names, list_of_numbers, coordinates, style.pop())
@classmethod
def from_lammpsdata(cls, mols, names, list_of_numbers, coordinates, atom_style=None):
"""
Constructor that can infer atom_style.
The input LammpsData objects are used non-destructively.
Args:
mols: a list of LammpsData objects, one per cluster.
names: a list of names, one per cluster.
list_of_numbers: a list of integers giving the count of each molecule.
coordinates (pandas.DataFrame): DataFrame with four
columns ["atom", "x", "y", "z"] for coordinates of atoms.
atom_style (str): Output atom_style. Default to None, i.e.,
inferred from the input LammpsData objects.
"""
styles = []
for mol in mols:
styles.append(mol.atom_style)
style = set(styles)
assert len(style) == 1, "Data have different atom_style."
style_return = style.pop()
if atom_style:
assert atom_style == style_return, "Data have a different atom_style than specified."
return cls(mols, names, list_of_numbers, coordinates, style_return)
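# Illustrative sketch (not in the original source): combining two
# single-molecule data files with packmol-generated coordinates; the
# filenames and counts below are hypothetical.
#
#     combined = CombinedData.from_files("mixture.xyz", [100, 50],
#                                        "solvent.data", "solute.data")
#     text = combined.get_string()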
def get_string(self, distance=6, velocity=8, charge=4):
"""
Returns the string representation of CombinedData, essentially
the string to be written to a file. Combination info is included.
Args:
distance (int): No. of significant figures to output for
box settings (bounds and tilt) and atomic coordinates.
Default to 6.
velocity (int): No. of significant figures to output for
velocities. Default to 8.
charge (int): No. of significant figures to output for
charges. Default to 4.
Returns:
String representation
"""
lines = LammpsData.get_string(self, distance, velocity, charge).splitlines()
info = '# ' + ' + '.join(str(a) + " " + b for a, b in zip(self.nums, self.names))
lines.insert(1, info)
return "\n".join(lines)
@deprecated(LammpsData.from_structure,
"structure_2_lmpdata has been deprecated "
"in favor of LammpsData.from_structure")
def structure_2_lmpdata(structure, ff_elements=None, atom_style="charge"):
"""
Converts a structure to a LammpsData object with no force field
parameters and topologies.
Args:
structure (Structure): Input structure.
ff_elements ([str]): List of strings of elements that must be
present due to force field settings but not necessarily in
the structure. Default to None.
atom_style (str): Choose between "atomic" (neutral) and
"charge" (charged). Default to "charge".
Returns:
LammpsData
"""
s = structure.get_sorted_structure()
a, b, c = s.lattice.abc
m = s.lattice.matrix
xhi = a
xy = np.dot(m[1], m[0] / xhi)
yhi = np.sqrt(b ** 2 - xy ** 2)
xz = np.dot(m[2], m[0] / xhi)
yz = (np.dot(m[1], m[2]) - xy * xz) / yhi
zhi = np.sqrt(c ** 2 - xz ** 2 - yz ** 2)
box_bounds = [[0.0, xhi], [0.0, yhi], [0.0, zhi]]
box_tilt = [xy, xz, yz]
box_tilt = None if not any(box_tilt) else box_tilt
box = LammpsBox(box_bounds, box_tilt)
new_latt = Lattice([[xhi, 0, 0], [xy, yhi, 0], [xz, yz, zhi]])
s.lattice = new_latt
symbols = list(s.symbol_set)
if ff_elements:
symbols.extend(ff_elements)
elements = sorted(Element(el) for el in set(symbols))
mass_info = [tuple([i.symbol] * 2) for i in elements]
ff = ForceField(mass_info)
topo = Topology(s)
return LammpsData.from_ff_and_topologies(box=box, ff=ff, topologies=[topo],
atom_style=atom_style)
|
fraricci/pymatgen
|
pymatgen/io/lammps/data.py
|
Python
|
mit
| 59,007
|
[
"LAMMPS",
"pymatgen"
] |
4a55eab064a50bf4641641355620badb235ccb8b78edf5e3ca49cb2d0f678dd0
|
'''
Created on Sep 4, 2015
@author: Iaroslav Shcherbatyi
'''
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as optim
import random as random
Reps = 1 # number of times to repeat the experiment
M = 3 # number of dimensions
Xsz = 100; # number of training instances
Changes = [1,10,100,1000]
# compute single hidden layer neural network
def HLayer(W,X):
return np.maximum( np.dot(X,W[0:M,]) , 0)
#return np.tanh( np.dot(X,W[0:M,]) )
def NN(W,X):
return np.dot(HLayer( W ,X), W[-1,] )
def L2NN(W, X, Y):
return np.linalg.norm(NN(W,X) - Y, ord=2) ** 2
def L2FixedNN(W, X, Y):
W[-1,:] = np.linalg.lstsq(HLayer(W,X), Y)[0] # compute least squares
return W, L2NN(W, X, Y) # compute objective
fvals = np.zeros((len(Changes), Xsz-1))
#fvalsRs = np.zeros(Xsz-1)
#fvalsGr = np.zeros(Xsz-1)
iters = np.zeros((len(Changes), Xsz-1))
impr = 0;
nimpr = 0;
for chidx in range(len(Changes)):
for rep in range(Reps):
fv = 10 ** 10
X = np.random.randn(Xsz, M) # input features
X[:,M-1] = 1
Y = np.sin( X[:,0]*3 ) * np.sin( X[:,1]*3 ) # some nonlinear function
# neuron weights with output weight in column format. Start with 1 neuron
W = np.random.randn(M+1,1)
G = HLayer(W,X)
for N in range(Xsz-1): # number of neurons in the range 1 .. 99
# add one neuron to network
W = np.column_stack((W, W[:,0]*0))
# sample random neuron values until fval is improved
improved = 1;
#print "start"
while (improved > 0):
W[:,-1] = np.random.randn(M+1); # generate random neuron
W, fval = L2FixedNN(W, X, Y) # compute fixed neurons objective
if fval < fv:
improved-=1
Wb, fv = np.copy(W), fval
impr+=1
else:
G = HLayer(W,X)
nimpr+=1
#print G[:,-1]
W = Wb
#print "end"
# apply random perturbations to neurons
Wperm = np.copy(W)
for i in range(Changes[chidx]):
change = np.random.randn(X.shape[1]+1);
idx = random.randint(1,Wperm.shape[1]-1)
Wperm[:,idx] += change # random perturbation
Wperm, fval = L2FixedNN(Wperm, X, Y) # compute fixed neurons objective
if fval < fv:
fv = fval
W = np.copy(Wperm)
else:
Wperm[:,idx] -= change
# compute gradient descent
# fvalsGr[N] = optim.minimize(lambda Wfl: L2NN(np.reshape(Wfl, W.shape), X, Y) , W.flatten()).fun
fvals[chidx,N] += fv
iters[chidx,N] = N+1
print N, fvals[chidx,N]
#print "improved:", impr, "not improved:", nimpr
# plot RESULTS
fig,ax = plt.subplots(figsize=(10,4))
#ax.plot(iters[0], np.log10( fvals[0] / Reps ),'-')
for chidx in range(len(Changes)):
ax.plot(iters[chidx,:90], np.log10( fvals[chidx,:90] / Reps ),'-', label=( `Changes[chidx]` + " changes"))
ax.legend(loc='upper right', shadow=False)
#ax.plot(iters, np.log10( fvalsRs / Reps ),'-')
#ax.plot(iters, np.log10( fvalsGr / Reps ),'-')
ax.set_xlabel('Neurons')
ax.set_ylabel(' log(Objective) ')
fig.show()
plt.grid()
plt.show()
|
iaroslav-ai/nn-local-minimum
|
LocalMinimaDemo/ExperimentsShallowNN.py
|
Python
|
mit
| 3,520
|
[
"NEURON"
] |
3896ac63ef9046bd34931fbea4ae01f3e7c8dab569eba0fe63aae86a26f81793
|
# -*- coding: utf-8 -*-
from .screen import screen, screen_width, screen_height, monitor_diagonal
import os
import pygame
import time as builtin_time
import numpy as np
import datetime
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
class Time():
"""
A class object to wait some time, get time, control time and such.
Its methods (functions) are:
- reset()
- control()
- wait()
- get()
See those for further information.
Note that by default, there is already a Time class object called "time" (lowercase) that is initialized at neuropsydia's loading. For the sake of clarity, use this one (e.g., n.time.wait() ), especially for wait() and control() functions.
Parameters
----------
None
Returns
----------
None
Example
----------
>>> import neuropsydia as n
>>> n.start()
>>> myclock = n.Time()
>>> time_passed_since_myclock_creation = myclock.get()
>>> myclock.reset()
>>> time_passed_since_reset = myclock.get()
>>> n.close()
Authors
----------
Dominique Makowski
Dependencies
----------
- pygame
- time
"""
def __init__(self):
self.clock = builtin_time.clock()
self.pygame_clock = pygame.time.Clock()
def reset(self):
"""
Reset the clock of the Time object.
Parameters
----------
None
Returns
----------
None
Example
----------
>>> import neuropsydia as n
>>> n.start()
>>> time_passed_since_neuropsydia_loading = n.time.get()
>>> n.time.reset()
>>> time_passed_since_reset = n.time.get()
>>> n.close()
Authors
----------
Dominique Makowski
Dependencies
----------
- pygame
- time
"""
self.clock = builtin_time.clock()
self.pygame_clock = pygame.time.Clock()
def control(self, frequency=60):
"""
Control time. Must be placed in a while loop; on each pass it checks whether less than 1/frequency seconds (with the default frequency of 60, 1/60 s) have elapsed since the previous pass and, if so, waits out the remaining time, so that each iteration of the loop takes at least 1/frequency seconds to complete.
Parameters
----------
frequency = int, optional
The minimum frequency you want the loop to run at
Returns
----------
None
Example
----------
>>> import neuropsydia as n
>>> n.start()
>>> while n.time.get() < 5:
>>> n.time.control()
>>> print(n.time.get())
>>> n.close()
Authors
----------
Dominique Makowski
Dependencies
----------
- pygame
- time
"""
self.pygame_clock.tick_busy_loop(frequency)
def get(self, reset=True):
"""
Get time since last initialisation / reset.
Parameters
----------
reset = bool, optional
Should the clock be reset after returning time?
Returns
----------
float
Time passed in milliseconds.
Example
----------
>>> import neuropsydia as n
>>> n.start()
>>> time_passed_since_neuropsydia_loading = n.time.get()
>>> n.time.reset()
>>> time_passed_since_reset = n.time.get()
>>> n.close()
Authors
----------
Dominique Makowski
Dependencies
----------
- pygame
- time
"""
t = (builtin_time.clock()-self.clock)*1000
if reset is True:
self.reset()
return(t)
def wait(self, time_to_wait, unit="ms", frequency=60, round_by_frame=True, skip=None):
"""
Wait some time.
Parameters
----------
time_to_wait = int
Time to wait
unit = str
"min" for minutes, "s" for seconds, "ms" for milliseconds, or "frame" for a certain amount of frames (depending on the frequency parameter)
frequency = int
should be a multiple of your monitor's refresh rate
round by frame = bool
should the waiting time be rounded to match an exact number of frame / refresh cycles? (e.g., on a 60Hz monitor, 95ms will be rounded to 100, because the monitor is refreshed every 16.6667ms)
skip = str
Should there be a key to skip the waiting? Default to None.
Returns
----------
float
Actual time waited in milliseconds
Example
----------
>>> import neuropsydia as n
>>> n.start()
>>> n.write("let's wait 500ms", round_by_frame = False)
>>> n.refresh()
>>> wait_time = n.time.wait(520)
>>> n.newpage("white")
>>> n.write("I waited for " + str(wait_time) + "ms")
>>> n.refresh()
>>> wait_time = n.time.wait(520, round_by_frame = True)
>>> n.newpage("white")
>>> n.write("I waited for " + str(wait_time) + "ms")
>>> n.refresh()
>>> n.time.wait(3, unit = "s")
>>> n.close()
Authors
----------
Dominique Makowski
Dependencies
----------
- pygame
- time
"""
t0 = builtin_time.clock()
if unit == "min":
time_to_wait = time_to_wait * 60
unit = "s"
if unit == "s":
time_to_wait = time_to_wait * 1000
unit = "ms"
if unit == "ms":
if round_by_frame is True:
time_to_wait = round(time_to_wait / (1/frequency*1000))
time_to_wait = round(time_to_wait * (1/frequency*1000))
if unit == "frame":
time_to_wait = time_to_wait * (1/frequency*1000)
if skip is None:
pygame.time.delay(time_to_wait) # In milliseconds
else:
response(allow=skip, time_max=time_to_wait)
return((builtin_time.clock()-t0)*1000)
def now(self):
"""
Returns current (absolute) date and time.
Parameters
----------
None
Returns
----------
datetime
Current date and time.
Example
----------
>>> import neuropsydia as n
>>> n.start()
>>> n.time.now()
>>> n.close()
Authors
----------
Dominique Makowski
Dependencies
----------
- datetime
"""
return(datetime.datetime.now())
# Initialize a Time() object.
time = Time()
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def newpage(color_name="white", opacity=100, fade=False, fade_speed=60, fade_type="out", auto_refresh=True):
"""
Fill the background with a color.
Parameters
------------
color_name : str, tuple, optional
name of the color (see color() function), or an RGB tuple (e.g., (122,84,01)).
opacity : int, optional
opacity of the color (in percents).
fade : bool, optional
do you want a fade effect?
fade_speed : int, optional
frequency (speed) of the fading.
fade_type : str, optional
"out" or "in", fade out or fade in.
Example
----------
>>> import neuropsydia as n
>>> n.start()
>>> n.newpage("blue")
>>> n.refresh()
>>> n.time.wait(500)
>>> n.close()
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- pygame
- time
"""
if color_name is not None:
if fade is False:
if opacity == 100:
try:
screen.fill(color(color_name))
except:
print("NEUROPSYDIA ERROR: newpage(): wrong argument")
else:
opacity = int(opacity * 255 / 100)
color_name = color(color_name) + (opacity,)
mask = pygame.Surface((screen_width, screen_height), pygame.SRCALPHA) # per-pixel alpha
mask.fill(color_name) # notice the alpha value in the color
screen.blit(mask, (0, 0))
if auto_refresh is True:
refresh()
if fade is True:
original_color_name = color_name
clock = pygame.time.Clock()
for i in range(0, 40, 1):
clock.tick_busy_loop(fade_speed)
if fade_type == "out":
color_name = color(original_color_name) + (i,)
else:
color_name = color(original_color_name) + (255-i, )
mask = pygame.Surface((screen_width, screen_height), pygame.SRCALPHA) # per-pixel alpha
mask.fill(color_name) # notice the alpha value in the color
screen.blit(mask, (0, 0))
refresh()
screen.fill(color(original_color_name))
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def refresh():
"""
Refresh / flip the screen: actually display things on screen.
Example
----------
>>> import neuropsydia as n
>>> n.start()
>>> n.newpage("blue")
>>> n.refresh()
>>> n.time.wait(500)
>>> n.close()
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- pygame
"""
pygame.display.flip()
keys = {"normal" :{13: "ENTER",276: "LEFT",274: "DOWN",275: "RIGHT",
273: "UP", 8: pygame.K_BACKSPACE, 32: "SPACE"},
"shift" :{13: "ENTER",276: "LEFT",274: "DOWN",275: "RIGHT",
273: "UP", 8: pygame.K_BACKSPACE, 32: "SPACE"},
"altgr" :{13: "ENTER",276: "LEFT",274: "DOWN",275: "RIGHT",
273: "UP", 8: pygame.K_BACKSPACE, 32: "SPACE"},
"altgrshift" :{13: "ENTER",276: "LEFT",274: "DOWN",275: "RIGHT",
273: "UP", 8: pygame.K_BACKSPACE, 32: "SPACE"}}
def wait_for_input(time_max=None):
"""
Low level input checker.
Parameters
----------
time_max = int
time max in ms
Returns
----------
key
A key.
Example
----------
NA
Authors
----------
Dominique Makowski
Léo Dutriaux
Dependencies
----------
- pygame
- time
"""
if pygame.event.get_blocked(pygame.KEYDOWN) is True:
blocked = True
pygame.event.set_allowed(pygame.KEYDOWN)
else:
blocked = False
pygame.event.set_allowed(pygame.KEYDOWN)
modifier = ""
time_out = False
loop = True
if time_max is None:
while loop:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if pygame.key.get_mods() == 577:
modifier = "altgrshift"
if event.unicode != "" and event.key not in keys["altgrshift"].keys():
keys[modifier][event.key] = event.unicode
elif pygame.key.get_mods() == 1:
modifier = "shift"
if event.unicode != "" and event.key not in keys["shift"].keys():
keys[modifier][event.key] = event.unicode
elif pygame.key.get_mods() == 576:
modifier = "altgr"
if event.unicode != "" and event.key not in keys["altgr"].keys():
keys[modifier][event.key] = event.unicode
elif pygame.key.get_mods() == 0:
modifier = "normal"
if event.unicode != "" and event.key not in keys["normal"].keys():
keys[modifier][event.key] = event.unicode
else:
pass
if event.key != pygame.K_RSHIFT and event.key != pygame.K_LSHIFT and event.key != pygame.K_RALT:
loop = False
else:
local_time = Time()
while loop and local_time.get(reset=False) < time_max:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if pygame.key.get_mods() == 577:
modifier = "altgrshift"
if event.unicode != "" and event.key not in keys["altgrshift"].keys():
keys[modifier][event.key] = event.unicode
elif pygame.key.get_mods() == 1:
modifier = "shift"
if event.unicode != "" and event.key not in keys["shift"].keys():
keys[modifier][event.key] = event.unicode
elif pygame.key.get_mods() == 576:
modifier = "altgr"
if event.unicode != "" and event.key not in keys["altgr"].keys():
keys[modifier][event.key] = event.unicode
elif pygame.key.get_mods() == 0:
modifier = "normal"
if event.unicode != "" and event.key not in keys["normal"].keys():
keys[modifier][event.key] = event.unicode
else:
pass
if event.key != pygame.K_RSHIFT and event.key != pygame.K_LSHIFT and event.key != pygame.K_RALT:
loop = False
if local_time.get(reset=False) > time_max:
time_out = True
if blocked is True:
pygame.event.set_blocked(pygame.KEYDOWN)
if time_out == True:
return("Time_Max_Exceeded")
else:
try:
return(keys[modifier][event.key])
except KeyError:
return("")
def response(allow=None, enable_escape=True, time_max=None, get_RT=True):
"""
Get a (keyboard, for now) response.
Parameters
----------
allow : str or list
Keys to allow.
enable_escape : bool
Enable escape key to exit.
time_max : int
Maximum time to wait for a response (ms).
get_RT : bool
Return response time.
Returns
----------
str or tuple
returns a tuple when get_RT is set to True
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- pygame
"""
local_time = Time()
if allow is not None:
if not isinstance(allow, list):
allow = [allow]
while True:
pressed_key = wait_for_input(time_max=time_max)
if pressed_key == "Time_Max_Exceeded":
if get_RT is True:
return("Time_Max_Exceeded", local_time.get())
else:
return("Time_Max_Exceeded")
if pressed_key == pygame.K_ESCAPE:
if enable_escape is True:
if get_RT is True:
return("ESCAPE", local_time.get())
else:
return("ESCAPE")
elif allow is not None:
if pressed_key in allow:
if get_RT is True:
return(pressed_key, local_time.get())
else:
return(pressed_key)
else:
if get_RT is True:
return(pressed_key, local_time.get())
else:
return(pressed_key)
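# Illustrative usage sketch (not part of the original module): collecting a
# LEFT/RIGHT keypress with a 2 s deadline inside an experiment script
# (n.write() is the text helper documented elsewhere in neuropsydia).
#
#     import neuropsydia as n
#     n.start()
#     n.write("Press LEFT or RIGHT")
#     n.refresh()
#     answer, RT = n.response(allow=["LEFT", "RIGHT"], time_max=2000)
#     n.close()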
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
class Coordinates:
"""
A class object to convert from pygame's coordinates system to neuropsydia's and vice versa.
Its methods (functions) are:
- to_pygame()
- from_pygame()
Parameters
----------
None
Returns
----------
None
Example
----------
None
Authors
----------
Dominique Makowski
Dependencies
----------
- pygame
"""
def to_pygame(x=None, y=None, distance_x=None, distance_y=None):
"""
Convert coordinates from neuropsydia (-10:10) to pygame's system (in pixels).
Parameters
----------
x = float
[-10:10]
y = float
[-10:10]
distance_x = convert a horizontal distance
[-10:10]
distance_y = convert a vertical distance
[-10:10]
Returns
----------
NA
Example
----------
NA
Authors
----------
Dominique Makowski
Dependencies
----------
- pygame
"""
if x != None and y == None:
x = (x+10.0)/(10.0+10.0)*(screen_width-0.0)+0.0
return(int(x))
if x == None and y != None:
y = (-y+10.0)/(10.0+10.0)*(screen_height-0.0)+0.0
return(int(y))
if x != None and y != None:
x = (x+10.0)/(10.0+10.0)*(screen_width-0.0)+0.0
y = (-y+10.0)/(10.0+10.0)*(screen_height-0.0)+0.0
return(int(x),int(y))
if distance_x != None and distance_y is None:
distance_x = (distance_x)/(10.0+10.0)*(screen_width-0.0)+0.0
return(int(distance_x))
if distance_y != None and distance_x is None:
distance_y = (-distance_y)/(10.0+10.0)*(screen_height-0.0)+0.0
return(int(distance_y))
if distance_x != None and distance_y != None:
distance_x = (distance_x)/(10.0+10.0)*(screen_width-0.0)+0.0
distance_y = (-distance_y)/(10.0+10.0)*(screen_height-0.0)+0.0
return(int(distance_x), int(distance_y))
def from_pygame(x=None, y=None):
"""
Help incomplete, sorry.
Parameters
----------
NA
Returns
----------
NA
Example
----------
NA
Authors
----------
Dominique Makowski
Dependencies
----------
- pygame
"""
if x != None and y == None:
x =20*x/screen_width - 10
return(x)
if x == None and y != None:
y = -(20*y/screen_height) + 10
return(y)
if x != None and y != None:
x =20*x/screen_width - 10
y = -(20*y/screen_height) + 10
return(x, y)
def to_physical(distance_x=None, distance_y=None, monitor_diagonal=monitor_diagonal, unit="cm"):
"""
Help incomplete, sorry.
Parameters
----------
monitor_diagonal = int
in inches (24, 27, etc).
Returns
----------
NA
Example
----------
NA
Authors
----------
Dominique Makowski
Dependencies
----------
None
"""
if unit=="cm":
diagonal = monitor_diagonal*2.54
coef = np.sqrt(((screen_height*screen_height) + (screen_width*screen_width))/(diagonal*diagonal))
monitor_height = screen_height/coef
monitor_width = screen_width/coef
if distance_x != None and distance_y is None:
distance_x = (distance_x)/(10.0+10.0)*(monitor_width-0.0)+0.0
return(int(distance_x))
if distance_y != None and distance_x is None:
distance_y = (distance_y)/(10.0+10.0)*(monitor_height-0.0)+0.0
return(int(distance_y))
if distance_y != None and distance_x != None:
distance_x = (distance_x)/(10.0+10.0)*(monitor_width-0.0)+0.0
distance_y = (distance_y)/(10.0+10.0)*(monitor_height-0.0)+0.0
return(distance_x, distance_y)
def from_physical(distance_x=None, distance_y=None, monitor_diagonal=monitor_diagonal, unit="cm"):
"""
Help incomplete, sorry.
Parameters
----------
monitor_diagonal = int
in inches (24, 27, etc).
Returns
----------
NA
Example
----------
NA
Authors
----------
Dominique Makowski
Dependencies
----------
None
"""
if unit=="cm":
diagonal = monitor_diagonal*2.54
coef = np.sqrt(((screen_height*screen_height) + (screen_width*screen_width))/(diagonal*diagonal))
monitor_height = screen_height/coef
monitor_width = screen_width/coef
if distance_x != None and distance_y is None:
distance_x = (distance_x)*(10.0+10.0)/(monitor_width-0.0)+0.0
return(int(distance_x))
if distance_y != None and distance_x is None:
distance_y = (distance_y)*(10.0+10.0)/(monitor_height-0.0)+0.0
return(int(distance_y))
if distance_y != None and distance_x != None:
distance_x = (distance_x)/(10.0+10.0)*(monitor_width-0.0)+0.0
distance_y = (distance_y)/(10.0+10.0)*(monitor_height-0.0)+0.0
return(distance_x, distance_y)
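# Illustrative sketch (not in the original source): converting between
# neuropsydia's -10..10 coordinate system and pygame pixels. The methods are
# written without "self", so the class is used as a plain namespace.
#
#     x_px, y_px = Coordinates.to_pygame(x=0, y=0)    # centre of the screen
#     x_n = Coordinates.from_pygame(x=x_px)           # back to ~0
#     width_px = Coordinates.to_pygame(distance_x=5)  # a quarter of the screen width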
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
class Font_Cache_Init:
def __init__(self):
self.cache = {} #Initialize an empty cache
def get(self,font_path,size):
if not (font_path,int(size)) in self.cache: #if not in cache,
if os.path.exists(font_path): #if the path leads to a font,
self.cache[font_path,int(size)] = pygame.font.Font(font_path, int(size)) #load this font
else:
self.cache[font_path,int(size)] = pygame.font.SysFont(font_path, int(size)) #load a system font
return(self.cache[font_path,int(size)])
global Font
Font = Font_Cache_Init() #Create the font object that will update itself with the different loaded fonts
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
color_list = {
"white":(255,255,255),
"w":(255,255,255),
"black":(0,0,0),
"b":(0,0,0),
"grey":(128,128,128),
"g":(128,128,128),
"raw_red":(255,0,0),
"raw_green":(0,255,0 ),
"raw_blue":(0, 0, 255),
"red":(244,67,54),
"pink":(233,30,99),
"purple":(156,39,176),
"deeppurple":(103,58,183),
"indigo":(63,81,181),
"blue":(33,150,243),
"lightblue":(3,169,244),
"cyan":(0,188,212),
"teal":(0,150,136),
"green":(76,175,80),
"lightgreen":(139,195,74),
"lime":(205,220,57),
"yellow":(255,235,59),
"amber":(255,193,7),
"orange":(255,152,0),
"deeporange":(255,87,34),
"brown":(121,85,72),
"lightgrey":(220,220,220),
"darkgrey":(105,105,105),
"bluegrey":(96,125,139),
"pale_red":(239,154,154),
"pale_pink":(244,143,177),
"pale_purple":(206,147,216),
"pale_deeppurple":(179,157,219),
"pale_indigo":(159,168,218),
"pale_blue":(144,202,249),
"pale_light_blue":(129,212,250),
"pale_cyan":(128,222,234),
"pale_teal":(128,203,196),
"pale_green":(165,214,167),
"pale_lightgreen":(197,225,165),
"pale_lime":(230,238,156),
"pale_yellow":(255,245,157),
"pale_amber":(255,224,130),
"pale_orange":(255,204,128),
"pale_deeporange":(255,171,145),
"pale_brown":(188,170,164),
"blue_shade":[(204,229,255),(153,204,255),(102,178,222),(51,153,255),(0,128,255)],
"red_shade":[(255,204,204),(255,153,153),(255,102,102),(255,51,51),(255,0,0)],
"green_shade":[(204,255,204),(153,255,153),(102,255,102),(51,255,51),(0,255,0)],
"multi_shade":[(255,51,51),(255,51,255),(51,153,255),(51,255,51),(255,153,51)]
}
def color(color):
"""
Returns an RGB color tuple (or list) from its name.
Parameters
----------
color = str
one from the color_list list
Returns
----------
tuple or list
Example
----------
>>> import neuropsydia as n
>>> n.start()
>>> print(n.color_list)
>>> print(n.color("blue"))
>>> n.close()
Authors
----------
Dominique Makowski
Dependencies
----------
None
"""
if isinstance(color,str):
try:
return(color_list[color])
except:
print("NEUROPSYDIA WARNING: color() was used, however the argument " + str(color) + " was not detected and might cause errors.")
return(color)
else:
return(color)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def cursor(visible=True):
"""
Set the mouse cursor to visible or invisible.
Parameters
----------
visible = bool
True for visible, False for invisible.
Returns
----------
None
Example
----------
>>> import neuropsydia as n
>>> n.start()
>>> n.cursor(True)
>>> n.time.wait(2000)
>>> n.close()
Authors
----------
The pygame team
Dependencies
----------
- pygame 1.9.2
"""
pygame.mouse.set_visible(visible)
|
neuropsychology/Neuropsydia.py
|
neuropsydia/core.py
|
Python
|
mpl-2.0
| 28,818
|
[
"Amber"
] |
f591472c4170390efcbfd5262968d83c91f5a53ffe0a1eeb05665328b0cc68c5
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
This is the simplest Python GTK+3 LinkButton snippet.
See: http://python-gtk-3-tutorial.readthedocs.org/en/latest/introduction.html
"""
from gi.repository import Gtk as gtk
def main():
window = gtk.Window()
window.set_border_width(10)
button = gtk.LinkButton(uri="http://www.jdhp.org", label="Visit www.jdhp.org")
window.add(button)
# TO GET THE URI: button.get_uri()
# TO SET THE URI: button.set_uri("...")
window.connect("delete-event", gtk.main_quit) # ask to quit the application when the close button is clicked
window.show_all() # display the window
gtk.main() # GTK+ main loop
if __name__ == '__main__':
main()
|
jeremiedecock/snippets
|
python/pygtk/python_gtk3_pygobject/link_button.py
|
Python
|
mit
| 842
|
[
"VisIt"
] |
6f0ec4aa50f749214d836826c18ee60e51d1a4932daa71b9c9489954a37c32f8
|
""" """
# modes.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
import os
from collections import namedtuple
from mpf.system.timing import Timing, Timer
from mpf.system.tasks import DelayManager
from mpf.system.config import Config
RemoteMethod = namedtuple('RemoteMethod', 'method config_section kwargs',
verbose=False)
"""RemotedMethod is used by other modules that want to register a method to
be called on mode_start or mode_stop.
"""
class ModeController(object):
"""Parent class for the Mode Controller. There is one instance of this in
MPF and it's responsible for loading, unloading, and managing all game
modes.
"""
def __init__(self, machine):
self.machine = machine
self.log = logging.getLogger('ModeController')
self.active_modes = list()
self.mode_stop_count = 0
# The following two lists hold namedtuples of any remote components that
# need to be notified when a mode object is created and/or started.
self.loader_methods = list()
self.start_methods = list()
if 'modes' in self.machine.config:
self.machine.events.add_handler('init_phase_4',
self._load_modes)
def _load_modes(self):
#Loads the modes from the Modes: section of the machine configuration
#file.
for mode in self.machine.config['modes']:
self.machine.game_modes[mode] = self._load_mode(mode)
def _load_mode(self, mode_string):
"""Loads a mode, reads in its config, and creates the Mode object.
Args:
mode_string: String name of the mode you're loading. This is the name of
the mode's folder in your game's machine_files/modes folder.
"""
self.log.info('Processing mode: %s', mode_string)
mode_path = os.path.join(self.machine.machine_path,
self.machine.config['mediacontroller']['paths']['modes'], mode_string)
mode_config_file = os.path.join(self.machine.machine_path,
self.machine.config['mediacontroller']['paths']['modes'],
mode_string, 'config', mode_string + '.yaml')
config = Config.load_config_yaml(yaml_file=mode_config_file)
if 'code' in config['mode']:
import_str = ('modes.' + mode_string + '.code.' +
config['mode']['code'].split('.')[0])
i = __import__(import_str, fromlist=[''])
mode_object = getattr(i, config['mode']['code'].split('.')[1])(
self.machine, config, mode_string, mode_path)
else:
mode_object = Mode(self.machine, config, mode_string, mode_path)
return mode_object
def register_load_method(self, load_method, config_section_name=None,
**kwargs):
"""Used by system components, plugins, etc. to register themselves with
the Mode Controller for anything that they need a mode to do when it is
registered.
Args:
load_method: The method that will be called when this mode code
loads.
config_section_name: An optional string for the section of the
configuration file that will be passed to the load_method when
it's called.
**kwargs: Any additional keyword arguments specified will be passed
to the load_method.
Note that these methods will be called once, when the mode code is first
initialized.
"""
self.loader_methods.append(RemoteMethod(method=load_method,
config_section=config_section_name, kwargs=kwargs))
def register_start_method(self, start_method, config_section_name=None,
**kwargs):
"""Used by system components, plugins, etc. to register themselves with
the Mode Controller for anything that they need a mode to do when it starts.
Args:
start_method: The method that will be called when this mode
starts.
config_section_name: An optional string for the section of the
configuration file that will be passed to the start_method when
it's called.
**kwargs: Any additional keyword arguments specified will be passed
to the start_method.
Note that these methods will be called every single time this mode is
started.
"""
self.start_methods.append(RemoteMethod(method=start_method,
config_section=config_section_name, kwargs=kwargs))
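# Illustrative sketch (not part of the original module): a hypothetical
# plugin registering a start method, which will then be called with its own
# config section every time a mode whose config contains "my_plugin:" starts
# ("machine" stands for the machine controller that owns this ModeController).
#
#     def my_plugin_start(config, priority, mode, **kwargs):
#         ...  # set up; optionally return (stop_method, stop_args)
#
#     machine.modes.register_start_method(my_plugin_start,
#                                         config_section_name='my_plugin')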
def _active_change(self, mode, active):
# called when a mode goes active or inactive
if active:
self.active_modes.append(mode)
else:
self.active_modes.remove(mode)
# sort the active mode list by priority
self.active_modes.sort(key=lambda x: x.priority, reverse=True)
self.dump()
def dump(self):
"""Dumps the current status of the running modes to the log file."""
self.log.info('================ ACTIVE GAME MODES ===================')
for mode in self.active_modes:
if mode.active:
self.log.info('%s : %s', mode.name, mode.priority)
self.log.info('======================================================')
class Mode(object):
"""Parent class for in-game mode code."""
def __init__(self, machine, config, name, path):
self.machine = machine
self.config = config
self.name = name.lower()
self.path = path
self.log = logging.getLogger('Mode.' + name)
self.priority = 0
self._active = False
self.stop_methods = list()
self.start_callback = None
self.stop_callback = None
self.event_handlers = set()
if 'mode' in self.config:
self.configure_mode_settings(config['mode'])
for asset_manager in self.machine.asset_managers.values():
config_data = self.config.get(asset_manager.config_section, dict())
self.config[asset_manager.config_section] = (
asset_manager.register_assets(config=config_data,
mode_path=self.path))
# Call registered remote loader methods
for item in self.machine.modes.loader_methods:
if (item.config_section in self.config and
self.config[item.config_section]):
item.method(config=self.config[item.config_section],
mode_path=self.path,
**item.kwargs)
@property
def active(self):
return self._active
@active.setter
def active(self, active):
if self._active != active:
self._active = active
self.machine.modes._active_change(self, self._active)
def configure_mode_settings(self, config):
"""Processes this mode's configuration settings from a config
dictionary.
"""
if not ('priority' in config and type(config['priority']) is int):
config['priority'] = 0
if 'start_events' in config:
config['start_events'] = Config.string_to_list(
config['start_events'])
else:
config['start_events'] = list()
if 'stop_events' in config:
config['stop_events'] = Config.string_to_list(
config['stop_events'])
else:
config['stop_events'] = list()
# register mode start events
if 'start_events' in config:
for event in config['start_events']:
self.machine.events.add_handler(event, self.start)
self.config['mode'] = config
def start(self, priority=None, callback=None, **kwargs):
"""Starts this mode.
Args:
priority: Integer value of what you want this mode to run at. If you
don't specify one, it will use the "Mode: priority" setting from
this mode's configuration file.
**kwargs: Catch-all since this mode might start from events with
who-knows-what keyword arguments.
Warning: You can safely call this method, but do not override it in your
mode code. If you want to write your own mode code by subclassing Mode,
put whatever code you want to run when this mode starts in the
mode_start method which will be called automatically.
"""
if type(priority) is int:
self.priority = priority
else:
self.priority = self.config['mode']['priority']
self.log.info('Mode Start. Priority: %s', self.priority)
self.active = True
for item in self.machine.modes.start_methods:
if item.config_section in self.config:
self.stop_methods.append(
item.method(config=self.config[item.config_section],
priority=self.priority,
mode=self,
**item.kwargs))
self.machine.events.post('mode_' + self.name + '_started')
def stop(self, callback=None, **kwargs):
"""Stops this mode.
Args:
**kwargs: Catch-all since this mode might start from events with
who-knows-what keyword arguments.
Warning: You can safely call this method, but do not override it in your
mode code. If you want to write your own mode code by subclassing Mode,
put whatever code you want to run when this mode stops in the
mode_stop method which will be called automatically.
"""
self.log.debug('Mode Stop.')
self.priority = 0
self.active = False
for item in self.stop_methods:
try:
item[0](item[1])
except TypeError:
pass
self.stop_methods = list()
self.machine.events.post('mode_' + self.name + '_stopped')
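# Illustrative sketch (not part of the original file): per the start()/stop()
# docstrings above, custom mode code subclasses Mode and overrides mode_start
# and mode_stop rather than start()/stop() themselves.
#
#     class Tilt(Mode):
#         def mode_start(self, **kwargs):
#             self.log.info("Tilt started")
#
#         def mode_stop(self, **kwargs):
#             self.log.info("Tilt stopped")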
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
jabdoa2/mpf
|
mpf/media_controller/core/modes.py
|
Python
|
mit
| 11,399
|
[
"Brian"
] |
e048a5981037a38cf4c9e4ac41caaac8fc1f681924ad22840e8a24f210b1e0b5
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class BoundingBox(Model):
"""Bounding box that defines a region of an image.
All required parameters must be populated in order to send to Azure.
:param left: Required. Coordinate of the left boundary.
:type left: float
:param top: Required. Coordinate of the top boundary.
:type top: float
:param width: Required. Width.
:type width: float
:param height: Required. Height.
:type height: float
"""
_validation = {
'left': {'required': True},
'top': {'required': True},
'width': {'required': True},
'height': {'required': True},
}
_attribute_map = {
'left': {'key': 'left', 'type': 'float'},
'top': {'key': 'top', 'type': 'float'},
'width': {'key': 'width', 'type': 'float'},
'height': {'key': 'height', 'type': 'float'},
}
def __init__(self, *, left: float, top: float, width: float, height: float, **kwargs) -> None:
super(BoundingBox, self).__init__(**kwargs)
self.left = left
self.top = top
self.width = width
self.height = height
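# Illustrative sketch (not part of the generated code): the constructor takes
# keyword-only arguments, and all four are required.
#
#     box = BoundingBox(left=0.1, top=0.2, width=0.3, height=0.4)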
class CreateProjectOptions(Model):
"""Options used for createProject.
:param export_model_container_uri: The uri to the Azure Storage container
that will be used to store exported models.
:type export_model_container_uri: str
:param notification_queue_uri: The uri to the Azure Storage queue that
will be used to send project-related notifications. See <a
href="https://go.microsoft.com/fwlink/?linkid=2144149">Storage
notifications</a> documentation for setup and message format.
:type notification_queue_uri: str
"""
_attribute_map = {
'export_model_container_uri': {'key': 'exportModelContainerUri', 'type': 'str'},
'notification_queue_uri': {'key': 'notificationQueueUri', 'type': 'str'},
}
def __init__(self, *, export_model_container_uri: str=None, notification_queue_uri: str=None, **kwargs) -> None:
super(CreateProjectOptions, self).__init__(**kwargs)
self.export_model_container_uri = export_model_container_uri
self.notification_queue_uri = notification_queue_uri
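# Illustrative usage sketch (not part of the generated client): options for
# createProject that wire up export-model storage and a notification queue.
# Both URIs below are placeholder values, not real resources.
def _example_create_project_options() -> CreateProjectOptions:
    return CreateProjectOptions(
        export_model_container_uri="https://example.blob.core.windows.net/models",  # placeholder
        notification_queue_uri="https://example.queue.core.windows.net/notifications")  # placeholder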
class CustomBaseModelInfo(Model):
"""CustomBaseModelInfo.
All required parameters must be populated in order to send to Azure.
:param project_id: Required. Project Id of the previously trained project
to be used for current iteration's training.
:type project_id: str
:param iteration_id: Required. Iteration Id of the previously trained
project to be used for current iteration's training.
:type iteration_id: str
"""
_validation = {
'project_id': {'required': True},
'iteration_id': {'required': True},
}
_attribute_map = {
'project_id': {'key': 'projectId', 'type': 'str'},
'iteration_id': {'key': 'iterationId', 'type': 'str'},
}
def __init__(self, *, project_id: str, iteration_id: str, **kwargs) -> None:
super(CustomBaseModelInfo, self).__init__(**kwargs)
self.project_id = project_id
self.iteration_id = iteration_id
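# Illustrative usage sketch (not part of the generated client): pointing a new
# training run at a previously trained iteration for incremental training.
# Both ids below are placeholder GUIDs.
def _example_custom_base_model_info() -> CustomBaseModelInfo:
    return CustomBaseModelInfo(
        project_id="00000000-0000-0000-0000-000000000001",    # placeholder
        iteration_id="00000000-0000-0000-0000-000000000002")  # placeholder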
class CustomVisionError(Model):
"""CustomVisionError.
All required parameters must be populated in order to send to Azure.
:param code: Required. The error code. Possible values include: 'NoError',
'BadRequest', 'BadRequestExceededBatchSize', 'BadRequestNotSupported',
'BadRequestInvalidIds', 'BadRequestProjectName',
'BadRequestProjectNameNotUnique', 'BadRequestProjectDescription',
'BadRequestProjectUnknownDomain',
'BadRequestProjectUnknownClassification',
'BadRequestProjectUnsupportedDomainTypeChange',
'BadRequestProjectUnsupportedExportPlatform',
'BadRequestProjectImagePreprocessingSettings',
'BadRequestProjectDuplicated', 'BadRequestIterationName',
'BadRequestIterationNameNotUnique', 'BadRequestIterationDescription',
'BadRequestIterationIsNotTrained', 'BadRequestIterationValidationFailed',
'BadRequestWorkspaceCannotBeModified', 'BadRequestWorkspaceNotDeletable',
'BadRequestTagName', 'BadRequestTagNameNotUnique',
'BadRequestTagDescription', 'BadRequestTagType',
'BadRequestMultipleNegativeTag', 'BadRequestMultipleGeneralProductTag',
'BadRequestImageTags', 'BadRequestImageRegions',
'BadRequestNegativeAndRegularTagOnSameImage',
'BadRequestUnsupportedDomain', 'BadRequestRequiredParamIsNull',
'BadRequestIterationIsPublished', 'BadRequestInvalidPublishName',
'BadRequestInvalidPublishTarget', 'BadRequestUnpublishFailed',
'BadRequestIterationNotPublished', 'BadRequestSubscriptionApi',
'BadRequestExceedProjectLimit',
'BadRequestExceedIterationPerProjectLimit',
'BadRequestExceedTagPerProjectLimit', 'BadRequestExceedTagPerImageLimit',
'BadRequestExceededQuota', 'BadRequestCannotMigrateProjectWithName',
'BadRequestNotLimitedTrial', 'BadRequestImageBatch',
'BadRequestImageStream', 'BadRequestImageUrl', 'BadRequestImageFormat',
'BadRequestImageSizeBytes', 'BadRequestImageDimensions',
'BadRequestImageAspectRatio', 'BadRequestImageExceededCount',
'BadRequestTrainingNotNeeded',
'BadRequestTrainingNotNeededButTrainingPipelineUpdated',
'BadRequestTrainingValidationFailed',
'BadRequestClassificationTrainingValidationFailed',
'BadRequestMultiClassClassificationTrainingValidationFailed',
'BadRequestMultiLabelClassificationTrainingValidationFailed',
'BadRequestDetectionTrainingValidationFailed',
'BadRequestTrainingAlreadyInProgress',
'BadRequestDetectionTrainingNotAllowNegativeTag',
'BadRequestInvalidEmailAddress',
'BadRequestRetiredDomainNotSupportedForTraining',
'BadRequestDomainNotSupportedForAdvancedTraining',
'BadRequestExportPlatformNotSupportedForAdvancedTraining',
'BadRequestReservedBudgetInHoursNotEnoughForAdvancedTraining',
'BadRequestCustomBaseModelIterationStatusNotCompleted',
'BadRequestCustomBaseModelDomainNotCompatible',
'BadRequestCustomBaseModelArchitectureRetired',
'BadRequestExportValidationFailed', 'BadRequestExportAlreadyInProgress',
'BadRequestPredictionIdsMissing', 'BadRequestPredictionIdsExceededCount',
'BadRequestPredictionTagsExceededCount',
'BadRequestPredictionResultsExceededCount',
'BadRequestPredictionInvalidApplicationName',
'BadRequestPredictionInvalidQueryParameters',
'BadRequestInvalidImportToken', 'BadRequestExportWhileTraining',
'BadRequestImageMetadataKey', 'BadRequestImageMetadataValue',
'BadRequestOperationNotSupported', 'BadRequestInvalidArtifactUri',
'BadRequestCustomerManagedKeyRevoked', 'BadRequestInvalidUri',
'BadRequestInvalid', 'UnsupportedMediaType', 'Forbidden', 'ForbiddenUser',
'ForbiddenUserResource', 'ForbiddenUserSignupDisabled',
'ForbiddenUserSignupAllowanceExceeded', 'ForbiddenUserDoesNotExist',
'ForbiddenUserDisabled', 'ForbiddenUserInsufficientCapability',
'ForbiddenDRModeEnabled', 'ForbiddenInvalid', 'NotFound',
'NotFoundProject', 'NotFoundProjectDefaultIteration', 'NotFoundIteration',
'NotFoundIterationPerformance', 'NotFoundTag', 'NotFoundImage',
'NotFoundDomain', 'NotFoundApimSubscription', 'NotFoundInvalid',
'Conflict', 'ConflictInvalid', 'ErrorUnknown', 'ErrorIterationCopyFailed',
'ErrorPreparePerformanceMigrationFailed', 'ErrorProjectInvalidWorkspace',
'ErrorProjectInvalidPipelineConfiguration', 'ErrorProjectInvalidDomain',
'ErrorProjectTrainingRequestFailed', 'ErrorProjectImportRequestFailed',
'ErrorProjectExportRequestFailed', 'ErrorFeaturizationServiceUnavailable',
'ErrorFeaturizationQueueTimeout', 'ErrorFeaturizationInvalidFeaturizer',
'ErrorFeaturizationAugmentationUnavailable',
'ErrorFeaturizationUnrecognizedJob',
'ErrorFeaturizationAugmentationError', 'ErrorExporterInvalidPlatform',
'ErrorExporterInvalidFeaturizer', 'ErrorExporterInvalidClassifier',
'ErrorPredictionServiceUnavailable', 'ErrorPredictionModelNotFound',
'ErrorPredictionModelNotCached', 'ErrorPrediction',
'ErrorPredictionStorage', 'ErrorRegionProposal', 'ErrorUnknownBaseModel',
'ErrorServerTimeOut', 'ErrorInvalid'
:type code: str or
~azure.cognitiveservices.vision.customvision.training.models.CustomVisionErrorCodes
:param message: Required. A message explaining the error reported by the
service.
:type message: str
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, *, code, message: str, **kwargs) -> None:
super(CustomVisionError, self).__init__(**kwargs)
self.code = code
self.message = message
class CustomVisionErrorException(HttpOperationError):
"""Server responded with exception of type: 'CustomVisionError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(CustomVisionErrorException, self).__init__(deserialize, response, 'CustomVisionError', *args)
class Domain(Model):
"""Domains are used as the starting point for your project. Each domain is
optimized for specific types of images. Domains with 'compact' in their name
can be exported. For more information visit the <a
href="https://go.microsoft.com/fwlink/?linkid=2117014">domain
documentation</a>.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Domain id.
:vartype id: str
:ivar name: Name of the domain, describing the types of images used to
train it.
:vartype name: str
:ivar type: Domain type: Classification or ObjectDetection. Possible
values include: 'Classification', 'ObjectDetection'
:vartype type: str or
~azure.cognitiveservices.vision.customvision.training.models.DomainType
:ivar exportable: Indicating if the domain is exportable.
:vartype exportable: bool
:ivar enabled: Indicating if the domain is enabled.
:vartype enabled: bool
:ivar exportable_platforms: Platforms that the domain can be exported to.
:vartype exportable_platforms: list[str]
:ivar model_information: Model information.
:vartype model_information:
~azure.cognitiveservices.vision.customvision.training.models.ModelInformation
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'exportable': {'readonly': True},
'enabled': {'readonly': True},
'exportable_platforms': {'readonly': True},
'model_information': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'exportable': {'key': 'exportable', 'type': 'bool'},
'enabled': {'key': 'enabled', 'type': 'bool'},
'exportable_platforms': {'key': 'exportablePlatforms', 'type': '[str]'},
'model_information': {'key': 'modelInformation', 'type': 'ModelInformation'},
}
def __init__(self, **kwargs) -> None:
super(Domain, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.exportable = None
self.enabled = None
self.exportable_platforms = None
self.model_information = None
class Export(Model):
"""Export.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar platform: Platform of the export. Possible values include: 'CoreML',
'TensorFlow', 'DockerFile', 'ONNX', 'VAIDK', 'OpenVino'
:vartype platform: str or
~azure.cognitiveservices.vision.customvision.training.models.ExportPlatform
:ivar status: Status of the export. Possible values include: 'Exporting',
'Failed', 'Done'
:vartype status: str or
~azure.cognitiveservices.vision.customvision.training.models.ExportStatus
:ivar download_uri: URI used to download the model. If VNET feature is
enabled this will be a relative path to be used with GetArtifact,
otherwise this will be an absolute URI to the resource.
:vartype download_uri: str
:ivar flavor: Flavor of the export. These are specializations of the
export platform.
Docker platform has valid flavors: Linux, Windows, ARM.
Tensorflow platform has valid flavors: TensorFlowNormal, TensorFlowLite.
ONNX platform has valid flavors: ONNX10, ONNX12. Possible values include:
'Linux', 'Windows', 'ONNX10', 'ONNX12', 'ARM', 'TensorFlowNormal',
'TensorFlowLite'
:vartype flavor: str or
~azure.cognitiveservices.vision.customvision.training.models.ExportFlavor
:ivar newer_version_available: Indicates an updated version of the export
package is available and should be re-exported for the latest changes.
:vartype newer_version_available: bool
"""
_validation = {
'platform': {'readonly': True},
'status': {'readonly': True},
'download_uri': {'readonly': True},
'flavor': {'readonly': True},
'newer_version_available': {'readonly': True},
}
_attribute_map = {
'platform': {'key': 'platform', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'download_uri': {'key': 'downloadUri', 'type': 'str'},
'flavor': {'key': 'flavor', 'type': 'str'},
'newer_version_available': {'key': 'newerVersionAvailable', 'type': 'bool'},
}
def __init__(self, **kwargs) -> None:
super(Export, self).__init__(**kwargs)
self.platform = None
self.status = None
self.download_uri = None
self.flavor = None
self.newer_version_available = None
class Image(Model):
"""Image model to be sent as JSON.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Id of the image.
:vartype id: str
:ivar created: Date the image was created.
:vartype created: datetime
:ivar width: Width of the image.
:vartype width: int
:ivar height: Height of the image.
:vartype height: int
:ivar resized_image_uri: The URI to the (resized) image used for training.
If VNET feature is enabled this will be a relative path to be used with
GetArtifact, otherwise this will be an absolute URI to the resource.
:vartype resized_image_uri: str
:ivar thumbnail_uri: The URI to the thumbnail of the original image. If
VNET feature is enabled this will be a relative path to be used with
GetArtifact, otherwise this will be an absolute URI to the resource.
:vartype thumbnail_uri: str
:ivar original_image_uri: The URI to the original uploaded image. If VNET
feature is enabled this will be a relative path to be used with
GetArtifact, otherwise this will be an absolute URI to the resource.
:vartype original_image_uri: str
:ivar tags: Tags associated with this image.
:vartype tags:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageTag]
:ivar regions: Regions associated with this image.
:vartype regions:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageRegion]
:ivar metadata: Metadata associated with this image.
:vartype metadata: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'created': {'readonly': True},
'width': {'readonly': True},
'height': {'readonly': True},
'resized_image_uri': {'readonly': True},
'thumbnail_uri': {'readonly': True},
'original_image_uri': {'readonly': True},
'tags': {'readonly': True},
'regions': {'readonly': True},
'metadata': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'created': {'key': 'created', 'type': 'iso-8601'},
'width': {'key': 'width', 'type': 'int'},
'height': {'key': 'height', 'type': 'int'},
'resized_image_uri': {'key': 'resizedImageUri', 'type': 'str'},
'thumbnail_uri': {'key': 'thumbnailUri', 'type': 'str'},
'original_image_uri': {'key': 'originalImageUri', 'type': 'str'},
'tags': {'key': 'tags', 'type': '[ImageTag]'},
'regions': {'key': 'regions', 'type': '[ImageRegion]'},
'metadata': {'key': 'metadata', 'type': '{str}'},
}
def __init__(self, **kwargs) -> None:
super(Image, self).__init__(**kwargs)
self.id = None
self.created = None
self.width = None
self.height = None
self.resized_image_uri = None
self.thumbnail_uri = None
self.original_image_uri = None
self.tags = None
self.regions = None
self.metadata = None
class ImageCreateResult(Model):
"""ImageCreateResult.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar source_url: Source URL of the image.
:vartype source_url: str
:ivar status: Status of the image creation. Possible values include: 'OK',
'OKDuplicate', 'ErrorSource', 'ErrorImageFormat', 'ErrorImageSize',
'ErrorStorage', 'ErrorLimitExceed', 'ErrorTagLimitExceed',
'ErrorRegionLimitExceed', 'ErrorUnknown',
'ErrorNegativeAndRegularTagOnSameImage', 'ErrorImageDimensions',
'ErrorInvalidTag'
:vartype status: str or
~azure.cognitiveservices.vision.customvision.training.models.ImageCreateStatus
:ivar image: The image.
:vartype image:
~azure.cognitiveservices.vision.customvision.training.models.Image
"""
_validation = {
'source_url': {'readonly': True},
'status': {'readonly': True},
'image': {'readonly': True},
}
_attribute_map = {
'source_url': {'key': 'sourceUrl', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'image': {'key': 'image', 'type': 'Image'},
}
def __init__(self, **kwargs) -> None:
super(ImageCreateResult, self).__init__(**kwargs)
self.source_url = None
self.status = None
self.image = None
class ImageCreateSummary(Model):
"""ImageCreateSummary.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar is_batch_successful: True if all of the images in the batch were
created successfully, otherwise false.
:vartype is_batch_successful: bool
:ivar images: List of the image creation results.
:vartype images:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageCreateResult]
"""
_validation = {
'is_batch_successful': {'readonly': True},
'images': {'readonly': True},
}
_attribute_map = {
'is_batch_successful': {'key': 'isBatchSuccessful', 'type': 'bool'},
'images': {'key': 'images', 'type': '[ImageCreateResult]'},
}
def __init__(self, **kwargs) -> None:
super(ImageCreateSummary, self).__init__(**kwargs)
self.is_batch_successful = None
self.images = None
class ImageFileCreateBatch(Model):
"""ImageFileCreateBatch.
:param images:
:type images:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageFileCreateEntry]
:param tag_ids:
:type tag_ids: list[str]
:param metadata: The metadata of image. Limited to 10 key-value pairs per
image. The length of key is limited to 128. The length of value is limited
to 256.
:type metadata: dict[str, str]
"""
_attribute_map = {
'images': {'key': 'images', 'type': '[ImageFileCreateEntry]'},
'tag_ids': {'key': 'tagIds', 'type': '[str]'},
'metadata': {'key': 'metadata', 'type': '{str}'},
}
def __init__(self, *, images=None, tag_ids=None, metadata=None, **kwargs) -> None:
super(ImageFileCreateBatch, self).__init__(**kwargs)
self.images = images
self.tag_ids = tag_ids
self.metadata = metadata
class ImageFileCreateEntry(Model):
"""ImageFileCreateEntry.
:param name:
:type name: str
:param contents:
:type contents: bytearray
:param tag_ids:
:type tag_ids: list[str]
:param regions:
:type regions:
list[~azure.cognitiveservices.vision.customvision.training.models.Region]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'contents': {'key': 'contents', 'type': 'bytearray'},
'tag_ids': {'key': 'tagIds', 'type': '[str]'},
'regions': {'key': 'regions', 'type': '[Region]'},
}
def __init__(self, *, name: str=None, contents: bytearray=None, tag_ids=None, regions=None, **kwargs) -> None:
super(ImageFileCreateEntry, self).__init__(**kwargs)
self.name = name
self.contents = contents
self.tag_ids = tag_ids
self.regions = regions
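# Illustrative usage sketch (not part of the generated client): building a
# batch of image files for upload. The file name, bytes and tag id are
# placeholder values; a real call would read actual image bytes from disk.
def _example_image_file_batch() -> ImageFileCreateBatch:
    entry = ImageFileCreateEntry(
        name="sample.jpg",                    # placeholder file name
        contents=bytearray(b"\xff\xd8\xff"),  # placeholder bytes, not a real JPEG
        tag_ids=["00000000-0000-0000-0000-000000000003"])  # placeholder tag id
    return ImageFileCreateBatch(
        images=[entry],
        metadata={"source": "example"})  # up to 10 key/value pairs per image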
class ImageIdCreateBatch(Model):
"""ImageIdCreateBatch.
:param images:
:type images:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageIdCreateEntry]
:param tag_ids:
:type tag_ids: list[str]
:param metadata: The metadata of image. Limited to 10 key-value pairs per
image. The length of key is limited to 128. The length of value is limited
to 256.
:type metadata: dict[str, str]
"""
_attribute_map = {
'images': {'key': 'images', 'type': '[ImageIdCreateEntry]'},
'tag_ids': {'key': 'tagIds', 'type': '[str]'},
'metadata': {'key': 'metadata', 'type': '{str}'},
}
def __init__(self, *, images=None, tag_ids=None, metadata=None, **kwargs) -> None:
super(ImageIdCreateBatch, self).__init__(**kwargs)
self.images = images
self.tag_ids = tag_ids
self.metadata = metadata
class ImageIdCreateEntry(Model):
"""ImageIdCreateEntry.
:param id: Id of the image.
:type id: str
:param tag_ids:
:type tag_ids: list[str]
:param regions:
:type regions:
list[~azure.cognitiveservices.vision.customvision.training.models.Region]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'tag_ids': {'key': 'tagIds', 'type': '[str]'},
'regions': {'key': 'regions', 'type': '[Region]'},
}
def __init__(self, *, id: str=None, tag_ids=None, regions=None, **kwargs) -> None:
super(ImageIdCreateEntry, self).__init__(**kwargs)
self.id = id
self.tag_ids = tag_ids
self.regions = regions
class ImageMetadataUpdateEntry(Model):
"""Entry associating a metadata to an image.
:param image_id: Id of the image.
:type image_id: str
:param status: Status of the metadata update. Possible values include:
'OK', 'ErrorImageNotFound', 'ErrorLimitExceed', 'ErrorUnknown'
:type status: str or
~azure.cognitiveservices.vision.customvision.training.models.ImageMetadataUpdateStatus
:param metadata: Metadata of the image.
:type metadata: dict[str, str]
"""
_attribute_map = {
'image_id': {'key': 'imageId', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': '{str}'},
}
def __init__(self, *, image_id: str=None, status=None, metadata=None, **kwargs) -> None:
super(ImageMetadataUpdateEntry, self).__init__(**kwargs)
self.image_id = image_id
self.status = status
self.metadata = metadata
class ImageMetadataUpdateSummary(Model):
"""ImageMetadataUpdateSummary.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar is_batch_successful:
:vartype is_batch_successful: bool
:ivar images:
:vartype images:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageMetadataUpdateEntry]
"""
_validation = {
'is_batch_successful': {'readonly': True},
'images': {'readonly': True},
}
_attribute_map = {
'is_batch_successful': {'key': 'isBatchSuccessful', 'type': 'bool'},
'images': {'key': 'images', 'type': '[ImageMetadataUpdateEntry]'},
}
def __init__(self, **kwargs) -> None:
super(ImageMetadataUpdateSummary, self).__init__(**kwargs)
self.is_batch_successful = None
self.images = None
class ImagePerformance(Model):
"""Image performance model.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar predictions:
:vartype predictions:
list[~azure.cognitiveservices.vision.customvision.training.models.Prediction]
:ivar id: Id of the image.
:vartype id: str
:ivar created: Date the image was created.
:vartype created: datetime
:ivar width: Width of the image.
:vartype width: int
:ivar height: Height of the image.
:vartype height: int
:ivar image_uri: The URI to the image used for training. If VNET feature
is enabled this will be a relative path to be used with GetArtifact,
otherwise this will be an absolute URI to the resource.
:vartype image_uri: str
:ivar thumbnail_uri: The URI to the thumbnail of the original image. If
VNET feature is enabled this will be a relative path to be used with
GetArtifact, otherwise this will be an absolute URI to the resource.
:vartype thumbnail_uri: str
:ivar tags: Tags associated with this image.
:vartype tags:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageTag]
:ivar regions: Regions associated with this image.
:vartype regions:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageRegion]
"""
_validation = {
'predictions': {'readonly': True},
'id': {'readonly': True},
'created': {'readonly': True},
'width': {'readonly': True},
'height': {'readonly': True},
'image_uri': {'readonly': True},
'thumbnail_uri': {'readonly': True},
'tags': {'readonly': True},
'regions': {'readonly': True},
}
_attribute_map = {
'predictions': {'key': 'predictions', 'type': '[Prediction]'},
'id': {'key': 'id', 'type': 'str'},
'created': {'key': 'created', 'type': 'iso-8601'},
'width': {'key': 'width', 'type': 'int'},
'height': {'key': 'height', 'type': 'int'},
'image_uri': {'key': 'imageUri', 'type': 'str'},
'thumbnail_uri': {'key': 'thumbnailUri', 'type': 'str'},
'tags': {'key': 'tags', 'type': '[ImageTag]'},
'regions': {'key': 'regions', 'type': '[ImageRegion]'},
}
def __init__(self, **kwargs) -> None:
super(ImagePerformance, self).__init__(**kwargs)
self.predictions = None
self.id = None
self.created = None
self.width = None
self.height = None
self.image_uri = None
self.thumbnail_uri = None
self.tags = None
self.regions = None
class ImagePrediction(Model):
"""Result of an image prediction request.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Prediction Id.
:vartype id: str
:ivar project: Project Id.
:vartype project: str
:ivar iteration: Iteration Id.
:vartype iteration: str
:ivar created: Date this prediction was created.
:vartype created: datetime
:ivar predictions: List of predictions.
:vartype predictions:
list[~azure.cognitiveservices.vision.customvision.training.models.Prediction]
"""
_validation = {
'id': {'readonly': True},
'project': {'readonly': True},
'iteration': {'readonly': True},
'created': {'readonly': True},
'predictions': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'project': {'key': 'project', 'type': 'str'},
'iteration': {'key': 'iteration', 'type': 'str'},
'created': {'key': 'created', 'type': 'iso-8601'},
'predictions': {'key': 'predictions', 'type': '[Prediction]'},
}
def __init__(self, **kwargs) -> None:
super(ImagePrediction, self).__init__(**kwargs)
self.id = None
self.project = None
self.iteration = None
self.created = None
self.predictions = None
class ImageProcessingSettings(Model):
"""Represents image preprocessing settings used by image augmentation.
:param augmentation_methods: Gets or sets enabled image transforms. The
key corresponds to the transform name. If the value is set to true, the
corresponding transform is enabled; otherwise it will not be used.
Augmentation will be uniformly distributed among enabled transforms.
:type augmentation_methods: dict[str, bool]
"""
_attribute_map = {
'augmentation_methods': {'key': 'augmentationMethods', 'type': '{bool}'},
}
def __init__(self, *, augmentation_methods=None, **kwargs) -> None:
super(ImageProcessingSettings, self).__init__(**kwargs)
self.augmentation_methods = augmentation_methods
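# Illustrative usage sketch (not part of the generated client): enabling
# augmentation transforms by name. The transform names below are assumptions
# for illustration only; consult the service documentation for the names it
# actually accepts.
def _example_image_processing_settings() -> ImageProcessingSettings:
    return ImageProcessingSettings(
        augmentation_methods={"rotation": True, "scaling": False})  # assumed names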
class ImageRegion(Model):
"""ImageRegion.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar region_id:
:vartype region_id: str
:ivar tag_name:
:vartype tag_name: str
:ivar created:
:vartype created: datetime
:param tag_id: Required. Id of the tag associated with this region.
:type tag_id: str
:param left: Required. Coordinate of the left boundary.
:type left: float
:param top: Required. Coordinate of the top boundary.
:type top: float
:param width: Required. Width.
:type width: float
:param height: Required. Height.
:type height: float
"""
_validation = {
'region_id': {'readonly': True},
'tag_name': {'readonly': True},
'created': {'readonly': True},
'tag_id': {'required': True},
'left': {'required': True},
'top': {'required': True},
'width': {'required': True},
'height': {'required': True},
}
_attribute_map = {
'region_id': {'key': 'regionId', 'type': 'str'},
'tag_name': {'key': 'tagName', 'type': 'str'},
'created': {'key': 'created', 'type': 'iso-8601'},
'tag_id': {'key': 'tagId', 'type': 'str'},
'left': {'key': 'left', 'type': 'float'},
'top': {'key': 'top', 'type': 'float'},
'width': {'key': 'width', 'type': 'float'},
'height': {'key': 'height', 'type': 'float'},
}
def __init__(self, *, tag_id: str, left: float, top: float, width: float, height: float, **kwargs) -> None:
super(ImageRegion, self).__init__(**kwargs)
self.region_id = None
self.tag_name = None
self.created = None
self.tag_id = tag_id
self.left = left
self.top = top
self.width = width
self.height = height
class ImageRegionCreateBatch(Model):
"""Batch of image region information to create.
:param regions:
:type regions:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageRegionCreateEntry]
"""
_attribute_map = {
'regions': {'key': 'regions', 'type': '[ImageRegionCreateEntry]'},
}
def __init__(self, *, regions=None, **kwargs) -> None:
super(ImageRegionCreateBatch, self).__init__(**kwargs)
self.regions = regions
class ImageRegionCreateEntry(Model):
"""Entry associating a region to an image.
All required parameters must be populated in order to send to Azure.
:param image_id: Required. Id of the image.
:type image_id: str
:param tag_id: Required. Id of the tag associated with this region.
:type tag_id: str
:param left: Required. Coordinate of the left boundary.
:type left: float
:param top: Required. Coordinate of the top boundary.
:type top: float
:param width: Required. Width.
:type width: float
:param height: Required. Height.
:type height: float
"""
_validation = {
'image_id': {'required': True},
'tag_id': {'required': True},
'left': {'required': True},
'top': {'required': True},
'width': {'required': True},
'height': {'required': True},
}
_attribute_map = {
'image_id': {'key': 'imageId', 'type': 'str'},
'tag_id': {'key': 'tagId', 'type': 'str'},
'left': {'key': 'left', 'type': 'float'},
'top': {'key': 'top', 'type': 'float'},
'width': {'key': 'width', 'type': 'float'},
'height': {'key': 'height', 'type': 'float'},
}
def __init__(self, *, image_id: str, tag_id: str, left: float, top: float, width: float, height: float, **kwargs) -> None:
super(ImageRegionCreateEntry, self).__init__(**kwargs)
self.image_id = image_id
self.tag_id = tag_id
self.left = left
self.top = top
self.width = width
self.height = height
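# Illustrative usage sketch (not part of the generated client): associating a
# tagged region with an already-uploaded image. Ids are placeholder GUIDs and
# the coordinates are arbitrary fractional values.
def _example_image_region_create_entry() -> ImageRegionCreateEntry:
    return ImageRegionCreateEntry(
        image_id="00000000-0000-0000-0000-000000000004",  # placeholder
        tag_id="00000000-0000-0000-0000-000000000005",    # placeholder
        left=0.1, top=0.1, width=0.3, height=0.3)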
class ImageRegionCreateResult(Model):
"""ImageRegionCreateResult.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar image_id:
:vartype image_id: str
:ivar region_id:
:vartype region_id: str
:ivar tag_name:
:vartype tag_name: str
:ivar created:
:vartype created: datetime
:param tag_id: Required. Id of the tag associated with this region.
:type tag_id: str
:param left: Required. Coordinate of the left boundary.
:type left: float
:param top: Required. Coordinate of the top boundary.
:type top: float
:param width: Required. Width.
:type width: float
:param height: Required. Height.
:type height: float
"""
_validation = {
'image_id': {'readonly': True},
'region_id': {'readonly': True},
'tag_name': {'readonly': True},
'created': {'readonly': True},
'tag_id': {'required': True},
'left': {'required': True},
'top': {'required': True},
'width': {'required': True},
'height': {'required': True},
}
_attribute_map = {
'image_id': {'key': 'imageId', 'type': 'str'},
'region_id': {'key': 'regionId', 'type': 'str'},
'tag_name': {'key': 'tagName', 'type': 'str'},
'created': {'key': 'created', 'type': 'iso-8601'},
'tag_id': {'key': 'tagId', 'type': 'str'},
'left': {'key': 'left', 'type': 'float'},
'top': {'key': 'top', 'type': 'float'},
'width': {'key': 'width', 'type': 'float'},
'height': {'key': 'height', 'type': 'float'},
}
def __init__(self, *, tag_id: str, left: float, top: float, width: float, height: float, **kwargs) -> None:
super(ImageRegionCreateResult, self).__init__(**kwargs)
self.image_id = None
self.region_id = None
self.tag_name = None
self.created = None
self.tag_id = tag_id
self.left = left
self.top = top
self.width = width
self.height = height
class ImageRegionCreateSummary(Model):
"""ImageRegionCreateSummary.
:param created:
:type created:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageRegionCreateResult]
:param duplicated:
:type duplicated:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageRegionCreateEntry]
:param exceeded:
:type exceeded:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageRegionCreateEntry]
"""
_attribute_map = {
'created': {'key': 'created', 'type': '[ImageRegionCreateResult]'},
'duplicated': {'key': 'duplicated', 'type': '[ImageRegionCreateEntry]'},
'exceeded': {'key': 'exceeded', 'type': '[ImageRegionCreateEntry]'},
}
def __init__(self, *, created=None, duplicated=None, exceeded=None, **kwargs) -> None:
super(ImageRegionCreateSummary, self).__init__(**kwargs)
self.created = created
self.duplicated = duplicated
self.exceeded = exceeded
class ImageRegionProposal(Model):
"""ImageRegionProposal.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar project_id:
:vartype project_id: str
:ivar image_id:
:vartype image_id: str
:ivar proposals:
:vartype proposals:
list[~azure.cognitiveservices.vision.customvision.training.models.RegionProposal]
"""
_validation = {
'project_id': {'readonly': True},
'image_id': {'readonly': True},
'proposals': {'readonly': True},
}
_attribute_map = {
'project_id': {'key': 'projectId', 'type': 'str'},
'image_id': {'key': 'imageId', 'type': 'str'},
'proposals': {'key': 'proposals', 'type': '[RegionProposal]'},
}
def __init__(self, **kwargs) -> None:
super(ImageRegionProposal, self).__init__(**kwargs)
self.project_id = None
self.image_id = None
self.proposals = None
class ImageTag(Model):
"""ImageTag.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar tag_id:
:vartype tag_id: str
:ivar tag_name:
:vartype tag_name: str
:ivar created:
:vartype created: datetime
"""
_validation = {
'tag_id': {'readonly': True},
'tag_name': {'readonly': True},
'created': {'readonly': True},
}
_attribute_map = {
'tag_id': {'key': 'tagId', 'type': 'str'},
'tag_name': {'key': 'tagName', 'type': 'str'},
'created': {'key': 'created', 'type': 'iso-8601'},
}
def __init__(self, **kwargs) -> None:
super(ImageTag, self).__init__(**kwargs)
self.tag_id = None
self.tag_name = None
self.created = None
class ImageTagCreateBatch(Model):
"""Batch of image tags.
:param tags: Image Tag entries to include in this batch.
:type tags:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageTagCreateEntry]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '[ImageTagCreateEntry]'},
}
def __init__(self, *, tags=None, **kwargs) -> None:
super(ImageTagCreateBatch, self).__init__(**kwargs)
self.tags = tags
class ImageTagCreateEntry(Model):
"""Entry associating a tag to an image.
:param image_id: Id of the image.
:type image_id: str
:param tag_id: Id of the tag.
:type tag_id: str
"""
_attribute_map = {
'image_id': {'key': 'imageId', 'type': 'str'},
'tag_id': {'key': 'tagId', 'type': 'str'},
}
def __init__(self, *, image_id: str=None, tag_id: str=None, **kwargs) -> None:
super(ImageTagCreateEntry, self).__init__(**kwargs)
self.image_id = image_id
self.tag_id = tag_id
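# Illustrative usage sketch (not part of the generated client): tagging an
# existing image by pairing its id with a tag id. Both ids are placeholders.
def _example_image_tag_batch() -> ImageTagCreateBatch:
    entry = ImageTagCreateEntry(
        image_id="00000000-0000-0000-0000-000000000006",  # placeholder
        tag_id="00000000-0000-0000-0000-000000000007")    # placeholder
    return ImageTagCreateBatch(tags=[entry])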
class ImageTagCreateSummary(Model):
"""ImageTagCreateSummary.
:param created:
:type created:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageTagCreateEntry]
:param duplicated:
:type duplicated:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageTagCreateEntry]
:param exceeded:
:type exceeded:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageTagCreateEntry]
"""
_attribute_map = {
'created': {'key': 'created', 'type': '[ImageTagCreateEntry]'},
'duplicated': {'key': 'duplicated', 'type': '[ImageTagCreateEntry]'},
'exceeded': {'key': 'exceeded', 'type': '[ImageTagCreateEntry]'},
}
def __init__(self, *, created=None, duplicated=None, exceeded=None, **kwargs) -> None:
super(ImageTagCreateSummary, self).__init__(**kwargs)
self.created = created
self.duplicated = duplicated
self.exceeded = exceeded
class ImageUrl(Model):
"""Image url.
All required parameters must be populated in order to send to Azure.
:param url: Required. Url of the image.
:type url: str
"""
_validation = {
'url': {'required': True},
}
_attribute_map = {
'url': {'key': 'url', 'type': 'str'},
}
def __init__(self, *, url: str, **kwargs) -> None:
super(ImageUrl, self).__init__(**kwargs)
self.url = url
class ImageUrlCreateBatch(Model):
"""ImageUrlCreateBatch.
:param images:
:type images:
list[~azure.cognitiveservices.vision.customvision.training.models.ImageUrlCreateEntry]
:param tag_ids:
:type tag_ids: list[str]
:param metadata: The metadata of image. Limited to 10 key-value pairs per
image. The length of key is limited to 128. The length of value is limited
to 256.
:type metadata: dict[str, str]
"""
_attribute_map = {
'images': {'key': 'images', 'type': '[ImageUrlCreateEntry]'},
'tag_ids': {'key': 'tagIds', 'type': '[str]'},
'metadata': {'key': 'metadata', 'type': '{str}'},
}
def __init__(self, *, images=None, tag_ids=None, metadata=None, **kwargs) -> None:
super(ImageUrlCreateBatch, self).__init__(**kwargs)
self.images = images
self.tag_ids = tag_ids
self.metadata = metadata
class ImageUrlCreateEntry(Model):
"""ImageUrlCreateEntry.
All required parameters must be populated in order to send to Azure.
:param url: Required. Url of the image.
:type url: str
:param tag_ids:
:type tag_ids: list[str]
:param regions:
:type regions:
list[~azure.cognitiveservices.vision.customvision.training.models.Region]
"""
_validation = {
'url': {'required': True},
}
_attribute_map = {
'url': {'key': 'url', 'type': 'str'},
'tag_ids': {'key': 'tagIds', 'type': '[str]'},
'regions': {'key': 'regions', 'type': '[Region]'},
}
def __init__(self, *, url: str, tag_ids=None, regions=None, **kwargs) -> None:
super(ImageUrlCreateEntry, self).__init__(**kwargs)
self.url = url
self.tag_ids = tag_ids
self.regions = regions
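# Illustrative usage sketch (not part of the generated client): queueing images
# for ingestion by URL. The URL and tag id are placeholder values.
def _example_image_url_batch() -> ImageUrlCreateBatch:
    entry = ImageUrlCreateEntry(
        url="https://example.com/images/sample.jpg",       # placeholder
        tag_ids=["00000000-0000-0000-0000-000000000008"])  # placeholder
    return ImageUrlCreateBatch(
        images=[entry],
        metadata={"source": "example"})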
class Iteration(Model):
"""Iteration model to be sent over JSON.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Gets the id of the iteration.
:vartype id: str
:param name: Required. Gets or sets the name of the iteration.
:type name: str
:ivar status: Gets the current iteration status.
:vartype status: str
:ivar created: Gets the time this iteration was completed.
:vartype created: datetime
:ivar last_modified: Gets the time this iteration was last modified.
:vartype last_modified: datetime
:ivar trained_at: Gets the time this iteration was trained.
:vartype trained_at: datetime
:ivar project_id: Gets the project id of the iteration.
:vartype project_id: str
:ivar exportable: Whether the iteration can be exported to another format
for download.
:vartype exportable: bool
:ivar exportable_to: A set of platforms this iteration can export to.
:vartype exportable_to: list[str]
:ivar domain_id: Gets or sets a guid of the domain the iteration has been
trained on.
:vartype domain_id: str
:ivar classification_type: Gets the classification type of the project.
Possible values include: 'Multiclass', 'Multilabel'
:vartype classification_type: str or
~azure.cognitiveservices.vision.customvision.training.models.Classifier
:ivar training_type: Gets the training type of the iteration. Possible
values include: 'Regular', 'Advanced'
:vartype training_type: str or
~azure.cognitiveservices.vision.customvision.training.models.TrainingType
:ivar reserved_budget_in_hours: Gets the reserved advanced training budget
for the iteration.
:vartype reserved_budget_in_hours: int
:ivar training_time_in_minutes: Gets the training time for the iteration.
:vartype training_time_in_minutes: int
:ivar publish_name: Name of the published model.
:vartype publish_name: str
:ivar original_publish_resource_id: Resource Provider Id this iteration
was originally published to.
:vartype original_publish_resource_id: str
:ivar custom_base_model_info: Information of the previously trained
iteration which provides the base model for current iteration's training.
Default value of null specifies that no previously trained iteration will
be used for incremental learning.
:vartype custom_base_model_info:
~azure.cognitiveservices.vision.customvision.training.models.CustomBaseModelInfo
:ivar training_error_details: Training error details, when training fails.
Value is null when training succeeds.
:vartype training_error_details: str
"""
_validation = {
'id': {'readonly': True},
'name': {'required': True},
'status': {'readonly': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
'trained_at': {'readonly': True},
'project_id': {'readonly': True},
'exportable': {'readonly': True},
'exportable_to': {'readonly': True},
'domain_id': {'readonly': True},
'classification_type': {'readonly': True},
'training_type': {'readonly': True},
'reserved_budget_in_hours': {'readonly': True},
'training_time_in_minutes': {'readonly': True},
'publish_name': {'readonly': True},
'original_publish_resource_id': {'readonly': True},
'custom_base_model_info': {'readonly': True},
'training_error_details': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'created': {'key': 'created', 'type': 'iso-8601'},
'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
'trained_at': {'key': 'trainedAt', 'type': 'iso-8601'},
'project_id': {'key': 'projectId', 'type': 'str'},
'exportable': {'key': 'exportable', 'type': 'bool'},
'exportable_to': {'key': 'exportableTo', 'type': '[str]'},
'domain_id': {'key': 'domainId', 'type': 'str'},
'classification_type': {'key': 'classificationType', 'type': 'str'},
'training_type': {'key': 'trainingType', 'type': 'str'},
'reserved_budget_in_hours': {'key': 'reservedBudgetInHours', 'type': 'int'},
'training_time_in_minutes': {'key': 'trainingTimeInMinutes', 'type': 'int'},
'publish_name': {'key': 'publishName', 'type': 'str'},
'original_publish_resource_id': {'key': 'originalPublishResourceId', 'type': 'str'},
'custom_base_model_info': {'key': 'customBaseModelInfo', 'type': 'CustomBaseModelInfo'},
'training_error_details': {'key': 'trainingErrorDetails', 'type': 'str'},
}
def __init__(self, *, name: str, **kwargs) -> None:
super(Iteration, self).__init__(**kwargs)
self.id = None
self.name = name
self.status = None
self.created = None
self.last_modified = None
self.trained_at = None
self.project_id = None
self.exportable = None
self.exportable_to = None
self.domain_id = None
self.classification_type = None
self.training_type = None
self.reserved_budget_in_hours = None
self.training_time_in_minutes = None
self.publish_name = None
self.original_publish_resource_id = None
self.custom_base_model_info = None
self.training_error_details = None
class IterationPerformance(Model):
"""Represents the detailed performance data for a trained iteration.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar per_tag_performance: Gets the per-tag performance details for this
iteration.
:vartype per_tag_performance:
list[~azure.cognitiveservices.vision.customvision.training.models.TagPerformance]
:ivar precision: Gets the precision.
:vartype precision: float
:ivar precision_std_deviation: Gets the standard deviation for the
precision.
:vartype precision_std_deviation: float
:ivar recall: Gets the recall.
:vartype recall: float
:ivar recall_std_deviation: Gets the standard deviation for the recall.
:vartype recall_std_deviation: float
:ivar average_precision: Gets the average precision when applicable.
:vartype average_precision: float
"""
_validation = {
'per_tag_performance': {'readonly': True},
'precision': {'readonly': True},
'precision_std_deviation': {'readonly': True},
'recall': {'readonly': True},
'recall_std_deviation': {'readonly': True},
'average_precision': {'readonly': True},
}
_attribute_map = {
'per_tag_performance': {'key': 'perTagPerformance', 'type': '[TagPerformance]'},
'precision': {'key': 'precision', 'type': 'float'},
'precision_std_deviation': {'key': 'precisionStdDeviation', 'type': 'float'},
'recall': {'key': 'recall', 'type': 'float'},
'recall_std_deviation': {'key': 'recallStdDeviation', 'type': 'float'},
'average_precision': {'key': 'averagePrecision', 'type': 'float'},
}
def __init__(self, **kwargs) -> None:
super(IterationPerformance, self).__init__(**kwargs)
self.per_tag_performance = None
self.precision = None
self.precision_std_deviation = None
self.recall = None
self.recall_std_deviation = None
self.average_precision = None
class ModelInformation(Model):
"""Model information.
All required parameters must be populated in order to send to Azure.
:param estimated_model_size_in_megabytes: Estimation of the exported FP32
Onnx model size (2 tags) in megabytes. This information is not present if
the model cannot be exported.
:type estimated_model_size_in_megabytes: int
:param description: Required. Model description.
:type description: str
"""
_validation = {
'description': {'required': True},
}
_attribute_map = {
'estimated_model_size_in_megabytes': {'key': 'estimatedModelSizeInMegabytes', 'type': 'int'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(self, *, description: str, estimated_model_size_in_megabytes: int=None, **kwargs) -> None:
super(ModelInformation, self).__init__(**kwargs)
self.estimated_model_size_in_megabytes = estimated_model_size_in_megabytes
self.description = description
class Prediction(Model):
"""Prediction result.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar probability: Probability of the tag.
:vartype probability: float
:ivar tag_id: Id of the predicted tag.
:vartype tag_id: str
:ivar tag_name: Name of the predicted tag.
:vartype tag_name: str
:ivar bounding_box: Bounding box of the prediction.
:vartype bounding_box:
~azure.cognitiveservices.vision.customvision.training.models.BoundingBox
:ivar tag_type: Type of the predicted tag. Possible values include:
'Regular', 'Negative', 'GeneralProduct'
:vartype tag_type: str or
~azure.cognitiveservices.vision.customvision.training.models.TagType
"""
_validation = {
'probability': {'readonly': True},
'tag_id': {'readonly': True},
'tag_name': {'readonly': True},
'bounding_box': {'readonly': True},
'tag_type': {'readonly': True},
}
_attribute_map = {
'probability': {'key': 'probability', 'type': 'float'},
'tag_id': {'key': 'tagId', 'type': 'str'},
'tag_name': {'key': 'tagName', 'type': 'str'},
'bounding_box': {'key': 'boundingBox', 'type': 'BoundingBox'},
'tag_type': {'key': 'tagType', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(Prediction, self).__init__(**kwargs)
self.probability = None
self.tag_id = None
self.tag_name = None
self.bounding_box = None
self.tag_type = None
class PredictionQueryResult(Model):
"""Query result of the prediction images that were sent to your prediction
endpoint.
Variables are only populated by the server, and will be ignored when
sending a request.
:param token: Prediction Query Token.
:type token:
~azure.cognitiveservices.vision.customvision.training.models.PredictionQueryToken
:ivar results: Result of an image prediction request.
:vartype results:
list[~azure.cognitiveservices.vision.customvision.training.models.StoredImagePrediction]
"""
_validation = {
'results': {'readonly': True},
}
_attribute_map = {
'token': {'key': 'token', 'type': 'PredictionQueryToken'},
'results': {'key': 'results', 'type': '[StoredImagePrediction]'},
}
def __init__(self, *, token=None, **kwargs) -> None:
super(PredictionQueryResult, self).__init__(**kwargs)
self.token = token
self.results = None
class PredictionQueryTag(Model):
"""PredictionQueryTag.
:param id:
:type id: str
:param min_threshold:
:type min_threshold: float
:param max_threshold:
:type max_threshold: float
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'min_threshold': {'key': 'minThreshold', 'type': 'float'},
'max_threshold': {'key': 'maxThreshold', 'type': 'float'},
}
def __init__(self, *, id: str=None, min_threshold: float=None, max_threshold: float=None, **kwargs) -> None:
super(PredictionQueryTag, self).__init__(**kwargs)
self.id = id
self.min_threshold = min_threshold
self.max_threshold = max_threshold
class PredictionQueryToken(Model):
"""PredictionQueryToken.
:param session:
:type session: str
:param continuation:
:type continuation: str
:param max_count:
:type max_count: int
:param order_by: Possible values include: 'Newest', 'Oldest', 'Suggested'
:type order_by: str or
~azure.cognitiveservices.vision.customvision.training.models.OrderBy
:param tags:
:type tags:
list[~azure.cognitiveservices.vision.customvision.training.models.PredictionQueryTag]
:param iteration_id:
:type iteration_id: str
:param start_time:
:type start_time: datetime
:param end_time:
:type end_time: datetime
:param application:
:type application: str
"""
_attribute_map = {
'session': {'key': 'session', 'type': 'str'},
'continuation': {'key': 'continuation', 'type': 'str'},
'max_count': {'key': 'maxCount', 'type': 'int'},
'order_by': {'key': 'orderBy', 'type': 'str'},
'tags': {'key': 'tags', 'type': '[PredictionQueryTag]'},
'iteration_id': {'key': 'iterationId', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'application': {'key': 'application', 'type': 'str'},
}
def __init__(self, *, session: str=None, continuation: str=None, max_count: int=None, order_by=None, tags=None, iteration_id: str=None, start_time=None, end_time=None, application: str=None, **kwargs) -> None:
super(PredictionQueryToken, self).__init__(**kwargs)
self.session = session
self.continuation = continuation
self.max_count = max_count
self.order_by = order_by
self.tags = tags
self.iteration_id = iteration_id
self.start_time = start_time
self.end_time = end_time
self.application = application
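# Illustrative usage sketch (not part of the generated client): a query token
# asking for the newest 25 stored predictions whose probability for a given
# tag is at least 0.8. The tag id is a placeholder GUID.
def _example_prediction_query_token() -> PredictionQueryToken:
    tag_filter = PredictionQueryTag(
        id="00000000-0000-0000-0000-000000000009",  # placeholder tag id
        min_threshold=0.8,
        max_threshold=1.0)
    return PredictionQueryToken(
        max_count=25,
        order_by="Newest",
        tags=[tag_filter])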
class Project(Model):
"""Represents a project.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Gets the project id.
:vartype id: str
:param name: Required. Gets or sets the name of the project.
:type name: str
:param description: Required. Gets or sets the description of the project.
:type description: str
:param settings: Required. Gets or sets the project settings.
:type settings:
~azure.cognitiveservices.vision.customvision.training.models.ProjectSettings
:ivar created: Gets the date this project was created.
:vartype created: datetime
:ivar last_modified: Gets the date this project was last modified.
:vartype last_modified: datetime
:ivar thumbnail_uri: Gets the thumbnail url representing the image. If
VNET feature is enabled this will be a relative path to be used with
GetArtifact, otherwise this will be an absolute URI to the resource.
:vartype thumbnail_uri: str
:ivar dr_mode_enabled: Gets if the Disaster Recovery (DR) mode is on,
indicating the project is temporarily read-only.
:vartype dr_mode_enabled: bool
:param status: Gets the status of the project. Possible values include:
'Succeeded', 'Importing', 'Failed'
:type status: str or
~azure.cognitiveservices.vision.customvision.training.models.ProjectStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'required': True},
'description': {'required': True},
'settings': {'required': True},
'created': {'readonly': True},
'last_modified': {'readonly': True},
'thumbnail_uri': {'readonly': True},
'dr_mode_enabled': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'settings': {'key': 'settings', 'type': 'ProjectSettings'},
'created': {'key': 'created', 'type': 'iso-8601'},
'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
'thumbnail_uri': {'key': 'thumbnailUri', 'type': 'str'},
'dr_mode_enabled': {'key': 'drModeEnabled', 'type': 'bool'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(self, *, name: str, description: str, settings, status=None, **kwargs) -> None:
super(Project, self).__init__(**kwargs)
self.id = None
self.name = name
self.description = description
self.settings = settings
self.created = None
self.last_modified = None
self.thumbnail_uri = None
self.dr_mode_enabled = None
self.status = status
class ProjectExport(Model):
"""Represents information about a project export.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar iteration_count: Count of iterations that will be exported.
:vartype iteration_count: int
:ivar image_count: Count of images that will be exported.
:vartype image_count: int
:ivar tag_count: Count of tags that will be exported.
:vartype tag_count: int
:ivar region_count: Count of regions that will be exported.
:vartype region_count: int
:ivar estimated_import_time_in_ms: Estimated time this project will take
to import; this can change based on network connectivity and load between
source and destination regions.
:vartype estimated_import_time_in_ms: int
:ivar token: Opaque token that should be passed to ImportProject to
perform the import. This token grants anyone who holds it the ability to
import this project.
:vartype token: str
"""
_validation = {
'iteration_count': {'readonly': True},
'image_count': {'readonly': True},
'tag_count': {'readonly': True},
'region_count': {'readonly': True},
'estimated_import_time_in_ms': {'readonly': True},
'token': {'readonly': True},
}
_attribute_map = {
'iteration_count': {'key': 'iterationCount', 'type': 'int'},
'image_count': {'key': 'imageCount', 'type': 'int'},
'tag_count': {'key': 'tagCount', 'type': 'int'},
'region_count': {'key': 'regionCount', 'type': 'int'},
'estimated_import_time_in_ms': {'key': 'estimatedImportTimeInMS', 'type': 'int'},
'token': {'key': 'token', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(ProjectExport, self).__init__(**kwargs)
self.iteration_count = None
self.image_count = None
self.tag_count = None
self.region_count = None
self.estimated_import_time_in_ms = None
self.token = None
class ProjectSettings(Model):
"""Represents settings associated with a project.
Variables are only populated by the server, and will be ignored when
sending a request.
:param domain_id: Gets or sets the id of the Domain to use with this
project.
:type domain_id: str
:param classification_type: Gets or sets the classification type of the
project. Possible values include: 'Multiclass', 'Multilabel'
:type classification_type: str or
~azure.cognitiveservices.vision.customvision.training.models.Classifier
:param target_export_platforms: A list of ExportPlatform that the trained
model should be able to support.
:type target_export_platforms: list[str]
:ivar use_negative_set: Indicates if negative set is being used.
:vartype use_negative_set: bool
:ivar detection_parameters: Detection parameters in use, if any.
:vartype detection_parameters: str
:param image_processing_settings: Gets or sets image preprocessing
settings.
:type image_processing_settings:
~azure.cognitiveservices.vision.customvision.training.models.ImageProcessingSettings
:ivar export_model_container_uri: The uri to the Azure Storage container
that will be used to store exported models.
:vartype export_model_container_uri: str
:ivar notification_queue_uri: The uri to the Azure Storage queue that will
be used to send project-related notifications. See <a
href="https://go.microsoft.com/fwlink/?linkid=2144149">Storage
notifications</a> documentation for setup and message format.
:vartype notification_queue_uri: str
"""
_validation = {
'use_negative_set': {'readonly': True},
'detection_parameters': {'readonly': True},
'export_model_container_uri': {'readonly': True},
'notification_queue_uri': {'readonly': True},
}
_attribute_map = {
'domain_id': {'key': 'domainId', 'type': 'str'},
'classification_type': {'key': 'classificationType', 'type': 'str'},
'target_export_platforms': {'key': 'targetExportPlatforms', 'type': '[str]'},
'use_negative_set': {'key': 'useNegativeSet', 'type': 'bool'},
'detection_parameters': {'key': 'detectionParameters', 'type': 'str'},
'image_processing_settings': {'key': 'imageProcessingSettings', 'type': 'ImageProcessingSettings'},
'export_model_container_uri': {'key': 'exportModelContainerUri', 'type': 'str'},
'notification_queue_uri': {'key': 'notificationQueueUri', 'type': 'str'},
}
def __init__(self, *, domain_id: str=None, classification_type=None, target_export_platforms=None, image_processing_settings=None, **kwargs) -> None:
super(ProjectSettings, self).__init__(**kwargs)
self.domain_id = domain_id
self.classification_type = classification_type
self.target_export_platforms = target_export_platforms
self.use_negative_set = None
self.detection_parameters = None
self.image_processing_settings = image_processing_settings
self.export_model_container_uri = None
self.notification_queue_uri = None
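# Illustrative usage sketch (not part of the generated client): settings for a
# multiclass classification project. The domain id is a placeholder; real
# domain ids are typically obtained from the service's list-domains operation.
# The augmentation transform name is an assumption for illustration.
def _example_project_settings() -> ProjectSettings:
    return ProjectSettings(
        domain_id="00000000-0000-0000-0000-00000000000a",  # placeholder
        classification_type="Multiclass",
        image_processing_settings=ImageProcessingSettings(
            augmentation_methods={"rotation": True}))  # assumed transform name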
class Region(Model):
"""Region.
All required parameters must be populated in order to send to Azure.
:param tag_id: Required. Id of the tag associated with this region.
:type tag_id: str
:param left: Required. Coordinate of the left boundary.
:type left: float
:param top: Required. Coordinate of the top boundary.
:type top: float
:param width: Required. Width.
:type width: float
:param height: Required. Height.
:type height: float
"""
_validation = {
'tag_id': {'required': True},
'left': {'required': True},
'top': {'required': True},
'width': {'required': True},
'height': {'required': True},
}
_attribute_map = {
'tag_id': {'key': 'tagId', 'type': 'str'},
'left': {'key': 'left', 'type': 'float'},
'top': {'key': 'top', 'type': 'float'},
'width': {'key': 'width', 'type': 'float'},
'height': {'key': 'height', 'type': 'float'},
}
def __init__(self, *, tag_id: str, left: float, top: float, width: float, height: float, **kwargs) -> None:
super(Region, self).__init__(**kwargs)
self.tag_id = tag_id
self.left = left
self.top = top
self.width = width
self.height = height
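# Illustrative usage sketch (not part of the generated client): a Region as it
# would be attached to an ImageFileCreateEntry or ImageUrlCreateEntry when
# uploading object-detection training images. The tag id is a placeholder and
# the coordinates are arbitrary fractional values.
def _example_region() -> Region:
    return Region(
        tag_id="00000000-0000-0000-0000-00000000000b",  # placeholder
        left=0.0, top=0.5, width=0.25, height=0.25)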
class RegionProposal(Model):
"""RegionProposal.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar confidence:
:vartype confidence: float
:ivar bounding_box:
:vartype bounding_box:
~azure.cognitiveservices.vision.customvision.training.models.BoundingBox
"""
_validation = {
'confidence': {'readonly': True},
'bounding_box': {'readonly': True},
}
_attribute_map = {
'confidence': {'key': 'confidence', 'type': 'float'},
'bounding_box': {'key': 'boundingBox', 'type': 'BoundingBox'},
}
def __init__(self, **kwargs) -> None:
super(RegionProposal, self).__init__(**kwargs)
self.confidence = None
self.bounding_box = None
class StoredImagePrediction(Model):
"""Result of an image prediction request.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar resized_image_uri: The URI to the (resized) prediction image. If
VNET feature is enabled this will be a relative path to be used with
GetArtifact, otherwise this will be an absolute URI to the resource.
:vartype resized_image_uri: str
:ivar thumbnail_uri: The URI to the thumbnail of the original prediction
image. If VNET feature is enabled this will be a relative path to be used
with GetArtifact, otherwise this will be an absolute URI to the resource.
:vartype thumbnail_uri: str
:ivar original_image_uri: The URI to the original prediction image. If
VNET feature is enabled this will be a relative path to be used with
GetArtifact, otherwise this will be an absolute URI to the resource.
:vartype original_image_uri: str
:ivar domain: Domain used for the prediction.
:vartype domain: str
:ivar id: Prediction Id.
:vartype id: str
:ivar project: Project Id.
:vartype project: str
:ivar iteration: Iteration Id.
:vartype iteration: str
:ivar created: Date this prediction was created.
:vartype created: datetime
:ivar predictions: List of predictions.
:vartype predictions:
list[~azure.cognitiveservices.vision.customvision.training.models.Prediction]
"""
_validation = {
'resized_image_uri': {'readonly': True},
'thumbnail_uri': {'readonly': True},
'original_image_uri': {'readonly': True},
'domain': {'readonly': True},
'id': {'readonly': True},
'project': {'readonly': True},
'iteration': {'readonly': True},
'created': {'readonly': True},
'predictions': {'readonly': True},
}
_attribute_map = {
'resized_image_uri': {'key': 'resizedImageUri', 'type': 'str'},
'thumbnail_uri': {'key': 'thumbnailUri', 'type': 'str'},
'original_image_uri': {'key': 'originalImageUri', 'type': 'str'},
'domain': {'key': 'domain', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'project': {'key': 'project', 'type': 'str'},
'iteration': {'key': 'iteration', 'type': 'str'},
'created': {'key': 'created', 'type': 'iso-8601'},
'predictions': {'key': 'predictions', 'type': '[Prediction]'},
}
def __init__(self, **kwargs) -> None:
super(StoredImagePrediction, self).__init__(**kwargs)
self.resized_image_uri = None
self.thumbnail_uri = None
self.original_image_uri = None
self.domain = None
self.id = None
self.project = None
self.iteration = None
self.created = None
self.predictions = None
class StoredSuggestedTagAndRegion(Model):
"""Result of a suggested tags and regions request of the untagged image.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar width: Width of the resized image.
:vartype width: int
:ivar height: Height of the resized image.
:vartype height: int
:ivar resized_image_uri: The URI to the (resized) prediction image. If
VNET feature is enabled this will be a relative path to be used with
GetArtifact, otherwise this will be an absolute URI to the resource.
:vartype resized_image_uri: str
:ivar thumbnail_uri: The URI to the thumbnail of the original prediction
image. If VNET feature is enabled this will be a relative path to be used
with GetArtifact, otherwise this will be an absolute URI to the resource.
:vartype thumbnail_uri: str
:ivar original_image_uri: The URI to the original prediction image. If
VNET feature is enabled this will be a relative path to be used with
GetArtifact, otherwise this will be an absolute URI to the resource.
:vartype original_image_uri: str
:ivar domain: Domain used for the prediction.
:vartype domain: str
:ivar id: Prediction Id.
:vartype id: str
:ivar project: Project Id.
:vartype project: str
:ivar iteration: Iteration Id.
:vartype iteration: str
:ivar created: Date this prediction was created.
:vartype created: datetime
:ivar predictions: List of predictions.
:vartype predictions:
list[~azure.cognitiveservices.vision.customvision.training.models.Prediction]
:ivar prediction_uncertainty: Uncertainty (entropy) of suggested tags or
regions per image.
:vartype prediction_uncertainty: float
"""
_validation = {
'width': {'readonly': True},
'height': {'readonly': True},
'resized_image_uri': {'readonly': True},
'thumbnail_uri': {'readonly': True},
'original_image_uri': {'readonly': True},
'domain': {'readonly': True},
'id': {'readonly': True},
'project': {'readonly': True},
'iteration': {'readonly': True},
'created': {'readonly': True},
'predictions': {'readonly': True},
'prediction_uncertainty': {'readonly': True},
}
_attribute_map = {
'width': {'key': 'width', 'type': 'int'},
'height': {'key': 'height', 'type': 'int'},
'resized_image_uri': {'key': 'resizedImageUri', 'type': 'str'},
'thumbnail_uri': {'key': 'thumbnailUri', 'type': 'str'},
'original_image_uri': {'key': 'originalImageUri', 'type': 'str'},
'domain': {'key': 'domain', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'project': {'key': 'project', 'type': 'str'},
'iteration': {'key': 'iteration', 'type': 'str'},
'created': {'key': 'created', 'type': 'iso-8601'},
'predictions': {'key': 'predictions', 'type': '[Prediction]'},
'prediction_uncertainty': {'key': 'predictionUncertainty', 'type': 'float'},
}
def __init__(self, **kwargs) -> None:
super(StoredSuggestedTagAndRegion, self).__init__(**kwargs)
self.width = None
self.height = None
self.resized_image_uri = None
self.thumbnail_uri = None
self.original_image_uri = None
self.domain = None
self.id = None
self.project = None
self.iteration = None
self.created = None
self.predictions = None
self.prediction_uncertainty = None
class SuggestedTagAndRegion(Model):
"""Result of a suggested tags and regions request.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Prediction Id.
:vartype id: str
:ivar project: Project Id.
:vartype project: str
:ivar iteration: Iteration Id.
:vartype iteration: str
:ivar created: Date this prediction was created.
:vartype created: datetime
:ivar predictions: List of predictions.
:vartype predictions:
list[~azure.cognitiveservices.vision.customvision.training.models.Prediction]
:ivar prediction_uncertainty: Uncertainty (entropy) of suggested tags or
regions per image.
:vartype prediction_uncertainty: float
"""
_validation = {
'id': {'readonly': True},
'project': {'readonly': True},
'iteration': {'readonly': True},
'created': {'readonly': True},
'predictions': {'readonly': True},
'prediction_uncertainty': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'project': {'key': 'project', 'type': 'str'},
'iteration': {'key': 'iteration', 'type': 'str'},
'created': {'key': 'created', 'type': 'iso-8601'},
'predictions': {'key': 'predictions', 'type': '[Prediction]'},
'prediction_uncertainty': {'key': 'predictionUncertainty', 'type': 'float'},
}
def __init__(self, **kwargs) -> None:
super(SuggestedTagAndRegion, self).__init__(**kwargs)
self.id = None
self.project = None
self.iteration = None
self.created = None
self.predictions = None
self.prediction_uncertainty = None
class SuggestedTagAndRegionQuery(Model):
"""The array of result images and token containing session and continuation
Ids for the next query.
Variables are only populated by the server, and will be ignored when
sending a request.
:param token: Contains properties we need to fetch suggested tags for.
:type token:
~azure.cognitiveservices.vision.customvision.training.models.SuggestedTagAndRegionQueryToken
:ivar results: Result of a suggested tags and regions request of the
untagged image.
:vartype results:
list[~azure.cognitiveservices.vision.customvision.training.models.StoredSuggestedTagAndRegion]
"""
_validation = {
'results': {'readonly': True},
}
_attribute_map = {
'token': {'key': 'token', 'type': 'SuggestedTagAndRegionQueryToken'},
'results': {'key': 'results', 'type': '[StoredSuggestedTagAndRegion]'},
}
def __init__(self, *, token=None, **kwargs) -> None:
super(SuggestedTagAndRegionQuery, self).__init__(**kwargs)
self.token = token
self.results = None
class SuggestedTagAndRegionQueryToken(Model):
"""Contains properties we need to fetch suggested tags for. For the first
call, Session and continuation set to null.
Then on subsequent calls, uses the session/continuation from the previous
SuggestedTagAndRegionQuery result to fetch additional results.
:param tag_ids: Existing TagIds in project to filter suggested tags on.
:type tag_ids: list[str]
:param threshold: Confidence threshold to filter suggested tags on.
:type threshold: float
:param session: SessionId for database query. Initially set to null but
later used to paginate.
:type session: str
:param continuation: Continuation Id for database pagination. Initially
null but later used to paginate.
:type continuation: str
:param max_count: Maximum number of results you want to be returned in the
response.
:type max_count: int
:param sort_by: OrderBy. Ordering mechanism for your results. Possible
values include: 'UncertaintyAscending', 'UncertaintyDescending'
:type sort_by: str or
~azure.cognitiveservices.vision.customvision.training.models.SortBy
"""
_attribute_map = {
'tag_ids': {'key': 'tagIds', 'type': '[str]'},
'threshold': {'key': 'threshold', 'type': 'float'},
'session': {'key': 'session', 'type': 'str'},
'continuation': {'key': 'continuation', 'type': 'str'},
'max_count': {'key': 'maxCount', 'type': 'int'},
'sort_by': {'key': 'sortBy', 'type': 'str'},
}
def __init__(self, *, tag_ids=None, threshold: float=None, session: str=None, continuation: str=None, max_count: int=None, sort_by=None, **kwargs) -> None:
super(SuggestedTagAndRegionQueryToken, self).__init__(**kwargs)
self.tag_ids = tag_ids
self.threshold = threshold
self.session = session
self.continuation = continuation
self.max_count = max_count
self.sort_by = sort_by
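# A minimal pagination sketch (illustrative only: submit_query and process are
# hypothetical placeholders for however the token is sent to the service; only the
# model classes defined in this module are real). The first token leaves session and
# continuation as None; each response's token is reused to fetch the next page.
#
#   token = SuggestedTagAndRegionQueryToken(threshold=0.5, max_count=25)
#   while True:
#       query = submit_query(token)          # hypothetical call into the training client
#       if not query.results:
#           break
#       process(query.results)               # hypothetical per-page handler
#       token = query.token                  # carries session/continuation forward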
class Tag(Model):
"""Represents a Tag.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Gets the Tag ID.
:vartype id: str
:param name: Required. Gets or sets the name of the tag.
:type name: str
:param description: Required. Gets or sets the description of the tag.
:type description: str
:param type: Required. Gets or sets the type of the tag. Possible values
include: 'Regular', 'Negative', 'GeneralProduct'
:type type: str or
~azure.cognitiveservices.vision.customvision.training.models.TagType
:ivar image_count: Gets the number of images with this tag.
:vartype image_count: int
"""
_validation = {
'id': {'readonly': True},
'name': {'required': True},
'description': {'required': True},
'type': {'required': True},
'image_count': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'image_count': {'key': 'imageCount', 'type': 'int'},
}
def __init__(self, *, name: str, description: str, type, **kwargs) -> None:
super(Tag, self).__init__(**kwargs)
self.id = None
self.name = name
self.description = description
self.type = type
self.image_count = None
class TagFilter(Model):
"""Model that query for counting of images whose suggested tags match given
tags and their probability are greater than or equal to the given
threshold.
:param tag_ids: Existing TagIds in project to get suggested tags count
for.
:type tag_ids: list[str]
:param threshold: Confidence threshold to filter suggested tags on.
:type threshold: float
"""
_attribute_map = {
'tag_ids': {'key': 'tagIds', 'type': '[str]'},
'threshold': {'key': 'threshold', 'type': 'float'},
}
def __init__(self, *, tag_ids=None, threshold: float=None, **kwargs) -> None:
super(TagFilter, self).__init__(**kwargs)
self.tag_ids = tag_ids
self.threshold = threshold
class TagPerformance(Model):
"""Represents performance data for a particular tag in a trained iteration.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id:
:vartype id: str
:ivar name:
:vartype name: str
:ivar precision: Gets the precision.
:vartype precision: float
:ivar precision_std_deviation: Gets the standard deviation for the
precision.
:vartype precision_std_deviation: float
:ivar recall: Gets the recall.
:vartype recall: float
:ivar recall_std_deviation: Gets the standard deviation for the recall.
:vartype recall_std_deviation: float
:ivar average_precision: Gets the average precision when applicable.
:vartype average_precision: float
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'precision': {'readonly': True},
'precision_std_deviation': {'readonly': True},
'recall': {'readonly': True},
'recall_std_deviation': {'readonly': True},
'average_precision': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'precision': {'key': 'precision', 'type': 'float'},
'precision_std_deviation': {'key': 'precisionStdDeviation', 'type': 'float'},
'recall': {'key': 'recall', 'type': 'float'},
'recall_std_deviation': {'key': 'recallStdDeviation', 'type': 'float'},
'average_precision': {'key': 'averagePrecision', 'type': 'float'},
}
def __init__(self, **kwargs) -> None:
super(TagPerformance, self).__init__(**kwargs)
self.id = None
self.name = None
self.precision = None
self.precision_std_deviation = None
self.recall = None
self.recall_std_deviation = None
self.average_precision = None
class TrainingParameters(Model):
"""Parameters used for training.
:param selected_tags: List of tags selected for this training session,
other tags in the project will be ignored.
:type selected_tags: list[str]
:param custom_base_model_info: Information of the previously trained
iteration which provides the base model for current iteration's training.
:type custom_base_model_info:
~azure.cognitiveservices.vision.customvision.training.models.CustomBaseModelInfo
"""
_attribute_map = {
'selected_tags': {'key': 'selectedTags', 'type': '[str]'},
'custom_base_model_info': {'key': 'customBaseModelInfo', 'type': 'CustomBaseModelInfo'},
}
def __init__(self, *, selected_tags=None, custom_base_model_info=None, **kwargs) -> None:
super(TrainingParameters, self).__init__(**kwargs)
self.selected_tags = selected_tags
self.custom_base_model_info = custom_base_model_info
|
Azure/azure-sdk-for-python
|
sdk/cognitiveservices/azure-cognitiveservices-vision-customvision/azure/cognitiveservices/vision/customvision/training/models/_models_py3.py
|
Python
|
mit
| 83,348
|
[
"VisIt"
] |
095b7ce2719b975949880475e0142a732b3db2cb35e3b5d818973131bb2e8b31
|
from pype.ast import *
from pype.symtab import *
from pype.lib_import import LibraryImporter
from pype.fgir import FGNodeType, FGNode, Flowgraph, FGIR
from pype.error import *
class SymbolTableVisitor(ASTVisitor):
def __init__(self):
self.symbol_table = SymbolTable()
self.currentComponent = None
def return_value(self):
return self.symbol_table
def visit(self, node):
if isinstance(node, ASTImport):
# Import statements make library functions available to PyPE
imp = LibraryImporter(node.module.name)
print (imp.add_symbols(self.symbol_table))
# TODO
# Add symbols for the following types of names:
# inputs: anything in an input expression
# the SymbolType should be input, and the ref can be None
# the scope should be the enclosing component
# assigned names: the bound name in an assignment expression
# the SymbolType should be var, and the ref can be None
# the scope should be the enclosing component
# components: the name of each component
# the SymbolType should be component, and the ref can be None
# the scope should be *global*
# Note, you'll need to track scopes again for some of these.
# You may need to add class state to handle this.
if isinstance(node, ASTInputExpr):
for input_expression in node.children:
name = input_expression.name
self.symbol_table.addsym((name, SymbolType.input, None), self.currentComponent)
elif isinstance(node, ASTAssignmentExpr):
name = node.binding.name
self.symbol_table.addsym((name, SymbolType.var, None), self.currentComponent)
elif isinstance(node, ASTComponent):
name = node.name.name
self.currentComponent = name
self.symbol_table.addsym((name, SymbolType.component, None), 'global')
class LoweringVisitor(ASTModVisitor):
'Produces FGIR from an AST.'
def __init__(self,symtab):
self.symtab = symtab
self.ir = FGIR()
self.current_component = None
def visit(self, astnode):
if isinstance(astnode, ASTComponent):
name = astnode.name.name
self.ir[name] = Flowgraph(name=name)
self.current_component = name
return astnode
def post_visit(self, node, visit_value, child_values):
if isinstance(node, ASTProgram):
return self.ir
elif isinstance(node, ASTInputExpr):
fg = self.ir[self.current_component]
for child_v in child_values:
varname = child_v.name
var_nodeid = fg.get_var(varname)
if var_nodeid is None: # No use yet, declare it.
var_nodeid = fg.new_node(FGNodeType.input).nodeid
else: # use before declaration
fg.nodes[var_nodeid].type = FGNodeType.input
fg.set_var(varname,var_nodeid)
fg.add_input(var_nodeid)
return None
elif isinstance(node, ASTOutputExpr):
fg = self.ir[self.current_component]
for child_v in child_values:
n = fg.new_node(FGNodeType.output)
varname = child_v.name
var_nodeid = fg.get_var(varname)
if var_nodeid is None: # Use before declaration
# The "unknown" type will be replaced later
var_nodeid = fg.new_node(FGNodeType.unknown).nodeid
fg.set_var(varname, var_nodeid)
# Already declared in an assignment or input expression
n.inputs.append(var_nodeid)
fg.add_output(n.nodeid)
return None
elif isinstance(node, ASTAssignmentExpr):
fg = self.ir[self.current_component]
# If a variable use precedes its declaration, a stub will be in this table
stub_nodeid = fg.get_var(node.binding.name)
if stub_nodeid is not None: # Modify the existing stub
n = fg.nodes[stub_nodeid]
n.type = FGNodeType.assignment
else: # Create a new node
n = fg.new_node(FGNodeType.assignment)
child_v = child_values[1]
if isinstance(child_v, FGNode): # subexpressions or literals
n.inputs.append(child_v.nodeid)
elif isinstance(child_v, ASTID): # variable lookup
varname = child_v.name
var_nodeid = fg.get_var(varname)
if var_nodeid is None: # Use before declaration
# The "unknown" type will be replaced later
var_nodeid = fg.new_node(FGNodeType.unknown).nodeid
fg.set_var(varname, var_nodeid)
# Already declared in an assignment or input expression
n.inputs.append(var_nodeid)
fg.set_var(node.binding.name, n.nodeid)
return None
elif isinstance(node, ASTEvalExpr):
fg = self.ir[self.current_component]
op = self.symtab.lookupsym(node.op.name, scope=self.current_component)
if op is None:
raise PypeSyntaxError('Undefined operator: '+str(node.op.name))
if op.type==SymbolType.component:
n = fg.new_node(FGNodeType.component, ref=op.name)
elif op.type==SymbolType.libraryfunction:
n = fg.new_node(FGNodeType.libraryfunction, ref=op.ref)
elif op.type==SymbolType.librarymethod:
n = fg.new_node(FGNodeType.librarymethod, ref=op.ref)
else:
raise PypeSyntaxError('Invalid operator of type "'+str(SymbolType)+'" in expression: '+str(node.op.name))
n.inputs = []
for child_v in child_values[1:]:
if isinstance(child_v, FGNode): # subexpressions or literals
n.inputs.append(child_v.nodeid)
elif isinstance(child_v, ASTID): # variable lookup
varname = child_v.name
var_nodeid = fg.get_var(varname)
if var_nodeid is None: # Use before declaration
# The "unknown" type will be replaced later
var_nodeid = fg.new_node(FGNodeType.unknown).nodeid
fg.set_var(varname, var_nodeid)
# Already declared in an assignment or input expression
n.inputs.append(var_nodeid)
return n
elif isinstance(node, ASTLiteral):
fg = self.ir[self.current_component]
n = fg.new_node(FGNodeType.literal, ref=node.value)
return n
else:
return visit_value
|
207leftovers/cs207project
|
pype/translate.py
|
Python
|
mit
| 6,055
|
[
"VisIt"
] |
76f44412b069cca4f6d72af65302268da7c21d2462951c01b2dbbc6430e01af9
|
#! /usr/bin/env python
from __future__ import print_function, division
from collections import namedtuple
"""
Copyright (C) 2016 Wesley Fraser (westhefras@gmail.com, @wtfastro)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'Wesley Fraser (@wtfastro, github: fraserw <westhefras@gmail.com>), Academic email: wes.fraser@qub.ac.uk'
import imp
import os
import sys
import pylab as pyl
import scipy as sci
from scipy import optimize as opti, interpolate as interp
from scipy import signal
from . import bgFinder
# import weightedMeanSTD
try:
imp.find_module('astropy')
astropyFound = True
except ImportError:
astropyFound = False
if astropyFound:
from astropy.io import fits as pyf
else:
import pyfits as pyf
from .pill import pillPhot
from .trippy_utils import *
import time
class modelPSF:
"""
Round and moving object psf class.
The intent of this class is to provide a model PSF of both stationary and trailed point sources.
The basic profile is a moffat profile with super sampled constant look up table. Both are used
with linear convolution to calculate the trailed source PSF.
modelPSF takes as input:
-x,y are arrays of length equal to the width and height of the desired PSF image.
eg. x=numpy.arange(50), y=numpy.arange(70) would create a psf 50x70 pix
-alpha, beta are initial guesses for the moffat profile to be used. 5 and 2 are usually pretty good
-repFact is the supersampling factor. 10 is default, though for improvement in speed, 5 can be used
without much loss of PSF or photometric precision
optional arguments:
-verbose = True if you want to see a lot of unnecessary output
-restore = psf filename if you want to restore from a previously saved psf file.
The general steps for psf generation and photometry are:
-initialization
-lookup table generation
-psf generation
-line convolution
-linear aperture correction estimation
"""
def psfStore(self,fn, psfV2 = False):
"""
Store the psf into a fits file that you can view and reopen at a later point. The only option is the fits file
name.
"""
name=fn.split('.fits')[0]
if not psfV2:
HDU=pyf.PrimaryHDU(self.PSF)
hdu=pyf.ImageHDU(self.psf)
lookupHDU=pyf.ImageHDU(self.lookupTable)
lineHDU=pyf.ImageHDU(self.longPSF)
if self.aperCorrs is not None:
aperCorrHDU=pyf.ImageHDU(np.array([self.aperCorrs,self.aperCorrRadii]))
else:
aperCorrHDU=pyf.ImageHDU(np.array([[-1],[-1]]))
if self.lineAperCorrs is not None:
lineAperCorrHDU=pyf.ImageHDU(np.array([self.lineAperCorrs,self.lineAperCorrRadii]))
else:
lineAperCorrHDU=pyf.ImageHDU(np.array([[-1],[-1]]))
#distHDU=pyf.ImageHDU(np.array([self.rDist,self.fDist]))
list=pyf.HDUList([HDU,lookupHDU,lineHDU,hdu,aperCorrHDU,lineAperCorrHDU])
else:
lookupHDU=pyf.PrimaryHDU(self.lookupTable)
if self.aperCorrs is not None:
aperCorrHDU=pyf.ImageHDU(np.array([self.aperCorrs,self.aperCorrRadii]))
else:
aperCorrHDU=pyf.ImageHDU(np.array([[-1],[-1]]))
if self.lineAperCorrs is not None:
lineAperCorrHDU=pyf.ImageHDU(np.array([self.lineAperCorrs,self.lineAperCorrRadii]))
else:
lineAperCorrHDU=pyf.ImageHDU(np.array([[-1],[-1]]))
#distHDU=pyf.ImageHDU(np.array([self.rDist,self.fDist]))
list=pyf.HDUList([lookupHDU,aperCorrHDU,lineAperCorrHDU])
list[0].header.set('REPFACT',self.repFact)
for ii in range(len(self.psfStars)):
list[0].header.set('xSTAR%s'%(ii),self.psfStars[ii][0],'PSF Star x value.')
list[0].header.set('ySTAR%s'%(ii),self.psfStars[ii][1],'PSF Star y value.')
list[0].header['alpha']=self.alpha
list[0].header['beta']=self.beta
list[0].header['A']=self.A
list[0].header['rate']=self.rate
list[0].header['angle']=self.angle
list[0].header['dt']=self.dt
list[0].header['pixScale']=self.pixScale
try:
list.writeto(name + '.fits', overwrite = True)
except:
list.writeto(name + '.fits', clobber = True)
def _fitsReStore(self,fn):
"""
Hidden convenience function to restore a psf file.
"""
print('\nRestoring PSF...')
name=fn.split('.fits')[0]
with pyf.open(name+'.fits') as inHan:
#load the psf file
if len(inHan) == 6:
psfV2 = False
#load the psf file
self.PSF=inHan[0].data
self.lookupTable=inHan[1].data
self.longPSF=inHan[2].data
self.psf=inHan[3].data
self.aperCorrs=inHan[4].data[0]
self.aperCorrRadii=inHan[4].data[1]
self.lineAperCorrs=inHan[5].data[0]
self.lineAperCorrRadii=inHan[5].data[1]
else:
psfV2 = True
self.lookupTable=inHan[0].data
self.aperCorrs=inHan[1].data[0]
self.aperCorrRadii=inHan[1].data[1]
self.lineAperCorrs=inHan[2].data[0]
self.lineAperCorrRadii=inHan[2].data[1]
self.psfStars=[]
header=inHan[0].header
self.repFact=header['REPFACT']
x=header['xSTAR*']#.values()
y=header['ySTAR*']#.values()
for ii in range(len(x)):
self.psfStars.append([x[ii],y[ii]])
self.psfStars = np.array(self.psfStars)
self.alpha=header['alpha']
self.beta=header['beta']
self.A=header['A']
self.rate=header['RATE']
self.angle=header['ANGLE']
self.dt=header['DT']
self.pixScale=header['PIXSCALE']
self.boxSize=int( len(self.lookupTable)/self.repFact/2 )
#now recompute the necessary parameters
if len(self.aperCorrs)!=1:
self.aperCorrFunc=interp.interp1d(self.aperCorrRadii*1.,self.aperCorrs*1.)
if len(self.lineAperCorrs)!=1:
self.lineAperCorrFunc=interp.interp1d(self.lineAperCorrRadii*1.,self.lineAperCorrs*1.)
(A,B) = self.lookupTable.shape
self.shape = [A/self.repFact,B/self.repFact]
self.x=np.arange(self.shape[0])+0.5
self.y=np.arange(self.shape[1])+0.5
self.cent=np.array([len(self.y)/2.,len(self.x)/2.])
self.centx=self.cent[0]
self.centy=self.cent[1]
#self.psf=np.ones([len(self.y),len(self.x)]).astype('float')
self.inds=np.zeros((len(self.y),len(self.x),2)).astype('int')
for ii in range(len(self.y)):
self.inds[ii,:,1]=np.arange(len(self.x))
for ii in range(len(self.x)):
self.inds[:,ii,0]=np.arange(len(self.y))
self.coords=self.inds+np.array([0.5,0.5])
self.r=np.sqrt(np.sum((self.coords-self.cent)**2,axis=2))
self.X=np.arange(len(self.x)*self.repFact)/float(self.repFact)+0.5/self.repFact
self.Y=np.arange(len(self.y)*self.repFact)/float(self.repFact)+0.5/self.repFact
self.Inds=np.zeros((len(self.y)*self.repFact,len(self.x)*self.repFact,2)).astype('int')
for ii in range(len(self.y)*self.repFact):
self.Inds[ii,:,1]=np.arange(len(self.x)*self.repFact)
for ii in range(len(self.x)*self.repFact):
self.Inds[:,ii,0]=np.arange(len(self.y)*self.repFact)
self.Coords=(self.Inds+np.array([0.5,0.5]))/float(self.repFact)
self.R=np.sqrt(np.sum((self.Coords-self.cent)**2,axis=2))
self.genPSF()
self.fitted=True
if psfV2:
###code to generate the PSF and psf
self.PSF=self.moffat(self.R)
self.PSF/=np.sum(self.PSF)
self.psf=downSample2d(self.PSF,self.repFact)
###code to generate the line psf
self.longPSF = None
if self.rate is not None:
self.line(self.rate,self.angle,self.dt,pixScale = self.pixScale,display=False,useLookupTable=True, verbose=True)
print(' PSF restored.\n')
def __init__(self,x=-1,y=-1,alpha=-1,beta=-1,repFact=10,verbose=False,restore=False,ignoreRepFactWarning=False):
"""
Initialize the PSF.
x,y are the size of the PSF (width, height) in pixels. Can either be an integer value or a numpy.arange(x) array.
alpha, beta are the initial moffat parameters
repfact=5,10 is the supersampling factor. Only 5 and 10 are well tested!
verbose to see a bunch of unnecessary, but informative output.
restore=filename to restore a psf having the filename provided.
"""
self.nForFitting=0
self.imData=None
if repFact not in [3,5,10] and not ignoreRepFactWarning:
raise Warning('This has only been robustly tested with repFact=3, 5, or 10. I encourage you to stick with those.')
if not restore:
if type(x)==type(np.ones(1)):
if len(x)==1:
if x[0]%2==0 or y[0]%2==0:
raise Exception('Please use odd width PSFs. Even has not been tested yet.')
elif (len(x)%2==0 or len(y)%2==0):
raise Exception('Please use odd width PSFs. Even has not been tested yet.')
else:
if (x%2==0 or y%2==0):
raise Exception('Please use odd width PSFs. Even has not been tested yet.')
if restore:
self._fitsReStore(restore)
else:
self.A=None
self.alpha=alpha
self.beta=beta
self.chi=None
self.rate = None
self.angle = None
self.dt = None
self.pixScale = None
if type(x)!=type(np.ones(1)):
self.x=np.arange(x)+0.5
self.y=np.arange(y)+0.5
elif len(x)==1:
self.x=np.arange(x)+0.5
self.y=np.arange(y)+0.5
else:
self.x=x*1.0+0.5
self.y=y*1.0+0.5
self.cent=np.array([len(self.y)/2.,len(self.x)/2.])
self.centx=self.cent[0]
self.centy=self.cent[1]
self.repFact=repFact
self.psf=np.ones([len(self.y),len(self.x)]).astype('float')
self.inds=np.zeros((len(self.y),len(self.x),2)).astype('int')
for ii in range(len(self.y)):
self.inds[ii,:,1]=np.arange(len(self.x))
for ii in range(len(self.x)):
self.inds[:,ii,0]=np.arange(len(self.y))
self.coords=self.inds+np.array([0.5,0.5])
self.r=np.sqrt(np.sum((self.coords-self.cent)**2,axis=2))
self.X=np.arange(len(self.x)*self.repFact)/float(self.repFact)+0.5/self.repFact
self.Y=np.arange(len(self.y)*self.repFact)/float(self.repFact)+0.5/self.repFact
self.Inds=np.zeros((len(self.y)*self.repFact,len(self.x)*self.repFact,2)).astype('int')
for ii in range(len(self.y)*self.repFact):
self.Inds[ii,:,1]=np.arange(len(self.x)*self.repFact)
for ii in range(len(self.x)*self.repFact):
self.Inds[:,ii,0]=np.arange(len(self.y)*self.repFact)
self.Coords=(self.Inds+np.array([0.5,0.5]))/float(self.repFact)
self.R=np.sqrt(np.sum((self.Coords-self.cent)**2,axis=2))
self.PSF=self.moffat(self.R)
self.PSF/=np.sum(self.PSF)
self.psf=downSample2d(self.PSF,self.repFact)
self.fullPSF=None
self.fullpsf=None
self.shape=self.psf.shape
self.aperCorrFunc=None
self.aperCorrs=None
self.aperCorrRadii=None
self.lineAperCorrFunc=None
self.lineAperCorrs=None
self.lineAperCorrRadii=None
self.verbose=verbose
self.fitted=False
self.lookupTable=None
self.lookupF=None
self.lookupR=None
#self.rDist=None
#self.fDist=None
self.line2d=None
self.longPSF=None
self.longpsf=None
self.bgNoise=None
#from fitting a psf to a source
self.model=None
self.residual=None
self.psfStars=None
def computeRoundAperCorrFromPSF(self,radii,useLookupTable=True,display=True,displayAperture=True):
"""
This computes the aperture correction directly from the PSF. These values will be used for interpolation to
other values. The aperture correction is with respect to the largest aperture provided in radii. I recommend
4*FWHM.
radii is an array of radii on which to calculate the aperture corrections. I recommend at least 10 values
between 1 and 4 FWHM.
useLookupTable=True/False to calculate either with just the moffat profile, or with lookuptable included.
display=True to show you some plots.
displayAperture=True to show you the aperture at each radius.
"""
self.aperCorrRadii=radii*1.0
aperCorrs=[]
(A,B)=self.PSF.shape
if useLookupTable:
phot=pillPhot(self.fullPSF,repFact=1)
else:
phot=pillPhot(self.PSF,repFact=1)
"""
#old individual radii call version
for iii in range(len(self.aperCorrRadii)):
r=radii[iii]
width=A/2#int(A/(r*self.repFact*2)+0.5)*0.75
phot(B/2.,A/2.,radius=r*self.repFact,l=0.,a=0.,skyRadius=None,zpt=0.0,width=width,display=displayAperture)
m=phot.magnitude
aperCorrs.append(m)
"""
#more efficient version with all radii passed at once.
width=int(A/2)
phot(B / 2., A / 2., radius=radii * self.repFact, l=0., a=0., skyRadius=None, zpt=0.0, width=width,
display=displayAperture)
aperCorrs = phot.magnitude
self.aperCorrs=np.array(aperCorrs)
self.aperCorrFunc=interp.interp1d(self.aperCorrRadii*1.,self.aperCorrs*1.)
if display:
fig=pyl.figure('psf')
pyl.plot(self.aperCorrRadii,self.aperCorrs,'k-o')
pyl.xlabel('Aperture Radius (pix)')
pyl.ylabel('Normalized Magnitude')
pyl.show()
#still need to implement this!
def roundAperCorr(self,r):
"""
Return an aperture correction at given radius. Linear interpolation between values found in
computeRoundAperCorrFromPSF is used.
"""
if self.aperCorrFunc!=None:
return self.aperCorrFunc(r)-np.min(self.aperCorrs)
else:
raise Exception('Must first run computeRoundAperCorrFromPSF before the aperture corrections can be evaluated here.')
def computeLineAperCorrFromTSF(self,radii,l,a,display=True,displayAperture=True):
"""
This computes the aperture correction directly from the TSF. These values will be used for interpolation to
other values. The aperture correction is with respect to the largest aperture provided in radii. I recommend
4*FWHM.
radii is an array of radii on which to calculate the aperture corrections. I recommend at least 10 values
between 1 and 4 FWHM.
l and a are the length (in pixels) and angle of the pill aperture
useLookupTable=True/False to calculate either with just the moffat profile, or with lookuptable included.
display=True to show you some plots.
displayAperture=True to show you the aperture at each radius.
"""
self.lineAperCorrRadii=radii*1.0
self.lineAperCorrs=[]
(A,B)=self.PSF.shape
phot=pillPhot(self.longPSF,repFact=1)
"""
#old version where all radii are passed individually
for ii in range(len(self.lineAperCorrRadii)):
r=self.lineAperCorrRadii[ii]
width=A/2#int(A/(r*self.repFact*2))
phot(B/2.,A/2.,radius=r*self.repFact,l=l*self.repFact,a=a,skyRadius=None,zpt=0.0,width=width,display=displayAperture)
m=phot.magnitude
print ' ',r,phot.sourceFlux,m
self.lineAperCorrs.append(m)
"""
#new version where all radii are passed at once
width = int(A / 2)
phot(B / 2., A / 2., radius=radii * self.repFact, l=l * self.repFact, a=a, skyRadius=None, zpt=0.0, width=width,
display=displayAperture)
fluxes = phot.sourceFlux
self.lineAperCorrs = phot.magnitude
print(" Radius Flux Magnitude")
for ii in range(len(self.lineAperCorrRadii)):
print(' {:6.2f} {:10.3f} {:8.3f}'.format(radii[ii],phot.sourceFlux[ii],phot.magnitude[ii]))
self.lineAperCorrs=np.array(self.lineAperCorrs)
self.lineAperCorrFunc=interp.interp1d(self.lineAperCorrRadii,self.lineAperCorrs)
if display:
fig=pyl.figure('psf')
pyl.plot(self.lineAperCorrRadii,self.lineAperCorrs,'k-o')
pyl.xlabel('Aperture Radius (pix)')
pyl.ylabel('Normalized Magnitude')
pyl.show()
def lineAperCorr(self,r):
"""
Return an aperture correction at given radius. Linear interpolation between values found in
computeLineAperCorrFromTSF is used.
"""
if self.lineAperCorrFunc!=None:
return self.lineAperCorrFunc(r)-np.min(self.lineAperCorrs)
else:
raise Exception('Must first run computeLineAperCorrFromTSF before the aperture corrections can be evaluated here.')
def moffat(self,rad):
"""
Return a moffat profile evaluated at the radii in the input numpy array.
"""
#normalized flux profile return 1.-(1.+(rad/self.alpha)**2)**(1.-self.beta)
a2=self.alpha*self.alpha
return (self.beta-1)/(np.pi*a2)*(1.+(rad/self.alpha)**2)**(-self.beta)
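# For reference, this is the standard normalized Moffat profile
# I(r) = (beta-1)/(pi*alpha**2) * (1 + (r/alpha)**2)**(-beta),
# whose analytic full width at half maximum is FWHM = 2*alpha*sqrt(2**(1./beta) - 1);
# the FWHM() method below recovers the same quantity numerically once the lookup
# table is included in the PSF.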
def FWHM(self, fromMoffatProfile=False, fromImData = False, method = 'median',frac = 0.5):
"""
Return the FWHM of the PSF. If fromMoffatProfile=True, the FWHM from a pure
moffat profile is returned. Otherwise the FWHM of the combined moffat profile and lookup
table is used. That is, from the full PSF.
If fromImData=True, then the FWHM is estimated from the image data passed to fitMoffat
function. This option requires that fitMoffat has been run on the source in question
before the FWHM(fromImData=True) is called.
When estimating FWHM from the PSF, or from image data, a running mean/median of pixels
sorted by radius from the centre is used. The width of the running window is 3*repFact.
The method variable takes either "median" or "mean" as input to decide which method is
used in the running window. Default is median.
"""
if fromMoffatProfile:
r=np.arange(0,(2*max(self.x.shape[0]/2.,self.y.shape[0]/2.)**2)**0.5,0.005)
m=self.moffat(r)
m/=np.max(m)
k=np.sum(np.greater(m,frac))
if k<0 or k>=len(m): return None
return r[k]*2.
else:
if fromImData:
im = self.repSubsec-self.bg/(self.repFact*self.repFact)
repRads = self.repRads
else:
im = self.fullPSF
a = self.y.shape[0]/2.
b = self.x.shape[0]/2.
rangeY = np.arange(-a*self.repFact,a*self.repFact)/float(self.repFact)
rangeX = np.arange(-b*self.repFact,b*self.repFact)/float(self.repFact)
dx2 = (0.5/self.repFact-rangeX)**2
repRads = []
for ii in range(len(rangeY)):
repRads.append((0.5/self.repFact-rangeY[ii])**2+dx2)
repRads = np.array(repRads)**0.5
if method not in ['median','mean']:
raise TypeError('Method must be either median or mean.')
numMedPix = self.repFact*3
#below steps through the pixels taking numMedPix
(A,B) = repRads.shape
rr = repRads.reshape(A*B)
rim = im.reshape(A*B)
s = np.max(im)
args = np.argsort(rr)
for ii in range(len(args)-numMedPix):
if method == 'median':
med_i = np.median(rim[args[ii:ii+numMedPix]])
med_r = np.median(rr[args[ii:ii+numMedPix]])
else:
med_i = np.mean(rim[args[ii:ii+numMedPix]])
med_r = np.mean(rr[args[ii:ii+numMedPix]])
if med_i<=frac*s:
return med_r*2.0
# Fall back to the largest sampled radii if the profile never drops below frac*peak.
if method == 'median':
return np.median(rr[args[-numMedPix:]])*2.0
else:
return np.mean(rr[args[-numMedPix:]])*2.0
"""
#below cylces through a preset radius array
#this is probably less accurate than the above version
(A,B) = repRads.shape
r = np.arange(0.0,min(np.max(repRads[int(A/2),:]),np.max(repRads[:,int(B/2)]))+0.05,0.05)
s = np.max(im)
ind = 0
while r[ind+1]<min(np.max(repRads[int(A/2),:]),np.max(repRads[:,int(B/2)])) and ind+1<len(r):
w = np.where((repRads<r[ind+1])&(repRads>r[ind]))
if len(w[0])>=numMedPix:
med = np.median(im[w])
if med<=0.5*s:
return r[ind+1]*2.0
ind+=1
return r[len(r)-1]*2.0
"""
def __getitem__(self,key):
return self.psf[key]
def line(self,rate,angle,dt,pixScale=0.2,display=False,useLookupTable=True, verbose=True):
"""
Compute the TSF given input rate of motion, angle of motion, length of exposure, and pixelScale.
Units choice is irrelevant, as long as they are all the same! eg. rate in "/hr, and dt in hr.
Angle is in degrees +-90 from horizontal.
display=True to see the TSF
useLookupTable=True to use the lookupTable. Otherwise pure moffat is used.
"""
self.rate=rate
self.angle=angle
self.dt=dt
self.pixScale=pixScale
angr=angle*np.pi/180.
self.line2d=self.PSF*0.0
w=np.where(( np.abs(self.X-self.centx)<np.cos(angr)*rate*dt/pixScale/2.))
if len(w[0])>0:
x=self.X[w]*1.0
y=np.tan(angr)*(x-self.centx)+self.centy
X=(x*self.repFact).astype('int')
Y=(y*self.repFact).astype('int')
self.line2d[Y,X]=1.0
w=np.where(self.line2d>0)
yl,yh=np.min(w[0]),np.max(w[0])
xl,xh=np.min(w[1]),np.max(w[1])
self.line2d=self.line2d[yl:yh+1,xl:xh+1]
else:
self.line2d=np.array([[1.0]])
if useLookupTable:
if verbose:
print('Using the lookup table when generating the line PSF.')
#self.longPSF=signal.convolve2d(self.moffProf+self.lookupTable*self.repFact*self.repFact, self.line2d,mode='same')
self.longPSF=signal.fftconvolve(self.moffProf+self.lookupTable*self.repFact*self.repFact, self.line2d,mode='same')
self.longPSF*=np.sum(self.fullPSF)/np.sum(self.longPSF)
else:
if verbose:
print('Not using the lookup table when generating the line PSF')
#self.longPSF=signal.convolve2d(self.moffProf,self.line2d,mode='same')
self.longPSF=signal.fftconvolve(self.moffProf,self.line2d,mode='same')
self.longPSF*=np.sum(self.moffProf)/np.sum(self.longPSF)
self.longpsf=downSample2d(self.longPSF,self.repFact)
if display:
fig=pyl.figure('Line PSF')
pyl.imshow(self.longPSF,interpolation='nearest',origin='lower')
pyl.show()
def plant_old(self, x, y, amp, indata,
useLinePSF=False, returnModel=False, verbose=False,
addNoise=True, plantIntegerValues=False, gain=None, plantBoxWidth = None):
"""
keeping this for testing purposes only.
"""
rf2 = float(self.repFact*self.repFact)
#self.boxSize=len(self.lookupTable)/self.repFact/2
self.boxSize = int(len(self.R[0])/self.repFact/2)
#t1 = time.time()
(A,B) = indata.shape
bigIn = np.zeros((A+2*self.boxSize,B+2*self.boxSize),dtype=indata.dtype)
bigOut = np.zeros((A+2*self.boxSize,B+2*self.boxSize),dtype=indata.dtype)
bigIn[self.boxSize:A+self.boxSize,self.boxSize:B+self.boxSize] = indata
#t2 = time.time()
xint,yint = int(x)-self.boxSize,int(y)-self.boxSize
cx,cy = x-int(x)+self.boxSize,y-int(y)+self.boxSize
sx,sy = int(round((x-int(x))*self.repFact)),int(round((y-int(y))*self.repFact))
cut = np.copy(bigIn[self.boxSize+yint:yint+3*self.boxSize+1,self.boxSize+xint:self.boxSize+xint+3*self.boxSize+1])
if self.imData is not None:
origData = np.copy(self.imData)
else: origData = None
self.imData = cut
if type(cx)==type(1.0):
self._flatRadial(np.array([cx]),np.array([cy]))
else:
self._flatRadial(cx,cy)
if origData is not None:
self.imData = origData
#t2a = time.time()
if not useLinePSF:
###original moffat profile creation
#don't need to shift this up and right because the _flatRadial function handles the moffat sub-pixel centering.
#moff=downSample2d(self.moffat(self.repRads),self.repFact)*amp
if self.lookupTable is not None:
#(pa,pb)=moff.shape
#shift the lookuptable right and up to account for the off-zero centroid
slu = np.copy(self.lookupTable)
(a,b) = slu.shape
if sx>0:
sec = slu[:,b-sx:]
slu[:,sx:] = slu[:,:b-sx]
slu[:,:sx] = sec
if sy>0:
sec = slu[a-sy:,:]
slu[sy:,:] = slu[:a-sy,:]
slu[:sy,:] = sec
###original lookup table creation
#slu = downSample2d(slu,self.repFact)*amp*self.repFact*self.repFact
###this is a merger of the original moffat and lookup table lines above.
###results in a significant performance boost.
psf = downSample2d(slu+self.moffat(self.repRads)/rf2,self.repFact)*amp*rf2
###original sum of lookup table and moffat profile.
###not needed in the newer performance boosted version.
#psf = slu+moff
else:
# No lookup table available; fall back to the pure (sub-pixel shifted) Moffat profile.
psf = downSample2d(self.moffat(self.repRads),self.repFact)*amp
if verbose: print("Lookup table is none. Just using Moffat profile.")
else:
lpsf = np.copy(self.longPSF)
(a,b) = lpsf.shape
#cubic interpolation doesn't do as good as the x10 subsampling
#quintic does just about as well, linear sucks
#f=sci.interpolate.interp2d(self.dx,self.dy,downSample2d(lpsf,self.repFact),kind='linear')
#psf=f(self.dx-float(sx)/self.repFact,self.dy-float(sy)/self.repFact)*amp
if sx>0:
sec = lpsf[:,b-sx:]
lpsf[:,sx:] = lpsf[:,:b-sx]
lpsf[:,:sx] = sec
if sy>0:
sec = lpsf[a-sy:,:]
lpsf[sy:,:] = lpsf[:a-sy,:]
lpsf[:sy,:] = sec
psf=downSample2d(lpsf,self.repFact)*amp
#this is a cheat to handle the outer edges that can go negative after convolution
w=np.where(psf<0)
psf[w]=0.0
self.fitFluxCorr=1. #HACK! Could get rid of this in the future...
#t3 = time.time()
(a,b) = psf.shape
if addNoise:
if gain is not None:
psf+=sci.randn(a,b)*np.sqrt(np.abs(psf)/float(gain) )
#old poisson experimenting
#psfg = (psf+bg)*gain
#psf = (np.random.poisson(np.clip(psfg,0,np.max(psfg))).astype('float64')/gain).astype(indata.dtype)
else:
print("Please set the gain variable before trying to plant with Poisson noise.")
raise TypeError
if plantIntegerValues:
psf = np.round(psf)
#t4 = time.time()
bigOut[yint+self.boxSize:yint+3*self.boxSize+1,xint+self.boxSize:xint+3*self.boxSize+1]+=psf
if returnModel:
return bigOut[self.boxSize:A+self.boxSize,self.boxSize:B+self.boxSize]
if plantBoxWidth is not None:
a = max(0,int(y)-plantBoxWidth)
b = min(A,int(y)+plantBoxWidth+1)
c = max(0,int(x)-plantBoxWidth)
d = min(B,int(x)+plantBoxWidth+1)
indata[a:b,c:d] += bigOut[self.boxSize:A + self.boxSize, self.boxSize:B + self.boxSize][a:b,c:d]
#indata[int(y)-plantBoxWidth:int(y)+plantBoxWidth+1,int(x)-plantBoxWidth:int(x)+plantBoxWidth] += bigOut[self.boxSize:A + self.boxSize, self.boxSize:B + self.boxSize][int(y)-plantBoxWidth:int(y)+plantBoxWidth+1,int(x)-plantBoxWidth:int(x)+plantBoxWidth]
else:
indata+=bigOut[self.boxSize:A+self.boxSize, self.boxSize:B+self.boxSize]
#t5 = time.time()
#print(t5-t4,t4-t3,t3-t2a,t2a-t2,t2-t1)
return indata
def plant(self, x_in, y_in, amp_in, indata,
useLinePSF=False, returnModel=False, verbose=False,
addNoise=True, plantIntegerValues=False, gain=None, plantBoxWidth = None):
"""
Plant a star at coordinates x_in,y_in with amplitude amp_in.
--x_in, y_in, and amp_in can be individual values, or 1D arrays of values.
-indata is the array in which you want to plant the source. Recommend passing as np.copy(indata)
-addNoise=True to add gaussian noise. gain variable must be set.
-gain must be manually set if adding noise.
-useLinePSF=True to use the TSF rather than the circular PSF.
-returnModel=True to not actually plant in the data, but return an array of the same size containing the TSF or
PSF without noise added.
-plantBoxWidth is the width of the planting region in pixels centred on the source location. If this is set to a
value, then the planted source pixels will only be within a box of width 2*plantBoxWidth+1. Only applies to a
single plant location. If more than one plant location is assigned, the entire image is altered.
-plantIntegerValues=True will round all planted values before adding them to the input data.
-verbose=True will do all of this verbosely.
"""
if not hasattr(x_in,'__len__'):
x_in,y_in,amp_in = np.array([x_in]),np.array([y_in]),np.array([amp_in])
rf2 = float(self.repFact*self.repFact)
#self.boxSize=len(self.lookupTable)/self.repFact/2
self.boxSize = int(len(self.R[0])/self.repFact/2)
#t1 = time.time()
(A,B) = indata.shape
bigIn = np.zeros((A+2*self.boxSize,B+2*self.boxSize),dtype=indata.dtype)
bigOut = np.zeros((A+2*self.boxSize,B+2*self.boxSize),dtype=indata.dtype)
bigIn[self.boxSize:A+self.boxSize,self.boxSize:B+self.boxSize] = indata
if self.imData is not None:
origData = np.copy(self.imData)
else: origData = None
for ii in range(len(x_in)):
x,y,amp = x_in[ii],y_in[ii],amp_in[ii]
#t2 = time.time()
xint,yint = int(x)-self.boxSize,int(y)-self.boxSize
cx,cy = x-int(x)+self.boxSize,y-int(y)+self.boxSize
sx,sy = int(round((x-int(x))*self.repFact)),int(round((y-int(y))*self.repFact))
cut = np.copy(bigIn[self.boxSize+yint:yint+3*self.boxSize+1,self.boxSize+xint:self.boxSize+xint+3*self.boxSize+1])
self.imData = cut
if type(cx)==type(1.0):
self._flatRadial(np.array([cx]),np.array([cy]))
else:
self._flatRadial(cx,cy)
if origData is not None:
self.imData = origData
#t2a = time.time()
if not useLinePSF:
###original moffat profile creation
#don't need to shift this up and right because the _flatRadial function handles the moffat sub-pixel centering.
#moff=downSample2d(self.moffat(self.repRads),self.repFact)*amp
if self.lookupTable is not None:
#(pa,pb)=moff.shape
#shift the lookuptable right and up to account for the off-zero centroid
slu = np.copy(self.lookupTable)
(a,b) = slu.shape
if sx>0:
sec = slu[:,b-sx:]
slu[:,sx:] = slu[:,:b-sx]
slu[:,:sx] = sec
if sy>0:
sec = slu[a-sy:,:]
slu[sy:,:] = slu[:a-sy,:]
slu[:sy,:] = sec
###original lookup table creation
#slu = downSample2d(slu,self.repFact)*amp*self.repFact*self.repFact
###this is a merger of the original moffat and lookup table lines above.
###results in a significant performance boost.
psf = downSample2d(slu+self.moffat(self.repRads)/rf2,self.repFact)*amp*rf2
###original sum of lookup table and moffat profile.
###not needed in the newer performance boosted version.
#psf = slu+moff
else:
# No lookup table available; fall back to the pure (sub-pixel shifted) Moffat profile.
psf = downSample2d(self.moffat(self.repRads),self.repFact)*amp
if verbose: print("Lookup table is none. Just using Moffat profile.")
else:
lpsf = np.copy(self.longPSF)
(a,b) = lpsf.shape
#cubic interpolation doesn't do as good as the x10 subsampling
#quintic does just about as well, linear sucks
#f=sci.interpolate.interp2d(self.dx,self.dy,downSample2d(lpsf,self.repFact),kind='linear')
#psf=f(self.dx-float(sx)/self.repFact,self.dy-float(sy)/self.repFact)*amp
if sx>0:
sec = lpsf[:,b-sx:]
lpsf[:,sx:] = lpsf[:,:b-sx]
lpsf[:,:sx] = sec
if sy>0:
sec = lpsf[a-sy:,:]
lpsf[sy:,:] = lpsf[:a-sy,:]
lpsf[:sy,:] = sec
psf=downSample2d(lpsf,self.repFact)*amp
#this is a cheat to handle the outer edges that can go negative after convolution
w=np.where(psf<0)
psf[w]=0.0
bigOut[yint+self.boxSize:yint+3*self.boxSize+1,xint+self.boxSize:xint+3*self.boxSize+1]+=psf
self.fitFluxCorr=1. #HACK! Could get rid of this in the future...
#t3 = time.time()
(a,b) = psf.shape
if addNoise:
if gain is not None:
bigOut+=sci.randn(bigOut.shape[0],bigOut.shape[1])*np.sqrt(np.abs(bigOut)/float(gain) )
#old poisson experimenting
#psfg = (psf+bg)*gain
#psf = (np.random.poisson(np.clip(psfg,0,np.max(psfg))).astype('float64')/gain).astype(indata.dtype)
else:
print()
print("Please set the gain variable before trying to plant with Poisson noise.")
print()
raise TypeError
if plantIntegerValues:
bigOut = np.round(bigOut)
#t4 = time.time()
if returnModel:
return bigOut[self.boxSize:A+self.boxSize,self.boxSize:B+self.boxSize]
if plantBoxWidth is not None and len(x_in) == 1:
x,y,amp = x_in,y_in,amp_in
a = max(0,int(y)-plantBoxWidth)
b = min(A,int(y)+plantBoxWidth+1)
c = max(0,int(x)-plantBoxWidth)
d = min(B,int(x)+plantBoxWidth+1)
indata[a:b,c:d] += bigOut[self.boxSize:A + self.boxSize, self.boxSize:B + self.boxSize][a:b,c:d]
#indata[int(y)-plantBoxWidth:int(y)+plantBoxWidth+1,int(x)-plantBoxWidth:int(x)+plantBoxWidth] += bigOut[self.boxSize:A + self.boxSize, self.boxSize:B + self.boxSize][int(y)-plantBoxWidth:int(y)+plantBoxWidth+1,int(x)-plantBoxWidth:int(x)+plantBoxWidth]
else:
indata+=bigOut[self.boxSize:A+self.boxSize, self.boxSize:B+self.boxSize]
#t5 = time.time()
#print(t5-t4,t4-t3,t3-t2a,t2a-t2,t2-t1)
return indata
def remove(self,x,y,amp,data,useLinePSF=False):
"""
The opposite of plant.
"""
self.model = self.plant(x,y,amp,data,addNoise=False,returnModel=True,useLinePSF=useLinePSF)
return data-self.model
def writeto(self,name):
"""
Convenient file saving function to save the round PSF. Probably not necessary.
"""
try:
os.remove(name)
except: pass
HDU=pyf.PrimaryHDU(self.psf)
List=pyf.HDUList([HDU])
List.writeto(name)
def fitMoffat(self, imData, centX, centY,
boxSize=25, bgRadius=20,
verbose=False, mode='smart',
quickFit = False, fixAB=False,
fitXY=False, fitMaxRadius=None, logRadPlot=False,
ftol = 1.49012e-8, maxfev = 250):
"""
Fit a moffat profile to the input data, imData, at point centX,centY.
- boxSize is the width around the centre used in the fitting.
- bgRadius is the radius beyond which the background is estimated.
This must be smaller than the PSF width itself used when initializing
the modelPSF object (parameters x and y).
- verbose=True to see a lot of fitting output and a radial plot of each fit.
- logRadPlot=True to see the plot in log radius.
- mode='smart' is the background determination method used. See bgFinder for details.
if mode is set to a floating point number, the background will be fixed to that
value during the fitting procedure.
- fixAB=True to fit only the amplitude.
- fitXY=False *** this is currently not implemented***
- fitMaxRadius ***not currently implemented***
"""
self.verbose = verbose
self.imData = np.copy(imData)
self.boxSize = boxSize
self._flatRadial(centX-0.5,centY-0.5)#set the radial distribution pixels
w = np.where(self.rads>bgRadius)
if not (isinstance(mode, float) or isinstance(mode, int)):
bgf = bgFinder.bgFinder(self.subSec[w])
self.bg = bgf(method=mode)
else:
self.bg = float(mode) # doing this for safety
peakGuess_1 = (np.max(self.subSec)-self.bg)/(np.max(self.moffat(self.rads)))
peakGuess_2 = (np.sum(self.subSec)-self.bg*self.subSec.size)/(np.sum(self.moffat(self.rads)))
if (abs(peakGuess_1-peakGuess_2)/peakGuess_1)<0.5:
peakGuess = peakGuess_1
else:
peakGuess = peakGuess_2
if fitXY:
print('This is hacky and really slow. Not yet meant for production.')
self.verbose = False
best = [1.e8,-1.,-1.,-1.]
print('Fitting XYA')
deltaX = np.arange(-0.3,0.3+1./float(self.repFact),1./float(self.repFact)/2.)
deltaY = np.arange(-0.3,0.3+1./float(self.repFact),1./float(self.repFact)/2.)
for ii in range(len(deltaX)):
for jj in range(len(deltaY)):
self._flatRadial(centX+deltaX[ii],centY+deltaY[jj])
lsqf = opti.leastsq(self._residFAB,(peakGuess),args=(self.alpha,self.beta,fitMaxRadius),maxfev=maxfev)
res = np.sum(self._residFAB((lsqf[0][0]),self.alpha,self.beta,fitMaxRadius)**2)
if best[0]>= res:
best = [res,lsqf[0],deltaX[ii],deltaY[jj]]
return (best[2],best[3])
elif fixAB:
lsqf = opti.leastsq(self._residFAB,(peakGuess),args=(self.alpha,self.beta,fitMaxRadius),maxfev=maxfev)
elif quickFit:
lsqf = opti.leastsq(self._residNoRep,(peakGuess,self.alpha,self.beta),args=(fitMaxRadius),maxfev=maxfev,ftol=ftol)
else:
lsqf = opti.leastsq(self._resid,(peakGuess,self.alpha,self.beta),args=(fitMaxRadius),maxfev=maxfev,ftol=ftol)
if self.verbose: print(lsqf)
self.A = lsqf[0][0]
if not fixAB:
self.alpha = lsqf[0][1]
self.beta = lsqf[0][2]
if fixAB:
res=self._residFAB((self.A),self.alpha,self.beta,fitMaxRadius)
else:
res=self._resid((self.A,self.alpha,self.beta),fitMaxRadius)
self.chi = np.sqrt(np.sum(res**2)/float(len(res)-1))
self.chiFluxNorm = np.sqrt(np.sum((res/self.A)**2)/float(len(res)-1))
self.fitted = True
self.PSF = self.moffat(self.R)
self.PSF /= np.sum(self.PSF)
self.psf = downSample2d(self.PSF,self.repFact)
if self.verbose:
print(' A:%s, alpha:%s, beta:%s'%(self.A,self.alpha,self.beta))
fig = pyl.figure('Radial Profile')
ax = fig.add_subplot(111)
pyl.scatter(downSample2d(self.repRads,self.repFact),self.subSec)
r = np.linspace(0,np.max(self.rads),100)
pyl.plot(r,self.A*self.moffat(r)+self.bg,'r--')
fw = self.FWHM(fromMoffatProfile=True)
print('FWHM: {}'.format(fw))
pyl.title('FWHM: {:.3f} alpha: {:.3f} beta: {:.3f}'.format(fw,self.alpha,self.beta))
if logRadPlot: ax.set_xscale('log')
pyl.show()
return res
def genLookupTable(self,imData,centXs,centYs,verbose=False,bpMask=None,threeSigCut=True,bgRadius=20.,returnAmpsCutouts = False):
"""
Generate the lookup table from input imData and x/y coordinates in the numpy arrays centX,centY.
verbose=True to see a lot of fitting output.
bpMask=array to provide a bad pixel mask.
threeSigCut=True to apply a 3 sigma cut before reporting the mean lookupTable. Only useful for ~5 or more stars.
returnAmpsCutouts returns the fitted amplitudes of each moffat fit and the image cutouts, and the centroid x and y in each cutout
"""
#(AD,BD) = imData.shape
adjCentXs=centXs-0.5
adjCentYs=centYs-0.5
self.verbose=verbose
self.imData=imData*1.0
self.boxSize=int(len(self.R[0])/self.repFact/2)
self.psfStars=[]
if bpMask is not None:
w=np.where(bpMask==0)
imData[w]=np.median(imData)
shiftIms=[]
fluxes=[]
cutouts = []
cxs = []
cys = []
bgs = []
#print centXs,len(centXs)
for ii in range(len(centXs)):
#store the psf star location
self.psfStars.append([centXs[ii],centYs[ii]])
xint,yint=int(adjCentXs[ii])-self.boxSize-2,int(adjCentYs[ii])-self.boxSize-2
#if xint<=0 or yint<=0 or xint+2*self.boxSize+5>=BD or yint+2*self.boxSize+5>=BD: continue
cx,cy=adjCentXs[ii]-int(adjCentXs[ii])+self.boxSize+2,adjCentYs[ii]-int(adjCentYs[ii])+self.boxSize+2
cx+=0.5
cy+=0.5
cut=imData[yint:yint+2*self.boxSize+5,xint:xint+2*self.boxSize+5]
(cA,cB) = cut.shape
if cA!=2*self.boxSize+5 or cB!=2*self.boxSize+5: continue
self.fitMoffat(cut,np.array([cx]),np.array([cy]),self.boxSize,verbose=verbose,fixAB=True,fitXY=False,fitMaxRadius=3.,bgRadius=bgRadius)
self.imData=np.copy(imData) #this is necessary because the imdata gets set to the shifted image subsection
moff=downSample2d(self.moffat(self.repRads),self.repFact)*self.A
if returnAmpsCutouts:
cutouts.append(np.copy(cut))
cxs.append(cx)
cys.append(cy)
bgs.append(self.bg)
diff=cut-self.bg
diff[2:-2,2:-2]-=moff
fluxes.append(self.A)
self.psfStars[ii].append(self.A)
repCut=expand2d(diff,self.repFact)
cx,cy=adjCentXs[ii]-int(adjCentXs[ii])+self.boxSize+2,adjCentYs[ii]-int(adjCentYs[ii])+self.boxSize+2
kx,ky=int(round(cx*self.repFact)),int(round(cy*self.repFact))
shiftedImage=repCut[ky-self.repFact*self.boxSize:ky+self.repFact*self.boxSize+self.repFact,
kx-self.repFact*self.boxSize:kx+self.repFact*self.boxSize+self.repFact]
shiftIms.append(shiftedImage)
shiftIms=np.array(shiftIms)
fluxes=np.array(fluxes)
print(fluxes)
self.maxFlux=1.0
invFluxes=self.maxFlux/fluxes
for ii in range(len(shiftIms)):
shiftIms[ii]*=invFluxes[ii]
if threeSigCut:
meanLUT=np.median(shiftIms,axis=0)
stdLUT=np.std(shiftIms,axis=0)
bigMean=np.repeat(np.array([meanLUT]),len(shiftIms),axis=0)
w=np.where( np.abs(bigMean-shiftIms)>3*stdLUT)
shiftIms[w]=np.nan
self.lookupTable=np.nanmean(shiftIms,axis=0)/self.maxFlux
else:
self.lookupTable=np.nanmean(shiftIms,axis=0)/self.maxFlux
self.psfStars=np.array(self.psfStars)
self.genPSF()
if returnAmpsCutouts:
return (fluxes,cutouts,cxs,cys,bgs)
return None
def genPSF(self,A=1.0):
"""
generate the psf with lookup table. Convenience function only.
"""
self.moffProf=self.moffat(self.R-np.min(self.R))
self.fullPSF=(self.moffProf+self.lookupTable*self.repFact*self.repFact)*A
self.fullpsf=downSample2d(self.fullPSF,self.repFact)
def _flatRadial(self,centX,centY):
"""
Convenience function for the fitMoffat routines.
"""
if type(centX)!=type(1.) and type(centX)!=type(np.float64(1.)):
centX=centX[0]
centY=centY[0]
(A,B)=self.imData.shape
a=int(max(0,centY-self.boxSize))
b=int(min(A,centY+self.boxSize+1))
c=int(max(0,centX-self.boxSize))
d=int(min(B,centX+self.boxSize+1))
self.subSec=self.imData[a:b,c:d]
self.repSubsec=expand2d(self.subSec,self.repFact)
rangeY=np.arange(a*self.repFact,b*self.repFact)/float(self.repFact)
rangeX=np.arange(c*self.repFact,d*self.repFact)/float(self.repFact)
dx2=(centX-rangeX)**2
####slow version kept for clarity
#repRads=[]
#for ii in range(len(rangeY)):
# repRads.append((centY-rangeY[ii])**2+dx2)
#self.repRads=np.array(repRads)**0.5
#####
#this is the faster version that produces the same result
dy2 = (centY-rangeY)**2
self.repRads = (np.repeat(dy2,len(rangeY)).reshape(len(rangeY),len(rangeX)) + np.repeat(np.array([dx2]),len(rangeY),axis = 0).reshape(len(rangeY),len(rangeX)))**0.5
self.dX=centX-rangeX
self.dY=centY-rangeY
self.dx=centX-np.arange(c,d)
self.dy=centY-np.arange(a,b)
#there are more efficient ways to do this, but I leave it like this for clarity.
#subSec=[]
#arrR=[]
#for ii in range(a,b):
# arrR.append([])
# for jj in range(c,d):
# D=((centY-ii)**2+(centX-jj)**2)**0.5
#
# arrR[-1].append(D)
        ##faster version of the above, just like done with repRads about 20 lines up.
arrR = []
dy2 = (centY - np.arange(a, b)) ** 2
dx2 = (centX - np.arange(c, d)) ** 2
for ii in range(len(dy2)):
arrR.append(dy2[ii] + dx2)
arrR = np.array(arrR) ** 0.5
#subSecFlat=self.subSec.reshape((b-a)*(c-d))
self.rads=np.copy(arrR)
#arrR=arrR.reshape((b-a)*(d-c))
#arg=np.argsort(arrR)
#self.rDist=arrR[arg]*1.
#self.fDist=subSecFlat[arg]*1.
def _resid(self,p,maxRad):
(A,alpha,beta)=p
self.alpha=alpha
self.beta=beta
err=(self.subSec-(self.bg+A*downSample2d(self.moffat(self.repRads),self.repFact))).reshape(self.subSec.size)
if self.alpha<=0 or self.beta<=0:
if maxRad is not None:
w = np.where(self.rads.reshape(self.subSec.size)<maxRad)
return err[w]
return err*np.inf
if self.verbose: print(A,alpha,beta,np.sqrt(np.sum(err**2)/(self.subSec.size-1.)))
if maxRad is not None:
w = np.where(self.rads.reshape(self.subSec.size)<maxRad)
return err[w]
return err
def _residNoRep(self,p,maxRad):
(A,alpha,beta)=p
self.alpha=alpha
self.beta=beta
err=(self.subSec-(self.bg+A*self.moffat(self.rads))).reshape(self.subSec.size)
if self.alpha<0 or self.beta<0:
if maxRad is not None:
w = np.where(self.rads.reshape(self.subSec.size)<maxRad)
return err[w]
return err*np.inf
if self.verbose: print(A,alpha,beta,np.sqrt(np.sum(err**2)/(self.subSec.size-1.)))
if maxRad is not None:
w = np.where(self.rads.reshape(self.subSec.size)<maxRad)
return err[w]
return err
def _residFAB(self,p,alpha,beta,maxRad):
(A)=p
self.alpha=alpha
self.beta=beta
err=(self.subSec-(self.bg+A*downSample2d(self.moffat(self.repRads),self.repFact))).reshape(self.subSec.size)
#if maxRad>0:
# w=np.where(self.rDist<=maxRad)
#else:
# w=np.arange(len(self.rDist))
#err=self.fDist[w]-(self.bg+A*self.moffat(self.rDist[w]))
if self.verbose: print(A,alpha,beta,np.sqrt(np.sum(err**2)/(self.subSec.size-1.)))
return err
"""
#much too slow compared to fitting each star individually
def _residMultiStarTest(self,p,maxRad):
#print p
alpha = p[-2]
beta = p[-1]
#(A,alpha,beta)=p
self.alpha=alpha
self.beta=beta
errs = []
n = 0
for ii in range(len(self.repRadsArr)):
A = p[ii]
err=(self.subSecs[ii]-(self.bgs[ii]+A*downSample2d(self.moffat(self.repRadsArr[ii]),self.repFact))).reshape(self.subSecs[ii].size)
errs.append(np.copy(err))
n+=len(err)
errs = np.array(errs).reshape(n)
if self.alpha<0 or self.beta<0: return np.inf
if self.verbose: print p,np.sqrt(np.sum(errs**2)/(n-1.))
return err
"""
if __name__=="__main__":
import pylab as pyl
psfNoLine=modelPSF(np.arange(25),np.arange(25),alpha=1.5,beta=2.0,repFact=10)
psfNoLine.writeto('noline.fits')
print()
psfLine=modelPSF(np.arange(25),np.arange(25),alpha=1.5,beta=2.0,repFact=10)
psfLine.line(4.0,32.,0.45)
psfLine.writeto('line.fits')
sys.exit()
(A,B)=psf.shape
for i in range(int(A/2),int(A/2+1)):
pyl.plot(psf.x,psf.psf[i,:])
for i in range(int(A*10/2),int(A*10/2+1)):
pyl.plot(psf.X,psf.PSF[i,:],linestyle=':')
pyl.show()
|
fraserw/PyMOP
|
trippy/psf.py
|
Python
|
gpl-2.0
| 52,063
|
[
"Gaussian"
] |
764c74d47c69bd84dabfa03a2a2f0ce1cd25e03d89dfd75c321320c68df0209a
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#===============================================================================
# Copyright (c) 2012 - 2014, GPy authors (see AUTHORS.txt).
# Copyright (c) 2014, James Hensman, Max Zwiessele
# Copyright (c) 2015, Max Zwiessele
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
from __future__ import print_function
import os
import sys
from setuptools import setup, Extension
import numpy as np
import codecs
def read(fname):
with codecs.open(fname, 'r', 'latin') as f:
return f.read()
def read_to_rst(fname):
try:
import pypandoc
rstname = "{}.{}".format(os.path.splitext(fname)[0], 'rst')
pypandoc.convert(read(fname), 'rst', format='md', outputfile=rstname)
with open(rstname, 'r') as f:
rststr = f.read()
return rststr
#return read(rstname)
except ImportError:
return read(fname)
desc = """
Please refer to the github homepage for detailed instructions on installation and usage.
"""
version_dummy = {}
exec(read('GPy/__version__.py'), version_dummy)
__version__ = version_dummy['__version__']
del version_dummy
#Mac OS X Clang doesn't support OpenMP at the current time.
#This detects if we are building on a Mac
def ismac():
return sys.platform[:6] == 'darwin'
if ismac():
compile_flags = [ '-O3', ]
link_args = []
else:
compile_flags = [ '-fopenmp', '-O3']
link_args = ['-lgomp' ]
ext_mods = [Extension(name='GPy.kern.src.stationary_cython',
sources=['GPy/kern/src/stationary_cython.c',
'GPy/kern/src/stationary_utils.c'],
include_dirs=[np.get_include(),'.'],
extra_compile_args=compile_flags,
extra_link_args = link_args),
Extension(name='GPy.util.choleskies_cython',
sources=['GPy/util/choleskies_cython.c'],
include_dirs=[np.get_include(),'.'],
extra_link_args = link_args,
extra_compile_args=compile_flags),
Extension(name='GPy.util.linalg_cython',
sources=['GPy/util/linalg_cython.c'],
include_dirs=[np.get_include(),'.'],
extra_compile_args=compile_flags),
Extension(name='GPy.kern.src.coregionalize_cython',
sources=['GPy/kern/src/coregionalize_cython.c'],
include_dirs=[np.get_include(),'.'],
extra_compile_args=compile_flags),
Extension(name='GPy.models.state_space_cython',
sources=['GPy/models/state_space_cython.c'],
include_dirs=[np.get_include(),'.'],
extra_compile_args=compile_flags)]
setup(name = 'GPy',
version = __version__,
author = read_to_rst('AUTHORS.txt'),
author_email = "gpy.authors@gmail.com",
description = ("The Gaussian Process Toolbox"),
long_description = desc,
license = "BSD 3-clause",
keywords = "machine-learning gaussian-processes kernels",
url = "http://sheffieldml.github.com/GPy/",
download_url='https://github.com/SheffieldML/GPy/',
ext_modules = ext_mods,
packages = ["GPy",
"GPy.core",
"GPy.core.parameterization",
"GPy.kern",
"GPy.kern.src",
"GPy.kern.src.psi_comp",
"GPy.models",
"GPy.inference",
"GPy.inference.optimization",
"GPy.inference.mcmc",
"GPy.inference.latent_function_inference",
"GPy.likelihoods",
"GPy.mappings",
"GPy.examples",
"GPy.testing",
"GPy.util",
"GPy.plotting",
"GPy.plotting.gpy_plot",
"GPy.plotting.matplot_dep",
"GPy.plotting.matplot_dep.controllers",
"GPy.plotting.plotly_dep",
],
package_dir={'GPy': 'GPy'},
#package_data = {'GPy': ['defaults.cfg', 'installation.cfg',
# 'util/data_resources.json',
# 'util/football_teams.json',
# 'testing/plotting_tests/baseline/*.png'
# ]},
#data_files=[('GPy/testing/plotting_tests/baseline', 'testing/plotting_tests/baseline/*.png'),
# ('GPy/testing/', 'GPy/testing/pickle_test.pickle'),
# ],
include_package_data = True,
py_modules = ['GPy.__init__'],
test_suite = 'GPy.testing',
setup_requires = ['numpy>=1.7'],
install_requires = ['numpy>=1.7', 'scipy>=0.16', 'six', 'paramz==0.7.4'],
extras_require = {'docs':['sphinx'],
'optional':['mpi4py',
'ipython>=4.0.0',
],
'plotting':['matplotlib >= 1.3',
'plotly >= 1.8.6'],
'notebook':['jupyter_client >= 4.0.6',
'ipywidgets >= 4.0.3',
'ipykernel >= 4.1.0',
'notebook >= 4.0.5',
],
},
classifiers=['License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Framework :: IPython',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
# Check config files and settings:
local_file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'GPy', 'installation.cfg'))
home = os.getenv('HOME') or os.getenv('USERPROFILE')
user_file = os.path.join(home,'.config', 'GPy', 'user.cfg')
print("")
try:
if not os.path.exists(user_file):
# Does an old config exist?
old_user_file = os.path.join(home,'.gpy_user.cfg')
if os.path.exists(old_user_file):
# Move it to new location:
print("GPy: Found old config file, moving to new location {}".format(user_file))
if not os.path.exists(os.path.dirname(user_file)):
os.makedirs(os.path.dirname(user_file))
os.rename(old_user_file, user_file)
else:
# No config file exists, save informative stub to user config folder:
print("GPy: Saving user configuration file to {}".format(user_file))
if not os.path.exists(os.path.dirname(user_file)):
os.makedirs(os.path.dirname(user_file))
with open(user_file, 'w') as f:
with open(local_file, 'r') as l:
tmp = l.read()
f.write(tmp)
else:
print("GPy: User configuration file at location {}".format(user_file))
except:
print("GPy: Could not write user configuration file {}".format(user_file))
|
ysekky/GPy
|
setup.py
|
Python
|
bsd-3-clause
| 9,169
|
[
"Gaussian"
] |
028b0c6a87f1f3b2fab82425d32094b5faa0238294b6d9bf57408632a5485996
|
from __future__ import print_function
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import random
def cv_carsGLM():
# read in the dataset and construct training set (and validation set)
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
    # choose the type of model-building exercise (regression, binomial classification, or
    # poisson regression). 0:regression, 1:binomial, 2:poisson
problem = random.sample(list(range(3)),1)[0]
# pick the predictors and response column, along with the correct family
predictors = ["displacement","power","weight","acceleration","year"]
if problem == 1 :
response_col = "economy_20mpg"
family = "binomial"
cars[response_col] = cars[response_col].asfactor()
elif problem == 2 :
family = "poisson"
response_col = "cylinders"
else :
family = "gaussian"
response_col = "economy"
print("Distribution: {0}".format(family))
print("Response column: {0}".format(response_col))
## cross-validation
# 1. check that cv metrics are the same over repeated "Modulo" runs
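    # (Illustrative note, based on an assumption about H2O's fold_assignment schemes:
    #  "Modulo" deterministically assigns row i to fold i % nfolds, so two runs with the
    #  same nfolds build identical folds and identical CV metrics, whereas "Random" below
    #  draws new assignments each run. E.g. for nfolds=3, rows 0..5 -> folds 0,1,2,0,1,2.)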
nfolds = random.randint(3,10)
glm1 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Modulo")
glm2 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Modulo")
pyunit_utils.check_models(glm1, glm2, True)
# 2. check that cv metrics are different over repeated "Random" runs
nfolds = random.randint(3,10)
glm1 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Random")
glm2 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Random")
try:
pyunit_utils.check_models(glm1, glm2, True)
assert False, "Expected models to be different over repeated Random runs"
except AssertionError:
assert True
# 3. folds_column
num_folds = random.randint(2,5)
fold_assignments = h2o.H2OFrame([[random.randint(0,num_folds-1)] for f in range(cars.nrow)])
fold_assignments.set_names(["fold_assignments"])
cars = cars.cbind(fold_assignments)
glm = h2o.glm(y=cars[response_col], x=cars[predictors], training_frame=cars, family=family,
fold_column="fold_assignments", keep_cross_validation_predictions=True)
num_cv_models = len(glm._model_json['output']['cross_validation_models'])
assert num_cv_models==num_folds, "Expected {0} cross-validation models, but got " \
"{1}".format(num_folds, num_cv_models)
cv_model1 = h2o.get_model(glm._model_json['output']['cross_validation_models'][0]['name'])
cv_model2 = h2o.get_model(glm._model_json['output']['cross_validation_models'][1]['name'])
# 4. keep_cross_validation_predictions
cv_predictions = glm1._model_json['output']['cross_validation_predictions']
assert cv_predictions is None, "Expected cross-validation predictions to be None, but got {0}".format(cv_predictions)
cv_predictions = glm._model_json['output']['cross_validation_predictions']
assert len(cv_predictions)==num_folds, "Expected the same number of cross-validation predictions " \
"as folds, but got {0}".format(len(cv_predictions))
# # 5. manually construct models
# fold1 = cars[cars["fold_assignments"]==0]
# fold2 = cars[cars["fold_assignments"]==1]
# manual_model1 = h2o.glm(y=fold2[response_col],
# x=fold2[predictors],
# validation_y=fold1[response_col],
# validation_x=fold1[predictors],
# family=family)
# manual_model2 = h2o.glm(y=fold1[response_col],
# x=fold1[predictors],
# validation_y=fold2[response_col],
# validation_x=fold2[predictors],
# family=family)
## boundary cases
# 1. nfolds = number of observations (leave-one-out cross-validation)
# TODO: PUBDEV-1776
#glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow, family=family,
# fold_assignment="Modulo")
# 2. nfolds = 0
glm1 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=0, family=family)
# check that this is equivalent to no nfolds
glm2 = h2o.glm(y=cars[response_col], x=cars[predictors], family=family)
pyunit_utils.check_models(glm1, glm2)
# 3. cross-validation and regular validation attempted
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=random.randint(3,10), validation_y=cars[response_col],
validation_x=cars[predictors], family=family)
## error cases
# 1. nfolds == 1 or < 0
try:
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=random.sample([-1,1], 1)[0],
family=family)
assert False, "Expected model-build to fail when nfolds is 1 or < 0"
except EnvironmentError:
assert True
# 2. more folds than observations
try:
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow+1, family=family,
fold_assignment="Modulo")
assert False, "Expected model-build to fail when nfolds > nobs"
except EnvironmentError:
assert True
# 3. fold_column and nfolds both specified
try:
glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=3, fold_column="fold_assignments",
family=family, training_frame=cars)
assert False, "Expected model-build to fail when fold_column and nfolds both specified"
except EnvironmentError:
assert True
# # 4. fold_column and fold_assignment both specified
# try:
# glm = h2o.glm(y=cars[response_col], x=cars[predictors], fold_assignment="Random", fold_column="fold_assignments",
# family=family, training_frame=cars)
# assert False, "Expected model-build to fail when fold_column and fold_assignment both specified"
# except EnvironmentError:
# assert True
if __name__ == "__main__":
pyunit_utils.standalone_test(cv_carsGLM)
else:
cv_carsGLM()
|
nilbody/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_DEPRECATED_cv_carsGLM.py
|
Python
|
apache-2.0
| 6,421
|
[
"Gaussian"
] |
080fba51e726e227f19c8c8785d4ae3c3b9bf65421b36cbbe5995b96c21e4347
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import espressomd
import numpy as np
import unittest as ut
import unittest_decorators as utx
import tests_common
class InteractionsNonBondedTest(ut.TestCase):
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
box_l = 10.
start_pos = np.random.rand(3) * box_l
axis = np.random.rand(3)
axis /= np.linalg.norm(axis)
step = axis * 0.01
step_width = np.linalg.norm(step)
def setUp(self):
self.system.box_l = [self.box_l] * 3
self.system.cell_system.skin = 0.
self.system.time_step = .1
self.system.part.add(id=0, pos=self.start_pos, type=0)
self.system.part.add(id=1, pos=self.start_pos, type=0)
def tearDown(self):
self.system.non_bonded_inter.reset()
self.system.part.clear()
# Required, since assertAlmostEqual does NOT check significant places
def assertFractionAlmostEqual(self, a, b, **args):
if abs(b) < 1E-8:
self.assertAlmostEqual(a, b, **args)
else:
self.assertAlmostEqual(a / b, 1., **args)
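    # Hedged illustration (not part of the original test): assertAlmostEqual rounds the
    # absolute difference to 7 decimal places, so it ignores relative agreement, e.g.
    #   self.assertAlmostEqual(1e-9, 5e-9)    # passes although the values differ by 5x
    #   self.assertAlmostEqual(1e7, 1e7 + 1.) # fails although they agree to ~7 significant figures
    # hence the fractional comparison above for values away from zero.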
def assertItemsFractionAlmostEqual(self, a, b):
for i, ai in enumerate(a):
self.assertFractionAlmostEqual(ai, b[i])
#
# Tests
#
# Test Generic Lennard-Jones Potential
@utx.skipIfMissingFeatures("LENNARD_JONES_GENERIC")
def test_lj_generic(self):
lj_eps = 2.12
lj_sig = 1.37
lj_cut = 2.122
lj_off = 0.185
lj_b1 = 4.22
lj_b2 = 3.63
lj_e1 = 10.32
lj_e2 = 5.81
lj_shift = -0.13
self.system.non_bonded_inter[0, 0].generic_lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, offset=lj_off,
b1=lj_b1, b2=lj_b2, e1=lj_e1, e2=lj_e2, shift=lj_shift)
E_ref = tests_common.lj_generic_potential(
r=np.arange(1, 232) * self.step_width, eps=lj_eps, sig=lj_sig,
cutoff=lj_cut, offset=lj_off, b1=lj_b1, b2=lj_b2, e1=lj_e1,
e2=lj_e2, shift=lj_shift)
for i in range(231):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
# Calculate forces
f0_sim = np.copy(self.system.part[0].f)
f1_sim = np.copy(self.system.part[1].f)
f1_ref = self.axis * tests_common.lj_generic_force(
espressomd, r=(i + 1) * self.step_width, eps=lj_eps, sig=lj_sig,
cutoff=lj_cut, offset=lj_off, b1=lj_b1, b2=lj_b2, e1=lj_e1,
e2=lj_e2)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref[i])
# force equals minus the counter-force ...
np.testing.assert_array_equal(f0_sim, -f1_sim)
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].generic_lennard_jones.set_params(
epsilon=0.)
# Test WCA Potential
@utx.skipIfMissingFeatures("WCA")
def test_wca(self):
wca_eps = 2.12
wca_sig = 1.37
wca_cutoff = wca_sig * 2.**(1. / 6.)
wca_shift = -((wca_sig / wca_cutoff)**12 - (wca_sig / wca_cutoff)**6)
self.system.non_bonded_inter[0, 0].wca.set_params(epsilon=wca_eps,
sigma=wca_sig)
E_ref = tests_common.lj_generic_potential(
r=np.arange(1, 232) * self.step_width, eps=wca_eps, sig=wca_sig,
cutoff=wca_cutoff, shift=4. * wca_shift)
for i in range(231):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
# Calculate forces
f0_sim = np.copy(self.system.part[0].f)
f1_sim = np.copy(self.system.part[1].f)
f1_ref = self.axis * tests_common.lj_generic_force(
espressomd, r=(i + 1) * self.step_width, eps=wca_eps,
sig=wca_sig, cutoff=wca_cutoff)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref[i])
# force equals minus the counter-force ...
np.testing.assert_array_equal(f0_sim, -f1_sim)
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].wca.set_params(epsilon=0., sigma=1.)
# Test Generic Lennard-Jones Softcore Potential
@utx.skipIfMissingFeatures("LJGEN_SOFTCORE")
def test_lj_generic_softcore(self):
lj_eps = 2.12
lj_sig = 1.37
lj_cut = 2.125
lj_off = 0.182
lj_b1 = 6.22
lj_b2 = 3.63
lj_e1 = 13.32
lj_e2 = 3.74
lj_shift = 0.13
lj_delta = 0.1
lj_lam = 0.34
self.system.non_bonded_inter[0, 0].generic_lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, offset=lj_off,
b1=lj_b1, b2=lj_b2, e1=lj_e1, e2=lj_e2, shift=lj_shift,
delta=lj_delta, lam=lj_lam)
for i in range(231):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.lj_generic_potential(
r=(i + 1) * self.step_width, eps=lj_eps, sig=lj_sig,
cutoff=lj_cut, offset=lj_off, b1=lj_b1, b2=lj_b2, e1=lj_e1,
e2=lj_e2, shift=lj_shift, delta=lj_delta, lam=lj_lam)
# Calculate forces
f0_sim = np.copy(self.system.part[0].f)
f1_sim = np.copy(self.system.part[1].f)
f1_ref = self.axis * tests_common.lj_generic_force(
espressomd, r=(i + 1) * self.step_width, eps=lj_eps, sig=lj_sig,
cutoff=lj_cut, offset=lj_off, b1=lj_b1, b2=lj_b2, e1=lj_e1,
e2=lj_e2, delta=lj_delta, lam=lj_lam)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
np.testing.assert_array_equal(f0_sim, -f1_sim)
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].generic_lennard_jones.set_params(
epsilon=0.)
# Test Lennard-Jones Potential
@utx.skipIfMissingFeatures("LENNARD_JONES")
def test_lj(self):
lj_eps = 1.92
lj_sig = 1.03
lj_cut = 1.123
lj_shift = 0.92
self.system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift=lj_shift)
for i in range(113):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.lj_potential(
(i + 1) * self.step_width, lj_eps, lj_sig, lj_cut,
shift=lj_shift)
# Calculate forces
f0_sim = np.copy(self.system.part[0].f)
f1_sim = np.copy(self.system.part[1].f)
f1_ref = self.axis * \
tests_common.lj_force(espressomd, r=(i + 1) * self.step_width,
eps=lj_eps, sig=lj_sig, cutoff=lj_cut)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
np.testing.assert_array_equal(f0_sim, -f1_sim)
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].lennard_jones.set_params(epsilon=0.)
# Test Lennard-Jones Cosine Potential
@utx.skipIfMissingFeatures("LJCOS")
def test_lj_cos(self):
ljcos_eps = 3.32
ljcos_sig = 0.73
ljcos_cut = 1.523
ljcos_offset = 0.223
self.system.non_bonded_inter[0, 0].lennard_jones_cos.set_params(
epsilon=ljcos_eps, sigma=ljcos_sig, cutoff=ljcos_cut,
offset=ljcos_offset)
for i in range(175):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.lj_cos_potential(
(i + 1) * self.step_width, eps=ljcos_eps, sig=ljcos_sig,
cutoff=ljcos_cut, offset=ljcos_offset)
# Calculate forces
f0_sim = np.copy(self.system.part[0].f)
f1_sim = np.copy(self.system.part[1].f)
f1_ref = self.axis * tests_common.lj_cos_force(
espressomd, (i + 1) * self.step_width, eps=ljcos_eps,
sig=ljcos_sig, cutoff=ljcos_cut, offset=ljcos_offset)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
np.testing.assert_array_equal(f0_sim, -f1_sim)
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[
0, 0].lennard_jones_cos.set_params(epsilon=0.)
# Test Lennard-Jones Cosine^2 Potential
@utx.skipIfMissingFeatures("LJCOS2")
def test_lj_cos2(self):
ljcos2_eps = 0.31
ljcos2_sig = 0.73
ljcos2_width = 1.523
ljcos2_offset = 0.321
self.system.non_bonded_inter[0, 0].lennard_jones_cos2.set_params(
epsilon=ljcos2_eps, sigma=ljcos2_sig, offset=ljcos2_offset,
width=ljcos2_width)
for i in range(267):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.lj_cos2_potential(
(i + 1) * self.step_width, eps=ljcos2_eps, sig=ljcos2_sig,
offset=ljcos2_offset, width=ljcos2_width)
# Calculate forces
f0_sim = np.copy(self.system.part[0].f)
f1_sim = np.copy(self.system.part[1].f)
f1_ref = self.axis * tests_common.lj_cos2_force(
espressomd, r=(i + 1) * self.step_width, eps=ljcos2_eps,
sig=ljcos2_sig, offset=ljcos2_offset, width=ljcos2_width)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
np.testing.assert_array_equal(f0_sim, -f1_sim)
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[
0, 0].lennard_jones_cos2.set_params(epsilon=0.)
# Test Smooth-step Potential
@utx.skipIfMissingFeatures("SMOOTH_STEP")
def test_smooth_step(self):
sst_eps = 4.92
sst_sig = 3.03
sst_cut = 1.253
sst_d = 2.52
sst_n = 11
sst_k0 = 2.13
self.system.non_bonded_inter[0, 0].smooth_step.set_params(
eps=sst_eps, sig=sst_sig, cutoff=sst_cut, d=sst_d, n=sst_n,
k0=sst_k0)
for i in range(126):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.smooth_step_potential(
r=(i + 1) * self.step_width, eps=sst_eps, sig=sst_sig,
cutoff=sst_cut, d=sst_d, n=sst_n, k0=sst_k0)
# Calculate forces
f0_sim = np.copy(self.system.part[0].f)
f1_sim = np.copy(self.system.part[1].f)
f1_ref = self.axis * tests_common.smooth_step_force(
r=(i + 1) * self.step_width, eps=sst_eps, sig=sst_sig,
cutoff=sst_cut, d=sst_d, n=sst_n, k0=sst_k0)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
np.testing.assert_array_equal(f0_sim, -f1_sim)
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].smooth_step.set_params(d=0., eps=0.)
# Test BMHTF Potential
@utx.skipIfMissingFeatures("BMHTF_NACL")
def test_bmhtf(self):
bmhtf_a = 3.92
bmhtf_b = 2.43
bmhtf_c = 1.23
bmhtf_d = 3.33
bmhtf_sig = 0.123
bmhtf_cut = 1.253
self.system.non_bonded_inter[0, 0].bmhtf.set_params(
a=bmhtf_a, b=bmhtf_b, c=bmhtf_c, d=bmhtf_d, sig=bmhtf_sig,
cutoff=bmhtf_cut)
for i in range(126):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.bmhtf_potential(
r=(i + 1) * self.step_width, a=bmhtf_a, b=bmhtf_b, c=bmhtf_c,
d=bmhtf_d, sig=bmhtf_sig, cutoff=bmhtf_cut)
# Calculate forces
f0_sim = np.copy(self.system.part[0].f)
f1_sim = np.copy(self.system.part[1].f)
f1_ref = self.axis * tests_common.bmhtf_force(
r=(i + 1) * self.step_width, a=bmhtf_a, b=bmhtf_b, c=bmhtf_c,
d=bmhtf_d, sig=bmhtf_sig, cutoff=bmhtf_cut)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
np.testing.assert_array_equal(f0_sim, -f1_sim)
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].bmhtf.set_params(a=0., c=0., d=0.)
# Test Morse Potential
@utx.skipIfMissingFeatures("MORSE")
def test_morse(self):
m_eps = 1.92
m_alpha = 3.03
m_cut = 1.253
m_rmin = 0.123
self.system.non_bonded_inter[0, 0].morse.set_params(
eps=m_eps, alpha=m_alpha, cutoff=m_cut, rmin=m_rmin)
for i in range(126):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.morse_potential(
r=(i + 1) * self.step_width, eps=m_eps, alpha=m_alpha,
cutoff=m_cut, rmin=m_rmin)
# Calculate forces
f0_sim = np.copy(self.system.part[0].f)
f1_sim = np.copy(self.system.part[1].f)
f1_ref = self.axis * tests_common.morse_force(
r=(i + 1) * self.step_width, eps=m_eps, alpha=m_alpha,
cutoff=m_cut, rmin=m_rmin)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
np.testing.assert_array_equal(f0_sim, -f1_sim)
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].morse.set_params(eps=0.)
# Test Buckingham Potential
@utx.skipIfMissingFeatures("BUCKINGHAM")
def test_buckingham(self):
b_a = 3.71
b_b = 2.92
b_c = 5.32
b_d = 4.11
b_disc = 1.03
b_cut = 2.253
b_shift = 0.133
self.system.non_bonded_inter[0, 0].buckingham.set_params(
a=b_a, b=b_b, c=b_c, d=b_d, discont=b_disc, cutoff=b_cut,
shift=b_shift)
for i in range(226):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.buckingham_potential(
r=(i + 1) * self.step_width, a=b_a, b=b_b, c=b_c, d=b_d,
discont=b_disc, cutoff=b_cut, shift=b_shift)
# Calculate forces
f0_sim = np.copy(self.system.part[0].f)
f1_sim = np.copy(self.system.part[1].f)
f1_ref = self.axis * tests_common.buckingham_force(
r=(i + 1) * self.step_width, a=b_a, b=b_b, c=b_c, d=b_d,
discont=b_disc, cutoff=b_cut, shift=b_shift)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
np.testing.assert_array_equal(f0_sim, -f1_sim)
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[
0, 0].buckingham.set_params(a=0., c=0., d=0., shift=0.)
# Test Soft-sphere Potential
@utx.skipIfMissingFeatures("SOFT_SPHERE")
def test_soft_sphere(self):
ss_a = 1.92
ss_n = 3.03
ss_cut = 1.123
ss_off = 0.123
self.system.non_bonded_inter[0, 0].soft_sphere.set_params(
a=ss_a, n=ss_n, cutoff=ss_cut, offset=ss_off)
for i in range(12):
self.system.part[1].pos = self.system.part[1].pos + self.step
for i in range(113):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.soft_sphere_potential(
r=(i + 13) * self.step_width, a=ss_a, n=ss_n, cutoff=ss_cut,
offset=ss_off)
# Calculate forces
f0_sim = np.copy(self.system.part[0].f)
f1_sim = np.copy(self.system.part[1].f)
f1_ref = self.axis * tests_common.soft_sphere_force(
r=(i + 13) * self.step_width, a=ss_a, n=ss_n, cutoff=ss_cut,
offset=ss_off)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
np.testing.assert_array_equal(f0_sim, -f1_sim)
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].soft_sphere.set_params(a=0.)
# Test Hertzian Potential
@utx.skipIfMissingFeatures("HERTZIAN")
def test_hertzian(self):
h_eps = 6.92
h_sig = 2.432
self.system.non_bonded_inter[0, 0].hertzian.set_params(
eps=h_eps, sig=h_sig)
for i in range(244):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.hertzian_potential(
r=(i + 1) * self.step_width, eps=h_eps, sig=h_sig)
# Calculate forces
f0_sim = np.copy(self.system.part[0].f)
f1_sim = np.copy(self.system.part[1].f)
f1_ref = self.axis * tests_common.hertzian_force(
r=(i + 1) * self.step_width, eps=h_eps, sig=h_sig)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
np.testing.assert_array_equal(f0_sim, -f1_sim)
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].hertzian.set_params(eps=0.)
# Test Gaussian Potential
@utx.skipIfMissingFeatures("GAUSSIAN")
def test_gaussian(self):
g_eps = 6.92
g_sig = 4.03
g_cut = 1.243
self.system.non_bonded_inter[0, 0].gaussian.set_params(
eps=g_eps, sig=g_sig, cutoff=g_cut)
for i in range(125):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.gaussian_potential(
r=(i + 1) * self.step_width, eps=g_eps, sig=g_sig, cutoff=g_cut)
# Calculate forces
f0_sim = np.copy(self.system.part[0].f)
f1_sim = np.copy(self.system.part[1].f)
f1_ref = self.axis * tests_common.gaussian_force(
r=(i + 1) * self.step_width, eps=g_eps, sig=g_sig, cutoff=g_cut)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
np.testing.assert_array_equal(f0_sim, -f1_sim)
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].gaussian.set_params(eps=0.)
# Test the Gay-Berne potential and the resulting force and torque
@utx.skipIfMissingFeatures("GAY_BERNE")
def test_gb(self):
# helper function definitions
def gradient(func, x0, dx=1.0e-7):
"""
Approximate the gradient of a function at a point x0
using the two-point central difference formula with spacing 2dx.
Parameters
----------
func: :obj:`function`
function for which the gradient is calculated
x0: (3,) array_like of :obj:`float`
Point in N-dimensional space where the derivatives are calculated
dx: :obj:`float`, optional
Spacing
Returns
-------
            (3,) array_like of :obj:`float`
the approximated gradient of func at x0
"""
def partial_x(x):
return (func(x0 + x) - func(x0 - x)) / (
2.0 * np.linalg.norm(x))
delta = np.array([dx, 0.0, 0.0])
return np.array([partial_x(np.roll(delta, i)) for i in range(3)])
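        # Usage sketch for the helper above (assumed values, illustration only):
        # for f(x) = x.x the analytic gradient is 2*x, so
        #   gradient(lambda x: np.dot(x, x), np.array([1., 2., 3.]))
        # should return approximately [2., 4., 6.].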
def setup_system(gb_params):
k_1, k_2, mu, nu, sigma_0, epsilon_0, cut = gb_params
self.system.part.clear()
self.system.part.add(
id=0, pos=(1, 2, 3), rotation=(1, 1, 1), type=0)
self.system.part.add(
id=1, pos=(2.2, 2.1, 2.9), rotation=(1, 1, 1), type=0)
self.system.non_bonded_inter[0, 0].gay_berne.set_params(
sig=sigma_0, cut=cut, eps=epsilon_0, k1=k_1, k2=k_2, mu=mu,
nu=nu)
def advance_and_rotate_part(particle):
particle.pos = particle.pos + self.step
particle.rotate(axis=(1, 2, 3), angle=0.3)
particle.rotate(axis=(1, -2, -4), angle=1.2)
def get_simulation_energy():
return self.system.analysis.energy()["non_bonded"]
def get_reference_energy(gb_params, r, director1, director2):
k_1, k_2, mu, nu, sigma_0, epsilon_0, cut = gb_params
r_cut = r * cut / np.linalg.norm(r)
E_ref = tests_common.gay_berne_potential(
r, director1, director2, epsilon_0, sigma_0, mu, nu, k_1, k_2)
E_ref -= tests_common.gay_berne_potential(
r_cut, director1, director2, epsilon_0, sigma_0, mu, nu,
k_1, k_2)
return E_ref
def get_reference_force(gb_params, r, dir1, dir2):
return -gradient(
lambda x: get_reference_energy(gb_params, x, dir1, dir2),
x0=r, dx=1.0e-7)
def get_reference_torque(gb_params, r, dir1, dir2):
force_in_dir1 = gradient(
lambda x: get_reference_energy(gb_params, r, x, dir2),
x0=dir1, dx=1.0e-7)
return np.cross(-dir1, force_in_dir1)
# actual tests of the gb potential
k_1 = 1.2
k_2 = 2.4
mu = 2.
nu = 5.
sigma_0 = 1.2
epsilon_0 = 0.8
cut = 3.3
gb_params = (k_1, k_2, mu, nu, sigma_0, epsilon_0, cut)
setup_system(gb_params)
p1 = self.system.part[0]
p2 = self.system.part[1]
delta = 1.0e-6
for _ in range(100):
advance_and_rotate_part(p2)
self.system.integrator.run(recalc_forces=True, steps=0)
r = self.system.distance_vec(p1, p2)
director1 = p1.director
director2 = p2.director
# Calc energies
E_sim = get_simulation_energy()
E_ref = get_reference_energy(gb_params, r, director1, director2)
# Test energies
self.assertAlmostEqual(E_sim, E_ref, delta=delta)
# Calc forces
f1_sim = np.copy(p1.f)
f2_sim = np.copy(p2.f)
f2_ref = get_reference_force(gb_params, r, director1, director2)
# Test forces
# force equals minus the counter-force
np.testing.assert_array_equal(f1_sim, -f2_sim)
# compare force to reference force
for i in range(3):
self.assertAlmostEqual(f2_sim[i], f2_ref[i], delta=delta)
# Calc torques
torque1_sim = p1.torque_lab
torque2_sim = p2.torque_lab
torque1_ref = get_reference_torque(
gb_params, r, director1, director2)
torque2_ref = get_reference_torque(
gb_params, r, director2, director1)
# Test torques
for i in range(3):
self.assertAlmostEqual(
torque1_sim[i],
torque1_ref[i],
delta=delta)
self.assertAlmostEqual(
torque2_sim[i],
torque2_ref[i],
delta=delta)
# Test zero energy
self.system.non_bonded_inter[0, 0].gay_berne.set_params(
sig=sigma_0, cut=0, eps=0, k1=k_1, k2=k_2, mu=mu, nu=nu)
self.system.integrator.run(0)
self.assertEqual(self.system.analysis.energy()["non_bonded"], 0.0)
if __name__ == '__main__':
ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/interactions_non-bonded.py
|
Python
|
gpl-3.0
| 27,774
|
[
"ESPResSo",
"Gaussian"
] |
845ca4db8fb100917b223234b96af9caf07e80a59ba62d21569f0383f430d0aa
|
from asyncio import gather, Semaphore, sleep, Task, CancelledError
from datetime import datetime
from statistics import median
from sys import platform
from cyrandom import shuffle
from collections import deque
from itertools import dropwhile
from time import time, monotonic
from aiopogo import HashServer
from sqlalchemy.exc import OperationalError
from .db import SIGHTING_CACHE, MYSTERY_CACHE
from .utils import get_current_hour, dump_pickle, get_start_coords, get_bootstrap_points, randomize_point, best_factors, percentage_split
from .shared import get_logger, LOOP, run_threaded, ACCOUNTS
from . import bounds, db_proc, spawns, sanitized as conf
from .worker import Worker
ANSI = '\x1b[2J\x1b[H'
if platform == 'win32':
try:
from platform import win32_ver
from distutils.version import LooseVersion
if LooseVersion(win32_ver()[1]) >= LooseVersion('10.0.10586'):
import ctypes
            kernel32 = ctypes.windll.kernel32
            kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7)
else:
from os import system
ANSI = ''
except Exception:
from os import system
ANSI = ''
BAD_STATUSES = (
'FAILED LOGIN',
'EXCEPTION',
'NOT AUTHENTICATED',
'KEY EXPIRED',
'HASHING OFFLINE',
'NIANTIC OFFLINE',
'BAD REQUEST',
'INVALID REQUEST',
'CAPTCHA',
'BANNED',
'BENCHING',
'REMOVING',
'IP BANNED',
'MALFORMED RESPONSE',
'AIOPOGO ERROR',
'MAX RETRIES',
'HASHING ERROR',
'PROXY ERROR',
'TIMEOUT'
)
class Overseer:
def __init__(self, manager):
self.log = get_logger('overseer')
self.workers = []
self.manager = manager
self.things_count = deque(maxlen=9)
self.paused = False
self.coroutines_count = 0
self.skipped = 0
self.visits = 0
self.coroutine_semaphore = Semaphore(conf.COROUTINES_LIMIT, loop=LOOP)
self.redundant = 0
self.running = True
self.all_seen = False
self.idle_seconds = 0
self.log.info('Overseer initialized')
self.pokemon_found = ''
def start(self, status_bar):
self.captcha_queue = self.manager.captcha_queue()
Worker.captcha_queue = self.manager.captcha_queue()
self.extra_queue = self.manager.extra_queue()
Worker.extra_queue = self.manager.extra_queue()
if conf.MAP_WORKERS:
Worker.worker_dict = self.manager.worker_dict()
for username, account in ACCOUNTS.items():
account['username'] = username
if account.get('banned'):
continue
if account.get('captcha'):
self.captcha_queue.put(account)
else:
self.extra_queue.put(account)
self.workers = tuple(Worker(worker_no=x) for x in range(conf.GRID[0] * conf.GRID[1]))
db_proc.start()
LOOP.call_later(10, self.update_count)
LOOP.call_later(max(conf.SWAP_OLDEST, conf.MINIMUM_RUNTIME), self.swap_oldest)
LOOP.call_soon(self.update_stats)
if status_bar:
LOOP.call_soon(self.print_status)
def update_count(self):
self.things_count.append(str(db_proc.count))
self.pokemon_found = (
'Pokemon found count (10s interval):\n'
+ ' '.join(self.things_count)
+ '\n')
LOOP.call_later(10, self.update_count)
def swap_oldest(self, interval=conf.SWAP_OLDEST, minimum=conf.MINIMUM_RUNTIME):
if not self.paused and not self.extra_queue.empty():
oldest, minutes = self.longest_running()
if minutes > minimum:
LOOP.create_task(oldest.lock_and_swap(minutes))
LOOP.call_later(interval, self.swap_oldest)
def print_status(self, refresh=conf.REFRESH_RATE):
try:
self._print_status()
except CancelledError:
return
except Exception as e:
self.log.exception('{} occurred while printing status.', e.__class__.__name__)
self.print_handle = LOOP.call_later(refresh, self.print_status)
async def exit_progress(self):
while self.coroutines_count > 2:
try:
self.update_coroutines_count(simple=False)
pending = len(db_proc)
# Spaces at the end are important, as they clear previously printed
# output - \r doesn't clean whole line
print(
'{} coroutines active, {} DB items pending '.format(
self.coroutines_count, pending),
end='\r'
)
await sleep(.5)
except CancelledError:
return
except Exception as e:
self.log.exception('A wild {} appeared in exit_progress!', e.__class__.__name__)
def update_stats(self, refresh=conf.STAT_REFRESH, med=median, count=conf.GRID[0] * conf.GRID[1]):
visits = []
seen_per_worker = []
after_spawns = []
speeds = []
for w in self.workers:
after_spawns.append(w.after_spawn)
seen_per_worker.append(w.total_seen)
visits.append(w.visits)
speeds.append(w.speed)
self.stats = (
'Seen per worker: min {}, max {}, med {:.0f}\n'
'Visits per worker: min {}, max {}, med {:.0f}\n'
'Visit delay: min {:.1f}, max {:.1f}, med {:.1f}\n'
'Speed: min {:.1f}, max {:.1f}, med {:.1f}\n'
'Extra accounts: {}, CAPTCHAs needed: {}\n'
).format(
min(seen_per_worker), max(seen_per_worker), med(seen_per_worker),
min(visits), max(visits), med(visits),
min(after_spawns), max(after_spawns), med(after_spawns),
min(speeds), max(speeds), med(speeds),
self.extra_queue.qsize(), self.captcha_queue.qsize()
)
self.sighting_cache_size = len(SIGHTING_CACHE.store)
self.mystery_cache_size = len(MYSTERY_CACHE.store)
self.update_coroutines_count()
self.counts = (
'Known spawns: {}, unknown: {}, more: {}\n'
'{} workers, {} coroutines\n'
'sightings cache: {}, mystery cache: {}, DB queue: {}\n'
).format(
len(spawns), len(spawns.unknown), spawns.cells_count,
count, self.coroutines_count,
len(SIGHTING_CACHE), len(MYSTERY_CACHE), len(db_proc)
)
LOOP.call_later(refresh, self.update_stats)
def get_dots_and_messages(self):
"""Returns status dots and status messages for workers
Dots meaning:
. = visited more than a minute ago
, = visited less than a minute ago, no pokemon seen
0 = visited less than a minute ago, no pokemon or forts seen
: = visited less than a minute ago, pokemon seen
! = currently visiting
| = cleaning bag
$ = spinning a PokéStop
* = sending a notification
~ = encountering a Pokémon
I = initial, haven't done anything yet
» = waiting to log in (limited by SIMULTANEOUS_LOGINS)
° = waiting to start app simulation (limited by SIMULTANEOUS_SIMULATION)
∞ = bootstrapping
L = logging in
A = simulating app startup
T = completing the tutorial
X = something bad happened
C = CAPTCHA
Other letters: various errors and procedures
"""
dots = []
messages = []
row = []
for i, worker in enumerate(self.workers):
if i > 0 and i % conf.GRID[1] == 0:
dots.append(row)
row = []
if worker.error_code in BAD_STATUSES:
row.append('X')
messages.append(worker.status.ljust(20))
elif worker.error_code:
row.append(worker.error_code[0])
else:
row.append('.')
if row:
dots.append(row)
return dots, messages
def update_coroutines_count(self, simple=True, loop=LOOP):
try:
tasks = Task.all_tasks(loop)
self.coroutines_count = len(tasks) if simple else sum(not t.done() for t in tasks)
except RuntimeError:
# Set changed size during iteration
self.coroutines_count = '-1'
def _print_status(self, _ansi=ANSI, _start=datetime.now(), _notify=conf.NOTIFY):
running_for = datetime.now() - _start
seconds_since_start = running_for.seconds - self.idle_seconds or 0.1
hours_since_start = seconds_since_start / 3600
output = [
'{}Monocle running for {}'.format(_ansi, running_for),
self.counts,
self.stats,
self.pokemon_found,
('Visits: {}, per second: {:.2f}\n'
'Skipped: {}, unnecessary: {}').format(
self.visits, self.visits / seconds_since_start,
self.skipped, self.redundant)
]
try:
seen = Worker.g['seen']
captchas = Worker.g['captchas']
output.append('Seen per visit: {v:.2f}, per minute: {m:.0f}'.format(
v=seen / self.visits, m=seen / (seconds_since_start / 60)))
if captchas:
captchas_per_request = captchas / (self.visits / 1000)
captchas_per_hour = captchas / hours_since_start
output.append(
'CAPTCHAs per 1K visits: {r:.1f}, per hour: {h:.1f}, total: {t:d}'.format(
r=captchas_per_request, h=captchas_per_hour, t=captchas))
except ZeroDivisionError:
pass
try:
hash_status = HashServer.status
output.append('Hashes: {}/{}, refresh in {:.0f}'.format(
hash_status['remaining'],
hash_status['maximum'],
hash_status['period'] - time()
))
except (KeyError, TypeError):
pass
if _notify:
sent = Worker.notifier.sent
output.append('Notifications sent: {}, per hour {:.1f}'.format(
sent, sent / hours_since_start))
output.append('')
if not self.all_seen:
no_sightings = ', '.join(str(w.worker_no)
for w in self.workers
if w.total_seen == 0)
if no_sightings:
output += ['Workers without sightings so far:', no_sightings, '']
else:
self.all_seen = True
dots, messages = self.get_dots_and_messages()
output += [' '.join(row) for row in dots]
previous = 0
for i in range(4, len(messages) + 4, 4):
output.append('\t'.join(messages[previous:i]))
previous = i
if self.paused:
output.append('\nCAPTCHAs are needed to proceed.')
if not _ansi:
system('cls')
print('\n'.join(output))
def longest_running(self):
workers = (x for x in self.workers if x.start_time)
worker = next(workers)
earliest = worker.start_time
for w in workers:
if w.start_time < earliest:
worker = w
earliest = w.start_time
minutes = ((time() * 1000) - earliest) / 60000
return worker, minutes
def get_start_point(self):
smallest_diff = float('inf')
now = time() % 3600
closest = None
for spawn_id, spawn_time in spawns.known.values():
time_diff = now - spawn_time
if 0 < time_diff < smallest_diff:
smallest_diff = time_diff
closest = spawn_id
if smallest_diff < 3:
break
return closest
async def update_spawns(self, initial=False):
while True:
try:
await run_threaded(spawns.update)
LOOP.create_task(run_threaded(spawns.pickle))
except OperationalError as e:
self.log.exception('Operational error while trying to update spawns.')
if initial:
raise OperationalError('Could not update spawns, ensure your DB is set up.') from e
await sleep(15, loop=LOOP)
except CancelledError:
raise
except Exception as e:
self.log.exception('A wild {} appeared while updating spawns!', e.__class__.__name__)
await sleep(15, loop=LOOP)
else:
break
async def launch(self, bootstrap, pickle):
exceptions = 0
self.next_mystery_reload = 0
if not pickle or not spawns.unpickle():
await self.update_spawns(initial=True)
if not spawns or bootstrap:
try:
await self.bootstrap()
await self.update_spawns()
except CancelledError:
return
update_spawns = False
self.mysteries = spawns.mystery_gen()
while True:
try:
await self._launch(update_spawns)
update_spawns = True
except CancelledError:
return
except Exception:
exceptions += 1
if exceptions > 25:
                    self.log.exception('Over 25 errors occurred in launcher loop, exiting.')
return False
else:
                    self.log.exception('Error occurred in launcher loop.')
update_spawns = False
async def _launch(self, update_spawns):
if update_spawns:
await self.update_spawns()
LOOP.create_task(run_threaded(dump_pickle, 'accounts', ACCOUNTS))
spawns_iter = iter(spawns.items())
else:
start_point = self.get_start_point()
if start_point and not spawns.after_last():
spawns_iter = dropwhile(
lambda s: s[1][0] != start_point, spawns.items())
else:
spawns_iter = iter(spawns.items())
current_hour = get_current_hour()
if spawns.after_last():
current_hour += 3600
captcha_limit = conf.MAX_CAPTCHAS
skip_spawn = conf.SKIP_SPAWN
for point, (spawn_id, spawn_seconds) in spawns_iter:
try:
if self.captcha_queue.qsize() > captcha_limit:
self.paused = True
self.idle_seconds += await run_threaded(self.captcha_queue.full_wait, conf.MAX_CAPTCHAS)
self.paused = False
except (EOFError, BrokenPipeError, FileNotFoundError):
pass
spawn_time = spawn_seconds + current_hour
# negative = hasn't happened yet
# positive = already happened
time_diff = time() - spawn_time
while time_diff < 0.5:
try:
mystery_point = next(self.mysteries)
await self.coroutine_semaphore.acquire()
LOOP.create_task(self.try_point(mystery_point))
except StopIteration:
if self.next_mystery_reload < monotonic():
self.mysteries = spawns.mystery_gen()
self.next_mystery_reload = monotonic() + conf.RESCAN_UNKNOWN
else:
await sleep(min(spawn_time - time() + .5, self.next_mystery_reload - monotonic()), loop=LOOP)
time_diff = time() - spawn_time
if time_diff > 5 and spawn_id in SIGHTING_CACHE.store:
self.redundant += 1
continue
elif time_diff > skip_spawn:
self.skipped += 1
continue
await self.coroutine_semaphore.acquire()
LOOP.create_task(self.try_point(point, spawn_time, spawn_id))
async def try_again(self, point):
async with self.coroutine_semaphore:
worker = await self.best_worker(point, False)
async with worker.busy:
if await worker.visit(point):
self.visits += 1
async def bootstrap(self):
try:
self.log.warning('Starting bootstrap phase 1.')
await self.bootstrap_one()
except CancelledError:
raise
except Exception:
self.log.exception('An exception occurred during bootstrap phase 1.')
try:
self.log.warning('Starting bootstrap phase 2.')
await self.bootstrap_two()
except CancelledError:
raise
except Exception:
self.log.exception('An exception occurred during bootstrap phase 2.')
self.log.warning('Starting bootstrap phase 3.')
unknowns = list(spawns.unknown)
shuffle(unknowns)
tasks = (self.try_again(point) for point in unknowns)
await gather(*tasks, loop=LOOP)
self.log.warning('Finished bootstrapping.')
async def bootstrap_one(self):
async def visit_release(worker, num, *args):
async with self.coroutine_semaphore:
async with worker.busy:
point = get_start_coords(num, *args)
self.log.warning('start_coords: {}', point)
self.visits += await worker.bootstrap_visit(point)
if bounds.multi:
areas = [poly.polygon.area for poly in bounds.polygons]
area_sum = sum(areas)
percentages = [area / area_sum for area in areas]
tasks = []
for i, workers in enumerate(percentage_split(
self.workers, percentages)):
grid = best_factors(len(workers))
tasks.extend(visit_release(w, n, grid, bounds.polygons[i])
for n, w in enumerate(workers))
else:
tasks = (visit_release(w, n) for n, w in enumerate(self.workers))
await gather(*tasks, loop=LOOP)
async def bootstrap_two(self):
async def bootstrap_try(point):
async with self.coroutine_semaphore:
randomized = randomize_point(point, randomization)
LOOP.call_later(1790, LOOP.create_task, self.try_again(randomized))
worker = await self.best_worker(point, False)
async with worker.busy:
self.visits += await worker.bootstrap_visit(point)
# randomize to within ~140m of the nearest neighbor on the second visit
randomization = conf.BOOTSTRAP_RADIUS / 155555 - 0.00045
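        # Worked example (hypothetical value, not taken from the config): with
        # BOOTSTRAP_RADIUS = 450 this evaluates to 450 / 155555 - 0.00045 ≈ 0.00244
        # degrees, which is then passed to randomize_point as the spread.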
tasks = (bootstrap_try(x) for x in get_bootstrap_points(bounds))
await gather(*tasks, loop=LOOP)
async def try_point(self, point, spawn_time=None, spawn_id=None):
try:
point = randomize_point(point)
skip_time = monotonic() + (conf.GIVE_UP_KNOWN if spawn_time else conf.GIVE_UP_UNKNOWN)
worker = await self.best_worker(point, skip_time)
if not worker:
if spawn_time:
self.skipped += 1
return
async with worker.busy:
if spawn_time:
worker.after_spawn = time() - spawn_time
if await worker.visit(point, spawn_id):
self.visits += 1
except CancelledError:
raise
except Exception:
self.log.exception('An exception occurred in try_point')
finally:
self.coroutine_semaphore.release()
async def best_worker(self, point, skip_time):
good_enough = conf.GOOD_ENOUGH
while self.running:
gen = (w for w in self.workers if not w.busy.locked())
try:
worker = next(gen)
lowest_speed = worker.travel_speed(point)
except StopIteration:
lowest_speed = float('inf')
for w in gen:
speed = w.travel_speed(point)
if speed < lowest_speed:
lowest_speed = speed
worker = w
if speed < good_enough:
break
if lowest_speed < conf.SPEED_LIMIT:
worker.speed = lowest_speed
return worker
if skip_time and monotonic() > skip_time:
return None
await sleep(conf.SEARCH_SLEEP, loop=LOOP)
def refresh_dict(self):
while not self.extra_queue.empty():
account = self.extra_queue.get()
username = account['username']
ACCOUNTS[username] = account
|
sebast1219/Monocle
|
monocle/overseer.py
|
Python
|
mit
| 20,753
|
[
"VisIt"
] |
8dff0ff0bec10a0654a8faf3ed77be66a5a1f65ad04d4a158aa017d87055b5b4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: skip-file
import os
import sys
from setuptools import find_packages, setup
##### Dependencies of GPflow
# We do not want to install tensorflow in the readthedocs environment, where we
# use autodoc_mock_imports instead. Hence we use this flag to decide whether or
# not to append tensorflow and tensorflow_probability to the requirements:
if os.environ.get("READTHEDOCS") != "True":
requirements = [
"tensorflow>=2.4.0",
"tensorflow-probability>=0.12.0",
# NOTE: once we require tensorflow-probability>=0.12, we can remove our custom deepcopy handling
"setuptools>=41.0.0", # to satisfy dependency constraints
]
else:
requirements = []
requirements.extend(
[
"numpy",
"scipy",
"multipledispatch>=0.6",
"tabulate",
"typing_extensions",
"packaging",
"deprecated",
"lark>=1.1.0",
]
)
def read_file(filename: str) -> str:
with open(filename, encoding="utf-8") as f:
return f.read().strip()
version = read_file("VERSION")
readme_text = read_file("README.md")
packages = find_packages(".", exclude=["tests"])
setup(
name="gpflow",
version=version,
author="James Hensman, Alex Matthews",
author_email="james.hensman@gmail.com",
description="Gaussian process methods in TensorFlow",
long_description=readme_text,
long_description_content_type="text/markdown",
license="Apache License 2.0",
keywords="machine-learning gaussian-processes kernels tensorflow",
url="https://www.gpflow.org",
project_urls={
"Source on GitHub": "https://github.com/GPflow/GPflow",
"Documentation": "https://gpflow.readthedocs.io",
},
packages=packages,
package_data={"": ["*.lark"]},
include_package_data=True,
install_requires=requirements,
extras_require={"ImageToTensorBoard": ["matplotlib"]},
python_requires=">=3.7",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: GPU :: NVIDIA CUDA",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Typing :: Typed",
],
)
|
GPflow/GPflow
|
setup.py
|
Python
|
apache-2.0
| 2,779
|
[
"Gaussian"
] |
d5f89f146c2842fd347406abea777b15f7d95f1784d139511c9de0518a5fd280
|
#!/usr/bin/env python
"""
Install.py tool to download, unpack, build, and link to the plumed2 library
used to automate the steps described in the README file in this dir
"""
from __future__ import print_function
import sys, os, platform, subprocess, shutil
from argparse import ArgumentParser
sys.path.append('..')
from install_helpers import get_cpus, fullpath, geturl, checkmd5sum
parser = ArgumentParser(prog='Install.py',
description="LAMMPS library build wrapper script")
# settings
version = "2.5.2"
mode = "static"
# help message
HELP = """
Syntax from src dir: make lib-plumed args="-b"
or: make lib-plumed args="-b -v 2.4.3"
or: make lib-plumed args="-p /usr/local/plumed2 -m shared"
Syntax from lib dir: python Install.py -b -v 2.4.3
or: python Install.py -b
or: python Install.py -p /usr/local/plumed2 -m shared
Example:
make lib-plumed args="-b" # download/build in lib/plumed/plumed2
make lib-plumed args="-p $HOME/plumed2 -m shared" # use existing Plumed2 installation in $HOME/plumed2
"""
# known checksums for different PLUMED versions. used to validate the download.
checksums = { \
'2.4.2' : '88188743a6e03ef076e5377d03ebb0e7', \
'2.4.3' : 'b1be7c48971627febc11c61b70767fc5', \
'2.4.4' : '71ed465bdc7c2059e282dbda8d564e71', \
'2.5.0' : '6224cd089493661e19ceacccd35cf911', \
'2.5.1' : 'c2a7b519e32197a120cdf47e0f194f81', \
'2.5.2' : 'bd2f18346c788eb54e1e52f4f6acf41a', \
}
# parse and process arguments
pgroup = parser.add_mutually_exclusive_group()
pgroup.add_argument("-b", "--build", action="store_true",
help="download and build the plumed2 library")
pgroup.add_argument("-p", "--path",
help="specify folder of existing plumed2 installation")
parser.add_argument("-v", "--version", default=version, choices=checksums.keys(),
help="set version of plumed to download and build (default: %s)" % version)
parser.add_argument("-m", "--mode", default=mode, choices=['static', 'shared', 'runtime'],
help="set plumed linkage mode: static (default), shared, or runtime")
args = parser.parse_args()
# print help message and exit, if neither build nor path options are given
if not args.build and not args.path:
parser.print_help()
sys.exit(HELP)
buildflag = args.build
pathflag = args.path is not None
plumedpath = args.path
mode = args.mode
homepath = fullpath('.')
homedir = "%s/plumed2" % (homepath)
if pathflag:
if not os.path.isdir(plumedpath):
sys.exit("Plumed2 path %s does not exist" % plumedpath)
homedir = fullpath(plumedpath)
if not os.path.isdir(os.path.join(homedir, 'include', 'plumed', 'core')):
sys.exit("No Plumed2 installation found at %s" % plumedpath)
# download and unpack plumed2 tarball
if buildflag:
url = "https://github.com/plumed/plumed2/releases/download/v%s/plumed-src-%s.tgz" % (version, version)
filename = "plumed-src-%s.tar.gz" %version
print("Downloading plumed ...")
geturl(url, filename)
# verify downloaded archive integrity via md5 checksum, if known.
if version in checksums:
if not checkmd5sum(checksums[version], filename):
sys.exit("Checksum for plumed2 library does not match")
print("Unpacking plumed2 source tarball ...")
if os.path.exists("%s/plumed-%s" % (homepath, version)):
shutil.rmtree("%s/plumed-%s" % (homepath, version))
if os.path.exists(homedir):
shutil.rmtree(homedir)
cmd = 'cd "%s"; tar -xzvf %s' % (homepath, filename)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
os.remove(os.path.join(homepath, filename))
# build plumed
print("Building plumed ...")
n_cpus = get_cpus()
cmd = 'cd %s/plumed-%s; ./configure --prefix=%s --enable-modules=all --enable-static-patch ; make -j%d ; make install' % (homepath, version, homedir, n_cpus)
try:
txt = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print(txt.decode('UTF-8'))
except subprocess.CalledProcessError as e:
print("Make failed with:\n %s" % e.output.decode('UTF-8'))
sys.exit(1)
# create 2 links in lib/plumed to plumed2 installation dir
print("Creating links to plumed2 include and lib files")
if os.path.isfile("includelink") or os.path.islink("includelink"):
os.remove("includelink")
if os.path.isfile("liblink") or os.path.islink("liblink"):
os.remove("liblink")
os.symlink(os.path.join(homedir, 'include'), 'includelink')
libpath = os.path.join(homedir, 'lib64')
if not os.path.exists(libpath):
libpath = os.path.join(homedir, 'lib')
os.symlink(libpath, 'liblink')
if os.path.isfile("Makefile.lammps.%s" % mode):
print("Creating Makefile.lammps")
plumedinc = os.path.join('liblink', 'plumed', 'src', 'lib', 'Plumed.inc.' + mode)
lines1 = open(plumedinc, 'r').readlines()
if (platform.system() == 'Darwin' and os.path.isfile("Makefile.lammps.%s.macosx" % mode)):
lines2 = open("Makefile.lammps.%s.macosx" % mode, 'r').readlines()
else:
lines2 = open("Makefile.lammps.%s" % mode, 'r').readlines()
fp = open("Makefile.lammps", 'w')
fp.write("PLUMED_LIBDIR=" + os.path.join(homedir, "lib\n"))
for line in lines1:
fp.write(line)
for line in lines2:
fp.write(line)
fp.close()
|
Pakketeretet2/lammps
|
lib/plumed/Install.py
|
Python
|
gpl-2.0
| 5,336
|
[
"LAMMPS"
] |
de2a945bd584a5d31f7cf0db55d7074fd3f4259ab4c855b4f0eb5aafb3b14254
|
# Copyright (C) 2012,2013,2015
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*************************
espressopp.FixedTupleList
*************************
.. function:: espressopp.FixedTupleList(storage)
:param storage: the storage object that holds the particles referenced by the tuples
:type storage: espressopp.storage.Storage
.. function:: espressopp.FixedTupleList.size()
:rtype:
"""
from espressopp import pmi
import _espressopp
import espressopp
from espressopp.esutil import cxxinit
class FixedTupleListLocal(_espressopp.FixedTupleList):
def __init__(self, storage):
if pmi.workerIsActive():
cxxinit(self, _espressopp.FixedTupleList, storage)
"""def addTuples(self, tuplelist):
'add tuple to fixed tuple list'
if pmi.workerIsActive():
return self.cxxclass.addTuple(self, tuplelist)"""
def size(self):
if pmi.workerIsActive():
return self.cxxclass.size(self)
if pmi.isController:
class FixedTupleList(metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.FixedTupleListLocal',
#localcall = [ "add" ],
pmicall = [ "addTuple", "getTuples" ],
pmiinvoke = ["size"]
)
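# Illustrative usage (the system, storage and particle ids are hypothetical and assume an
# already configured espressopp system whose storage supports tuples):
#
#   ftl = espressopp.FixedTupleList(system.storage)
#   ftl.addTuple([1, 2, 3])   # coarse-grained particle 1 mapped to atomistic particles 2 and 3
#   print(ftl.size())         # tuple count reported by the workers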
|
espressopp/espressopp
|
src/FixedTupleList.py
|
Python
|
gpl-3.0
| 1,989
|
[
"ESPResSo"
] |
499e3ea93744044522e59c86f2d55958b9e558ff6463d54a56b3c32496099b04
|
"""This demo program solves the incompressible Navier-Stokes equations
on an L-shaped domain using Chorin's splitting method."""
# Copyright (C) 2010-2011 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Mikael Mortensen 2011
#
# First added: 2010-08-30
# Last changed: 2011-06-30
#
# SC14 Paraview's Catalyst tutorial
#
# Step 2 : plug to catalyst python API, add a coProcess function
#
# [SC14-Catalyst] we need a python environment that enables import of both Dolfin and ParaView
execfile("simulation-env.py")
# [SC14-Catalyst] import paraview, vtk and paraview's simple API
import sys
import paraview
import paraview.vtk as vtk
import paraview.simple as pvsimple
# [SC14-Catalyst] check for command line arguments
if len(sys.argv) != 3:
print "command is 'python",sys.argv[0],"<script name> <number of time steps>'"
sys.exit(1)
# [SC14-Catalyst] initialize and read input parameters
paraview.options.batch = True
paraview.options.symmetric = True
# [SC14-Catalyst] import user co-processing script
import vtkPVCatalystPython
import os
scriptpath, scriptname = os.path.split(sys.argv[1])
sys.path.append(scriptpath)
if scriptname.endswith(".py"):
print 'script name is ', scriptname
scriptname = scriptname[0:len(scriptname)-3]
try:
cpscript = __import__(scriptname)
except:
print sys.exc_info()
print 'Cannot find ', scriptname, ' -- no coprocessing will be performed.'
sys.exit(1)
# [SC14-Catalyst] Co-Processing routine to be called at the end of each simulation time step
def coProcess(grid, time, step):
# initialize data description
datadescription = vtkPVCatalystPython.vtkCPDataDescription()
datadescription.SetTimeData(time, step)
datadescription.AddInput("input")
cpscript.RequestDataDescription(datadescription)
# to be continued ...
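# Illustrative sketch of how later tutorial steps typically complete this routine; the calls
# below follow the usual Catalyst Python API and are only an assumption at this step:
#
#   inputdescription = datadescription.GetInputDescriptionByName("input")
#   if inputdescription.GetIfGridIsNecessary():
#       inputdescription.SetGrid(grid)
#   cpscript.DoCoProcessing(datadescription)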
# Begin demo
from dolfin import *
# Print log messages only from the root process in parallel
parameters["std_out_all_processes"] = False;
# Load mesh from file
mesh = Mesh(DOLFIN_EXAMPLE_DATA_DIR+"/lshape.xml.gz")
# Define function spaces (P2-P1)
V = VectorFunctionSpace(mesh, "Lagrange", 2)
Q = FunctionSpace(mesh, "Lagrange", 1)
# Define trial and test functions
u = TrialFunction(V)
p = TrialFunction(Q)
v = TestFunction(V)
q = TestFunction(Q)
# Set parameter values
dt = 0.01
T = 3
nu = 0.01
# Define time-dependent pressure boundary condition
p_in = Expression("sin(3.0*t)", t=0.0)
# Define boundary conditions
noslip = DirichletBC(V, (0, 0),
"on_boundary && \
(x[0] < DOLFIN_EPS | x[1] < DOLFIN_EPS | \
(x[0] > 0.5 - DOLFIN_EPS && x[1] > 0.5 - DOLFIN_EPS))")
inflow = DirichletBC(Q, p_in, "x[1] > 1.0 - DOLFIN_EPS")
outflow = DirichletBC(Q, 0, "x[0] > 1.0 - DOLFIN_EPS")
bcu = [noslip]
bcp = [inflow, outflow]
# Create functions
u0 = Function(V)
u1 = Function(V)
p1 = Function(Q)
# Define coefficients
k = Constant(dt)
f = Constant((0, 0))
# Tentative velocity step
F1 = (1/k)*inner(u - u0, v)*dx + inner(grad(u0)*u0, v)*dx + \
nu*inner(grad(u), grad(v))*dx - inner(f, v)*dx
a1 = lhs(F1)
L1 = rhs(F1)
# Pressure update
a2 = inner(grad(p), grad(q))*dx
L2 = -(1/k)*div(u1)*q*dx
# Velocity update
a3 = inner(u, v)*dx
L3 = inner(u1, v)*dx - k*inner(grad(p1), v)*dx
# Assemble matrices
A1 = assemble(a1)
A2 = assemble(a2)
A3 = assemble(a3)
# Use amg preconditioner if available
prec = "amg" if has_krylov_solver_preconditioner("amg") else "default"
# Create files for storing solution
ufile = File("results/velocity.pvd")
pfile = File("results/pressure.pvd")
# Time-stepping
maxtimestep = int(sys.argv[2])
tstep = 0
t = dt
while tstep < maxtimestep:
# Update pressure boundary condition
p_in.t = t
# Compute tentative velocity step
begin("Computing tentative velocity")
b1 = assemble(L1)
[bc.apply(A1, b1) for bc in bcu]
solve(A1, u1.vector(), b1, "gmres", "default")
end()
# Pressure correction
begin("Computing pressure correction")
b2 = assemble(L2)
[bc.apply(A2, b2) for bc in bcp]
solve(A2, p1.vector(), b2, "gmres", prec)
end()
# Velocity correction
begin("Computing velocity correction")
b3 = assemble(L3)
[bc.apply(A3, b3) for bc in bcu]
solve(A3, u1.vector(), b3, "gmres", "default")
end()
# Plot solution [SC14-Catalyst] Not anymore
# plot(p1, title="Pressure", rescale=True)
# plot(u1, title="Velocity", rescale=True)
# Save to file [SC14-Catalyst] Not anymore
# ufile << u1
# pfile << p1
# [SC14-Catalyst] convert solution to VTK grid
ugrid = None
# [SC14-Catalyst] trigger catalyst execution
coProcess(ugrid,t,tstep)
# Move to next time step
u0.assign(u1)
t += dt
tstep += 1
print "t =", t, "step =",tstep
# Hold plot [SC14-Catalyst] Not anymore
# interactive()
|
mathstuf/ParaViewCatalystExampleCode
|
PythonDolfinExample/simulation-catalyst-step2.py
|
Python
|
bsd-3-clause
| 5,485
|
[
"ParaView",
"VTK"
] |
d5cd88f7ef062756b591dffdfffdbd73957105fa90225f3c94f9cdd971b065e1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
#
# PCR-GLOBWB (PCRaster Global Water Balance) Global Hydrological Model
#
# Copyright (C) 2016, Edwin H. Sutanudjaja, Rens van Beek, Niko Wanders, Yoshihide Wada,
# Joyce H. C. Bosmans, Niels Drost, Ruud J. van der Ent, Inge E. M. de Graaf, Jannis M. Hoch,
# Kor de Jong, Derek Karssenberg, Patricia López López, Stefanie Peßenteiner, Oliver Schmitz,
# Menno W. Straatsma, Ekkamol Vannametee, Dominik Wisser, and Marc F. P. Bierkens
# Faculty of Geosciences, Utrecht University, Utrecht, The Netherlands
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# EHS (20 March 2013): This is the list of general functions.
# The list is a continuation of Rens's and Dominik's.
import shutil
import subprocess
import datetime
import random
import os
import gc
import re
import math
import sys
import types
import calendar
import glob
import netCDF4 as nc
import numpy as np
import numpy.ma as ma
import pcraster as pcr
import logging
from six.moves import range
logger = logging.getLogger(__name__)
# file cache to minimize/reduce opening/closing files.
filecache = dict()
# Global variables:
MV = 1e20
smallNumber = 1E-39
# tuple of netcdf file suffixes (extensions) that can be used:
netcdf_suffixes = ('.nc4','.nc')
def getFileList(inputDir, filePattern):
'''creates a dictionary of files meeting the pattern specified'''
fileNameList = glob.glob(os.path.join(inputDir, filePattern))
ll= {}
for fileName in fileNameList:
ll[os.path.split(fileName)[-1]]= fileName
return ll
def checkVariableInNC(ncFile,varName):
logger.debug('Check whether the variable: '+str(varName)+' is defined in the file: '+str(ncFile))
if ncFile in list(filecache.keys()):
f = filecache[ncFile]
#~ print "Cached: ", ncFile
else:
f = nc.Dataset(ncFile)
filecache[ncFile] = f
#~ print "New: ", ncFile
varName = str(varName)
return varName in list(f.variables.keys())
def netcdf2PCRobjCloneWithoutTime(ncFile, varName,
cloneMapFileName = None,\
LatitudeLongitude = True,\
specificFillValue = None,\
absolutePath = None):
if absolutePath != None: ncFile = getFullPath(ncFile, absolutePath)
logger.debug('reading variable: '+str(varName)+' from the file: '+str(ncFile))
#
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
# Get netCDF file and variable name:
if ncFile in list(filecache.keys()):
f = filecache[ncFile]
#~ print "Cached: ", ncFile
else:
f = nc.Dataset(ncFile)
filecache[ncFile] = f
#~ print "New: ", ncFile
#print ncFile
#f = nc.Dataset(ncFile)
varName = str(varName)
if LatitudeLongitude == True:
try:
f.variables['lat'] = f.variables['latitude']
f.variables['lon'] = f.variables['longitude']
except:
pass
sameClone = True
# check whether clone and input maps have the same attributes:
if cloneMapFileName != None:
# get the attributes of cloneMap
attributeClone = getMapAttributesALL(cloneMapFileName)
cellsizeClone = attributeClone['cellsize']
rowsClone = attributeClone['rows']
colsClone = attributeClone['cols']
xULClone = attributeClone['xUL']
yULClone = attributeClone['yUL']
# get the attributes of input (netCDF)
cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
cellsizeInput = float(cellsizeInput)
rowsInput = len(f.variables['lat'])
colsInput = len(f.variables['lon'])
xULInput = f.variables['lon'][0]-0.5*cellsizeInput
yULInput = f.variables['lat'][0]+0.5*cellsizeInput
# check whether both maps have the same attributes
if cellsizeClone != cellsizeInput: sameClone = False
if rowsClone != rowsInput: sameClone = False
if colsClone != colsInput: sameClone = False
if xULClone != xULInput: sameClone = False
if yULClone != yULInput: sameClone = False
cropData = f.variables[varName][:,:] # still original data
factor = 1 # needed in regridData2FinerGrid
if sameClone == False:
# crop to cloneMap:
minX = min(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
xIdxSta = int(np.where(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
minY = min(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
yIdxSta = int(np.where(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
cropData = f.variables[varName][yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
factor = int(round(float(cellsizeInput)/float(cellsizeClone)))
if factor > 1: logger.debug('Resample: input cell size = '+str(float(cellsizeInput))+' ; output/clone cell size = '+str(float(cellsizeClone)))
# convert to PCR object and close f
if specificFillValue != None:
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(specificFillValue))
else:
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(f.variables[varName]._FillValue))
#~ # debug:
#~ pcr.report(outPCR,"tmp.map")
#~ print(varName)
#~ os.system('aguila tmp.map')
#f.close();
f = None ; cropData = None
# PCRaster object
return (outPCR)
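#~ Illustrative usage (file, variable and clone map names are hypothetical):
#~ landmask = netcdf2PCRobjCloneWithoutTime("input/landmask.nc", "landmask", cloneMapFileName = "clone_05min.map")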
def netcdf2PCRobjClone(ncFile,varName,dateInput,\
useDoy = None,
cloneMapFileName = None,\
LatitudeLongitude = True,\
specificFillValue = None):
#
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
# Get netCDF file and variable name:
#~ print ncFile
logger.debug('reading variable: '+str(varName)+' from the file: '+str(ncFile))
if ncFile in list(filecache.keys()):
f = filecache[ncFile]
#~ print "Cached: ", ncFile
else:
f = nc.Dataset(ncFile)
filecache[ncFile] = f
#~ print "New: ", ncFile
varName = str(varName)
if LatitudeLongitude == True:
try:
f.variables['lat'] = f.variables['latitude']
f.variables['lon'] = f.variables['longitude']
except:
pass
if varName == "evapotranspiration":
try:
f.variables['evapotranspiration'] = f.variables['referencePotET']
except:
pass
if varName == "kc": # the variable name in PCR-GLOBWB
try:
f.variables['kc'] = \
f.variables['Cropcoefficient'] # the variable name in the netcdf file
except:
pass
if varName == "interceptCapInput": # the variable name in PCR-GLOBWB
try:
f.variables['interceptCapInput'] = \
f.variables['Interceptioncapacity'] # the variable name in the netcdf file
except:
pass
if varName == "coverFractionInput": # the variable name in PCR-GLOBWB
try:
f.variables['coverFractionInput'] = \
f.variables['Coverfraction'] # the variable name in the netcdf file
except:
pass
if varName == "fracVegCover": # the variable name in PCR-GLOBWB
try:
f.variables['fracVegCover'] = \
f.variables['vegetation_fraction'] # the variable name in the netcdf file
except:
pass
if varName == "minSoilDepthFrac": # the variable name in PCR-GLOBWB
try:
f.variables['minSoilDepthFrac'] = \
f.variables['minRootDepthFraction'] # the variable name in the netcdf file
except:
pass
if varName == "maxSoilDepthFrac": # the variable name in PCR-GLOBWB
try:
f.variables['maxSoilDepthFrac'] = \
f.variables['maxRootDepthFraction'] # the variable name in the netcdf file
except:
pass
if varName == "arnoBeta": # the variable name in PCR-GLOBWB
try:
f.variables['arnoBeta'] = \
f.variables['arnoSchemeBeta'] # the variable name in the netcdf file
except:
pass
# date
date = dateInput
if useDoy == "Yes":
logger.debug('Finding the date based on the given climatology doy index (1 to 366, or index 0 to 365)')
idx = int(dateInput) - 1
elif useDoy == "month": # PS: WE NEED THIS ONE FOR NETCDF FILES that contain only 12 monthly values (e.g. cropCoefficientWaterNC).
logger.debug('Finding the date based on the given climatology month index (1 to 12, or index 0 to 11)')
# make sure that date is in the correct format
if isinstance(date, str) == True: date = \
datetime.datetime.strptime(str(date),'%Y-%m-%d')
idx = int(date.month) - 1
else:
# make sure that date is in the correct format
if isinstance(date, str) == True: date = \
datetime.datetime.strptime(str(date),'%Y-%m-%d')
date = datetime.datetime(date.year,date.month,date.day)
if useDoy == "yearly":
date = datetime.datetime(date.year,int(1),int(1))
if useDoy == "monthly":
date = datetime.datetime(date.year,date.month,int(1))
if useDoy == "yearly" or useDoy == "monthly" or useDoy == "daily_seasonal":
# if the desired year is not available, use the first year or the last year that is available
first_year_in_nc_file = findFirstYearInNCTime(f.variables['time'])
last_year_in_nc_file = findLastYearInNCTime(f.variables['time'])
#
if date.year < first_year_in_nc_file:
if date.day == 29 and date.month == 2 and calendar.isleap(date.year) and calendar.isleap(first_year_in_nc_file) == False:
date = datetime.datetime(first_year_in_nc_file, date.month, 28)
else:
date = datetime.datetime(first_year_in_nc_file, date.month, date.day)
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(dateInput)+" is NOT available. "
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
msg += "\n"
logger.warning(msg)
if date.year > last_year_in_nc_file:
if date.day == 29 and date.month == 2 and calendar.isleap(date.year) and calendar.isleap(last_year_in_nc_file) == False:
date = datetime.datetime(last_year_in_nc_file, date.month, 28)
else:
date = datetime.datetime(last_year_in_nc_file, date.month, date.day)
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(dateInput)+" is NOT available. "
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
msg += "\n"
logger.warning(msg)
try:
idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
select ='exact')
msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is available. The 'exact' option is used while selecting netcdf time."
logger.debug(msg)
except:
msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'exact' option CANNOT be used while selecting netcdf time."
logger.debug(msg)
try:
idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
select = 'before')
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'before' option is used while selecting netcdf time."
msg += "\n"
except:
idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
select = 'after')
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'after' option is used while selecting netcdf time."
msg += "\n"
logger.warning(msg)
idx = int(idx)
logger.debug('Using the date index '+str(idx))
sameClone = True
# check whether clone and input maps have the same attributes:
if cloneMapFileName != None:
# get the attributes of cloneMap
attributeClone = getMapAttributesALL(cloneMapFileName)
cellsizeClone = attributeClone['cellsize']
rowsClone = attributeClone['rows']
colsClone = attributeClone['cols']
xULClone = attributeClone['xUL']
yULClone = attributeClone['yUL']
# get the attributes of input (netCDF)
cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
cellsizeInput = float(cellsizeInput)
rowsInput = len(f.variables['lat'])
colsInput = len(f.variables['lon'])
xULInput = f.variables['lon'][0]-0.5*cellsizeInput
yULInput = f.variables['lat'][0]+0.5*cellsizeInput
# check whether both maps have the same attributes
if cellsizeClone != cellsizeInput: sameClone = False
if rowsClone != rowsInput: sameClone = False
if colsClone != colsInput: sameClone = False
if xULClone != xULInput: sameClone = False
if yULClone != yULInput: sameClone = False
# check data dimensions - this correction is needed for the WFDEI forcing, which includes a level dimension for surface variables (time, height/level, lat, lon)
if f.variables[varName].ndim == 4:
# not standard NC format
logger.warning('WARNING: the netCDF file %s has an additional dimension for variable %s ; the last two are read as latitude, longitude' % (ncFile, varName))
# file with additional layer/dimension
cropData = f.variables[varName][int(idx),0,:,:] # still original data
else:
# standard nc file
cropData = f.variables[varName][int(idx),:,:] # still original data
factor = 1 # needed in regridData2FinerGrid
if sameClone == False:
logger.debug('Crop to the clone map with lower left corner (x,y): '+str(xULClone)+' , '+str(yULClone))
# crop to cloneMap:
#~ xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
minX = min(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
xIdxSta = int(np.where(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
#~ yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
minY = min(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
yIdxSta = int(np.where(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
# retrieve data from netCDF for slice
if f.variables[varName].ndim == 4:
# not standard NC format
logger.warning('WARNING: the netCDF file %s has an additional dimension for variable %s ; the last two are read as latitude, longitude' % (ncFile, varName))
#-file with additional layer
cropData = f.variables[varName][int(idx),0,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd] # selection of original data
else:
# standard nc file
cropData = f.variables[varName][int(idx),yIdxSta:yIdxEnd,xIdxSta:xIdxEnd] # selection of original data
# get resampling factor
factor = int(round(float(cellsizeInput)/float(cellsizeClone)))
if factor > 1: logger.debug('Resample: input cell size = '+str(float(cellsizeInput))+' ; output/clone cell size = '+str(float(cellsizeClone)))
# convert to PCR object and close f
if specificFillValue != None:
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(specificFillValue))
else:
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(f.variables[varName]._FillValue))
#f.close();
f = None ; cropData = None
# PCRaster object
return (outPCR)
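#~ Illustrative usage (the forcing file is hypothetical): read the field for 2001-01-01 and
#~ crop/resample it to the clone map:
#~ precipitation = netcdf2PCRobjClone("forcing/precipitation.nc", "precipitation", "2001-01-01", useDoy = None, cloneMapFileName = "clone_05min.map")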
def netcdf2PCRobjCloneBeforeRensCorrection(
ncFile,varName,dateInput,\
useDoy = None,
cloneMapFileName = None,\
LatitudeLongitude = True,\
specificFillValue = None):
#
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
# Get netCDF file and variable name:
#~ print ncFile
logger.debug('reading variable: '+str(varName)+' from the file: '+str(ncFile))
if ncFile in list(filecache.keys()):
f = filecache[ncFile]
#~ print "Cached: ", ncFile
else:
f = nc.Dataset(ncFile)
filecache[ncFile] = f
#~ print "New: ", ncFile
varName = str(varName)
if LatitudeLongitude == True:
try:
f.variables['lat'] = f.variables['latitude']
f.variables['lon'] = f.variables['longitude']
except:
pass
if varName == "evapotranspiration":
try:
f.variables['evapotranspiration'] = f.variables['referencePotET']
except:
pass
if varName == "kc": # the variable name in PCR-GLOBWB
try:
f.variables['kc'] = \
f.variables['Cropcoefficient'] # the variable name in the netcdf file
except:
pass
if varName == "interceptCapInput": # the variable name in PCR-GLOBWB
try:
f.variables['interceptCapInput'] = \
f.variables['Interceptioncapacity'] # the variable name in the netcdf file
except:
pass
if varName == "coverFractionInput": # the variable name in PCR-GLOBWB
try:
f.variables['coverFractionInput'] = \
f.variables['Coverfraction'] # the variable name in the netcdf file
except:
pass
if varName == "fracVegCover": # the variable name in PCR-GLOBWB
try:
f.variables['fracVegCover'] = \
f.variables['vegetation_fraction'] # the variable name in the netcdf file
except:
pass
if varName == "minSoilDepthFrac": # the variable name in PCR-GLOBWB
try:
f.variables['minSoilDepthFrac'] = \
f.variables['minRootDepthFraction'] # the variable name in the netcdf file
except:
pass
if varName == "maxSoilDepthFrac": # the variable name in PCR-GLOBWB
try:
f.variables['maxSoilDepthFrac'] = \
f.variables['maxRootDepthFraction'] # the variable name in the netcdf file
except:
pass
if varName == "arnoBeta": # the variable name in PCR-GLOBWB
try:
f.variables['arnoBeta'] = \
f.variables['arnoSchemeBeta'] # the variable name in the netcdf file
except:
pass
# date
date = dateInput
if useDoy == "Yes":
logger.debug('Finding the date based on the given climatology doy index (1 to 366, or index 0 to 365)')
idx = int(dateInput) - 1
elif useDoy == "month": # PS: WE NEED THIS ONE FOR NETCDF FILES that contain only 12 monthly values (e.g. cropCoefficientWaterNC).
logger.debug('Finding the date based on the given climatology month index (1 to 12, or index 0 to 11)')
# make sure that date is in the correct format
if isinstance(date, str) == True: date = \
datetime.datetime.strptime(str(date),'%Y-%m-%d')
idx = int(date.month) - 1
else:
# make sure that date is in the correct format
if isinstance(date, str) == True: date = \
datetime.datetime.strptime(str(date),'%Y-%m-%d')
date = datetime.datetime(date.year,date.month,date.day)
if useDoy == "yearly":
date = datetime.datetime(date.year,int(1),int(1))
if useDoy == "monthly":
date = datetime.datetime(date.year,date.month,int(1))
if useDoy == "yearly" or useDoy == "monthly" or useDoy == "daily_seasonal":
# if the desired year is not available, use the first year or the last year that is available
first_year_in_nc_file = findFirstYearInNCTime(f.variables['time'])
last_year_in_nc_file = findLastYearInNCTime(f.variables['time'])
#
if date.year < first_year_in_nc_file:
if date.day == 29 and date.month == 2 and calendar.isleap(date.year) and calendar.isleap(first_year_in_nc_file) == False:
date = datetime.datetime(first_year_in_nc_file, date.month, 28)
else:
date = datetime.datetime(first_year_in_nc_file, date.month, date.day)
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(dateInput)+" is NOT available. "
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
msg += "\n"
logger.warning(msg)
if date.year > last_year_in_nc_file:
if date.day == 29 and date.month == 2 and calendar.isleap(date.year) and calendar.isleap(last_year_in_nc_file) == False:
date = datetime.datetime(last_year_in_nc_file, date.month, 28)
else:
date = datetime.datetime(last_year_in_nc_file, date.month, date.day)
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(dateInput)+" is NOT available. "
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
msg += "\n"
logger.warning(msg)
try:
idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
select ='exact')
msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is available. The 'exact' option is used while selecting netcdf time."
logger.debug(msg)
except:
msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'exact' option CANNOT be used while selecting netcdf time."
logger.debug(msg)
try:
idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
select = 'before')
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'before' option is used while selecting netcdf time."
msg += "\n"
except:
idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
select = 'after')
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'after' option is used while selecting netcdf time."
msg += "\n"
logger.warning(msg)
idx = int(idx)
logger.debug('Using the date index '+str(idx))
sameClone = True
# check whether clone and input maps have the same attributes:
if cloneMapFileName != None:
# get the attributes of cloneMap
attributeClone = getMapAttributesALL(cloneMapFileName)
cellsizeClone = attributeClone['cellsize']
rowsClone = attributeClone['rows']
colsClone = attributeClone['cols']
xULClone = attributeClone['xUL']
yULClone = attributeClone['yUL']
# get the attributes of input (netCDF)
cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
cellsizeInput = float(cellsizeInput)
rowsInput = len(f.variables['lat'])
colsInput = len(f.variables['lon'])
xULInput = f.variables['lon'][0]-0.5*cellsizeInput
yULInput = f.variables['lat'][0]+0.5*cellsizeInput
# check whether both maps have the same attributes
if cellsizeClone != cellsizeInput: sameClone = False
if rowsClone != rowsInput: sameClone = False
if colsClone != colsInput: sameClone = False
if xULClone != xULInput: sameClone = False
if yULClone != yULInput: sameClone = False
cropData = f.variables[varName][int(idx),:,:] # still original data
factor = 1 # needed in regridData2FinerGrid
# a bug fix for the file "Tair_daily_EI_1979_to_2014_30arcmin.nc" # TODO: FIX ME
if varName == "Tair": cropData = f.variables[varName][int(idx),0,:,:]
if sameClone == False:
logger.debug('Crop to the clone map with lower left corner (x,y): '+str(xULClone)+' , '+str(yULClone))
# crop to cloneMap:
#~ xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
minX = min(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
xIdxSta = int(np.where(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
#~ yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
minY = min(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
yIdxSta = int(np.where(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
#~ cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
cropData = cropData[yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
factor = int(round(float(cellsizeInput)/float(cellsizeClone)))
if factor > 1: logger.debug('Resample: input cell size = '+str(float(cellsizeInput))+' ; output/clone cell size = '+str(float(cellsizeClone)))
# convert to PCR object and close f
if specificFillValue != None:
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(specificFillValue))
else:
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(f.variables[varName]._FillValue))
#f.close();
f = None ; cropData = None
# PCRaster object
return (outPCR)
def netcdf2PCRobjCloneJOYCE(ncFile,varName,dateInput,\
useDoy = None,
cloneMapFileName = None,\
LatitudeLongitude = True,\
specificFillValue = None):
#
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
# Get netCDF file and variable name:
#~ print ncFile
logger.debug('reading variable: '+str(varName)+' from the file: '+str(ncFile))
if ncFile in list(filecache.keys()):
f = filecache[ncFile]
#~ print "Cached: ", ncFile
else:
f = nc.Dataset(ncFile)
filecache[ncFile] = f
#~ print "New: ", ncFile
varName = str(varName)
if LatitudeLongitude == True:
try:
f.variables['lat'] = f.variables['latitude']
f.variables['lon'] = f.variables['longitude']
except:
pass
if varName == "evapotranspiration":
try:
f.variables['evapotranspiration'] = f.variables['referencePotET']
except:
pass
if varName == "kc": # the variable name in PCR-GLOBWB
try:
f.variables['kc'] = \
f.variables['Cropcoefficient'] # the variable name in the netcdf file
except:
pass
if varName == "interceptCapInput": # the variable name in PCR-GLOBWB
try:
f.variables['interceptCapInput'] = \
f.variables['Interceptioncapacity'] # the variable name in the netcdf file
except:
pass
if varName == "coverFractionInput": # the variable name in PCR-GLOBWB
try:
f.variables['coverFractionInput'] = \
f.variables['Coverfraction'] # the variable name in the netcdf file
except:
pass
if varName == "fracVegCover": # the variable name in PCR-GLOBWB
try:
f.variables['fracVegCover'] = \
f.variables['vegetation_fraction'] # the variable name in the netcdf file
except:
pass
if varName == "arnoBeta": # the variable name in PCR-GLOBWB
try:
f.variables['arnoBeta'] = \
f.variables['arnoSchemeBeta'] # the variable name in the netcdf file
except:
pass
# date
date = dateInput
if useDoy == "Yes":
logger.debug('Finding the date based on the given climatology doy index (1 to 366, or index 0 to 365)')
idx = int(dateInput) - 1
else:
# make sure that date is in the correct format
if isinstance(date, str) == True: date = \
datetime.datetime.strptime(str(date),'%Y-%m-%d')
date = datetime.datetime(date.year,date.month,date.day)
if useDoy == "month":
logger.debug('Finding the date based on the given climatology month index (1 to 12, or index 0 to 11)')
idx = int(date.month) - 1
if useDoy == "yearly":
date = datetime.datetime(date.year,int(1),int(1))
if useDoy == "monthly":
date = datetime.datetime(date.year,date.month,int(1))
if useDoy == "yearly" or useDoy == "monthly" or useDoy == "daily_seasonal":
# if the desired year is not available, use the first year or the last year that is available
first_year_in_nc_file = findFirstYearInNCTime(f.variables['time'])
last_year_in_nc_file = findLastYearInNCTime(f.variables['time'])
#
if date.year < first_year_in_nc_file:
date = datetime.datetime(first_year_in_nc_file,date.month,date.day)
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(dateInput)+" is NOT available. "
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
msg += "\n"
logger.warning(msg)
if date.year > last_year_in_nc_file:
date = datetime.datetime(last_year_in_nc_file,date.month,date.day)
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(dateInput)+" is NOT available. "
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
msg += "\n"
logger.warning(msg)
try:
idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
select ='exact')
msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is available. The 'exact' option is used while selecting netcdf time."
logger.debug(msg)
except:
msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'exact' option CANNOT be used while selecting netcdf time."
logger.debug(msg)
try:
idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
select = 'before')
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'before' option is used while selecting netcdf time."
msg += "\n"
except:
idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
select = 'after')
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'after' option is used while selecting netcdf time."
msg += "\n"
logger.warning(msg)
idx = int(idx)
logger.debug('Using the date index '+str(idx))
cropData = f.variables[varName][int(idx),:,:].copy() # still original data
factor = 1 # needed in regridData2FinerGrid
# store latitudes and longitudes to a new variable
latitude = f.variables['lat']
longitude = f.variables['lon']
# check the orientation of the latitude and flip it if necessary
we_have_to_flip = False
if (latitude[0]- latitude[1]) < 0.0:
we_have_to_flip = True
latitude = np.flipud(latitude)
sameClone = True
# check whether clone and input maps have the same attributes:
if cloneMapFileName != None:
# get the attributes of cloneMap
attributeClone = getMapAttributesALL(cloneMapFileName)
cellsizeClone = attributeClone['cellsize']
rowsClone = attributeClone['rows']
colsClone = attributeClone['cols']
xULClone = attributeClone['xUL']
yULClone = attributeClone['yUL']
# get the attributes of input (netCDF)
cellsizeInput = latitude[0]- latitude[1]
cellsizeInput = float(cellsizeInput)
rowsInput = len(latitude)
colsInput = len(longitude)
xULInput = longitude[0]-0.5*cellsizeInput
yULInput = latitude[0] +0.5*cellsizeInput
# check whether both maps have the same attributes
if cellsizeClone != cellsizeInput: sameClone = False
if rowsClone != rowsInput: sameClone = False
if colsClone != colsInput: sameClone = False
if xULClone != xULInput: sameClone = False
if yULClone != yULInput: sameClone = False
# flip cropData if necessary
if we_have_to_flip:
#~ cropData = cropData[::-1,:]
#~ cropData = cropData[::-1,:].copy()
#~ cropData = np.flipud(cropData)
#~ cropData = np.flipud(cropData)
#~ cropData = np.flipud(cropData).copy()
#~ original = cropData.copy()
#~
#~ print id(cropData)
#~ print id(original)
#~ cropData = None
#~ del cropData
#~ cropData = np.flipud(original).copy()
#~ print type(cropData)
#~ cropData2 = cropData[::-1,:]
#~ cropData = None
#~ cropData = original[::-1,:]
#~ cropData = cropData[::-1,:]
cropData = cropData[::-1,:]
print(type(cropData))
print("Test test tet")
print(id(cropData))
#~ print id(original)
#~ cropData = cropData[::-1,:].copy()
pcr_map = pcr.numpy2pcr(pcr.Scalar, cropData, -999.9)
pcr.report(pcr_map, "test2.map")
os.system("aguila test2.map")
if sameClone == False:
logger.debug('Crop to the clone map with lower left corner (x,y): '+str(xULClone)+' , '+str(yULClone))
# crop to cloneMap:
minX = min(abs(longitude[:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
xIdxSta = int(np.where(abs(longitude[:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
minY = min(abs(latitude[:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
yIdxSta = int(np.where(abs(latitude[:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
cropData = cropData[yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
factor = int(round(float(cellsizeInput)/float(cellsizeClone)))
if factor > 1: logger.debug('Resample: input cell size = '+str(float(cellsizeInput))+' ; output/clone cell size = '+str(float(cellsizeClone)))
# convert to PCR object and close f
if specificFillValue != None:
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(specificFillValue))
else:
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(f.variables[varName]._FillValue))
#f.close();
f = None ; cropData = None
# PCRaster object
return (outPCR)
def netcdf2PCRobjCloneWindDist(ncFile,varName,dateInput,useDoy = None,
cloneMapFileName=None):
# EHS (02 SEP 2013): This is a special function made by Niko Wanders (for his DA framework).
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
# Get netCDF file and variable name:
f = nc.Dataset(ncFile)
varName = str(varName)
# date
date = dateInput
if useDoy == "Yes":
idx = dateInput - 1
else:
if isinstance(date, str) == True: date = \
datetime.datetime.strptime(str(date),'%Y-%m-%d')
date = datetime.datetime(date.year,date.month,date.day)
# time index (in the netCDF file)
nctime = f.variables['time'] # A netCDF time variable object.
idx = nc.date2index(date, nctime, calendar=nctime.calendar, \
select='exact')
idx = int(idx)
sameClone = True
# check whether clone and input maps have the same attributes:
if cloneMapFileName != None:
# get the attributes of cloneMap
attributeClone = getMapAttributesALL(cloneMapFileName)
cellsizeClone = attributeClone['cellsize']
rowsClone = attributeClone['rows']
colsClone = attributeClone['cols']
xULClone = attributeClone['xUL']
yULClone = attributeClone['yUL']
# get the attributes of input (netCDF)
cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
cellsizeInput = float(cellsizeInput)
rowsInput = len(f.variables['lat'])
colsInput = len(f.variables['lon'])
xULInput = f.variables['lon'][0]-0.5*cellsizeInput
yULInput = f.variables['lat'][0]+0.5*cellsizeInput
# check whether both maps have the same attributes
if cellsizeClone != cellsizeInput: sameClone = False
if rowsClone != rowsInput: sameClone = False
if colsClone != colsInput: sameClone = False
if xULClone != xULInput: sameClone = False
if yULClone != yULInput: sameClone = False
cropData = f.variables[varName][int(idx),:,:] # still original data
factor = 1 # needed in regridData2FinerGrid
if sameClone == False:
# crop to cloneMap:
xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
factor = int(float(cellsizeInput)/float(cellsizeClone))
# convert to PCR object and close f
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(0.0))
f.close();
f = None ; cropData = None
# PCRaster object
return (outPCR)
def netcdf2PCRobjCloneWind(ncFile,varName,dateInput,useDoy = None,
cloneMapFileName=None):
# EHS (02 SEP 2013): This is a special function made by Niko Wanders (for his DA framework).
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
# Get netCDF file and variable name:
f = nc.Dataset(ncFile)
varName = str(varName)
# date
date = dateInput
if useDoy == "Yes":
idx = dateInput - 1
else:
if isinstance(date, str) == True: date = \
datetime.datetime.strptime(str(date),'%Y-%m-%d')
date = datetime.datetime(date.year,date.month,date.day, 0, 0)
# time index (in the netCDF file)
nctime = f.variables['time'] # A netCDF time variable object.
idx = nc.date2index(date, nctime, select="exact")
idx = int(idx)
sameClone = True
# check whether clone and input maps have the same attributes:
if cloneMapFileName != None:
# get the attributes of cloneMap
attributeClone = getMapAttributesALL(cloneMapFileName)
cellsizeClone = attributeClone['cellsize']
rowsClone = attributeClone['rows']
colsClone = attributeClone['cols']
xULClone = attributeClone['xUL']
yULClone = attributeClone['yUL']
# get the attributes of input (netCDF)
cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
cellsizeInput = float(cellsizeInput)
rowsInput = len(f.variables['lat'])
colsInput = len(f.variables['lon'])
xULInput = f.variables['lon'][0]-0.5*cellsizeInput
yULInput = f.variables['lat'][0]+0.5*cellsizeInput
# check whether both maps have the same attributes
if cellsizeClone != cellsizeInput: sameClone = False
if rowsClone != rowsInput: sameClone = False
if colsClone != colsInput: sameClone = False
if xULClone != xULInput: sameClone = False
if yULClone != yULInput: sameClone = False
cropData = f.variables[varName][int(idx),:,:] # still original data
factor = 1 # needed in regridData2FinerGrid
if sameClone == False:
# crop to cloneMap:
xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
factor = int(float(cellsizeInput)/float(cellsizeClone))
# convert to PCR object and close f
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(f.variables[varName]._FillValue))
f.close();
f = None ; cropData = None
# PCRaster object
return (outPCR)
def netcdf2PCRobj(ncFile,varName,dateInput):
# EHS (04 APR 2013): To convert netCDF (tss) file to PCR file.
# The cloneMap is globally defined (outside this method).
# Get netCDF file and variable name:
f = nc.Dataset(ncFile)
varName = str(varName)
# date
date = dateInput
if isinstance(date, str) == True: date = \
datetime.datetime.strptime(str(date),'%Y-%m-%d')
date = datetime.datetime(date.year,date.month,date.day)
# time index (in the netCDF file)
nctime = f.variables['time'] # A netCDF time variable object.
idx = nc.date2index(date, nctime, calendar=nctime.calendar, \
select='exact')
# convert to PCR object and close f
outPCR = pcr.numpy2pcr(pcr.Scalar,(f.variables[varName][idx].data), \
float(f.variables[varName]._FillValue))
f.close(); f = None ; del f
# PCRaster object
return (outPCR)
def makeDir(directoryName):
try:
os.makedirs(directoryName)
except OSError:
pass
def writePCRmapToDir(v,outFileName,outDir):
# v: the PCRaster map (or value) to be written to outDir/outFileName
fullFileName = getFullPath(outFileName,outDir)
logger.debug('Writing a pcraster map to : '+str(fullFileName))
pcr.report(v,fullFileName)
def readPCRmapClone(v,cloneMapFileName,tmpDir,absolutePath=None,isLddMap=False,cover=None,isNomMap=False):
# v: inputMapFileName or floating values
# cloneMapFileName: If the inputMap and cloneMap have different clones,
# resampling will be done.
logger.debug('read file/values: '+str(v))
if v == "None":
#~ PCRmap = str("None")
PCRmap = None # 29 July: I made an experiment by changing the type of this object.
elif not re.match(r"[0-9.-]*$",v):
if absolutePath != None: v = getFullPath(v,absolutePath)
# print(v)
sameClone = isSameClone(v,cloneMapFileName)
if sameClone == True:
PCRmap = pcr.readmap(v)
else:
# resample using GDAL:
output = tmpDir+'temp.map'
warp = gdalwarpPCR(v,output,cloneMapFileName,tmpDir,isLddMap,isNomMap)
# read from temporary file and delete the temporary file:
PCRmap = pcr.readmap(output)
if isLddMap == True: PCRmap = pcr.ifthen(pcr.scalar(PCRmap) < 10., PCRmap)
if isLddMap == True: PCRmap = pcr.ldd(PCRmap)
if isNomMap == True: PCRmap = pcr.ifthen(pcr.scalar(PCRmap) > 0., PCRmap)
if isNomMap == True: PCRmap = pcr.nominal(PCRmap)
if os.path.isdir(tmpDir):
shutil.rmtree(tmpDir)
os.makedirs(tmpDir)
else:
PCRmap = pcr.spatial(pcr.scalar(float(v)))
if cover != None:
PCRmap = pcr.cover(PCRmap, cover)
co = None; cOut = None; err = None; warp = None
del co; del cOut; del err; del warp
stdout = None; del stdout
stderr = None; del stderr
return PCRmap
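#~ Illustrative usage (paths are hypothetical): a map file is resampled to the clone when the
#~ clones differ, while a plain number becomes a spatial scalar field:
#~ slope = readPCRmapClone("input/slope.map", "clone_05min.map", "tmp/")
#~ albedo = readPCRmapClone("0.23", "clone_05min.map", "tmp/")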
def readPCRmap(v):
# v : fileName or floating values
if not re.match(r"[0-9.-]*$", v):
PCRmap = pcr.readmap(v)
else:
PCRmap = pcr.scalar(float(v))
return PCRmap
def isSameClone(inputMapFileName,cloneMapFileName):
# reading inputMap:
attributeInput = getMapAttributesALL(inputMapFileName)
cellsizeInput = attributeInput['cellsize']
rowsInput = attributeInput['rows']
colsInput = attributeInput['cols']
xULInput = attributeInput['xUL']
yULInput = attributeInput['yUL']
# reading cloneMap:
attributeClone = getMapAttributesALL(cloneMapFileName)
cellsizeClone = attributeClone['cellsize']
rowsClone = attributeClone['rows']
colsClone = attributeClone['cols']
xULClone = attributeClone['xUL']
yULClone = attributeClone['yUL']
# check whether both maps have the same attributes?
sameClone = True
if cellsizeClone != cellsizeInput: sameClone = False
if rowsClone != rowsInput: sameClone = False
if colsClone != colsInput: sameClone = False
if xULClone != xULInput: sameClone = False
if yULClone != yULInput: sameClone = False
return sameClone
def gdalwarpPCR(input,output,cloneOut,tmpDir,isLddMap=False,isNominalMap=False):
# 19 Mar 2013 created by Edwin H. Sutanudjaja
# all input maps must be in PCRaster maps
#
# remove temporary files:
co = 'rm '+str(tmpDir)+'*.*'
cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
#
# converting files to tif:
co = 'gdal_translate -ot Float64 '+str(input)+' '+str(tmpDir)+'tmp_inp.tif'
if isLddMap == True: co = 'gdal_translate -ot Int32 '+str(input)+' '+str(tmpDir)+'tmp_inp.tif'
if isNominalMap == True: co = 'gdal_translate -ot Int32 '+str(input)+' '+str(tmpDir)+'tmp_inp.tif'
cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
#
# get the attributes of PCRaster map:
cloneAtt = getMapAttributesALL(cloneOut)
xmin = cloneAtt['xUL']
ymin = cloneAtt['yUL'] - cloneAtt['rows']*cloneAtt['cellsize']
xmax = cloneAtt['xUL'] + cloneAtt['cols']*cloneAtt['cellsize']
ymax = cloneAtt['yUL']
xres = cloneAtt['cellsize']
yres = cloneAtt['cellsize']
te = '-te '+str(xmin)+' '+str(ymin)+' '+str(xmax)+' '+str(ymax)+' '
tr = '-tr '+str(xres)+' '+str(yres)+' '
co = 'gdalwarp '+te+tr+ \
' -srcnodata -3.4028234663852886e+38 -dstnodata mv '+ \
str(tmpDir)+'tmp_inp.tif '+ \
str(tmpDir)+'tmp_out.tif'
cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
#
co = 'gdal_translate -of PCRaster '+ \
str(tmpDir)+'tmp_out.tif '+str(output)
cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
#
co = 'mapattr -c '+str(cloneOut)+' '+str(output)
cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
#
#~ co = 'aguila '+str(output)
#~ print(co)
#~ cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
#
co = 'rm '+str(tmpDir)+'tmp*.*'
cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
co = None; cOut = None; err = None
del co; del cOut; del err
stdout = None; del stdout
stderr = None; del stderr
n = gc.collect() ; del gc.garbage[:] ; n = None ; del n
def getFullPath(inputPath,absolutePath,completeFileName = True):
# 19 Mar 2013 created by Edwin H. Sutanudjaja
# Function: to get the full absolute path of a folder or a file
# replace all \ with /
inputPath = str(inputPath).replace("\\", "/")
absolutePath = str(absolutePath).replace("\\", "/")
# tuple of suffixes (extensions) that can be used:
suffix = ('/','_','.nc4','.map','.nc','.dat','.txt','.asc','.ldd','.tbl',\
'.001','.002','.003','.004','.005','.006',\
'.007','.008','.009','.010','.011','.012')
if inputPath.startswith('/') or str(inputPath)[1] == ":" or inputPath.startswith('http'):
fullPath = str(inputPath)
else:
if absolutePath.endswith('/'):
absolutePath = str(absolutePath)
else:
absolutePath = str(absolutePath)+'/'
fullPath = str(absolutePath)+str(inputPath)
if completeFileName:
if fullPath.endswith(suffix):
fullPath = str(fullPath)
else:
fullPath = str(fullPath)+'/'
return fullPath
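#~ Examples of the intended behaviour (paths are hypothetical):
#~ getFullPath("meteo/temperature.nc", "/projects/pcrglobwb") -> "/projects/pcrglobwb/meteo/temperature.nc"
#~ getFullPath("/data/temperature.nc", "/projects/pcrglobwb") -> "/data/temperature.nc" (already absolute, returned unchanged)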
def findISIFileName(year,model,rcp,prefix,var):
histYears = [1951,1961,1971,1981,1991,2001]
sYears = [2011,2021,2031,2041,2051,2061,2071,2081,2091]
rcpStr = rcp
if year >= sYears[0]:
sYear = [i for i in range(len(sYears)) if year >= sYears[i]]
sY = sYears[sYear[-1]]
elif year < histYears[-1]:
sYear = [i for i in range(len(histYears)) if year >= histYears[i] ]
sY = histYears[sYear[-1]]
if year >= histYears[-1] and year < sYears[0]:
if model == 'HadGEM2-ES':
if year < 2005:
rcpStr = 'historical'
sY = 2001
eY = 2004
else:
rcpStr = rcp
sY = 2005
eY = 2010
if model == 'IPSL-CM5A-LR' or model == 'GFDL-ESM2M':
if year < 2006:
rcpStr = 'historical'
sY = 2001
eY = 2005
else:
rcpStr = rcp
sY = 2006
eY = 2010
else:
eY = sY + 9
if sY == 2091:
eY = 2099
if model == 'HadGEM2-ES':
if year < 2005:
rcpStr = 'historical'
if model == 'IPSL-CM5A-LR' or model == 'GFDL-ESM2M':
if year < 2006:
rcpStr = 'historical'
#print year,sY,eY
return "%s_%s_%s_%s_%i-%i.nc" %(var,prefix,model.lower(),rcpStr,sY,eY)
def get_random_word(wordLen):
word = ''
for i in range(wordLen):
word += random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789')
return word
def isLastDayOfMonth(date):
if (date + datetime.timedelta(days=1 )).day == 1:
return True
else:
return False
def getMapAttributesALL(cloneMap,arcDegree=True):
cOut,err = subprocess.Popen(str('mapattr -p %s ' %(cloneMap)), stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
if err !=None or cOut == []:
        print("Something went wrong with mapattr in virtualOS; maybe the clone map does not exist?")
sys.exit()
cellsize = float(cOut.split()[7])
if arcDegree == True: cellsize = round(cellsize * 360000.)/360000.
mapAttr = {'cellsize': float(cellsize) ,\
'rows' : float(cOut.split()[3]) ,\
'cols' : float(cOut.split()[5]) ,\
'xUL' : float(cOut.split()[17]),\
'yUL' : float(cOut.split()[19])}
co = None; cOut = None; err = None
del co; del cOut; del err
n = gc.collect() ; del gc.garbage[:] ; n = None ; del n
return mapAttr
def getMapAttributes(cloneMap,attribute,arcDegree=True):
cOut,err = subprocess.Popen(str('mapattr -p %s ' %(cloneMap)), stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
#print cOut
if err !=None or cOut == []:
        print("Something went wrong with mapattr in virtualOS; maybe the clone map does not exist?")
sys.exit()
#print cOut.split()
co = None; err = None
del co; del err
n = gc.collect() ; del gc.garbage[:] ; n = None ; del n
if attribute == 'cellsize':
cellsize = float(cOut.split()[7])
if arcDegree == True: cellsize = round(cellsize * 360000.)/360000.
return cellsize
if attribute == 'rows':
return int(cOut.split()[3])
#return float(cOut.split()[3])
if attribute == 'cols':
return int(cOut.split()[5])
#return float(cOut.split()[5])
if attribute == 'xUL':
return float(cOut.split()[17])
if attribute == 'yUL':
return float(cOut.split()[19])
def getMapTotal(mapFile):
''' outputs the sum of all values in a map file '''
total, valid = pcr.cellvalue(pcr.maptotal(mapFile),1)
return total
def getMapTotalHighPrecisionButOnlyForPositiveValues_NEEDMORETEST(mapFile):
''' outputs the sum of all values in a map file '''
# STILL UNDER DEVELOPMENT - NOT FULLY TESTED
# input map - note that all values must be positive
remainingMapValue = pcr.max(0.0, mapFile)
# loop from biggest values
min_power_number = 0 # The minimum value is zero.
max_power_number = int(pcr.mapmaximum(pcr.log10(remainingMapValue))) + 1
step = 1
total_map_for_every_power_number = {}
for power_number in range(max_power_number, min_power_number - step, -step):
# cell value in this loop
currentCellValue = pcr.rounddown(remainingMapValue * pcr.scalar(10.**(power_number))) / pcr.scalar(10.**(power_number))
if power_number == min_power_number: currentCellValue = remainingMapValue
# map total in this loop
total_in_this_loop, valid = pcr.cellvalue(pcr.maptotal(currentCellValue), 1)
total_map_for_every_power_number[str(power_number)] = total_in_this_loop
# remaining map value
remainingMapValue = pcr.max(0.0, remainingMapValue - currentCellValue)
# sum from the smallest values (minimizing numerical errors)
total = pcr.scalar(0.0)
for power_number in range(min_power_number, max_power_number + step, step):
total += total_map_for_every_power_number[str(power_number)]
return total
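# Illustrative aside (not part of the original module): the function above accumulates
# the per-magnitude totals "from the smallest values" to limit floating point error.
# The plain-Python sketch below shows why the order of summation matters; the numbers
# are chosen only for illustration.
def _example_summation_order():
    import math
    values = [1.0e16] + [1.0] * 1000
    naive = 0.0
    for v in values:               # large value first: every later +1.0 is rounded away
        naive += v                 # -> 1.0e16
    small_first = 0.0
    for v in sorted(values):       # small values first: their partial sum survives
        small_first += v           # -> 1.0000000000001e16
    reference = math.fsum(values)  # compensated summation, exact here
    return naive, small_first, reference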
def get_rowColAboveThreshold(map, threshold):
npMap = pcr.pcr2numpy(map, -9999)
(nr, nc) = np.shape(npMap)
for r in range(0, nr):
for c in range(0, nc):
if npMap[r, c] != -9999:
if np.abs(npMap[r, c]) > threshold:
return (r, c)
def getLastDayOfMonth(date):
''' returns the last day of the month for a given date '''
if date.month == 12:
return date.replace(day=31)
return date.replace(month=date.month + 1, day=1) - datetime.timedelta(days=1)
def getMinMaxMean(mapFile,ignoreEmptyMap=False):
mn = pcr.cellvalue(pcr.mapminimum(mapFile),1)[0]
mx = pcr.cellvalue(pcr.mapmaximum(mapFile),1)[0]
nrValues = pcr.cellvalue(pcr.maptotal(pcr.scalar(pcr.defined(mapFile))), 1 )[0] #/ getNumNonMissingValues(mapFile)
if nrValues == 0.0 and ignoreEmptyMap:
return 0.0,0.0,0.0
else:
return mn,mx,(getMapTotal(mapFile) / nrValues)
def getMapVolume(mapFile, cellareaFile):
    ''' returns the total volume, i.e. the sum over all cells of cell value times cell area '''
volume = mapFile * cellareaFile
return (getMapTotal(volume) / 1)
def secondsPerDay():
return float(3600 * 24)
def getValDivZero(x,y,y_lim=smallNumber,z_def= 0.):
#-returns the result of a division that possibly involves a zero
# denominator; in which case, a default value is substituted:
# x/y= z in case y > y_lim,
# x/y= z_def in case y <= y_lim, where y_lim -> 0.
# z_def is set to zero if not otherwise specified
return pcr.ifthenelse(y > y_lim,x/pcr.max(y_lim,y),z_def)
def getValFloatDivZero(x,y,y_lim,z_def= 0.):
#-returns the result of a division that possibly involves a zero
# denominator; in which case, a default value is substituted:
# x/y= z in case y > y_lim,
# x/y= z_def in case y <= y_lim, where y_lim -> 0.
# z_def is set to zero if not otherwise specified
if y > y_lim:
return x / max(y_lim,y)
else:
return z_def
def retrieveMapValue(pcrX,coordinates):
#-retrieves values from a map and returns an array conform the IDs stored in properties
nrRows= coordinates.shape[0]
x= np.ones((nrRows))* MV
tmpIDArray= pcr.pcr2numpy(pcrX,MV)
for iCnt in range(nrRows):
row,col= coordinates[iCnt,:]
if row != MV and col != MV:
x[iCnt]= tmpIDArray[row,col]
return x
def returnMapValue(pcrX,x,coord):
#-retrieves value from an array and update values in the map
if x.ndim == 1:
nrRows= 1
tempIDArray= pcr.pcr2numpy(pcrX,MV)
#print tempIDArray
temporary= tempIDArray
nrRows= coord.shape[0]
for iCnt in range(nrRows):
row,col= coord[iCnt,:]
if row != MV and col != MV:
tempIDArray[row,col]= (x[iCnt])
# print iCnt,row,col,x[iCnt]
pcrX= pcr.numpy2pcr(pcr.Scalar,tempIDArray,MV)
return pcrX
def getQAtBasinMouths(discharge, basinMouth):
temp = pcr.ifthenelse(basinMouth != 0 , discharge * secondsPerDay(),0.)
pcr.report(temp,"temp.map")
return (getMapTotal(temp) / 1e9)
def regridMapFile2FinerGrid (rescaleFac,coarse):
if rescaleFac ==1:
return coarse
return pcr.numpy2pcr(pcr.Scalar, regridData2FinerGrid(rescaleFac,pcr.pcr2numpy(coarse,MV),MV),MV)
def regridData2FinerGrid(rescaleFac,coarse,MV):
if rescaleFac ==1:
return coarse
nr,nc = np.shape(coarse)
fine= np.zeros(nr*nc*rescaleFac*rescaleFac).reshape(nr*rescaleFac,nc*rescaleFac) + MV
ii = -1
nrF,ncF = np.shape(fine)
for i in range(0 , nrF):
if i % rescaleFac == 0:
ii += 1
fine [i,:] = coarse[ii,:].repeat(rescaleFac)
nr = None; nc = None
del nr; del nc
nrF = None; ncF = None
del nrF; del ncF
n = gc.collect() ; del gc.garbage[:] ; n = None ; del n
return fine
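# Illustrative aside (not part of the original module): regridData2FinerGrid() simply
# repeats every coarse cell into a rescaleFac x rescaleFac block of fine cells.
def _example_regridData2FinerGrid():
    import numpy as np
    coarse = np.array([[1., 2.],
                       [3., 4.]])
    fine = regridData2FinerGrid(2, coarse, MV)
    # fine is now:
    # [[1. 1. 2. 2.]
    #  [1. 1. 2. 2.]
    #  [3. 3. 4. 4.]
    #  [3. 3. 4. 4.]]
    return fine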
def regridToCoarse(fine,fac,mode,missValue):
nr,nc = np.shape(fine)
coarse = np.zeros(nr/fac * nc / fac).reshape(nr/fac,nc/fac) + MV
nr,nc = np.shape(coarse)
for r in range(0,nr):
for c in range(0,nc):
ar = fine[r * fac : fac * (r+1),c * fac: fac * (c+1)]
m = np.ma.masked_values(ar,missValue)
if ma.count(m) == 0:
coarse[r,c] = MV
else:
if mode == 'average':
coarse [r,c] = ma.average(m)
elif mode == 'median':
coarse [r,c] = ma.median(m)
elif mode == 'sum':
coarse [r,c] = ma.sum(m)
elif mode =='min':
coarse [r,c] = ma.min(m)
elif mode == 'max':
coarse [r,c] = ma.max(m)
return coarse
def waterBalanceCheck(fluxesIn,fluxesOut,preStorages,endStorages,processName,PrintOnlyErrors,dateStr,threshold=1e-5,landmask=None):
""" Returns the water balance for a list of input, output, and storage map files """
# modified by Edwin (22 Apr 2013)
inMap = pcr.spatial(pcr.scalar(0.0))
outMap = pcr.spatial(pcr.scalar(0.0))
dsMap = pcr.spatial(pcr.scalar(0.0))
for fluxIn in fluxesIn:
inMap += fluxIn
for fluxOut in fluxesOut:
outMap += fluxOut
for preStorage in preStorages:
dsMap += preStorage
for endStorage in endStorages:
dsMap -= endStorage
a,b,c = getMinMaxMean(inMap + dsMap- outMap)
if abs(a) > threshold or abs(b) > threshold:
if PrintOnlyErrors:
msg = "\n"
msg += "\n"
            msg += "\n"
msg += "\n"
msg += "##############################################################################################################################################\n"
msg += "WARNING !!!!!!!! Water Balance Error %s Min %f Max %f Mean %f" %(processName,a,b,c)
msg += "\n"
msg += "##############################################################################################################################################\n"
msg += "\n"
msg += "\n"
msg += "\n"
logger.error(msg)
#~ pcr.report(inMap + dsMap - outMap,"wb.map")
#~ os.system("aguila wb.map")
#~ # for debugging:
#~ error = inMap + dsMap- outMap
#~ os.system('rm error.map')
#~ pcr.report(error,"error.map")
#~ os.system('aguila error.map')
#~ os.system('rm error.map')
#~ wb = inMap + dsMap - outMap
#~ maxWBError = pcr.cellvalue(pcr.mapmaximum(pcr.abs(wb)), 1, 1)[0]
#~ #return wb
def waterBalance( fluxesIn, fluxesOut, deltaStorages, processName, PrintOnlyErrors, dateStr,threshold=1e-5):
    """ Returns the water balance for a list of input, output, and storage map files. """
inMap = pcr.spatial(pcr.scalar(0.0))
dsMap = pcr.spatial(pcr.scalar(0.0))
outMap = pcr.spatial(pcr.scalar(0.0))
inflow = 0
outflow = 0
deltaS = 0
for fluxIn in fluxesIn:
inflow += getMapTotal(fluxIn)
inMap += fluxIn
for fluxOut in fluxesOut:
outflow += getMapTotal(fluxOut)
outMap += fluxOut
for deltaStorage in deltaStorages:
deltaS += getMapTotal(deltaStorage)
dsMap += deltaStorage
#if PrintOnlyErrors:
a,b,c = getMinMaxMean(inMap + dsMap- outMap)
# if abs(a) > 1e-5 or abs(b) > 1e-5:
# if abs(a) > 1e-4 or abs(b) > 1e-4:
if abs(a) > threshold or abs(b) > threshold:
print("WBError %s Min %f Max %f Mean %f" %(processName,a,b,c))
# if abs(inflow + deltaS - outflow) > 1e-5:
# print "Water balance Error for %s on %s: in = %f\tout=%f\tdeltaS=%f\tBalance=%f" \
# %(processName,dateStr,inflow,outflow,deltaS,inflow + deltaS - outflow)
#else:
# print "Water balance for %s: on %s in = %f\tout=%f\tdeltaS=%f\tBalance=%f" \
# %(processName,dateStr,inflow,outflow,deltaS,inflow + deltaS - outflow)
wb = inMap + dsMap - outMap
maxWBError = pcr.cellvalue(pcr.mapmaximum(pcr.abs(wb)), 1, 1)[0]
#if maxWBError > 0.001 / 1000:
#row = 0
#col = 0
#cellID = 1
#troubleCell = 0
#print "Water balance for %s on %s: %f mm !!! " %(processName,dateStr,maxWBError * 1000)
#pcr.report(wb,"%s-WaterBalanceError-%s" %(processName,dateStr))
#npWBMError = pcr2numpy(wb, -9999)
#(nr, nc) = np.shape(npWBMError)
#for r in range(0, nr):
#for c in range(0, nc):
## print r,c
#if npWBMError[r, c] != -9999.0:
#val = npWBMError[r, c]
#if math.fabs(val) > 0.0001 / 1000:
## print npWBMError[r,c]
#row = r
#col = c
#troubleCell = cellID
#cellID += 1
#print 'Water balance for %s on %s: %f mm row %i col %i cellID %i!!! ' % (
#processName,
#dateStr,
#maxWBError * 1000,
#row,
#col,
#troubleCell,
#)
return inMap + dsMap - outMap
def waterAbstractionAndAllocationHighPrecision_NEEDMORETEST(water_demand_volume, \
available_water_volume, \
allocation_zones,\
zone_area = None,
debug_water_balance = True,\
extra_info_for_water_balance_reporting = ""):
# STILL UNDER DEVELOPMENT - NOT FULLY TESTED
logger.debug("Allocation of abstraction. - using high precision option")
# demand volume in each cell (unit: m3)
remainingcellVolDemand = pcr.max(0.0, water_demand_volume)
# available water volume in each cell
remainingCellAvlWater = pcr.max(0.0, available_water_volume)
# loop from biggest values of cellAvlWater
min_power_number = 0 # The minimum value is zero.
max_power_number = int(pcr.mapmaximum(pcr.log10(remainingCellAvlWater))) + 1
step = 1
cell_abstrac_for_every_power_number = {}
cell_allocat_for_every_power_number = {}
for power_number in range(max_power_number, min_power_number - step, -step):
logger.debug("Allocation of abstraction. - using high precision option - loop power number: " + str(power_number))
# cell available water in this loop
cellAvlWater = pcr.rounddown(remainingCellAvlWater * pcr.scalar(10.**(power_number))) / pcr.scalar(10.**(power_number))
if power_number == min_power_number: cellAvlWater = pcr.max(0.0, remainingCellAvlWater)
# zonal available water in this loop
zoneAvlWater = pcr.areatotal(cellAvlWater, allocation_zones)
# total actual water abstraction volume in each zone/segment (unit: m3)
# - limited to available water
zoneVolDemand = pcr.areatotal(remainingcellVolDemand, allocation_zones)
zoneAbstraction = pcr.min(zoneAvlWater, zoneVolDemand)
# actual water abstraction volume in each cell (unit: m3)
cellAbstraction = getValDivZero(\
cellAvlWater, zoneAvlWater, smallNumber) * zoneAbstraction
cellAbstraction = pcr.min(cellAbstraction, cellAvlWater)
# allocation water to meet water demand (unit: m3)
cellAllocation = getValDivZero(\
remainingcellVolDemand, zoneVolDemand, smallNumber) * zoneAbstraction
# water balance check
if debug_water_balance and zone_area is not None:
waterBalanceCheck([pcr.cover(pcr.areatotal(cellAbstraction, allocation_zones)/zone_area, 0.0)],\
[pcr.cover(pcr.areatotal(cellAllocation , allocation_zones)/zone_area, 0.0)],\
[pcr.scalar(0.0)],\
[pcr.scalar(0.0)],\
'abstraction - allocation per zone/segment (with high precision) - loop (power number): ' + str(power_number) ,\
True,\
extra_info_for_water_balance_reporting, threshold = 1e-5)
# actual water abstraction and allocation in this current loop (power number)
cell_abstrac_for_every_power_number[str(power_number)] = cellAbstraction
cell_allocat_for_every_power_number[str(power_number)] = cellAllocation
# remaining cell available water and demand
remainingCellAvlWater = pcr.max(0.0, remainingCellAvlWater - cellAbstraction)
remainingcellVolDemand = pcr.max(0.0, remainingcellVolDemand - cellAllocation )
# sum from the smallest values (minimizing numerical errors)
sumCellAbstraction = pcr.scalar(0.0)
sumCellAllocation = pcr.scalar(0.0)
for power_number in range(min_power_number, max_power_number + step, step):
sumCellAbstraction += cell_abstrac_for_every_power_number[str(power_number)]
sumCellAllocation += cell_allocat_for_every_power_number[str(power_number)]
# water balance check
if debug_water_balance and zone_area is not None:
waterBalanceCheck([pcr.cover(pcr.areatotal(sumCellAbstraction, allocation_zones)/zone_area, 0.0)],\
[pcr.cover(pcr.areatotal(sumCellAllocation , allocation_zones)/zone_area, 0.0)],\
[pcr.scalar(0.0)],\
[pcr.scalar(0.0)],\
'abstraction - allocation per zone/segment (with high precision) - sum after loop' ,\
True,\
extra_info_for_water_balance_reporting, threshold = 1e-5)
return sumCellAbstraction, sumCellAllocation
def waterAbstractionAndAllocationFAILED(water_demand_volume,available_water_volume,allocation_zones,\
zone_area = None,
high_volume_treshold = 1000000.,
debug_water_balance = True,\
extra_info_for_water_balance_reporting = "",
landmask = None,
ignore_small_values = False):
logger.debug("Allocation of abstraction - first, satisfy demand with local source.")
# demand volume in each cell (unit: m3)
cellVolDemand = pcr.max(0.0, water_demand_volume)
if landmask is not None:
cellVolDemand = pcr.ifthen(landmask, pcr.cover(cellVolDemand, 0.0))
    if ignore_small_values: # ignore small values to avoid rounding error
cellVolDemand = pcr.rounddown(pcr.max(0.0, cellVolDemand))
else:
cellVolDemand = pcr.max(0.0, cellVolDemand)
# total available water volume in each cell
cellAvlWater = pcr.max(0.0, available_water_volume)
if landmask is not None:
cellAvlWater = pcr.ifthen(landmask, pcr.cover(cellAvlWater, 0.0))
    if ignore_small_values: # ignore small values to avoid rounding error
cellAvlWater = pcr.rounddown(pcr.max(0.00, cellAvlWater))
else:
cellAvlWater = pcr.max(0.0, cellAvlWater)
# first, satisfy demand with local source
cellAllocation = pcr.min(cellVolDemand, cellAvlWater)
cellAbstraction = cellAllocation * 1.0
logger.debug("Allocation of abstraction - then, satisfy demand with neighbour sources.")
# the remaining demand and available water
cellVolDemand = pcr.max(0.0, cellVolDemand - cellAllocation)
cellAvlWater = pcr.max(0.0, cellAvlWater - cellAbstraction)
cellAvlWater = pcr.rounddown(pcr.max(0.00, cellAvlWater))
# total demand volume in each zone/segment (unit: m3)
zoneVolDemand = pcr.areatotal(cellVolDemand, allocation_zones)
# avoid very high values
cellAvlWater = pcr.min(cellAvlWater, zoneVolDemand)
# avoid small values
cellAvlWater = pcr.cover(
pcr.ifthenelse(cellAvlWater > pcr.areaaverage(pcr.ifthen(cellAvlWater > 0.0, cellAvlWater), allocation_zones), cellAvlWater, 0.0), 0.0)
cellAvlWater = pcr.ifthen(landmask, cellAvlWater)
# total available water volume in each zone/segment (unit: m3)
# - to minimize numerical errors, separating cellAvlWater
if high_volume_treshold is not None:
# mask: 0 for small volumes ; 1 for large volumes (e.g. in lakes and reservoirs)
mask = pcr.cover(\
pcr.ifthen(cellAvlWater > high_volume_treshold, pcr.boolean(1)), pcr.boolean(0))
zoneAvlWater = pcr.areatotal(
pcr.ifthenelse(mask, 0.0, cellAvlWater), allocation_zones)
zoneAvlWater += pcr.areatotal(
pcr.ifthenelse(mask, cellAvlWater, 0.0), allocation_zones)
else:
zoneAvlWater = pcr.areatotal(cellAvlWater, allocation_zones)
zoneAvlWater = pcr.areatotal(cellAvlWater, allocation_zones)
# total actual water abstraction volume in each zone/segment (unit: m3)
# - limited to available water
zoneAbstraction = pcr.min(zoneAvlWater, zoneVolDemand)
# allocation water to meet water demand (unit: m3)
factor = getValDivZero(\
cellVolDemand, zoneVolDemand, smallNumber)
factor = pcr.min(0.99, factor)
factor = pcr.rounddown(factor * 100.) / 100.
addCellAllocation = pcr.min(cellVolDemand, factor * zoneAbstraction)
addCellAllocation = pcr.ifthenelse(addCellAllocation > cellVolDemand, cellVolDemand, pcr.rounddown(addCellAllocation))
cellAllocation += addCellAllocation
# correcting zonal abstraction
zoneAbstraction = pcr.areatotal(addCellAllocation, allocation_zones)
# actual water abstraction volume in each cell (unit: m3)
factor = getValDivZero(\
cellAvlWater, zoneAvlWater, smallNumber)
cellAbstraction += factor * zoneAbstraction
# local abstraction to minimize numerical errors
additionalLocalAbstraction = pcr.max(0.0,\
pcr.areaaverage(cellAllocation , allocation_zones) -\
pcr.areaaverage(cellAbstraction, allocation_zones))
remainingCellAvlWater = pcr.max(0.0, cellAvlWater - cellAbstraction)
additionalLocalAbstraction = pcr.min(additionalLocalAbstraction, \
remainingCellAvlWater)
cellAbstraction += additionalLocalAbstraction
# extraAbstraction to minimize numerical errors:
zoneDeficitAbstraction = pcr.max(0.0,\
pcr.areatotal(cellAllocation , allocation_zones) -\
pcr.areatotal(cellAbstraction, allocation_zones))
remainingCellAvlWater = pcr.max(0.0, cellAvlWater - cellAbstraction)
cellAbstraction += zoneDeficitAbstraction * getValDivZero(\
remainingCellAvlWater,
pcr.areatotal(remainingCellAvlWater, allocation_zones),
smallNumber)
#
# extraAllocation to minimize numerical errors:
zoneDeficitAllocation = pcr.max(0.0,\
pcr.areatotal(cellAbstraction, allocation_zones) -\
pcr.areatotal(cellAllocation , allocation_zones))
remainingCellDemand = pcr.max(0.0, cellVolDemand - cellAllocation)
cellAllocation += zoneDeficitAllocation * getValDivZero(\
remainingCellDemand,
pcr.areatotal(remainingCellDemand, allocation_zones),
smallNumber)
#~ # another extraAbstraction to minimize numerical errors:
#~ zoneDeficitAbstraction = pcr.max(0.0,\
#~ pcr.areatotal(cellAllocation , allocation_zones) -\
#~ pcr.areatotal(cellAbstraction, allocation_zones))
#~ remainingCellAvlWater = pcr.max(0.0, cellAvlWater - cellAbstraction)
#~ cellAbstraction += zoneDeficitAbstraction * getValDivZero(\
#~ remainingCellAvlWater,
#~ pcr.areatotal(remainingCellAvlWater, allocation_zones),
#~ smallNumber)
zoneDeficitAbstraction = pcr.areatotal(cellAllocation , allocation_zones) -\
pcr.areatotal(cellAbstraction, allocation_zones)
pcr.report(pcr.max(0.0, zoneDeficitAbstraction), "test.map")
os.system('aguila test.map')
if debug_water_balance and zone_area is not None:
waterBalanceCheck([pcr.cover(pcr.areatotal(cellAbstraction, allocation_zones)/zone_area, 0.0)],\
[pcr.cover(pcr.areatotal(cellAllocation , allocation_zones)/zone_area, 0.0)],\
[pcr.scalar(0.0)],\
[pcr.scalar(0.0)],\
'abstraction - allocation per zone/segment (PS: Error here may be caused by rounding error.)' ,\
True,\
extra_info_for_water_balance_reporting,threshold=1e-4)
return cellAbstraction, cellAllocation
def waterAbstractionAndAllocation(water_demand_volume,
available_water_volume,
allocation_zones,
zone_area = None,
high_volume_treshold = None,
debug_water_balance = True,\
extra_info_for_water_balance_reporting = "",
landmask = None,
ignore_small_values = False,
prioritizing_local_source = True):
logger.debug("Allocation of abstraction.")
if landmask is not None:
water_demand_volume = pcr.ifthen(landmask, pcr.cover(water_demand_volume, 0.0))
available_water_volume = pcr.ifthen(landmask, pcr.cover(available_water_volume, 0.0))
allocation_zones = pcr.ifthen(landmask, allocation_zones)
    # satisfy demand with local sources:
localAllocation = pcr.scalar(0.0)
localAbstraction = pcr.scalar(0.0)
cellVolDemand = pcr.max(0.0, water_demand_volume)
cellAvlWater = pcr.max(0.0, available_water_volume)
if prioritizing_local_source:
logger.debug("Allocation of abstraction - first, satisfy demand with local source.")
# demand volume in each cell (unit: m3)
if landmask is not None:
cellVolDemand = pcr.ifthen(landmask, pcr.cover(cellVolDemand, 0.0))
# total available water volume in each cell
if landmask is not None:
cellAvlWater = pcr.ifthen(landmask, pcr.cover(cellAvlWater, 0.0))
# first, satisfy demand with local source
localAllocation = pcr.max(0.0, pcr.min(cellVolDemand, cellAvlWater))
localAbstraction = localAllocation * 1.0
logger.debug("Allocation of abstraction - satisfy demand with neighbour sources.")
# the remaining demand and available water
cellVolDemand = pcr.max(0.0, cellVolDemand - localAllocation )
cellAvlWater = pcr.max(0.0, cellAvlWater - localAbstraction)
# ignoring small values of water availability
if ignore_small_values: available_water_volume = pcr.max(0.0, pcr.rounddown(available_water_volume))
# demand volume in each cell (unit: m3)
cellVolDemand = pcr.max(0.0, cellVolDemand)
if landmask is not None:
cellVolDemand = pcr.ifthen(landmask, pcr.cover(cellVolDemand, 0.0))
# total demand volume in each zone/segment (unit: m3)
zoneVolDemand = pcr.areatotal(cellVolDemand, allocation_zones)
# avoid very high values of available water
cellAvlWater = pcr.min(cellAvlWater, zoneVolDemand)
# total available water volume in each cell
cellAvlWater = pcr.max(0.0, cellAvlWater)
if landmask is not None:
cellAvlWater = pcr.ifthen(landmask, pcr.cover(cellAvlWater, 0.0))
# total available water volume in each zone/segment (unit: m3)
zoneAvlWater = pcr.areatotal(cellAvlWater, allocation_zones)
# total actual water abstraction volume in each zone/segment (unit: m3)
# - limited to available water
zoneAbstraction = pcr.min(zoneAvlWater, zoneVolDemand)
# actual water abstraction volume in each cell (unit: m3)
cellAbstraction = getValDivZero(\
cellAvlWater, zoneAvlWater, smallNumber) * zoneAbstraction
cellAbstraction = pcr.min(cellAbstraction, cellAvlWater)
# to minimize numerical errors
if high_volume_treshold is not None:
# mask: 0 for small volumes ; 1 for large volumes (e.g. lakes and reservoirs)
mask = pcr.cover(\
pcr.ifthen(cellAbstraction > high_volume_treshold, pcr.boolean(1)), pcr.boolean(0))
zoneAbstraction = pcr.areatotal(
pcr.ifthenelse(mask, 0.0, cellAbstraction), allocation_zones)
zoneAbstraction += pcr.areatotal(
pcr.ifthenelse(mask, cellAbstraction, 0.0), allocation_zones)
# allocation water to meet water demand (unit: m3)
cellAllocation = getValDivZero(\
cellVolDemand, zoneVolDemand, smallNumber) * zoneAbstraction
cellAllocation = pcr.min(cellAllocation, cellVolDemand)
# adding local abstraction and local allocation
cellAbstraction = cellAbstraction + localAbstraction
cellAllocation = cellAllocation + localAllocation
if debug_water_balance and zone_area is not None:
waterBalanceCheck([pcr.cover(pcr.areatotal(cellAbstraction, allocation_zones)/zone_area, 0.0)],\
[pcr.cover(pcr.areatotal(cellAllocation , allocation_zones)/zone_area, 0.0)],\
[pcr.scalar(0.0)],\
[pcr.scalar(0.0)],\
'abstraction - allocation per zone/segment (PS: Error here may be caused by rounding error.)' ,\
True,\
extra_info_for_water_balance_reporting,threshold=1e-4)
return cellAbstraction, cellAllocation
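# Illustrative aside (not part of the original module): a minimal calling sketch for
# waterAbstractionAndAllocation(). It assumes that a PCRaster clone has been set with
# pcr.setclone() and that the input maps exist; all file names below are hypothetical.
def _example_waterAbstractionAndAllocation():
    demand_m3    = pcr.readmap("demand_volume.map")       # water demand per cell (m3)
    available_m3 = pcr.readmap("available_volume.map")    # available water per cell (m3)
    zones        = pcr.nominal(pcr.readmap("allocation_zones.map"))
    cell_area    = pcr.readmap("cellarea.map")            # cell area (m2)
    abstraction, allocation = waterAbstractionAndAllocation(
        water_demand_volume       = demand_m3,
        available_water_volume    = available_m3,
        allocation_zones          = zones,
        zone_area                 = pcr.areatotal(cell_area, zones),
        debug_water_balance       = True,
        landmask                  = pcr.defined(cell_area),
        prioritizing_local_source = True)
    return abstraction, allocation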
def waterAbstractionAndAllocationBeforeRefactoringFinalizing(water_demand_volume,available_water_volume,allocation_zones,\
zone_area = None,
high_volume_treshold = 1000000.,
debug_water_balance = True,\
extra_info_for_water_balance_reporting = "",
landmask = None,
ignore_small_values = False):
    # deactivate the following
high_volume_treshold = None
ignore_small_values = False
logger.debug("Allocation of abstraction.")
if landmask is not None:
water_demand_volume = pcr.ifthen(landmask, pcr.cover(water_demand_volume, 0.0))
available_water_volume = pcr.ifthen(landmask, pcr.cover(available_water_volume, 0.0))
allocation_zones = pcr.ifthen(landmask, allocation_zones)
logger.debug("Allocation of abstraction - first, satisfy demand with local source.")
# demand volume in each cell (unit: m3)
cellVolDemand = pcr.max(0.0, water_demand_volume)
if landmask is not None:
cellVolDemand = pcr.ifthen(landmask, pcr.cover(cellVolDemand, 0.0))
# total available water volume in each cell
cellAvlWater = pcr.max(0.0, available_water_volume)
if landmask is not None:
cellAvlWater = pcr.ifthen(landmask, pcr.cover(cellAvlWater, 0.0))
# first, satisfy demand with local source
localAllocation = pcr.min(cellVolDemand, cellAvlWater)
localAbstraction = localAllocation * 1.0
logger.debug("Allocation of abstraction - then, satisfy demand with neighbour sources.")
# the remaining demand and available water
cellVolDemand = pcr.max(0.0, cellVolDemand - localAllocation )
cellAvlWater = pcr.max(0.0, cellAvlWater - localAbstraction)
# demand volume in each cell (unit: m3)
cellVolDemand = pcr.max(0.0, cellVolDemand)
if landmask is not None:
cellVolDemand = pcr.ifthen(landmask, pcr.cover(cellVolDemand, 0.0))
# total demand volume in each zone/segment (unit: m3)
zoneVolDemand = pcr.areatotal(cellVolDemand, allocation_zones)
# avoid very high values of available water
cellAvlWater = pcr.min(cellAvlWater, zoneVolDemand)
# total available water volume in each cell
cellAvlWater = pcr.max(0.0, cellAvlWater)
if landmask is not None:
cellAvlWater = pcr.ifthen(landmask, pcr.cover(cellAvlWater, 0.0))
# total available water volume in each zone/segment (unit: m3)
zoneAvlWater = pcr.areatotal(cellAvlWater, allocation_zones)
# total actual water abstraction volume in each zone/segment (unit: m3)
# - limited to available water
zoneAbstraction = pcr.min(zoneAvlWater, zoneVolDemand)
# actual water abstraction volume in each cell (unit: m3)
cellAbstraction = getValDivZero(\
cellAvlWater, zoneAvlWater, smallNumber) * zoneAbstraction
cellAbstraction = pcr.min(cellAbstraction, cellAvlWater)
# allocation water to meet water demand (unit: m3)
cellAllocation = getValDivZero(\
cellVolDemand, zoneVolDemand, smallNumber) * zoneAbstraction
cellAllocation = pcr.min(cellAllocation, cellVolDemand)
# adding local abstraction and local allocation
cellAbstraction = cellAbstraction + localAbstraction
cellAllocation = cellAllocation + localAllocation
if debug_water_balance and zone_area is not None:
waterBalanceCheck([pcr.cover(pcr.areatotal(cellAbstraction, allocation_zones)/zone_area, 0.0)],\
[pcr.cover(pcr.areatotal(cellAllocation , allocation_zones)/zone_area, 0.0)],\
[pcr.scalar(0.0)],\
[pcr.scalar(0.0)],\
'abstraction - allocation per zone/segment (PS: Error here may be caused by rounding error.)' ,\
True,\
extra_info_for_water_balance_reporting,threshold=1e-4)
return cellAbstraction, cellAllocation
def waterAbstractionAndAllocationOLD(water_demand_volume,available_water_volume,allocation_zones,\
zone_area = None,
high_volume_treshold = 1000000.,
debug_water_balance = True,\
extra_info_for_water_balance_reporting = "",
landmask = None,
ignore_small_values = False):
logger.debug("Allocation of abstraction.")
# demand volume in each cell (unit: m3)
cellVolDemand = pcr.max(0.0, water_demand_volume)
if landmask is not None:
cellVolDemand = pcr.ifthen(landmask, pcr.cover(cellVolDemand, 0.0))
    if ignore_small_values: # ignore small values to avoid rounding error
cellVolDemand = pcr.rounddown(pcr.max(0.0, water_demand_volume))
else:
cellVolDemand = pcr.max(0.0, water_demand_volume)
# total demand volume in each zone/segment (unit: m3)
zoneVolDemand = pcr.areatotal(cellVolDemand, allocation_zones)
# total available water volume in each cell
cellAvlWater = pcr.max(0.0, available_water_volume)
if landmask is not None:
cellAvlWater = pcr.ifthen(landmask, pcr.cover(cellAvlWater, 0.0))
    if ignore_small_values: # ignore small values to avoid rounding error
cellAvlWater = pcr.rounddown(pcr.max(0.00, available_water_volume))
else:
cellAvlWater = pcr.max(0.0, available_water_volume)
# total available water volume in each zone/segment (unit: m3)
# - to minimize numerical errors, separating cellAvlWater
if high_volume_treshold is not None:
# mask: 0 for small volumes ; 1 for large volumes (e.g. in lakes and reservoirs)
mask = pcr.cover(\
pcr.ifthen(cellAvlWater > high_volume_treshold, pcr.boolean(1)), pcr.boolean(0))
zoneAvlWater = pcr.areatotal(
pcr.ifthenelse(mask, 0.0, cellAvlWater), allocation_zones)
zoneAvlWater += pcr.areatotal(
pcr.ifthenelse(mask, cellAvlWater, 0.0), allocation_zones)
else:
zoneAvlWater = pcr.areatotal(cellAvlWater, allocation_zones)
# total actual water abstraction volume in each zone/segment (unit: m3)
# - limited to available water
zoneAbstraction = pcr.min(zoneAvlWater, zoneVolDemand)
# actual water abstraction volume in each cell (unit: m3)
cellAbstraction = getValDivZero(\
cellAvlWater, zoneAvlWater, smallNumber)*zoneAbstraction
cellAbstraction = pcr.min(cellAbstraction, cellAvlWater)
    if ignore_small_values: # ignore small values to avoid rounding error
cellAbstraction = pcr.rounddown(pcr.max(0.00, cellAbstraction))
# to minimize numerical errors, separating cellAbstraction
if high_volume_treshold is not None:
# mask: 0 for small volumes ; 1 for large volumes (e.g. in lakes and reservoirs)
mask = pcr.cover(\
pcr.ifthen(cellAbstraction > high_volume_treshold, pcr.boolean(1)), pcr.boolean(0))
zoneAbstraction = pcr.areatotal(
pcr.ifthenelse(mask, 0.0, cellAbstraction), allocation_zones)
zoneAbstraction += pcr.areatotal(
pcr.ifthenelse(mask, cellAbstraction, 0.0), allocation_zones)
else:
zoneAbstraction = pcr.areatotal(cellAbstraction, allocation_zones)
# allocation water to meet water demand (unit: m3)
cellAllocation = getValDivZero(\
cellVolDemand, zoneVolDemand, smallNumber)*zoneAbstraction
# extraAbstraction to minimize numerical errors:
zoneDeficitAbstraction = pcr.max(0.0,\
pcr.areatotal(cellAllocation , allocation_zones) -\
pcr.areatotal(cellAbstraction, allocation_zones))
remainingCellAvlWater = pcr.max(0.0, cellAvlWater - cellAbstraction)
cellAbstraction += zoneDeficitAbstraction * getValDivZero(\
remainingCellAvlWater,
pcr.areatotal(remainingCellAvlWater, allocation_zones),
smallNumber)
#
# extraAllocation to minimize numerical errors:
zoneDeficitAllocation = pcr.max(0.0,\
pcr.areatotal(cellAbstraction, allocation_zones) -\
pcr.areatotal(cellAllocation , allocation_zones))
remainingCellDemand = pcr.max(0.0, cellVolDemand - cellAllocation)
cellAllocation += zoneDeficitAllocation * getValDivZero(\
remainingCellDemand,
pcr.areatotal(remainingCellDemand, allocation_zones),
smallNumber)
if debug_water_balance and zone_area is not None:
waterBalanceCheck([pcr.cover(pcr.areatotal(cellAbstraction, allocation_zones)/zone_area, 0.0)],\
[pcr.cover(pcr.areatotal(cellAllocation , allocation_zones)/zone_area, 0.0)],\
[pcr.scalar(0.0)],\
[pcr.scalar(0.0)],\
'abstraction - allocation per zone/segment (PS: Error here may be caused by rounding error.)' ,\
True,\
extra_info_for_water_balance_reporting,threshold=1e-4)
return cellAbstraction, cellAllocation
def findLastYearInNCFile(ncFile):
# open a netcdf file:
if ncFile in list(filecache.keys()):
f = filecache[ncFile]
else:
f = nc.Dataset(ncFile)
filecache[ncFile] = f
# last datetime
last_datetime_year = findLastYearInNCTime(f.variables['time'])
return last_datetime_year
def findLastYearInNCTime(ncTimeVariable):
# last datetime
last_datetime = nc.num2date(ncTimeVariable[len(ncTimeVariable) - 1],\
ncTimeVariable.units,\
ncTimeVariable.calendar)
return last_datetime.year
def findFirstYearInNCTime(ncTimeVariable):
# first datetime
first_datetime = nc.num2date(ncTimeVariable[0],\
ncTimeVariable.units,\
ncTimeVariable.calendar)
return first_datetime.year
def cmd_line(command_line,using_subprocess = True):
msg = "Call: "+str(command_line)
logger.debug(msg)
co = command_line
if using_subprocess:
cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open('/dev/null'),shell=True).communicate()
else:
os.system(co)
def plot_variable(pcr_variable, filename = "test.map"):
pcr.report(pcr_variable, filename)
cmd = 'aguila '+str(filename)
os.system(cmd)
|
UU-Hydro/PCR-GLOBWB_model
|
model/virtualOS.py
|
Python
|
gpl-3.0
| 98,035
|
[
"NetCDF"
] |
f6319b51bd318d7b8cdf1b33ece11c597254a0f7ff3b99c4972367303ad0b292
|
# (C) 2017, Markus Wildi, markus.wildi@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
import unittest
import subprocess
import os
import sys
import pwd
import grp
import signal
import logging
import time
import glob
import psycopg2
basepath='/tmp/u_point_unittest'
if not os.path.exists(basepath):
    # make sure that it is writable (ssh user@host)
ret=os.makedirs(basepath, mode=0o0777)
logging.basicConfig(filename=os.path.join(basepath,'unittest.log'), level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger()
class Args(object):
def __init__(self):
pass
class RTS2Environment(unittest.TestCase):
def tearDown(self):
processes=['rts2-centrald','rts2-executor', 'rts2-httpd','rts2-focusd-dummy','rts2-filterd-dummy', 'rts2-camd-dummy', 'rts2-teld-dummy']
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.decode("utf-8").splitlines():
# wildi 7432 0.0 0.1 24692 5192 pts/1 S 17:34 0:01 /usr/local/bin/rts2-centrald
itms= line.split()
exe= itms[10].split('/')[-1]
if self.uid in itms[0] and exe in processes:
pid = int(itms[1])
os.kill(pid, signal.SIGTERM)
        # remove the lock files
for fn in glob.glob(self.lockPrefix+'*'):
os.unlink (fn)
def setUp(self):
# by name
self.uid=pwd.getpwuid(os.getuid())[0]
self.gid= grp.getgrgid(os.getgid())[0]
# lock prefix
self.lockPrefix = '/tmp/rts2_{}'.format(self.uid)
# sometimes they are present
self.tearDown()
# set up RTS2
# rts2-centrald
cmd=[ '/usr/local/bin/rts2-centrald',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--local-port', '1617',
'--logfile', os.path.join(basepath,'rts2-debug'),
'--lock-prefix', self.lockPrefix,
'--config', './rts2-unittest.ini'
]
self.p_centrald= subprocess.Popen(cmd)
# rts2-executor
cmd=[ '/usr/local/bin/rts2-executor',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--config', './rts2-unittest.ini',
'--server', '127.0.0.1:1617',
'--noauth'
]
self.p_exec= subprocess.Popen(cmd)
# rts2-httpd
cmd=[ '/usr/local/bin/rts2-httpd',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--config', './rts2-unittest.ini',
'--server', '127.0.0.1:1617',
'-p', '9999',
'--noauth',# seems not to work
]
self.p_httpd= subprocess.Popen(cmd)
# rts2-focusd-dummy
focName='F0'
cmd=[ '/usr/local/bin/rts2-focusd-dummy',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--server', '127.0.0.1:1617',
'-d', focName,
'--modefile', './f0.modefile',
#'--config', './rts2-unittest.ini'
]
self.p_focusd_dummy= subprocess.Popen(cmd)
# rts2-filterd-dummy
ftwnName='W0'
cmd=[ '/usr/local/bin/rts2-filterd-dummy',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--server', '127.0.0.1:1617',
'-d', ftwnName,
#'--config', './rts2-unittest.ini'
]
ftnames = 'U:B:V:R:I:H:X'
cmd.append('-F')
cmd.append(ftnames)
self.p_filterd_dummy= subprocess.Popen(cmd)
# rts2-camd-dummy
name='C0'
# '--wheeldev', 'W0', '--filter-offsets', '1:2:3:4:5:6:7'
cmd=[ '/usr/local/bin/rts2-camd-dummy',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--server', '127.0.0.1:1617',
'-d', name,
'--focdev', focName,
# not available
#'--config', './rts2-unittest.ini'
]
cmd.append('--wheeldev')
cmd.append(ftwnName)
cmd.append('--filter-offsets')
cmd.append('1:2:3:4:5:6:7')
self.p_camd_dummy= subprocess.Popen(cmd)
# rts2-teld-dummy
mntName='T0'
cmd=[ '/usr/local/bin/rts2-teld-dummy',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--server', '127.0.0.1:1617',
'-d', mntName,
'--modefile', './t0.modefile',
#'--config', './rts2-unittest.ini'
]
self.p_teld_dummy= subprocess.Popen(cmd)
#
print('sleeping 10 sec')
time.sleep(10)
print('sleeping over')
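# Illustrative aside (not part of the original file): RTS2Environment is meant to be
# used as a base class, so that a derived test case gets the dummy RTS2 daemons started
# in setUp() and stopped again in tearDown(). The smoke test below is a hypothetical
# placeholder and assumes a local RTS2 installation under /usr/local/bin.
class RTS2EnvironmentSmokeTest(RTS2Environment):
    def test_daemons_are_running(self):
        # the Popen handles created in setUp() should still be alive (poll() is None)
        for proc in (self.p_centrald, self.p_exec, self.p_httpd,
                     self.p_focusd_dummy, self.p_filterd_dummy,
                     self.p_camd_dummy, self.p_teld_dummy):
            self.assertIsNone(proc.poll())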
|
RTS2/rts2
|
scripts/u_point/unittest/rts2_environment.py
|
Python
|
lgpl-3.0
| 5,725
|
[
"VisIt"
] |
b4d37e9243f28fd3354ab008f169ae86e0debe71bdb23bdbd3866aa6904e9faa
|
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, mac, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, ntpath, or macpath
- os.name is 'posix', 'nt', 'os2', 'mac', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'mac' in _names:
name = 'mac'
linesep = '\r'
from mac import *
try:
from mac import _exit
except ImportError:
pass
import macpath as path
import mac
__all__.extend(_get_exports_list(mac))
del mac
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
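# Illustrative aside (not part of the original module): how the three helpers above
# compose. The directory names are hypothetical.
def _example_super_directory_helpers():
    makedirs('demo/a/b/c')                # creates every missing intermediate directory
    renames('demo/a/b/c', 'demo/x/y/z')   # rename, creating new and pruning old intermediates
    removedirs('demo/x/y/z')              # removes the leaf and the now-empty parents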
def walk(top, topdown=True, onerror=None):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
from os.path import join, isdir, islink
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if not islink(path):
for x in walk(path, topdown, onerror):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
func(file, *argrest)
return
if 'PATH' in env:
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
saved_exc = None
saved_tb = None
for dir in PATH:
fullname = path.join(dir, file)
try:
func(fullname, *argrest)
except error as e:
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise error, saved_exc, saved_tb
raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
import UserDict
# Fake unsetenv() for Windows
# not sure about os2 here but
# I'm guessing they are the same.
if name in ('os2', 'nt'):
def unsetenv(key):
putenv(key, "")
if name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
putenv(key, item)
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
try:
unsetenv
except NameError:
def __delitem__(self, key):
del self.data[key.upper()]
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key.upper()]
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
def copy(self):
return dict(self)
else: # Where Env Var Names Can Be Mixed Case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
self.data = environ
def __setitem__(self, key, item):
putenv(key, item)
self.data[key] = item
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
try:
unsetenv
except NameError:
pass
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key]
def copy(self):
return dict(self)
environ = _Environ(environ)
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
try:
eval(name)
return True
except NameError:
return False
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if _exists("fork"):
if not _exists("popen2"):
def popen2(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout) are returned."""
import popen2
stdout, stdin = popen2.popen2(cmd, bufsize)
return stdin, stdout
__all__.append("popen2")
if not _exists("popen3"):
def popen3(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout, child_stderr) are returned."""
import popen2
stdout, stdin, stderr = popen2.popen3(cmd, bufsize)
return stdin, stdout, stderr
__all__.append("popen3")
if not _exists("popen4"):
def popen4(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout_stderr) are returned."""
import popen2
stdout, stdin = popen2.popen4(cmd, bufsize)
return stdin, stdout
__all__.append("popen4")
import copy_reg as _copy_reg
def _make_stat_result(tup, dict):
return stat_result(tup, dict)
def _pickle_stat_result(sr):
(type, args) = sr.__reduce__()
return (_make_stat_result, args)
try:
_copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
pass
def _make_statvfs_result(tup, dict):
return statvfs_result(tup, dict)
def _pickle_statvfs_result(sr):
(type, args) = sr.__reduce__()
return (_make_statvfs_result, args)
try:
_copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
_make_statvfs_result)
except NameError: # statvfs_result may not exist
pass
if not _exists("urandom"):
def urandom(n):
"""urandom(n) -> str
Return a string of n random bytes suitable for cryptographic use.
"""
try:
_urandomfd = open("/dev/urandom", O_RDONLY)
except (OSError, IOError):
raise NotImplementedError("/dev/urandom (or equivalent) not found")
bytes = ""
while len(bytes) < n:
bytes += read(_urandomfd, n - len(bytes))
close(_urandomfd)
return bytes
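    # Illustrative example of the fallback above (a minimal sketch):
    #
    #     token = urandom(16)    # 16 random bytes read from /dev/urandom
    #     len(token)             # -> 16
    #
    # On platforms without /dev/urandom the call raises NotImplementedError.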
|
christer155/Django-facebook
|
docs/docs_env/Lib/os.py
|
Python
|
bsd-3-clause
| 24,645
|
[
"VisIt"
] |
ec3a2b2880c379d8030afc3a67eeb10dba37d13eead1ade43b84a9a69e67a4cf
|
# -*- coding: utf-8 -*-
"""
caching of the bibglossaries
Originally adapted from: https://github.com/mcmtroffaes/sphinxcontrib-bibtex
"""
import six
try: # pragma: no cover
from collections import OrderedDict
except ImportError: # pragma: no cover
from ordereddict import OrderedDict
import ast
import collections
import copy
from ordered_set import OrderedSet
import re
def _raise_invalid_node(node):
"""Helper method to raise an exception when an invalid node is
visited.
"""
raise ValueError("invalid node %s in filter expression" % node)
class _FilterVisitor(ast.NodeVisitor):
"""Visit the abstract syntax tree of a parsed filter expression."""
entry = None
"""The bibliographic entry to which the filter must be applied."""
cited_docnames = False
"""The documents where the entry is cited (empty if not cited)."""
def __init__(self, entry, docname, cited_docnames):
self.entry = entry
self.docname = docname
self.cited_docnames = cited_docnames
def visit_Module(self, node): # noqa: N802
if len(node.body) != 1:
raise ValueError("filter expression cannot contain multiple expressions")
return self.visit(node.body[0])
def visit_Expr(self, node): # noqa: N802
return self.visit(node.value)
def visit_BoolOp(self, node): # noqa: N802
outcomes = (self.visit(value) for value in node.values)
if isinstance(node.op, ast.And):
return all(outcomes)
elif isinstance(node.op, ast.Or):
return any(outcomes)
else: # pragma: no cover
# there are no other boolean operators
# so this code should never execute
assert False, "unexpected boolean operator %s" % node.op
def visit_UnaryOp(self, node): # noqa: N802
if isinstance(node.op, ast.Not):
return not self.visit(node.operand)
else:
_raise_invalid_node(node)
def visit_BinOp(self, node): # noqa: N802
left = self.visit(node.left)
op = node.op
right = self.visit(node.right)
if isinstance(op, ast.Mod):
# modulo operator is used for regular expression matching
if not isinstance(left, six.string_types):
raise ValueError("expected a string on left side of %s" % node.op)
if not isinstance(right, six.string_types):
raise ValueError("expected a string on right side of %s" % node.op)
return re.search(right, left, re.IGNORECASE)
elif isinstance(op, ast.BitOr):
return left | right
elif isinstance(op, ast.BitAnd):
return left & right
else:
_raise_invalid_node(node)
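    # Illustrative filter expressions handled by this visitor (a sketch; the
    # field names are arbitrary examples):
    #
    #     cited                       -> True if the entry is cited anywhere
    #     key % "^sym"                -> case-insensitive regex match on the key
    #     docname in docnames         -> entry is cited in the current document
    #     cited and author % "smith"  -> boolean combination of the above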
def visit_Compare(self, node): # noqa: N802
# keep it simple: binary comparators only
if len(node.ops) != 1:
raise ValueError("syntax for multiple comparators not supported")
left = self.visit(node.left)
op = node.ops[0]
right = self.visit(node.comparators[0])
if isinstance(op, ast.Eq):
return left == right
elif isinstance(op, ast.NotEq):
return left != right
elif isinstance(op, ast.Lt):
return left < right
elif isinstance(op, ast.LtE):
return left <= right
elif isinstance(op, ast.Gt):
return left > right
elif isinstance(op, ast.GtE):
return left >= right
elif isinstance(op, ast.In):
return left in right
elif isinstance(op, ast.NotIn):
return left not in right
else:
# not used currently: ast.Is | ast.IsNot
_raise_invalid_node(op)
def visit_Name(self, node): # noqa: N802
"""Calculate the value of the given identifier."""
id_ = node.id
if id_ == "type":
return self.entry.type.lower()
elif id_ == "key":
return self.entry.key.lower()
elif id_ == "cited":
return bool(self.cited_docnames)
elif id_ == "docname":
return self.docname
elif id_ == "docnames":
return self.cited_docnames
elif id_ == "True":
return True
elif id_ == "False":
return False
else:
return self.entry.get(id_, "")
def visit_Set(self, node): # noqa: N802
return frozenset(self.visit(elt) for elt in node.elts)
def visit_Str(self, node): # noqa: N802
return node.s
# NameConstant is Python 3.4 only so do not insist on coverage
def visit_NameConstant(self, node): # noqa: N802
return node.value
def generic_visit(self, node):
_raise_invalid_node(node)
class Cache:
"""Global bibgloss extension information cache. Stored in
``app.env.bibgloss_cache``, so must be picklable.
"""
bibfiles = None
"""A :class:`dict` mapping .bib file names (relative to the top
source folder) to :class:`BibfileCache` instances.
"""
_bibliographies = None
"""Each bibglossary directive is assigned an id of the form
bibtex-bibglossary-xxx. This :class:`dict` maps each docname
to another :class:`dict` which maps each id
to information about the bibliography directive,
:class:`BibliographyCache`. We need to store this extra
information separately because it cannot be stored in the
:class:`~ipypublish.sphinx.gls.nodes.BibGlossaryNode` nodes
themselves.
"""
_cited = None
"""A :class:`dict` mapping each docname to a :class:`set` of
citation keys.
"""
_enum_count = None
"""A :class:`dict` mapping each docname to an :class:`int`
representing the current bibliography enumeration counter.
"""
def __init__(self):
self.bibfiles = {}
self._bibliographies = collections.defaultdict(dict)
self._cited = collections.defaultdict(OrderedSet)
self._enum_count = {}
def purge(self, docname):
"""Remove all information related to *docname*.
:param docname: The document name.
:type docname: :class:`str`
"""
self._bibliographies.pop(docname, None)
self._cited.pop(docname, None)
self._enum_count.pop(docname, None)
def inc_enum_count(self, docname):
"""Increment enumeration list counter for document *docname*."""
self._enum_count[docname] += 1
def set_enum_count(self, docname, value):
"""Set enumeration list counter for document *docname* to *value*."""
self._enum_count[docname] = value
def get_enum_count(self, docname):
"""Get enumeration list counter for document *docname*."""
return self._enum_count[docname]
def add_cited(self, key, docname):
"""Add the given *key* to the set of cited keys for
*docname*.
:param key: The citation key.
:type key: :class:`str`
:param docname: The document name.
:type docname: :class:`str`
"""
self._cited[docname].add(key)
def get_cited_docnames(self, key):
"""Return the *docnames* from which the given *key* is cited.
:param key: The citation key.
:type key: :class:`str`
"""
return frozenset(
[docname for docname, keys in six.iteritems(self._cited) if key in keys]
)
def get_label_from_key(self, key):
"""Return label for the given key."""
for bibcache in self.get_all_bibliography_caches():
if key in bibcache.labels:
return bibcache.labels[key]
else:
raise KeyError("%s not found" % key)
def get_plural_from_key(self, key):
"""Return label for the given key."""
for bibcache in self.get_all_bibliography_caches():
if key in bibcache.plurals:
return bibcache.plurals[key]
else:
raise KeyError("%s not found" % key)
def get_all_cited_keys(self):
"""Yield all citation keys, sorted first by document
(alphabetical), then by citation order in the document.
"""
for docname in sorted(self._cited):
for key in self._cited[docname]:
yield key
def set_bibliography_cache(self, docname, id_, bibcache):
"""Register *bibcache* (:class:`BibliographyCache`)
with id *id_* for document *docname*.
"""
assert id_ not in self._bibliographies[docname]
self._bibliographies[docname][id_] = bibcache
def get_bibliography_cache(self, docname, id_):
"""Return :class:`BibliographyCache` with id *id_* in
document *docname*.
"""
return self._bibliographies[docname][id_]
def get_all_bibliography_caches(self):
"""Return all bibliography caches."""
for bibcaches in six.itervalues(self._bibliographies):
for bibcache in six.itervalues(bibcaches):
yield bibcache
def _get_bibliography_entries(self, docname, id_, warn):
"""Return filtered bibliography entries, sorted by occurence
in the bib file.
"""
# get the information of this bibliography node
bibcache = self.get_bibliography_cache(docname=docname, id_=id_)
# generate entries
for bibfile in bibcache.bibfiles:
for entry in self.bibfiles[bibfile].data.values():
# beware: the prefix is not stored in the data
# to allow reusing the data for multiple bibliographies
cited_docnames = self.get_cited_docnames(bibcache.keyprefix + entry.key)
visitor = _FilterVisitor(
entry=entry, docname=docname, cited_docnames=cited_docnames
)
try:
success = visitor.visit(bibcache.filter_)
except ValueError as err:
warn("syntax error in :filter: expression; %s" % err)
# recover by falling back to the default
success = bool(cited_docnames)
if success:
                    # entries are modified in an unpicklable way
# when formatting, so fetch a deep copy
# and return this copy with prefixed key
# we do not deep copy entry.collection because that
# consumes enormous amounts of memory
# entry.collection = None
entry2 = copy.deepcopy(entry)
entry2.key = bibcache.keyprefix + entry.key
yield entry2
def get_bibliography_entries(self, docname, id_, warn):
"""Return filtered bibliography entries, sorted by citation order."""
# get entries, ordered by bib file occurrence
entries = OrderedDict(
(entry.key, entry)
for entry in self._get_bibliography_entries(
docname=docname, id_=id_, warn=warn
)
)
# order entries according to which were cited first
# first, we add all keys that were cited
# then, we add all remaining keys
sorted_entries = []
for key in self.get_all_cited_keys():
try:
entry = entries.pop(key)
except KeyError:
pass
else:
sorted_entries.append(entry)
sorted_entries += six.itervalues(entries)
return sorted_entries
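# Illustrative flow (a minimal sketch; key and document names are arbitrary):
#
#     cache = Cache()
#     cache.add_cited('capacitor', 'chapter1')
#     cache.get_cited_docnames('capacitor')    # frozenset containing 'chapter1'
#     list(cache.get_all_cited_keys())         # ['capacitor']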
class BibfileCache(collections.namedtuple("BibfileCache", "mtime data")):
"""Contains information about a parsed .bib file.
.. attribute:: mtime
A :class:`float` representing the modification time of the .bib
file when it was last parsed.
.. attribute:: data
        An ipypublish.bib2glossary.BibGlossDB instance
"""
class BibliographyCache(
collections.namedtuple(
"BibliographyCache",
"""bibfiles encoding style unsorted labels plurals filter_ keyprefix
""",
)
):
"""Contains information about a bibliography directive.
.. attribute:: bibfiles
A :class:`list` of :class:`str`\\ s containing the .bib file
names (relative to the top source folder) that contain the
references.
.. attribute:: encoding
The encoding of the glossary file.
.. attribute:: style
The glossary style.
.. attribute:: unsorted
If True the glossary terms will be sorted by order of use,
rather than alphabetically
.. attribute:: labels
Maps citation keys to their final labels.
.. attribute:: plurals
Maps citation keys to their final pluralised labels.
.. attribute:: keyprefix
This bibliography's string prefix for citation keys.
.. attribute:: filter_
An :class:`ast.AST` node, containing the parsed filter expression.
"""
|
chrisjsewell/ipypublish
|
ipypublish/sphinx/gls/cache.py
|
Python
|
bsd-3-clause
| 12,953
|
[
"VisIt"
] |
3a4f7cf11ede3fe94ca24d10ffb49ae340eb8f5bb4d488b4d880e5a806cbe95b
|
import sys
import copy
import pprint
import pytest
import numpy as np
from utils import *
from addons import *
import qcelemental as qcel
import qcdb
subject1 = """O 0 0 0
no_com
H 1 ,, 0 \t 0 # stuff-n-nonsense"""
ans1 = {'geom': [0., 0., 0., 1., 0., 0.],
'elbl': ['O', 'H'],
'fix_com': True,
'fragment_separators': [],
'fragment_charges': [None],
'fragment_multiplicities': [None],
'fragment_files': [],
'geom_hints': [],
'hint_types': [],
}
fullans1a = {'geom': np.array([ 0., 0., 0., 1., 0., 0.]),
'elea': np.array([16, 1]),
'elez': np.array([8, 1]),
'elem': np.array(['O', 'H']),
'mass': np.array([ 15.99491462, 1.00782503]),
'real': np.array([ True, True]),
'elbl': np.array(['', '']),
'units': 'Angstrom',
'fix_com': True,
'fix_orientation': False,
'fragment_separators': [],
'fragment_charges': [0.0],
'fragment_multiplicities': [2],
'molecular_charge': 0.0,
'molecular_multiplicity': 2,
}
fullans1c = copy.deepcopy(fullans1a)
fullans1c.update({'fragment_charges': [1.],
'fragment_multiplicities': [1],
'molecular_charge': 1.,
'molecular_multiplicity': 1})
def test_psi4_qm_1a():
subject = subject1
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
assert compare_dicts(ans1, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans1a, final['qm'], 4, sys._getframe().f_code.co_name + ': full')
def test_psi4_qm_1b():
subject = '\n' + '\t' + subject1 + '\n\n'
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
assert compare_dicts(ans1, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans1a, final['qm'], 4, sys._getframe().f_code.co_name + ': full')
def test_psi4_qm_1c():
subject = '1 1\n -- \n' + subject1
ans = copy.deepcopy(ans1)
ans.update({'molecular_charge': 1.,
'molecular_multiplicity': 1})
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
assert compare_dicts(ans, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans1c, final['qm'], 4, sys._getframe().f_code.co_name + ': full')
def test_psi4_qm_1d():
subject = subject1 + '\n1 1'
ans = copy.deepcopy(ans1)
ans.update({'fragment_charges': [1.],
'fragment_multiplicities': [1]})
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
assert compare_dicts(ans, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans1c, final['qm'], 4, sys._getframe().f_code.co_name + ': full')
def test_psi4_qm_1e():
"""duplicate com"""
subject = subject1 + '\n nocom'
with pytest.raises(qcel.MoleculeFormatError):
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
subject2 = ["""
6Li 0.0 0.0 0.0
units a.u.
H_specIAL@2.014101 100 0 0""",
"""@Ne 2 4 6""",
"""h .0,1,2
Gh(he3) 0 1 3
noreorient"""]
ans2 = {'geom': [ 0., 0., 0., 100., 0., 0., 2., 4., 6., 0., 1., 2., 0., 1., 3.],
'elbl': ['6Li', 'H_specIAL@2.014101', '@Ne', 'h', 'Gh(he3)'],
'units': 'Bohr',
'fix_orientation': True,
'fragment_separators': [2, 3],
'fragment_charges': [None, None, None],
'fragment_multiplicities': [None, None, None],
'fragment_files': [],
'geom_hints': [],
'hint_types': [],
}
fullans2 = {'geom': np.array([ 0., 0., 0., 100., 0., 0., 2., 4., 6., 0., 1., 2., 0., 1., 3.]),
'elea': np.array([6, 2, 20, 1, 4]),
'elez': np.array([3, 1, 10, 1, 2]),
'elem': np.array(['Li', 'H', 'Ne', 'H', 'He']),
'mass': np.array([ 6.015122794, 2.014101, 19.99244017542, 1.00782503, 4.00260325415]),
'real': np.array([ True, True, False, True, False]),
'elbl': np.array(['', '_special', '', '', '3']),
'units': 'Bohr',
'fix_com': False,
'fix_orientation': True,
'fragment_separators': [2, 3],
}
def test_psi4_qm_2a():
subject = '\n--\n'.join(subject2)
fullans = copy.deepcopy(fullans2)
fullans.update({'molecular_charge': 0.,
'molecular_multiplicity': 2,
'fragment_charges': [0., 0., 0.],
'fragment_multiplicities': [1, 1, 2]})
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
assert compare_dicts(ans2, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans, final['qm'], 4, sys._getframe().f_code.co_name + ': full')
def test_psi4_qm_2b():
subject = copy.deepcopy(subject2)
subject.insert(0, '1 3')
subject = '\n--\n'.join(subject)
ans = copy.deepcopy(ans2)
ans.update({'molecular_charge': 1.,
'molecular_multiplicity': 3})
fullans = copy.deepcopy(fullans2)
fullans.update({'molecular_charge': 1.,
'molecular_multiplicity': 3,
'fragment_charges': [1., 0., 0.],
'fragment_multiplicities': [2, 1, 2]})
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
assert compare_dicts(ans, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans, final['qm'], 4, sys._getframe().f_code.co_name + ': full')
def test_psi4_qm_2c():
"""double overall chg/mult spec"""
subject = copy.deepcopy(subject2)
subject.insert(0, '1 3\n1 3')
subject = '\n--\n'.join(subject)
with pytest.raises(qcel.MoleculeFormatError):
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
def test_psi4_qm_2d():
"""trailing comma"""
subject = copy.deepcopy(subject2)
subject.insert(0, 'H 10,10,10,')
subject = '\n--\n'.join(subject)
with pytest.raises(qcel.MoleculeFormatError):
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
#def test_psi4_qm_2e():
# """empty fragment"""
# subject = copy.deepcopy(subject2)
# subject.insert(2, '\n')
# subject = '\n--\n'.join(subject)
#
# with pytest.raises(qcel.MoleculeFormatError):
# final, intermed = qcel.molparse.from_string(subject, return_processed=True)
def test_psi4_qm_2f():
"""double frag chgmult"""
subject = copy.deepcopy(subject2)
subject[1] += '\n 1 2\n 5 6'
subject = '\n--\n'.join(subject)
with pytest.raises(qcel.MoleculeFormatError):
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
def test_psi4_qm_2g():
"""illegal chars in nucleus"""
subject = copy.deepcopy(subject2)
subject[1] = """@Ne_{CN}_O 2 4 6"""
subject = '\n--\n'.join(subject)
with pytest.raises(qcel.MoleculeFormatError):
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
def test_psi4_qm_3():
"""psi4/psi4#731"""
subject = """0 1
Mg 0 0"""
with pytest.raises(qcel.ValidationError):
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
subject4 = """pubchem:benzene"""
ans4 = {'geom': [
-1.213100 , -0.688400 , 0.000000 ,
-1.202800 , 0.706400 , 0.000100 ,
-0.010300 , -1.394800 , 0.000000 ,
0.010400 , 1.394800 , -0.000100 ,
1.202800 , -0.706300 , 0.000000 ,
1.213100 , 0.688400 , 0.000000 ,
-2.157700 , -1.224400 , 0.000000 ,
-2.139300 , 1.256400 , 0.000100 ,
-0.018400 , -2.480900 , -0.000100 ,
0.018400 , 2.480800 , 0.000000 ,
2.139400 , -1.256300 , 0.000100 ,
2.157700 , 1.224500 , 0.000000 ],
'elbl': ['C', 'C', 'C', 'C', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H'],
'units': 'Angstrom',
'fragment_separators': [],
'fragment_charges': [None],
'fragment_multiplicities': [None],
'fragment_files': [],
'geom_hints': [],
'hint_types': [],
'molecular_charge': 0.0,
'name': 'IUPAC benzene',
}
fullans4 = {'geom': np.array([
-1.213100 , -0.688400 , 0.000000 ,
-1.202800 , 0.706400 , 0.000100 ,
-0.010300 , -1.394800 , 0.000000 ,
0.010400 , 1.394800 , -0.000100 ,
1.202800 , -0.706300 , 0.000000 ,
1.213100 , 0.688400 , 0.000000 ,
-2.157700 , -1.224400 , 0.000000 ,
-2.139300 , 1.256400 , 0.000100 ,
-0.018400 , -2.480900 , -0.000100 ,
0.018400 , 2.480800 , 0.000000 ,
2.139400 , -1.256300 , 0.000100 ,
2.157700 , 1.224500 , 0.000000 ]),
'elbl': np.array(['C', 'C', 'C', 'C', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H']),
'elea': np.array([12, 12, 12, 12, 12, 12, 1, 1, 1, 1, 1, 1]),
'elez': np.array([6, 6, 6, 6, 6, 6, 1, 1, 1, 1, 1, 1]),
'elem': np.array(['C', 'C', 'C', 'C', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H']),
'mass': np.array([ 12., 12., 12., 12., 12., 12., 1.00782503, 1.00782503, 1.00782503, 1.00782503, 1.00782503, 1.00782503]),
'real': np.array([ True, True, True, True, True, True, True, True, True, True, True, True]),
'elbl': np.array(['', '', '', '', '', '', '', '', '', '', '', '']),
'units': 'Angstrom',
'fix_com': False,
'fix_orientation': False,
'fragment_separators': [],
'molecular_charge': 0.,
'molecular_multiplicity': 1,
'fragment_charges': [0.],
'fragment_multiplicities': [1],
'name': 'IUPAC benzene',
}
def test_psi4_pubchem_4a():
subject = subject4
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
assert compare_molrecs(ans4, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans4, final['qm'], 4, sys._getframe().f_code.co_name + ': full')
def test_psi4_pubchem_4b():
"""user units potentially contradicting pubchem units"""
subject = subject4 + '\nunits au'
with pytest.raises(qcel.MoleculeFormatError):
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
def test_psi4_pubchem_4c():
subject = """
pubchem : 241
"""
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
assert compare_molrecs(ans4, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans4, final['qm'], 4, sys._getframe().f_code.co_name + ': full')
subject5 = """
efp C6H6 -0.30448173 -2.24210052 -0.29383131 -0.642499 7.817407 -0.568147 # second to last equiv to 1.534222
--
efp C6H6 -0.60075437 1.36443336 0.78647823 3.137879 1.557344 -2.568550
"""
ans5 = {
'fragment_files': ['C6H6', 'C6H6'],
'hint_types': ['xyzabc', 'xyzabc'],
'geom_hints': [[-0.30448173, -2.24210052, -0.29383131, -0.642499, 7.817407, -0.568147],
[-0.60075437, 1.36443336, 0.78647823, 3.137879, 1.557344, -2.568550]],
'geom': [],
'elbl': [],
'fragment_charges': [None],
'fragment_multiplicities': [None],
'fragment_separators': [],
}
fullans5b = {'efp': {}}
fullans5b['efp']['hint_types'] = ans5['hint_types']
fullans5b['efp']['geom_hints'] = ans5['geom_hints']
fullans5b['efp']['units'] = 'Bohr'
fullans5b['efp']['fix_com'] = True
fullans5b['efp']['fix_orientation'] = True
fullans5b['efp']['fix_symmetry'] = 'c1'
fullans5b['efp']['fragment_files'] = ['c6h6', 'c6h6']
def test_psi4_efp_5a():
subject = subject5
hintsans = [[(val / qcel.constants.bohr2angstroms if i < 3 else val) for i, val in enumerate(ans5['geom_hints'][0])],
[(val / qcel.constants.bohr2angstroms if i < 3 else val) for i, val in enumerate(ans5['geom_hints'][1])]]
hintsans[0][4] = 1.534222
fullans = copy.deepcopy(fullans5b)
fullans['efp']['units'] = 'Angstrom'
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
assert compare_dicts(ans5, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans['efp'], final['efp'], 4, sys._getframe().f_code.co_name + ': final efp')
hintsstd = qcel.util.standardize_efp_angles_units('Angstrom', final['efp']['geom_hints'])
final['efp']['geom_hints'] = hintsstd
fullans['efp']['geom_hints'] = hintsans
assert compare_molrecs(fullans['efp'], final['efp'], 4, sys._getframe().f_code.co_name + ': final efp standardized')
def test_psi4_efp_5b():
subject = subject5 + '\nunits bohr'
ans = copy.deepcopy(ans5)
ans['units'] = 'Bohr'
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
assert compare_dicts(ans, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans5b['efp'], final['efp'], 4, sys._getframe().f_code.co_name + ': final efp')
def test_psi4_efp_5c():
"""fix_orientation not mol kw"""
subject = subject5 + '\nno_com\nfix_orientation\nsymmetry c1'
with pytest.raises(qcel.MoleculeFormatError):
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
def test_psi4_efp_5d():
subject = subject5 + '\nno_com\nno_reorient\nsymmetry c1\nunits a.u.'
ans = copy.deepcopy(ans5)
ans['units'] = 'Bohr'
ans['fix_com'] = True
ans['fix_orientation'] = True
ans['fix_symmetry'] = 'c1'
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
assert compare_dicts(ans, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans5b['efp'], final['efp'], 4, sys._getframe().f_code.co_name + ': final')
def test_psi4_efp_5e():
"""symmetry w/efp"""
subject = subject5 + 'symmetry cs\nunits a.u.'
with pytest.raises(qcel.ValidationError):
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
subject6 = """
0 1
O1 0 0 0.118720
h2 -0.753299, 0.0, -0.474880
H3 0.753299, 0.0, -0.474880
--
efp h2O -2.12417561 1.22597097 -0.95332054 -2.902133 1.734999 -1.953647
--
efp ammoniA
0.98792 1.87681 2.85174
units au
1.68798 1.18856 3.09517
1.45873 2.55904 2.27226
"""
ans6 = {'units': 'Bohr',
'geom': [0., 0., 0.118720, -0.753299, 0.0, -0.474880, 0.753299, 0.0, -0.474880],
'elbl': ['O1', 'h2', 'H3'],
'fragment_charges': [0.],
'fragment_multiplicities': [1],
'fragment_separators': [],
'fragment_files': ['h2O', 'ammoniA'],
'geom_hints': [[-2.12417561, 1.22597097, -0.95332054, -2.902133, 1.734999, -1.953647],
[0.98792, 1.87681, 2.85174, 1.68798 , 1.18856 , 3.09517, 1.45873 , 2.55904 , 2.27226]],
'hint_types': ['xyzabc', 'points'],
}
fullans6 = {'qm': {'geom': np.array([0., 0., 0.118720, -0.753299, 0.0, -0.474880, 0.753299, 0.0, -0.474880]),
'elea': np.array([16, 1, 1]),
'elez': np.array([8, 1, 1]),
'elem': np.array(['O', 'H', 'H']),
'mass': np.array([ 15.99491462, 1.00782503, 1.00782503]),
'real': np.array([True, True, True]),
'elbl': np.array(['1', '2', '3']),
'units': 'Bohr',
'fix_com': True,
'fix_orientation': True,
'fix_symmetry': 'c1',
'fragment_charges': [0.],
'fragment_multiplicities': [1],
'fragment_separators': [],
'molecular_charge': 0.,
'molecular_multiplicity': 1},
'efp': {'fragment_files': ['h2o', 'ammonia'],
'geom_hints': [[-2.12417561, 1.22597097, -0.95332054, -2.902133, 1.734999, -1.953647],
[0.98792, 1.87681, 2.85174, 1.68798 , 1.18856 , 3.09517, 1.45873 , 2.55904 , 2.27226]],
'hint_types': ['xyzabc', 'points'],
'units': 'Bohr',
'fix_com': True,
'fix_orientation': True,
'fix_symmetry': 'c1',
}}
def test_psi4_qmefp_6a():
subject = subject6
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
assert compare_dicts(ans6, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans6['efp'], final['efp'], 4, sys._getframe().f_code.co_name + ': full efp')
assert compare_molrecs(fullans6['qm'], final['qm'], 4, sys._getframe().f_code.co_name + ': full qm')
def test_psi4_qmefp_6b():
subject = subject6.replace('au', 'ang')
ans = copy.deepcopy(ans6)
ans['units'] = 'Angstrom'
fullans = copy.deepcopy(fullans6)
fullans['qm']['units'] = 'Angstrom'
fullans['efp']['units'] = 'Angstrom'
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
assert compare_dicts(ans, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans['efp'], final['efp'], 4, sys._getframe().f_code.co_name + ': full efp')
assert compare_molrecs(fullans['qm'], final['qm'], 4, sys._getframe().f_code.co_name + ': full qm')
def test_psi4_qmefp_6c():
"""try to give chgmult to an efp"""
subject = subject6.replace(' efp h2O', '0 1\n efp h2O')
with pytest.raises(qcel.MoleculeFormatError):
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
@using_pylibefp
def test_psi4_qmefp_6d():
subject = subject6
fullans = copy.deepcopy(fullans6)
fullans['efp']['geom'] = np.array([-2.22978429, 1.19270015, -0.99721732, -1.85344873, 1.5734809 ,
0.69660583, -0.71881655, 1.40649303, -1.90657336, 0.98792 ,
1.87681 , 2.85174 , 2.31084386, 0.57620385, 3.31175679,
1.87761143, 3.16604791, 1.75667803, 0.55253064, 2.78087794,
4.47837555])
fullans['efp']['elea'] = np.array([16, 1, 1, 14, 1, 1, 1])
fullans['efp']['elez'] = np.array([8, 1, 1, 7, 1, 1, 1])
fullans['efp']['elem'] = np.array(['O', 'H', 'H', 'N', 'H', 'H', 'H'])
fullans['efp']['mass'] = np.array([15.99491462, 1.00782503, 1.00782503, 14.00307400478, 1.00782503, 1.00782503, 1.00782503])
fullans['efp']['real'] = np.array([True, True, True, True, True, True, True])
fullans['efp']['elbl'] = np.array(['_a01o1', '_a02h2', '_a03h3', '_a01n1', '_a02h2', '_a03h3', '_a04h4'])
fullans['efp']['fragment_separators'] = [3]
fullans['efp']['fragment_charges'] = [0., 0.]
fullans['efp']['fragment_multiplicities'] = [1, 1]
fullans['efp']['molecular_charge'] = 0.
fullans['efp']['molecular_multiplicity'] = 1
fullans['efp']['hint_types'] = ['xyzabc', 'xyzabc']
fullans['efp']['geom_hints'][1] = [1.093116487139866, 1.9296501432128303, 2.9104336205167156, -1.1053108079381473, 2.0333070957565544, -1.488586877218809]
final, intermed = qcel.molparse.from_string(subject, return_processed=True)
import pylibefp
efpobj = pylibefp.from_dict(final['efp'])
efpfinal = efpobj.to_dict()
efpfinal = qcel.molparse.from_arrays(speclabel=False, domain='efp', **efpfinal)
assert compare_molrecs(fullans['qm'], final['qm'], 4, sys._getframe().f_code.co_name + ': full qm')
assert compare_molrecs(fullans['efp'], efpfinal, 4, sys._getframe().f_code.co_name + ': full efp')
subject7 = """\
5
stuffs
6Li 0.0 0.0 0.0
H_specIAL@2.014101 100 0 0
@Ne 2 4 6
h .0,1,2
Gh(he3) 0 1 3
"""
ans7 = {'geom': [ 0., 0., 0., 100., 0., 0., 2., 4., 6., 0., 1., 2., 0., 1., 3.],
'elbl': ['6Li', 'H_specIAL@2.014101', '@Ne', 'h', 'Gh(he3)'],
'units': 'Angstrom',
'geom_hints': [], # shouldn't be needed
}
fullans7 = {'geom': np.array([ 0., 0., 0., 100., 0., 0., 2., 4., 6., 0., 1., 2., 0., 1., 3.]),
'elea': np.array([6, 2, 20, 1, 4]),
'elez': np.array([3, 1, 10, 1, 2]),
'elem': np.array(['Li', 'H', 'Ne', 'H', 'He']),
'mass': np.array([ 6.015122794, 2.014101, 19.99244017542, 1.00782503, 4.00260325415]),
'real': np.array([ True, True, False, True, False]),
'elbl': np.array(['', '_special', '', '', '3']),
'units': 'Angstrom',
'fix_com': False,
'fix_orientation': False,
'fragment_separators': [],
'fragment_charges': [0.],
'fragment_multiplicities': [2],
'molecular_charge': 0.,
'molecular_multiplicity': 2,
}
def test_xyzp_qm_7a():
"""XYZ doesn't fit into psi4 string"""
subject = subject7
with pytest.raises(qcel.MoleculeFormatError):
final, intermed = qcel.molparse.from_string(subject, return_processed=True, dtype='psi4')
def test_xyzp_qm_7b():
"""XYZ doesn't fit into strict xyz string"""
subject = subject7
with pytest.raises(qcel.MoleculeFormatError):
final, intermed = qcel.molparse.from_string(subject, return_processed=True, dtype='xyz')
def test_xyzp_qm_7c():
subject = subject7
final, intermed = qcel.molparse.from_string(subject, return_processed=True, dtype='xyz+')
assert compare_dicts(ans7, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans7, final['qm'], 4, sys._getframe().f_code.co_name + ': full qm')
def test_xyzp_qm_7d():
subject = subject7.replace('5', '5 au ')
subject = subject.replace('stuff', '-1 3 slkdjfl2 32#$^& ')
ans = copy.deepcopy(ans7)
ans['units'] = 'Bohr'
ans['molecular_charge'] = -1.
ans['molecular_multiplicity'] = 3
fullans = copy.deepcopy(fullans7)
fullans['units'] = 'Bohr'
fullans['fragment_charges'] = [-1.]
fullans['fragment_multiplicities'] = [3]
fullans['molecular_charge'] = -1.
fullans['molecular_multiplicity'] = 3
final, intermed = qcel.molparse.from_string(subject, return_processed=True, dtype='xyz+')
assert compare_dicts(ans, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans, final['qm'], 4, sys._getframe().f_code.co_name + ': full qm')
subject8 = """\
3
stuffs
Li 0.0 0.0 0.0
1 100 0 0
Ne 2 4 6
h .0,1,2
2 0 1 3
"""
ans8 = {'geom': [ 0., 0., 0., 100., 0., 0., 2., 4., 6., 0., 1., 2., 0., 1., 3.],
'elbl': ['Li', '1', 'Ne', 'h', '2'],
'units': 'Angstrom',
'geom_hints': [], # shouldn't be needed
}
fullans8 = {'geom': np.array([ 0., 0., 0., 100., 0., 0., 2., 4., 6., 0., 1., 2., 0., 1., 3.]),
'elea': np.array([7, 1, 20, 1, 4]),
'elez': np.array([3, 1, 10, 1, 2]),
'elem': np.array(['Li', 'H', 'Ne', 'H', 'He']),
'mass': np.array([ 7.016004548, 1.00782503, 19.99244017542, 1.00782503, 4.00260325415]),
'real': np.array([ True, True, True, True, True]),
'elbl': np.array(['', '', '', '', '']),
'units': 'Angstrom',
'fix_com': False,
'fix_orientation': False,
'fragment_separators': [],
'fragment_charges': [0.],
'fragment_multiplicities': [2],
'molecular_charge': 0.,
'molecular_multiplicity': 2,
}
def test_xyzp_qm_8a():
subject = subject8
final, intermed = qcel.molparse.from_string(subject, return_processed=True, dtype='xyz+')
assert compare_dicts(ans8, intermed, 4, sys._getframe().f_code.co_name + ': intermediate')
assert compare_molrecs(fullans8, final['qm'], 4, sys._getframe().f_code.co_name + ': full qm')
fullans10qm = {'geom': np.array([0., 0., 0.]),
'elea': np.array([12]),
'elez': np.array([6]),
'elem': np.array(['C']),
'mass': np.array([12.]),
'real': np.array([True]),
'elbl': np.array(['']),
'units': 'Angstrom',
'fix_com': False,
'fix_orientation': False,
'fragment_separators': [],
'fragment_charges': [0.],
'fragment_multiplicities': [1],
'molecular_charge': 0.,
'molecular_multiplicity': 1}
fullans10efp = {'fragment_files': ['cl2'],
'hint_types': ['xyzabc'],
'geom_hints': [[0., 0., 0., 0., 0., 0.]],
'units': 'Angstrom',
'fix_com': True,
'fix_orientation': True,
'fix_symmetry': 'c1'}
blankqm = {'geom': np.array([]),
'elea': np.array([]),
'elez': np.array([]),
'elem': np.array([]),
'mass': np.array([]),
'real': np.array([]),
'elbl': np.array([]),
'units': 'Angstrom',
'fix_com': False,
'fix_orientation': False,
'fragment_separators': [],
'fragment_charges': [0.],
'fragment_multiplicities': [1],
'molecular_charge': 0.,
'molecular_multiplicity': 1}
blankefp = {'fragment_files': [],
'hint_types': [],
'geom_hints': [],
'units': 'Angstrom',
'fix_com': True,
'fix_orientation': True,
'fix_symmetry': 'c1'}
def test_arrays_10a():
subject = {'geom': [0, 0, 0],
'elem': ['C'],
'fragment_files': ['cl2'],
'hint_types': ['xyzabc'],
'geom_hints': [[0, 0, 0, 0, 0, 0]],
'enable_qm': True,
'enable_efp': True}
fullans = {'qm': copy.deepcopy(fullans10qm),
'efp': copy.deepcopy(fullans10efp)}
fullans['qm']['fix_com'] = True
fullans['qm']['fix_orientation'] = True
fullans['qm']['fix_symmetry'] = 'c1'
final = qcel.molparse.from_input_arrays(**subject)
assert compare_molrecs(fullans['qm'], final['qm'], 4, sys._getframe().f_code.co_name + ': full qm')
assert compare_molrecs(fullans['efp'], final['efp'], 4, sys._getframe().f_code.co_name + ': full efp')
def test_arrays_10b():
subject = {'geom': [0, 0, 0],
'elem': ['C'],
'fragment_files': ['cl2'],
'hint_types': ['xyzabc'],
'geom_hints': [[0, 0, 0, 0, 0, 0]],
'enable_qm': False,
'enable_efp': True}
fullans = {'efp': fullans10efp}
final = qcel.molparse.from_input_arrays(**subject)
with pytest.raises(KeyError):
final['qm']
assert compare_molrecs(fullans['efp'], final['efp'], 4, sys._getframe().f_code.co_name + ': full efp')
def test_arrays_10c():
subject = {'geom': [0, 0, 0],
'elem': ['C'],
'fragment_files': ['cl2'],
'hint_types': ['xyzabc'],
'geom_hints': [[0, 0, 0, 0, 0, 0]],
'enable_qm': True,
'enable_efp': False}
fullans = {'qm': fullans10qm}
final = qcel.molparse.from_input_arrays(**subject)
assert compare_molrecs(fullans['qm'], final['qm'], 4, sys._getframe().f_code.co_name + ': full qm')
with pytest.raises(KeyError):
final['efp']
def test_arrays_10d():
subject = {'geom': [0, 0, 0],
'elem': ['C'],
'enable_qm': True,
'enable_efp': True,
'missing_enabled_return_efp': 'none'}
fullans = {'qm': fullans10qm}
final = qcel.molparse.from_input_arrays(**subject)
assert compare_molrecs(fullans['qm'], final['qm'], 4, sys._getframe().f_code.co_name + ': full qm')
with pytest.raises(KeyError):
final['efp']
def test_arrays_10e():
subject = {'geom': [0, 0, 0],
'elem': ['C'],
'enable_qm': True,
'enable_efp': True,
'missing_enabled_return_efp': 'minimal'}
fullans = {'qm': fullans10qm,
'efp': blankefp}
final = qcel.molparse.from_input_arrays(**subject)
assert compare_molrecs(fullans['qm'], final['qm'], 4, sys._getframe().f_code.co_name + ': full qm')
assert compare_molrecs(fullans['efp'], final['efp'], 4, sys._getframe().f_code.co_name + ': full efp')
def test_arrays_10f():
subject = {'geom': [0, 0, 0],
'elem': ['C'],
'enable_qm': True,
'enable_efp': True,
'missing_enabled_return_efp': 'error'}
with pytest.raises(qcel.ValidationError):
qcel.molparse.from_input_arrays(**subject)
def test_arrays_10g():
subject = {'geom': [0, 0, 0],
'elem': ['C'],
'enable_qm': False,
'enable_efp': True,
'missing_enabled_return_efp': 'none'}
fullans = {}
final = qcel.molparse.from_input_arrays(**subject)
with pytest.raises(KeyError):
final['qm']
with pytest.raises(KeyError):
final['efp']
def test_arrays_10h():
subject = {'geom': [0, 0, 0],
'elem': ['C'],
'enable_qm': False,
'enable_efp': True,
'missing_enabled_return_efp': 'minimal'}
fullans = {'efp': blankefp}
final = qcel.molparse.from_input_arrays(**subject)
with pytest.raises(KeyError):
final['qm']
assert compare_molrecs(fullans['efp'], final['efp'], 4, sys._getframe().f_code.co_name + ': full efp')
def test_arrays_10i():
subject = {'geom': [0, 0, 0],
'elem': ['C'],
'enable_qm': False,
'enable_efp': True,
'missing_enabled_return_efp': 'error'}
with pytest.raises(qcel.ValidationError):
qcel.molparse.from_input_arrays(**subject)
def test_arrays_10j():
subject = {'geom': [0, 0, 0],
'elem': ['C'],
'enable_qm': True,
'enable_efp': False}
fullans = {'qm': fullans10qm}
final = qcel.molparse.from_input_arrays(**subject)
assert compare_molrecs(fullans['qm'], final['qm'], 4, sys._getframe().f_code.co_name + ': full qm')
with pytest.raises(KeyError):
final['efp']
def test_arrays_10k():
subject = {'fragment_files': ['cl2'],
'hint_types': ['xyzabc'],
'geom_hints': [[0, 0, 0, 0, 0, 0]],
'enable_qm': True,
'enable_efp': True,
'missing_enabled_return_qm': 'none'}
fullans = {'efp': fullans10efp}
final = qcel.molparse.from_input_arrays(**subject)
with pytest.raises(KeyError):
final['qm']
assert compare_molrecs(fullans['efp'], final['efp'], 4, sys._getframe().f_code.co_name + ': full efp')
def test_arrays_10l():
subject = {'fragment_files': ['cl2'],
'hint_types': ['xyzabc'],
'geom_hints': [[0, 0, 0, 0, 0, 0]],
'enable_qm': True,
'enable_efp': True,
'missing_enabled_return_qm': 'minimal'}
fullans = {'qm': copy.deepcopy(blankqm),
'efp': fullans10efp}
fullans['qm']['fix_com'] = True
fullans['qm']['fix_orientation'] = True
fullans['qm']['fix_symmetry'] = 'c1'
final = qcel.molparse.from_input_arrays(**subject)
assert compare_molrecs(fullans['qm'], final['qm'], 4, sys._getframe().f_code.co_name + ': full qm')
assert compare_molrecs(fullans['efp'], final['efp'], 4, sys._getframe().f_code.co_name + ': full efp')
def test_arrays_10m():
subject = {'fragment_files': ['cl2'],
'hint_types': ['xyzabc'],
'geom_hints': [[0, 0, 0, 0, 0, 0]],
'enable_qm': True,
'enable_efp': True,
'missing_enabled_return_qm': 'error'}
with pytest.raises(qcel.ValidationError):
qcel.molparse.from_input_arrays(**subject)
def test_arrays_10n():
subject = {'fragment_files': ['cl2'],
'hint_types': ['xyzabc'],
'geom_hints': [[0, 0, 0, 0, 0, 0]],
'enable_qm': False,
'enable_efp': True}
fullans = {'efp': fullans10efp}
final = qcel.molparse.from_input_arrays(**subject)
with pytest.raises(KeyError):
final['qm']
assert compare_molrecs(fullans['efp'], final['efp'], 4, sys._getframe().f_code.co_name + ': full efp')
def test_arrays_10o():
subject = {'fragment_files': ['cl2'],
'hint_types': ['xyzabc'],
'geom_hints': [[0, 0, 0, 0, 0, 0]],
'enable_qm': True,
'enable_efp': False,
'missing_enabled_return_qm': 'none'}
fullans = {}
final = qcel.molparse.from_input_arrays(**subject)
with pytest.raises(KeyError):
final['qm']
with pytest.raises(KeyError):
final['efp']
def test_arrays_10p():
subject = {'fragment_files': ['cl2'],
'hint_types': ['xyzabc'],
'geom_hints': [[0, 0, 0, 0, 0, 0]],
'enable_qm': True,
'enable_efp': False,
'missing_enabled_return_qm': 'minimal'}
fullans = {'qm': blankqm}
final = qcel.molparse.from_input_arrays(**subject)
assert compare_molrecs(fullans['qm'], final['qm'], 4, sys._getframe().f_code.co_name + ': full qm')
with pytest.raises(KeyError):
final['efp']
def test_arrays_10q():
subject = {'fragment_files': ['cl2'],
'hint_types': ['xyzabc'],
'geom_hints': [[0, 0, 0, 0, 0, 0]],
'enable_qm': True,
'enable_efp': False,
'missing_enabled_return_qm': 'error'}
with pytest.raises(qcel.ValidationError):
qcel.molparse.from_input_arrays(**subject)
def test_strings_10r():
subject = ''
final = qcel.molparse.from_string(subject, enable_qm=True,
enable_efp=True,
missing_enabled_return_qm='none',
missing_enabled_return_efp='none')
print('final', final)
with pytest.raises(KeyError):
final['qm']
with pytest.raises(KeyError):
final['efp']
def test_strings_10s():
subject = ''
final = qcel.molparse.from_string(subject, enable_qm=True,
enable_efp=True,
missing_enabled_return_qm='minimal',
missing_enabled_return_efp='minimal')
fullans = {'qm': blankqm,
'efp': blankefp}
assert compare_molrecs(fullans['qm'], final['qm'], 4, sys._getframe().f_code.co_name + ': full qm')
assert compare_molrecs(fullans['efp'], final['efp'], 4, sys._getframe().f_code.co_name + ': full efp')
def test_strings_10t():
subject = ''
with pytest.raises(qcel.ValidationError):
qcel.molparse.from_string(subject, enable_qm=True,
enable_efp=True,
missing_enabled_return_qm='error',
missing_enabled_return_efp='error')
def assess_mol_11(mol, label):
dmol = mol.to_dict()
assert compare_molrecs(fullans1a, dmol, 4, label, relative_geoms='align')
assert compare_integers(2, mol.natom(), label)
def test_qmol_11a():
asdf = qcdb.Molecule(fullans1a)
assess_mol_11(asdf, '[1] qcdb.Molecule(dict)')
def test_qmol_11b():
asdf = qcdb.Molecule(geom=[ 0., 0., 0., 1., 0., 0.], elez=[8, 1], fix_com=True)
assess_mol_11(asdf, '[2] qcdb.Molecule(geom, elez)')
def test_qmol_11c():
asdf = qcdb.Molecule("""nocom\n8 0 0 0\n1 1 0 0""", dtype='psi4')
assess_mol_11(asdf, '[3] qcdb.Molecule(str, dtype="psi4")')
def test_qmol_11d():
asdf = qcdb.Molecule("""nocom\n8 0 0 0\n1 1 0 0""", dtype='psi4+')
assess_mol_11(asdf, '[4] qcdb.Molecule(str, dtype="psi4+")')
def test_qmol_11e():
asdf = qcdb.Molecule("""2\n\nO 0 0 0 \n1 1 0 0 """, dtype='xyz', fix_com=True)
assess_mol_11(asdf, '[5] qcdb.Molecule(str, dtype="xyz")')
def test_qmol_11f():
asdf = qcdb.Molecule.from_dict(fullans1a)
assess_mol_11(asdf, '[6] qcdb.Molecule.from_dict(dict)')
def test_qmol_11g():
asdf = qcdb.Molecule.from_arrays(geom=[ 0., 0., 0., 1., 0., 0.], elez=[8, 1], fix_com=True)
assess_mol_11(asdf, '[7] qcdb.Molecule.from_arrays(geom, elez)')
def test_qmol_11h():
asdf = qcdb.Molecule.from_string("""nocom\n8 0 0 0\n1 1 0 0""")
assess_mol_11(asdf, '[8] qcdb.Molecule.from_string(str, dtype="psi4")')
def test_qmol_11i():
asdf = qcdb.Molecule.from_string("""nocom\n8 0 0 0\n1 1 0 0""")
assess_mol_11(asdf, '[9] qcdb.Molecule.from_string(str, dtype="psi4+")')
def test_qmol_11j():
asdf = qcdb.Molecule.from_string("""2\n\nO 0 0 0 \n1 1 0 0 """, fix_com=True)
assess_mol_11(asdf, '[10] qcdb.Molecule.from_string(str, dtype="xyz")')
@using_psi4_molrec
def test_pmol_11k():
import psi4
asdf = psi4.core.Molecule.from_dict(fullans1a)
assess_mol_11(asdf, '[16] psi4.core.Molecule.from_dict(dict)')
@using_psi4_molrec
def test_pmol_11l():
import psi4
asdf = psi4.core.Molecule.from_arrays(geom=[ 0., 0., 0., 1., 0., 0.], elez=[8, 1], fix_com=True)
assess_mol_11(asdf, '[17] psi4.core.Molecule.from_arrays(geom, elez)')
@using_psi4_molrec
def test_pmol_11m():
import psi4
asdf = psi4.core.Molecule.from_string("""nocom\n8 0 0 0\n1 1 0 0""")
assess_mol_11(asdf, '[18] psi4.core.Molecule.from_string(str, dtype="psi4")')
@using_psi4_molrec
def test_pmol_11n():
import psi4
asdf = psi4.core.Molecule.from_string("""nocom\n8 0 0 0\n1 1 0 0""")
assess_mol_11(asdf, '[19] psi4.core.Molecule.from_string(str, dtype="psi4+")')
@using_psi4_molrec
def test_pmol_11o():
import psi4
asdf = psi4.core.Molecule.from_string("""2\n\nO 0 0 0 \n1 1 0 0 """, fix_com=True)
assess_mol_11(asdf, '[20] psi4.core.Molecule.from_string(str, dtype="xyz")')
def test_qmol_11p():
asdf = qcdb.Molecule.from_arrays(geom=[ 0., 0., 0., 1., 0., 0.], elez=[8, 1], fix_com=True, units='AngSTRom')
assess_mol_11(asdf, '[7] qcdb.Molecule.from_arrays(geom, elez)')
def test_qmol_12():
asdf = qcdb.Molecule(geom=[ 0., 0., 0., 1., 0., 0.], elez=[8, 1], fix_com=True)
assess_mol_11(asdf, 'qcdb.Molecule(geom, elez)')
import json
smol = json.dumps(asdf.to_dict(np_out=False))
dmol = json.loads(smol)
asdf2 = qcdb.Molecule(dmol)
assess_mol_11(asdf, 'qcdb.Molecule(jsondict)')
|
CDSherrill/psi4
|
psi4/driver/qcdb/pytest/test_from_string.py
|
Python
|
lgpl-3.0
| 38,751
|
[
"Psi4"
] |
57cd8fd4a422575426f7cb76149621936271ce8336dcc98cfc42bf3300b0526f
|
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
class contour(ScriptedConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
# call parent constructor
ModuleBase.__init__(self, module_manager)
self._contourFilter = vtk.vtkContourFilter()
module_utils.setup_vtk_object_progress(self, self._contourFilter,
'Extracting iso-surface')
# now setup some defaults before our sync
self._config.iso_value = 128
config_list = [
('ISO value:', 'iso_value', 'base:float', 'text',
'Surface will pass through points with this value.')]
ScriptedConfigModuleMixin.__init__(
self, config_list,
{'Module (self)' : self,
'vtkContourFilter' : self._contourFilter})
self.sync_module_logic_with_config()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
self.set_input(0, None)
# this will take care of all display thingies
ScriptedConfigModuleMixin.close(self)
ModuleBase.close(self)
# get rid of our reference
del self._contourFilter
def get_input_descriptions(self):
return ('vtkImageData',)
def set_input(self, idx, inputStream):
self._contourFilter.SetInput(inputStream)
def get_output_descriptions(self):
return (self._contourFilter.GetOutput().GetClassName(),)
def get_output(self, idx):
return self._contourFilter.GetOutput()
def logic_to_config(self):
self._config.iso_value = self._contourFilter.GetValue(0)
def config_to_logic(self):
self._contourFilter.SetValue(0, self._config.iso_value)
def execute_module(self):
self._contourFilter.Update()
def streaming_execute_module(self):
self._contourFilter.Update()
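# Illustrative sketch of the underlying VTK pipeline this module wraps
# (plain VTK 5 calls outside the DeVIDE framework; the input is arbitrary):
#
#     cf = vtk.vtkContourFilter()
#     cf.SetInput(image_data)    # some vtkImageData source
#     cf.SetValue(0, 128)        # the default iso_value used above
#     cf.Update()
#     surface = cf.GetOutput()   # resulting vtkPolyData iso-surface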
|
chrisidefix/devide
|
modules/filters/contour.py
|
Python
|
bsd-3-clause
| 2,032
|
[
"VTK"
] |
24567ab3bf9f937cd4b4cd55e390015687e6466d59df5361cd07244204f3a750
|
# This module provides color definitions for use in Visualization.
#
# Written by: Konrad Hinsen <hinsen@cnrs-orleans.fr>
# Last revision: 1999-7-23
#
"""This module provides color definitions that are used in the modules
VRML, VRML2, and VMD.
"""
import Numeric, string
#
# Colors
#
class Color:
"""RGB Color specification
Constructor: Color(|rgb|), where |rgb| is a sequence of three numbers
between zero and one, specifying the red, green, and blue intensities.
Color objects can be added and multiplied with scalars.
"""
def __init__(self, rgb):
self.rgb = (min(1.,max(0.,rgb[0])),
min(1.,max(0.,rgb[1])),
min(1.,max(0.,rgb[2])))
def __mul__(self, scale):
return Color(map(lambda i, s=scale: s*i, self.rgb))
__rmul__ = __mul__
def __add__(self, other):
return Color(map(lambda a, b: a+b, self.rgb, other.rgb))
def __cmp__(self, other):
return cmp(self.rgb, other.rgb)
def __hash__(self):
return hash(self.rgb)
def __str__(self):
return str(self.rgb[0])+' '+str(self.rgb[1])+' '+str(self.rgb[2])
def __repr__(self):
return 'Color(' + repr(self.rgb) + ')'
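# Illustrative examples (a minimal sketch):
#
#     c = Color((1., 0., 0.))              # pure red
#     0.5*c + 0.5*Color((0., 0., 1.))      # Color((0.5, 0.0, 0.5))
#     str(c)                               # '1.0 0.0 0.0'
#
# Components are clamped to [0, 1] by the constructor.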
#
# Color scales
#
class ColorScale:
"""Mapping of a number interval to a color range
    Constructor: ColorScale(|range|), where |range| can be a tuple of
    two numbers (the lower and upper limits of the interval), or a
    single number specifying the upper limit for a default lower limit of zero.
Evaluation: colorscale(|number|) returns the Color object
corresponding to |number|. If |number| is outside the
predefined interval, the closest extreme value of the interval
is used.
The color scale is blue - green - yellow - orange - red.
"""
def __init__(self, range):
if type(range) == type(()):
self.zero, self.range = range
self.range = self.range-self.zero
else:
self.range = range
self.zero = 0.
def __call__(self, value):
value = (value-self.zero)/self.range
value = max(min(value, 1.), 0.)
if value <= 0.25:
red = 0.
green = 4.*value
blue = 1.
elif value <= 0.5:
red = 0.
green = 1.
blue = 1.-4.*(value-0.25)
elif value <= 0.75:
red = 4.*(value-0.5)
green = 1.
blue = 0.
else:
red = 1.
green = 1.-4.*(value-0.75)
blue = 0.
return Color((red, green, blue))
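# Illustrative examples (a minimal sketch):
#
#     scale = ColorScale(1.)
#     scale(0.)     # Color((0.0, 0.0, 1.0))  -- blue end
#     scale(0.5)    # Color((0.0, 1.0, 0.0))  -- green midpoint
#     scale(1.)     # Color((1.0, 0.0, 0.0))  -- red end
#
# Values outside the interval are clamped to the nearest end before mapping.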
class SymmetricColorScale:
"""Mapping of a symmetric number interval to a color range
Constructor: SymmetricColorScale(|range|), where |range| is a
single number defining the interval, which is -|range| to |range|.
Evaluation: colorscale(|number|) returns the Color object
    corresponding to |number|. If |number| is outside the
    predefined interval, a ValueError is raised.
The colors are red for negative numbers and green for positive
numbers, with a color intensity proportional to the absolute
value of the argument.
"""
def __init__(self, max, n = 20):
self.range = max
self.n = n
self.colors = {}
def __call__(self, value):
negative = value < 0.
index = Numeric.floor(abs(value)*self.n/self.range)
if index > self.n:
raise ValueError, 'Value outside range'
try:
return self.colors[(negative, index)]
except KeyError:
white = 1.*(self.n-index)/self.n
if negative:
color = Color((1., white, white))
else:
color = Color((white, 1., white))
self.colors[(negative, index)] = color
return color
#
# Predefined colors
#
full_colors = {
'black': Color((0.,0.,0.)),
'white': Color((1.,1.,1.)),
'grey': Color((0.5,0.5,0.5)),
'red': Color((1.,0.,0.)),
'green': Color((0.,1.,0.)),
'blue': Color((0.,0.,1.)),
'yellow': Color((1.,1.,0.)),
'magenta': Color((1.,0.,1.)),
'cyan': Color((0.,1.,1.)),
'orange': Color((1.,0.5,0.)),
'violet': Color((1.,0.,0.5)),
'olive': Color((0.1,0.6,0.2)),
'brown': Color((0.6,0.4,0.)),
}
dark_colors = {}
for name, value in full_colors.items():
dark_colors[name] = 0.3*value
light_colors = {}
for name, value in full_colors.items():
light_colors[name] = 0.7*value + 0.3*full_colors['white']
def ColorByName(name):
"""Returns a Color object corresponding to |name|. The known names
are black, white, grey, red, green, blue, yellow, magenta, cyan,
orange, violet, olive, and brown. Any color can be prefixed by
"light " or "dark " to yield a variant.
"""
name = string.split(string.lower(name))
dict = full_colors
if len(name) == 2:
if name[0] == 'light':
dict = light_colors
elif name[0] == 'dark':
dict = dark_colors
return dict[name[-1]]
|
OS2World/DEV-PYTHON-UTIL-ScientificPython
|
src/Lib/site-packages/Scientific/Visualization/Color.py
|
Python
|
isc
| 4,647
|
[
"VMD"
] |
2737d20fcd9aa9385e87a48b64fec907cece8ee9ad4b5a08a4ccc695f768d141
|
import logging
import george
import numpy as np
from scipy import optimize
from robo.util import normalization
from robo.models.base_model import BaseModel
logger = logging.getLogger(__name__)
class GaussianProcess(BaseModel):
def __init__(self, kernel, prior=None,
noise=1e-3, use_gradients=False,
normalize_output=False,
normalize_input=True,
lower=None, upper=None, rng=None):
"""
        Interface to the george GP library. The GP hyperparameters are obtained
by optimizing the marginal log likelihood.
Parameters
----------
kernel : george kernel object
            Specifies the kernel that is used for the Gaussian Process
prior : prior object
Defines a prior for the hyperparameters of the GP. Make sure that
it implements the Prior interface.
noise : float
Noise term that is added to the diagonal of the covariance matrix
for the Cholesky decomposition.
use_gradients : bool
Use gradient information to optimize the negative log likelihood
lower : np.array(D,)
Lower bound of the input space which is used for the input space normalization
upper : np.array(D,)
Upper bound of the input space which is used for the input space normalization
normalize_output : bool
Zero mean unit variance normalization of the output values
normalize_input : bool
Normalize all inputs to be in [0, 1]. This is important to define good priors for the
length scales.
rng: np.random.RandomState
Random number generator
"""
if rng is None:
self.rng = np.random.RandomState(np.random.randint(0, 10000))
else:
self.rng = rng
self.kernel = kernel
self.gp = None
self.prior = prior
self.noise = noise
self.use_gradients = use_gradients
self.normalize_output = normalize_output
self.normalize_input = normalize_input
self.X = None
self.y = None
self.hypers = []
self.is_trained = False
self.lower = lower
self.upper = upper
@BaseModel._check_shapes_train
def train(self, X, y, do_optimize=True):
"""
Computes the Cholesky decomposition of the covariance of X and
estimates the GP hyperparameters by optimizing the marginal
loglikelihood. The prior mean of the GP is set to the empirical
mean of X.
Parameters
----------
X: np.ndarray (N, D)
Input data points. The dimensionality of X is (N, D),
with N as the number of points and D is the number of features.
y: np.ndarray (N,)
The corresponding target values.
do_optimize: boolean
If set to true the hyperparameters are optimized otherwise
the default hyperparameters of the kernel are used.
"""
if self.normalize_input:
# Normalize input to be in [0, 1]
self.X, self.lower, self.upper = normalization.zero_one_normalization(X, self.lower, self.upper)
else:
self.X = X
if self.normalize_output:
# Normalize output to have zero mean and unit standard deviation
self.y, self.y_mean, self.y_std = normalization.zero_mean_unit_var_normalization(y)
if self.y_std == 0:
raise ValueError("Cannot normalize output. All targets have the same value")
else:
self.y = y
# Use the empirical mean of the data as mean for the GP
self.mean = np.mean(self.y, axis=0)
self.gp = george.GP(self.kernel, mean=self.mean)
if do_optimize:
self.hypers = self.optimize()
self.gp.kernel[:] = self.hypers[:-1]
self.noise = np.exp(self.hypers[-1]) # sigma^2
else:
self.hypers = self.gp.kernel[:]
self.hypers = np.append(self.hypers, np.log(self.noise))
logger.debug("GP Hyperparameters: " + str(self.hypers))
try:
self.gp.compute(self.X, yerr=np.sqrt(self.noise))
except np.linalg.LinAlgError:
self.noise *= 10
self.gp.compute(self.X, yerr=np.sqrt(self.noise))
self.is_trained = True
def get_noise(self):
return self.noise
def nll(self, theta):
"""
Returns the negative marginal log likelihood (+ the prior) for
a hyperparameter configuration theta.
(negative because we use scipy minimize for optimization)
Parameters
----------
theta : np.ndarray(H)
            Hyperparameter vector. Note that all hyperparameters are
on a log scale.
Returns
----------
float
lnlikelihood + prior
"""
# Specify bounds to keep things sane
if np.any((-20 > theta) + (theta > 20)):
return 1e25
# The last entry of theta is always the noise
self.gp.kernel[:] = theta[:-1]
noise = np.exp(theta[-1]) # sigma^2
try:
self.gp.compute(self.X, yerr=np.sqrt(noise))
except np.linalg.LinAlgError:
return 1e25
ll = self.gp.lnlikelihood(self.y, quiet=True)
# Add prior
if self.prior is not None:
ll += self.prior.lnprob(theta)
# We add a minus here because scipy is minimizing
return -ll if np.isfinite(ll) else 1e25
def grad_nll(self, theta):
self.gp.kernel[:] = theta[:-1]
noise = np.exp(theta[-1])
self.gp.compute(self.X, yerr=np.sqrt(noise))
self.gp._compute_alpha(self.y)
K_inv = self.gp.solver.apply_inverse(np.eye(self.gp._alpha.size),
in_place=True)
# The gradients of the Gram matrix, for the noise this is just
# the identity matrix
Kg = self.gp.kernel.gradient(self.gp._x)
Kg = np.concatenate((Kg, np.eye(Kg.shape[0])[:, :, None]), axis=2)
# Calculate the gradient.
A = np.outer(self.gp._alpha, self.gp._alpha) - K_inv
g = 0.5 * np.einsum('ijk,ij', Kg, A)
if self.prior is not None:
g += self.prior.gradient(theta)
return -g
def optimize(self):
"""
Optimizes the marginal log likelihood and returns the best found
hyperparameter configuration theta.
Returns
-------
theta : np.ndarray(H)
Hyperparameter vector that maximizes the marginal log likelihood
"""
# Start optimization from the previous hyperparameter configuration
p0 = self.gp.kernel.vector
p0 = np.append(p0, np.log(self.noise))
if self.use_gradients:
theta, _, _ = optimize.minimize(self.nll, p0,
method="BFGS",
jac=self.grad_nll)
else:
try:
results = optimize.minimize(self.nll, p0, method='L-BFGS-B')
theta = results.x
except ValueError:
logging.error("Could not find a valid hyperparameter configuration! Use initial configuration")
theta = p0
return theta
def predict_variance(self, x1, X2):
r"""
        Predicts the variance between a test point x1 and a set of points X2 by
        math: \sigma(x_1, X_2) = k_{x_1,X_2} - k_{x_1,X} (K_{X,X} + \sigma^2 \mathds{I})^{-1} k_{X,X_2}
Parameters
----------
x1: np.ndarray (1, D)
First test point
X2: np.ndarray (N, D)
            Set of test points
Returns
----------
np.array(N, 1)
predictive variance between x1 and X2
"""
if not self.is_trained:
raise Exception('Model has to be trained first!')
x_ = np.concatenate((x1, X2))
_, var = self.predict(x_, full_cov=True)
var = var[-1, :-1, np.newaxis]
return var
@BaseModel._check_shapes_predict
def predict(self, X_test, full_cov=False, **kwargs):
r"""
Returns the predictive mean and variance of the objective function at
the given test points.
Parameters
----------
X_test: np.ndarray (N, D)
Input test points
full_cov: bool
            If set to true then the whole covariance matrix between the test points is returned
Returns
----------
np.array(N,)
predictive mean
np.array(N,) or np.array(N, N) if full_cov == True
predictive variance
"""
if not self.is_trained:
raise Exception('Model has to be trained first!')
if self.normalize_input:
X_test_norm, _, _ = normalization.zero_one_normalization(X_test, self.lower, self.upper)
else:
X_test_norm = X_test
mu, var = self.gp.predict(self.y, X_test_norm)
if self.normalize_output:
mu = normalization.zero_mean_unit_var_unnormalization(mu, self.y_mean, self.y_std)
var *= self.y_std ** 2
if not full_cov:
var = np.diag(var)
# Clip negative variances and set them to the smallest
# positive float value
if var.shape[0] == 1:
var = np.clip(var, np.finfo(var.dtype).eps, np.inf)
else:
var = np.clip(var, np.finfo(var.dtype).eps, np.inf)
var[np.where((var < np.finfo(var.dtype).eps) & (var > -np.finfo(var.dtype).eps))] = 0
return mu, var
def sample_functions(self, X_test, n_funcs=1):
"""
Samples F function values from the current posterior at the N
specified test points.
Parameters
----------
X_test: np.ndarray (N, D)
Input test points
n_funcs: int
Number of function values that are drawn at each test point.
Returns
----------
function_samples: np.array(F, N)
The F function values drawn at the N test points.
"""
if self.normalize_input:
X_test_norm, _, _ = normalization.zero_one_normalization(X_test, self.lower, self.upper)
else:
X_test_norm = X_test
if not self.is_trained:
raise Exception('Model has to be trained first!')
funcs = self.gp.sample_conditional(self.y, X_test_norm, n_funcs)
if self.normalize_output:
funcs = normalization.zero_mean_unit_var_unnormalization(funcs, self.y_mean, self.y_std)
if len(funcs.shape) == 1:
return funcs[None, :]
else:
return funcs
def get_incumbent(self):
"""
Returns the best observed point and its function value
Returns
----------
incumbent: ndarray (D,)
current incumbent
incumbent_value: ndarray (N,)
the observed value of the incumbent
"""
inc, inc_value = super(GaussianProcess, self).get_incumbent()
if self.normalize_input:
inc = normalization.zero_one_unnormalization(inc, self.lower, self.upper)
if self.normalize_output:
inc_value = normalization.zero_mean_unit_var_unnormalization(inc_value, self.y_mean, self.y_std)
return inc, inc_value
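
if __name__ == "__main__":
    # Minimal, self-contained sketch (added for illustration; not part of the
    # original RoBO module logic). It reproduces the predictive equations that
    # predict()/predict_variance() describe, using a plain NumPy squared
    # exponential kernel instead of the george backend. The lengthscale, noise
    # level and toy data below are arbitrary choices.
    def _sqexp(a, b, lengthscale=0.3):
        d = (np.sum(a ** 2, axis=1)[:, None] + np.sum(b ** 2, axis=1)[None, :]
             - 2 * np.dot(a, b.T))
        return np.exp(-0.5 * d / lengthscale ** 2)

    rng = np.random.RandomState(0)
    X_demo = rng.rand(20, 1)
    y_demo = np.sin(6 * X_demo[:, 0]) + 1e-2 * rng.randn(20)
    noise_demo = 1e-3

    K = _sqexp(X_demo, X_demo) + noise_demo * np.eye(len(y_demo))
    K_inv = np.linalg.inv(K)

    X_star = np.linspace(0, 1, 5)[:, None]
    k_star = _sqexp(X_star, X_demo)
    mu_star = np.dot(k_star, np.dot(K_inv, y_demo))
    cov_star = _sqexp(X_star, X_star) - np.dot(k_star, np.dot(K_inv, k_star.T))
    print("predictive mean:", mu_star)
    print("predictive variance:", np.diag(cov_star))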
|
numairmansur/RoBO
|
robo/models/gaussian_process.py
|
Python
|
bsd-3-clause
| 11,571
|
[
"Gaussian"
] |
b20a5e6fb39c2bc38d082d42027389f8b921548b0fa2f2e81f4f9d931a234c02
|
#!/usr/bin/env python
import os, re, sys
import readVasp
import ScanOutcar
import numpy as np
#====================================================================
def badparms( msg):
print '\nError: %s' % (msg,)
print 'Parms:'
print ' -bugLev <int> debug level'
print ' -readType <string> outcar / xml'
print ' -inDir <string> dir containing input OUTCAR or vasprun.xml'
print ''
sys.exit(1)
#====================================================================
def main():
'''
Test driver: Extracts info from the output of a VASP run.
Command line parameters:
================ ========= ==============================================
Parameter Type Description
================ ========= ==============================================
**-bugLev** integer Debug level. Normally 0.
  **-readType** string outcar / xml
**-inDir** string Input directory containing OUTCAR
and/or vasprun.xml.
================ ========= ==============================================
'''
bugLev = 0
readType = None
inDir = None
if len(sys.argv) % 2 != 1:
badparms('Parms must be key/value pairs')
for iarg in range( 1, len(sys.argv), 2):
key = sys.argv[iarg]
val = sys.argv[iarg+1]
if key == '-bugLev': bugLev = int( val)
elif key == '-readType': readType = val
elif key == '-inDir': inDir = val
else: badparms('unknown key: "%s"' % (key,))
if bugLev == None: badparms('parm not specified: -bugLev')
if readType == None: badparms('parm not specified: -readType')
if inDir == None: badparms('parm not specified: -inDir')
rmap = readVasp.parseDir( bugLev, readType, inDir, -1) # print = -1
fpath = os.path.join( inDir, 'OUTCAR')
scanner = ScanOutcar.ScanOutcar( bugLev, fpath)
smap = scanner.scan()
# Compare rmap and smap, key for key
rkeys = rmap.__dict__.keys()
rkeys.sort()
skeys = smap.__dict__.keys()
skeys.sort()
irr = 0 # index into rkeys
iss = 0 # index into skeys
while True:
if irr >= len( rkeys) and iss >= len( skeys): break
elif irr >= len( rkeys):
print '\nTesta: Unique S:'
print 'skey: %s val: %s' % (skeys[iss], smap.__dict__[skeys[iss]],)
iss += 1
elif iss >= len( skeys):
print '\nTesta: Unique R:'
print 'rkey: %s val: %s' % (rkeys[irr], rmap.__dict__[rkeys[irr]],)
irr += 1
else:
rkey = rkeys[irr]
skey = skeys[iss]
rval = rmap.__dict__[rkey]
sval = smap.__dict__[skey]
if rkey == skey:
epsilon = 5.e-5
compMsg = deepCompare( epsilon, rval, sval)
if compMsg == None:
print '\nTesta: Match:'
print 'rkey: %s val: %s' % (rkey, rval,)
else:
print '\nTesta: Mismatch: %s' % (compMsg,)
print 'rkey: %s val: %s' % (rkey, rval,)
print 'skey: %s val: %s' % (skey, sval,)
irr += 1
iss += 1
elif rkey < skey:
print '\nTesta: Unique R:'
print 'rkey: %s val: %s' % (rkey, rval,)
irr += 1
else:
print '\nTesta: Unique S:'
print 'skey: %s val: %s' % (skey, sval)
iss += 1
#====================================================================
def fixType( val):
tpa = type(val).__name__
if tpa == 'float64': val = float( val)
elif tpa == 'int64': val = int( val)
elif tpa == 'string_': val = str( val)
elif tpa == 'list': val = np.array( val)
return val
#====================================================================
def deepCompare( epsilon, va, vb):
res = None
va = fixType( va)
vb = fixType( vb)
tpa = type(va).__name__
tpb = type(vb).__name__
if tpa != tpb:
res = 'types differ: %s vs %s' % ( tpa, tpb,)
elif tpa == 'list':
if len(va) != len(vb):
res = 'len mismatch: %d vs %d' % (len(va), len(vb),)
else:
for ii in range(len(va)):
res = deepCompare( epsilon, va[ii], vb[ii])
if res != None: break
elif tpa == 'ndarray':
if va.shape != vb.shape:
res = 'shape mismatch: %s vs %s' % (va.shape, vb.shape,)
else:
fa = va.flatten()
fb = vb.flatten()
for ii in range( fa.size):
res = deepCompare( epsilon, fa[ii], fb[ii])
if res != None: break
elif type(va).__name__ == 'float':
if abs( va - vb) > epsilon:
res = 'float value mismatch: %g vs %g vb-va: %g (vb-va)/va: %g' \
% (va, vb, vb - va, (vb - va) / va)
elif type(va).__name__ in [ 'datetime', 'int', 'str']:
if va != vb:
res = 'scalar value mismatch: %s vs %s' % (repr(va), repr(vb),)
else: throwerr('unknown type: %s' % (type(va).__name__,))
return res
#====================================================================
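# Illustrative examples (added; not from the original code) of deepCompare():
#   deepCompare(1e-5, 3.0, 3.0 + 1e-7) -> None (match within epsilon)
#   deepCompare(1e-5, 3.0, 3.1) -> 'float value mismatch: ...'
#   deepCompare(1e-5, [1, 2], [1, 2, 3]) -> 'shape mismatch: (2,) vs (3,)'
#                                           (lists become ndarrays in fixType)
#   deepCompare(1e-5, 'abc', 3) -> 'types differ: str vs int'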
def throwerr( msg):
fullMsg = '%s\n' % (msg,)
raise Exception( fullMsg)
#====================================================================
if __name__ == '__main__': main()
#====================================================================
|
ssullivangh/nrelmat
|
nrelmat/ScanOutcarTesta.py
|
Python
|
gpl-3.0
| 5,092
|
[
"VASP"
] |
93532cc2d6b1ca9a80f094e4939eac746f3e4e8554f49084615964a0c627d158
|
"""
Quantum dynamics simulation with Crank Nicolson method
"""
from QD.dynamics import CrankNicolson
import numpy as np
# Parameters
a = 0.5 # Spatial resolution
L = 100 # Domain size
sigma = 9.5 # Wave function shape
k = 0.8*(2*np.pi)/a # Wave vector
mu = 15 # Wave function position
n = 3
# Time evolution
tau = 0.1
duration = 140
particle = CrankNicolson(a,L,"gaussian",sigma,k,mu) # Initialize particle
particle.potential("rectangular barrier",40,50,2) # Initialize potential
particle.timeEvolution(tau,duration) # Start time evolution of particle
particle.animate(saveAnimation=True) # Plot or save animation video
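
# Background note (added for clarity; the actual update is implemented inside
# QD.dynamics.CrankNicolson): Crank-Nicolson advances the wave function psi by
# solving the implicit linear system
#
#   (1 + i*tau*H/2) psi_{n+1} = (1 - i*tau*H/2) psi_n        (hbar = 1)
#
# where H is the discretised Hamiltonian. A dense NumPy sketch of a single
# step, assuming a Hamiltonian matrix H and state vector psi are available:
#
#   A = np.eye(H.shape[0]) + 0.5j * tau * H
#   B = np.eye(H.shape[0]) - 0.5j * tau * H
#   psi = np.linalg.solve(A, np.dot(B, psi))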
|
dbouman1/iccp-assignment-3
|
main.py
|
Python
|
mit
| 740
|
[
"Gaussian"
] |
2415ff3c60d787c438e47703def3d856aa1551dbe97380d506c726149bf64b61
|
#!/usr/bin/env python
#
# Buildfarm client for PostgreSQL and Greenplum
#
# written by: Andreas Scherbaum <ascherbaum@pivotal.io>
# Andreas Scherbaum <ads@pgug.de>
#
import re
import os
import sys
import logging
import tempfile
import atexit
import shutil
import time
import subprocess
from subprocess import Popen
import socket
import sqlite3
import datetime
from time import gmtime, localtime, strftime
# config functions
from config import Config
# repository functions
from repository import Repository
from build import Build
from patch import Patch
from database import Database
from buildfarm import Buildfarm
import copy
# start with 'info', can be overridden by '-q' later on
logging.basicConfig(level = logging.INFO,
format = '%(levelname)s: %(message)s')
# exit_handler()
#
# exit handler, called upon exit of the script
# main job: remove the temp directory
#
# parameters:
# none
# return:
# none
def exit_handler():
# do something in the end ...
pass
# register exit handler
atexit.register(exit_handler)
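
# Illustrative sketch (added, not active code): if this client created a
# temporary working directory, the exit handler above would typically remove
# it. 'tmpdir' below is a hypothetical variable, not one defined in this file:
#
#   tmpdir = tempfile.mkdtemp(prefix='buildfarm-')
#
#   def exit_handler():
#       if os.path.isdir(tmpdir):
#           shutil.rmtree(tmpdir, ignore_errors=True)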
#######################################################################
# main code
config = Config()
config.parse_parameters()
config.load_config()
config.build_and_verify_config()
config.cleanup_old_dirs_and_files()
database = Database(config)
all_log_data = database.init_dataset()
# FIXME: provide option for a user-defined cluster setup
# hostfile and gpinitsystem file
# FIXME: optional clone from the original repository, not creating a detached copy
# FIXME: Orca (for Greenplum only)
# FIXME: random ports for regression tests
# FIXME: check if socket files still exist (any of them) after the test, emit a warning
# FIXME: files changed since last run
# FIXME: revisions since last run
# FIXME: delete log entry, entries (1, 2, 3, 4-6)
# FIXME: delete job entries
# FIXME: catch if the directory for the lockfile is not writable, or does not exist
# add more env keys, see run_build.pl (around line 60)
# run_extra_targets
# test_locales
#######################################################################
# list results of previous runs, in compact mode
if (config.get('list-results') is True):
data = database.fetch_all_from_build_status()
print("")
if (data is None or len(data) == 0):
print("No previous records in database")
print("")
sys.exit(0)
if (len(data) > 1):
print("" + str(len(data)) + " records found")
else:
print("1 record found")
print("")
for i in data:
tmp_id = i['id']
tmp_repository = i['repository']
tmp_repository_type = i['repository_type']
tmp_branch = i['branch']
tmp_revision = i['revision']
tmp_start_time_local = i['start_time_local']
status = []
if (i['is_head'] == 1):
status.append('head')
if (i['is_buildfarm'] == 1):
status.append('buildfarm')
if (i['orca'] == 1):
status.append('orca')
if (i['extra_patches'] == 1):
status.append('patches')
if (i['run_git_update'] == 1):
status.append('update')
if (i['run_configure'] == 1):
status.append('configure')
if (i['run_make'] == 1):
status.append('make')
if (i['run_install'] == 1):
status.append('install')
if (i['run_tests'] == 1):
status.append('tests')
if (i['run_extra_targets'] == 1):
status.append('extra targets')
if (len(i['test_locales']) > 0):
status.append('locales')
error = 'OK'
if (i['run_git_update'] == 1 and i['result_git_update'] > 0):
error = 'ERROR'
if (i['run_configure'] == 1 and i['result_configure'] > 0):
error = 'ERROR'
if (i['run_make'] == 1 and i['result_make'] > 0):
error = 'ERROR'
if (i['run_install'] == 1 and i['result_install'] > 0):
error = 'ERROR'
if (i['run_tests'] == 1 and i['result_tests'] > 0):
error = 'ERROR'
if (i['result_portcheck'] is not None and i['result_portcheck'] > 0):
error = 'ERROR'
if (i['result_portcheck'] is None and i['is_buildfarm'] == 1 and i['run_configure'] == 1):
# buildfarm mode requires running regression tests
error = 'ERROR'
if (i['result_portcheck'] is None and i['run_tests'] == 1):
# running regression tests requires running portcheck
error = 'ERROR'
if (tmp_repository_type is None):
tmp_repository_type = '?'
if (tmp_revision is None):
tmp_revision = '?'
if (tmp_branch is None):
tmp_branch = '?'
print("{0:5d}: {1:s} {2:5s} {3:s} / {4:s} ({5:s}) ({6:s} / {7:s})".format(tmp_id, tmp_start_time_local.replace('_', ' '), error, str(tmp_branch), str(tmp_revision), '/'.join(status), tmp_repository, tmp_repository_type))
print("")
sys.exit(0)
#######################################################################
# show full result of a previous run
if (len(config.get('show-result')) > 0):
if (config.get('show-result') == 'latest' or config.get('show-result') == 'last'):
logging.debug("looking up latest result")
data = database.fetch_last_build_status_id()
if (str(data['id']) == 'not set'):
logging.error("No entries in database!")
sys.exit(1)
logging.debug("last result has id: " + str(data['id']))
config.set('show-result', str(data['id']))
logging.debug("show one result: " + config.get('show-result'))
try:
id = int(config.get('show-result'))
except ValueError:
logging.error("Not a number: " + str(config.get('show-result')))
sys.exit(1)
if (id <= 0):
logging.error("Invalid ID: " + str(id))
sys.exit(1)
data = database.fetch_specific_build_status(id)
if (data is None or len(data) == 0):
print("")
print("Error: record '" + config.get('show-result') + "' does not exist!")
print("")
sys.exit(1)
# first handle all special flags, in case only one data value is to show
if (config.get('show-id') is True):
print(str(data['id']))
sys.exit(0)
if (config.get('show-repository') is True):
print(str(data['repository']))
sys.exit(0)
if (config.get('show-branch') is True):
print(str(data['branch']))
sys.exit(0)
if (config.get('show-revision') is True):
print(str(data['revision']))
sys.exit(0)
if (config.get('show-build-dir') is True):
if ('build_dir' in data):
print(str(data['build_dir']))
sys.exit(0)
else:
print("")
print("Error: build dir is not available")
print("")
sys.exit(1)
if (config.get('show-install-dir') is True):
if ('install_dir' in data):
print(str(data['install_dir']))
sys.exit(0)
else:
print("")
print("Error: install dir is not available")
print("")
sys.exit(1)
# start regular output here
print("")
print("{:>17}: {:s}".format("ID", str(data['id'])))
print("{:>17}: {:s}".format("Time", str(data['start_time'])))
print("{:>17}: {:s}".format("Time", str(data['start_time_local'].replace('_', ' '))))
print("")
print("{:>17}: {:s}".format("Repository", str(data['repository'])))
if (data['repository_type'] is None):
print("{:>17}: {:s}".format("Repository Type", 'n/a'))
else:
print("{:>17}: {:s}".format("Repository Type", str(data['repository_type'])))
if (data['branch'] is None):
print("{:>17}: {:s}".format("Branch", 'n/a'))
else:
print("{:>17}: {:s}".format("Branch", str(data['branch'])))
if (data['revision'] is None):
print("{:>17}: {:s}".format("Revision", 'n/a'))
else:
print("{:>17}: {:s}".format("Revision", str(data['revision'])))
if (data['is_head'] is None):
print("{:>17}: {:s}".format("is HEAD", 'n/a'))
elif (data['is_head'] == 1):
print("{:>17}: {:s}".format("is HEAD", "yes"))
else:
print("{:>17}: {:s}".format("is HEAD", "no"))
if (data['orca'] == 1):
print("{:>17}: {:s}".format("Orca", "yes"))
#elif (data['orca'] is None):
# print("{:>17}: {:s}".format("Orca", 'n/a'))
#else:
# print("{:>17}: {:s}".format("Orca", "no"))
print("")
print("{:>17}: {:s}".format("Run git update", str(data['run_git_update'])))
print("{:>17}: {:s}".format("Run configure", str(data['run_configure'])))
print("{:>17}: {:s}".format("Run make", str(data['run_make'])))
print("{:>17}: {:s}".format("Run install", str(data['run_install'])))
print("{:>17}: {:s}".format("Run tests", str(data['run_tests'])))
if (len(str(data['run_extra_targets'])) > 0):
print("{:>17}: {:s}".format("Run extra targets", str(data['run_extra_targets'])))
if (len(str(data['extra_patches'])) > 0):
print("{:>17}: {:s}".format("Extra patches", str(data['extra_patches'])))
print("")
if (data['result_portcheck'] is None):
print("{:>17}: {:s}".format("Result portcheck", 'n/a'))
elif (data['result_portcheck'] == 0):
print("{:>17}: {:s}".format("Result portcheck", 'OK'))
else:
print("{:>17}: {:s}".format("Result portcheck", 'Error'))
if (data['result_git_update'] is None):
print("{:>17}: {:s}".format("Result git update", 'n/a'))
elif (data['result_git_update'] == 0):
print("{:>17}: {:s}".format("Result git update", 'OK'))
else:
print("{:>17}: {:s}".format("Result git update", str(data['result_git_update'])))
if (data['result_configure'] is None):
print("{:>17}: {:s}".format("Result configure", 'n/a'))
elif (data['result_configure'] == 0):
print("{:>17}: {:s}".format("Result configure", 'OK'))
else:
print("{:>17}: {:s}".format("Result configure", str(data['result_configure'])))
if (data['result_make'] is None):
print("{:>17}: {:s}".format("Result make", 'n/a'))
elif (data['result_make'] == 0):
print("{:>17}: {:s}".format("Result make", 'OK'))
else:
print("{:>17}: {:s}".format("Result make", str(data['result_make'])))
if (data['result_install'] is None):
print("{:>17}: {:s}".format("Result install", 'n/a'))
elif (data['result_install'] == 0):
print("{:>17}: {:s}".format("Result install", 'OK'))
else:
print("{:>17}: {:s}".format("Result install", str(data['result_install'])))
if (data['result_tests'] is None):
print("{:>17}: {:s}".format("Result tests", 'n/a'))
elif (data['result_tests'] == 0):
print("{:>17}: {:s}".format("Result tests", 'OK'))
else:
print("{:>17}: {:s}".format("Result tests", str(data['result_tests'])))
print("")
print("{:>17}: {:s}".format("Time git update", str(data['time_git_update'])))
print("{:>17}: {:s}".format("Time configure", str(data['time_configure'])))
print("{:>17}: {:s}".format("Time make", str(data['time_make'])))
print("{:>17}: {:s}".format("Time install", str(data['time_install'])))
print("{:>17}: {:s}".format("Time tests", str(data['time_tests'])))
print("")
print("{:>17}: {:s}".format("Extra configure", str(data['extra_configure'])))
print("{:>17}: {:s}".format("Extra make", str(data['extra_make'])))
print("{:>17}: {:s}".format("Extra install", str(data['extra_install'])))
print("{:>17}: {:s}".format("Extra tests", str(data['extra_tests'])))
# list additional patches
if (len(str(data['patches'])) > 0):
print("")
patches = str(data['patches']).split('|')
for patch in patches:
print("{:>17}: {:s}".format("Extra patch", patch))
# list locales
if (len(str(data['test_locales'])) > 0):
print("{:>17}: {:s}".format("Locales", str(data['test_locales'])))
if (len(data['errorstr']) > 0):
print("")
print("{:>17}: {:s}".format("Error", data['errorstr']))
print("")
sys.exit(0)
#######################################################################
# show the list of pending or finished jobs, then exit
if (config.get('list-jobs') is True or config.get('list-all-jobs') is True):
if (config.get('list-jobs') is True):
jobs = database.list_pending_buildfarm_jobs()
elif (config.get('list-all-jobs') is True):
jobs = database.list_all_buildfarm_jobs()
else:
logging.error("internal error")
sys.exit(1)
if (jobs is None or len(jobs) == 0):
print("")
if (config.get('list-jobs') is True):
print("No pending buildfarm jobs!")
if (config.get('list-all-jobs') is True):
print("No buildfarm jobs!")
print("")
sys.exit(0)
for job in jobs:
print("")
print("{:>13}: {:s}".format("ID", str(job['id'])))
if (config.get('list-all-jobs') is True):
if (job['finished'] == 1):
print("{:>13}: {:s}".format("pending", 'no'))
else:
print("{:>13}: {:s}".format("pending", 'yes'))
time_added = time.strftime("%Y-%m-%d %H:%M", time.localtime(int(job['added_ts'])))
print("{:>13}: {:s}".format("Time added", str(time_added)))
if (job['executed_ts'] > 0):
time_executed = time.strftime("%Y-%m-%d %H:%M", time.localtime(int(job['executed_ts'])))
print("{:>13}: {:s}".format("Time executed", str(time_executed)))
print("{:>13}: {:s}".format("Repository", str(job['repository'])))
print("{:>13}: {:s}".format("Branch", str(job['branch'])))
print("{:>13}: {:s}".format("Revision", str(job['revision'])))
if (job['is_head'] == 1):
print("{:>13}: {:s}".format("is HEAD", "yes"))
else:
print("{:>13}: {:s}".format("is HEAD", "no"))
if (job['orca'] == 1):
print("{:>13}: {:s}".format("Orca", "yes"))
if (len(job['extra_configure']) > 0):
print("{:>13}: {:s}".format("extra configure", str(job['extra_configure'])))
if (len(job['extra_make']) > 0):
print("{:>13}: {:s}".format("extra make", str(job['extra_make'])))
if (len(job['extra_install']) > 0):
print("{:>13}: {:s}".format("extra install", str(job['extra_install'])))
if (len(job['extra_tests']) > 0):
print("{:>13}: {:s}".format("extra tests", str(job['extra_tests'])))
if (len(job['run_extra_targets']) > 0):
print("{:>13}: {:s}".format("extra targets", str(job['run_extra_targets'])))
if (len(job['test_locales']) > 0):
print("{:>13}: {:s}".format("locales", str(job['test_locales'])))
print("")
sys.exit(0)
#######################################################################
# buildfarm mode, requeue a finished or pending job
if (len(config.get('requeue-job')) > 0):
logging.debug("requeue buildfarm job: " + config.get('requeue-job'))
try:
id = int(config.get('requeue-job'))
except ValueError:
logging.error("Not a number: " + str(config.get('requeue-job')))
sys.exit(1)
if (id <= 0):
logging.error("Invalid ID: " + str(id))
sys.exit(1)
data = database.fetch_specific_buildfarm_job(id)
if (data is None or len(data) == 0):
print("")
print("Record '" + config.get('requeue-job') + "' does not exist!")
print("")
sys.exit(1)
database.update_buildfarm_job_requeued(id)
print("")
print("Job '" + config.get('requeue-job') + "' requeued")
print("")
sys.exit(0)
#######################################################################
# buildfarm mode, create jobs
if (config.get('buildfarm') is True):
logging.debug("buildfarm mode: create new jobs")
if (len(config.get('repository-url')) == 0):
logging.error("Error: No repository url specified")
sys.exit(1)
log_data = copy.deepcopy(all_log_data)
log_data['is_buildfarm'] = True
log_data['repository'] = config.get('repository-url')
log_data['start_time'] = int(time.time())
current_time = time.strftime("%Y-%m-%d_%H%M%S", time.localtime(log_data['start_time']))
log_data['start_time_local'] = current_time
# from here on, only one repository is possible
# create a repository instance
repository = Repository(config, database, config.get('repository-url'), config.get('cache-dir'))
repository.handle_update(True, log_data)
# from here on a local copy of the repository is available
# create a list of all jobs
jobs = []
for branch in config.get('build-branch'):
job = {}
job['added_ts'] = int(time.time())
job['repository'] = config.get('repository-url')
job['repository_type'] = repository.repository_type
job['branch'] = branch
if (config.get('build-revision') == 'HEAD'):
job['revision'] = repository.repository_head(repository.full_path, branch)
job['is_head'] = True
else:
job['revision'] = config.get('build-revision')
job['is_head'] = False
job['extra-configure'] = config.get('extra-configure')
job['extra-make'] = config.get('extra-make')
job['extra-install'] = config.get('extra-install')
job['extra-tests'] = config.get('extra-tests')
job['run-extra-targets'] = config.get('test-extra-targets')
job['test-locales'] = config.get('test-locales')
# create one job with Orca=off in any case, just to ensure that we test this case
job['orca'] = False
jobs.append(job)
if (config.get('enable-orca') is True):
# if Orca is enabled, create another job with Orca=on
            # use a copy so the Orca=off job created above is left unchanged
            job_orca = copy.deepcopy(job)
            job_orca['orca'] = True
            jobs.append(job_orca)
# figure out if this combination was built before
# this only checks if this combination is in the job table for the buildfarm
# it does not take into account if the job is already finished
for job in jobs:
if (database.buildfarm_job_exists(job['repository'], job['branch'], job['revision'], job['extra-configure'],
job['extra-make'], job['extra-install'], job['extra-tests'],
job['run-extra-targets'], job['test-locales'],
orca = job['orca']) is False):
# not found, add this job to the queue
logging.info("add to buildfarm queue: " + job['branch'] + " / " + job['revision'])
database.add_bildfarm_job(job)
# write log entry into database
database.log_build(log_data)
if (config.get('add-jobs-only') is True):
logging.debug("only add new jobs, exit")
sys.exit(0)
#######################################################################
# buildfarm mode, execute jobs
if (config.get('buildfarm') is True):
logging.debug("buildfarm mode: execute pending jobs")
stats_jobs_executed = 0
stats_jobs_successful = 0
stats_jobs_delayed = 0
while True:
# loop until the job table has no more pending entries
# it is possible that --add-jobs-only adds more jobs while this here is running
jobs = database.list_pending_buildfarm_jobs()
if (len(jobs) == 0):
logging.info("no pending jobs")
break
# note: from here on, every job can have a different repository
job_number = 0
for job in jobs:
log_data = copy.deepcopy(all_log_data)
job_number += 1
stats_jobs_executed += 1
logging.debug("run buildfarm job: " + str(job['id']) + " (" + str(job_number) + " out of " + str(len(jobs)) + ")")
# a local copy of the repository was created when the job was created
# handle_update(False, ...) will ensure that the directory is still there
log_data['repository'] = job['repository']
log_data['branch'] = job['branch']
# the correct revision was extracted when the job was created
            # it does not necessarily mean that it is still the HEAD of the branch
log_data['revision'] = job['revision']
# copy this flag from the job, for logging purposes
log_data['is_head'] = job['is_head']
log_data['orca'] = job['orca']
log_data['extra_configure'] = job['extra_configure']
log_data['extra_make'] = job['extra_make']
log_data['extra_install'] = job['extra_install']
log_data['extra_tests'] = job['extra_tests']
log_data['run_extra_targets'] = job['run_extra_targets']
log_data['test_locales'] = job['test_locales']
log_data['start_time'] = int(time.time())
current_time = time.strftime("%Y-%m-%d_%H%M%S", time.localtime(log_data['start_time']))
log_data['start_time_local'] = current_time
log_data['is_buildfarm'] = True
# create a repository instance
repository = Repository(config, database, log_data['repository'], config.get('cache-dir'))
# do not update the repository again, assume that all necessary updates were fetched during job creation
repository.handle_update(False, log_data)
# from here on a local copy of the repository is available
log_data['repository_type'] = repository.identify_repository_type(repository.full_path)
logging.info("repository: " + log_data['repository'])
if (log_data['is_head'] == 1):
logging.info("building branch/revision: " + log_data['branch'] + '/' + log_data['revision'] + ' (HEAD)')
else:
logging.info("building branch/revision: " + log_data['branch'] + '/' + log_data['revision'])
build_dir_name = str(current_time).replace('-', '') + '_bf_' + log_data['branch']
# the config module ensures that all necessary --run-* options are set
build_dir = repository.copy_repository(build_dir_name, log_data['branch'], log_data['revision'])
build = Build(config, repository, build_dir)
# test if ports for regression tests are available
if (build.portcheck(log_data['repository_type'], log_data) is True):
result_configure = build.run_configure(log_data['extra_configure'], build_dir_name, log_data)
build.add_entry_to_delete_clean(build_dir)
# FIXME: Orca
if (result_configure is True):
result_make = build.run_make(log_data['extra_make'], log_data)
if (result_make is True):
install_dir = build.run_make_install(log_data['extra_install'], log_data, log_data['extra_make'])
if (install_dir is not False):
build.add_entry_to_delete_clean(install_dir)
if (install_dir is not False):
result_tests = build.run_tests(log_data['extra_tests'], log_data)
if (result_tests is not False):
stats_jobs_successful += 1
# mark job as finished, regardless of the result
database.update_buildfarm_job_finished(job['id'], log_data['start_time'])
else:
# mark job as delayed
stats_jobs_delayed += 1
database.update_buildfarm_job_delayed(job['id'], log_data['start_time'])
# write log entry into database
database.log_build(log_data)
# gather data for buildfarm website
buildfarm = Buildfarm(config, repository, build_dir, database)
buildfarm.send_results(log_data)
if (stats_jobs_executed > 0):
logging.info(" jobs executed: " + str(stats_jobs_executed))
if (stats_jobs_successful > 0):
logging.info("jobs successful: " + str(stats_jobs_successful))
if ((stats_jobs_executed - stats_jobs_successful - stats_jobs_delayed) > 0):
logging.info(" jobs failed: " + str(stats_jobs_executed - stats_jobs_successful - stats_jobs_delayed))
if (stats_jobs_delayed > 0):
logging.info(" jobs delayed: " + str(stats_jobs_delayed))
sys.exit(0)
#######################################################################
# manual mode
for branch in config.get('build-branch'):
log_data = copy.deepcopy(all_log_data)
log_data['repository'] = config.get('repository-url')
log_data['branch'] = branch
log_data['start_time'] = int(time.time())
current_time = time.strftime("%Y-%m-%d_%H%M%S", time.localtime(log_data['start_time']))
log_data['start_time_local'] = current_time
log_data['is_buildfarm'] = False
if (config.get('build-revision') == 'HEAD'):
log_data['is_head'] = True
else:
log_data['is_head'] = False
# create a repository instance
repository = Repository(config, database, config.get('repository-url'), config.get('cache-dir'))
repository.handle_update(config.get('run-update'), log_data)
# from here on a local copy of the repository is available
log_data['repository_type'] = repository.identify_repository_type(repository.full_path)
if (config.get('build-revision') == 'HEAD'):
head = repository.repository_head(repository.full_path, branch)
logging.info("branch/revision: " + branch + '/' + head + ' (' + config.get('build-revision') + ')')
log_data['revision'] = head
else:
logging.info("branch/revision: " + branch + '/' + config.get('build-revision'))
log_data['revision'] = config.get('build-revision')
# use the current timestamp and the branch name as build dir name
build_dir_name = str(current_time) + '_' + branch
if (config.get('build-revision') != 'HEAD'):
# add the revision name, if it's not HEAD
build_dir_name += '_' + config.get('build-revision')
if (config.get('run-configure') is True):
# create "Patch" instance before creating the repository
# https://github.com/andreasscherbaum/buildfarm-client/issues/1
patch = Patch(config.get('patch'), config, repository, None, config.get('cache-dir'))
if (patch.have_patches() is True):
# retrieve all patches
result_retrieve_patches = patch.retrieve_patches()
log_data['patches'] = '|'.join(patch.patches)
if (result_retrieve_patches is False):
# retrieving patches failed, don't bother with the rest of the job
# continue with next branch in list
# don't care about logging, this is manual mode
continue
build_dir = repository.copy_repository(build_dir_name, branch, config.get('build-revision'))
# the "Patch" instance is initialized without the build_dir information
patch.set_build_dir(build_dir)
log_data['build_dir'] = build_dir
if (patch.have_patches() is True):
# patches are already retrieved - error is checked above
result_apply_patches = patch.apply_patches()
if (result_apply_patches is False):
# continue with next branch in list
# don't care about logging, this is manual mode
continue
build = Build(config, repository, build_dir)
if (patch.have_patches() is True):
log_data['extra_patches'] = True
patch.remove_patches_after_build(build)
# test if ports for regression tests are available
# only check if regression tests will run later
if (config.get('run-tests') is False or (config.get('run-tests') is True and build.portcheck(log_data['repository_type'], log_data) is True)):
result_configure = build.run_configure(config.get('extra-configure'), build_dir_name, log_data)
build.add_entry_to_delete_clean(build_dir)
# FIXME: Orca
if (result_configure is True and config.get('run-make') is True):
result_make = build.run_make(config.get('extra-make'), log_data)
if (result_make is True and config.get('run-install') is True):
install_dir = build.run_make_install(config.get('extra-install'), log_data, config.get('extra-make'))
if (install_dir is not False):
build.add_entry_to_delete_clean(install_dir)
log_data['install_dir'] = install_dir
if (install_dir is not False and config.get('run-tests') is True):
result_tests = build.run_tests(config.get('extra-tests'), log_data)
# write log entry into database
database.log_build(log_data)
sys.exit(0)
|
andreasscherbaum/buildfarm-client
|
buildclient.py
|
Python
|
bsd-3-clause
| 29,338
|
[
"ORCA"
] |
654aa98f295d0e02688d42052643c3349d43f0ba26b04b872d32e764eeee0630
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
import numpy as np
from unittest import TestCase
from zoo.orca.automl.auto_estimator import AutoEstimator
import pytest
def model_creator(config):
model = tf.keras.models.Sequential([tf.keras.layers.Dense(config["hidden_size"],
input_shape=(1,)),
tf.keras.layers.Dense(1)])
model.compile(loss="mse",
optimizer=tf.keras.optimizers.SGD(config["lr"]),
metrics=["mse"])
return model
def get_train_val_data():
def get_x_y(size):
x = np.random.rand(size)
y = x / 2
x = x.reshape((-1, 1))
y = y.reshape((-1, 1))
return x, y
data = get_x_y(size=1000)
validation_data = get_x_y(size=400)
return data, validation_data
def create_linear_search_space():
from zoo.orca.automl import hp
return {
"hidden_size": hp.choice([5, 10]),
"lr": hp.choice([0.001, 0.003, 0.01]),
"batch_size": hp.choice([32, 64])
}
class TestTFKerasAutoEstimator(TestCase):
def setUp(self) -> None:
from zoo.orca import init_orca_context
init_orca_context(cores=4, init_ray_on_spark=True)
def tearDown(self) -> None:
from zoo.orca import stop_orca_context
stop_orca_context()
def test_fit(self):
auto_est = AutoEstimator.from_keras(model_creator=model_creator,
logs_dir="/tmp/zoo_automl_logs",
resources_per_trial={"cpu": 2},
name="test_fit")
data, validation_data = get_train_val_data()
auto_est.fit(data=data,
validation_data=validation_data,
search_space=create_linear_search_space(),
n_sampling=2,
epochs=1,
metric="mse")
assert auto_est.get_best_model()
best_config = auto_est.get_best_config()
assert "hidden_size" in best_config
assert all(k in best_config.keys() for k in create_linear_search_space().keys())
def test_fit_multiple_times(self):
auto_est = AutoEstimator.from_keras(model_creator=model_creator,
logs_dir="/tmp/zoo_automl_logs",
resources_per_trial={"cpu": 2},
name="test_fit")
data, validation_data = get_train_val_data()
auto_est.fit(data=data,
validation_data=validation_data,
search_space=create_linear_search_space(),
n_sampling=2,
epochs=1,
metric="mse")
with pytest.raises(RuntimeError):
auto_est.fit(data=data,
validation_data=validation_data,
search_space=create_linear_search_space(),
n_sampling=2,
epochs=1,
metric="mse")
def test_fit_metric_func(self):
auto_est = AutoEstimator.from_keras(model_creator=model_creator,
logs_dir="/tmp/zoo_automl_logs",
resources_per_trial={"cpu": 2},
name="test_fit")
data, validation_data = get_train_val_data()
def pyrmsle(y_true, y_pred):
y_pred[y_pred < -1] = -1 + 1e-6
elements = np.power(np.log1p(y_true) - np.log1p(y_pred), 2)
return float(np.sqrt(np.sum(elements) / len(y_true)))
with pytest.raises(ValueError) as exeinfo:
auto_est.fit(data=data,
validation_data=validation_data,
search_space=create_linear_search_space(),
n_sampling=2,
epochs=1,
metric=pyrmsle)
assert "metric_mode" in str(exeinfo)
auto_est.fit(data=data,
validation_data=validation_data,
search_space=create_linear_search_space(),
n_sampling=2,
epochs=1,
metric=pyrmsle,
metric_mode="min")
if __name__ == "__main__":
pytest.main([__file__])
|
intel-analytics/analytics-zoo
|
pyzoo/test/zoo/orca/automl/autoestimator/test_autoestimator_keras.py
|
Python
|
apache-2.0
| 5,054
|
[
"ORCA"
] |
72ae90f60556ed9e3ac46746b60f652a244a76ad4348b620c92234c21af919f8
|
# -*- coding: utf-8 -*-
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import os
import re
import string
from socket import inet_pton, AF_INET6, error as socket_error
from viper.common.abstracts import Module
from viper.common.objects import File
from viper.core.session import __sessions__
from viper.core.database import Database
from viper.core.storage import get_sample_path
DOMAIN_REGEX = re.compile('([a-z0-9][a-z0-9\-]{0,61}[a-z0-9]\.)+[a-z0-9][a-z0-9\-]*[a-z0-9]', re.IGNORECASE)
IPV4_REGEX = re.compile('[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]')
IPV6_REGEX = re.compile('((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}'
'|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9'
'A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25['
'0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3'
'})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|['
'1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,'
'4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:'
'))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-'
'5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]'
'{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d'
'\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7}'
')|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d'
'\d|[1-9]?\d)){3}))|:)))(%.+)?', re.IGNORECASE | re.S)
PDB_REGEX = re.compile('\.pdb$', re.IGNORECASE)
URL_REGEX = re.compile('http(s){0,1}://', re.IGNORECASE)
GET_POST_REGEX = re.compile('(GET|POST) ')
HOST_REGEX = re.compile('Host: ')
USERAGENT_REGEX = re.compile('(Mozilla|curl|Wget|Opera)/.+\(.+\;.+\)', re.IGNORECASE)
EMAIL_REGEX = re.compile('[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,}', re.IGNORECASE)
REGKEY_REGEX = re.compile('(HKEY_CLASSES_ROOT|HKEY_CURRENT_USER|HKEY_LOCAL_MACHINE|HKEY_USERS|HKEY_CURRENT_CONFIG|HKCR|HKCU|HKLM|HKU|HKCC)(/|\x5c\x5c)', re.IGNORECASE)
REGKEY2_REGEX = re.compile('(CurrentVersion|Software\\\Microsoft|Windows NT|Microsoft\\\Interface)')
FILE_REGEX = re.compile('\w+\.(EXE|DLL|BAT|PS|INI|PIF|SCR|DOC|DOCX|DOCM|PPT|PPTX|PPTS|XLS|XLT|XLSX|XLTX|XLSM|XLTM|ZIP|RAR)$', re.U | re.IGNORECASE)
TLD = [
'AC', 'ACADEMY', 'ACTOR', 'AD', 'AE', 'AERO', 'AF', 'AG', 'AGENCY', 'AI', 'AL', 'AM', 'AN', 'AO', 'AQ', 'AR',
'ARPA', 'AS', 'ASIA', 'AT', 'AU', 'AW', 'AX', 'AZ', 'BA', 'BAR', 'BARGAINS', 'BB', 'BD', 'BE', 'BERLIN', 'BEST',
'BF', 'BG', 'BH', 'BI', 'BID', 'BIKE', 'BIZ', 'BJ', 'BLUE', 'BM', 'BN', 'BO', 'BOUTIQUE', 'BR', 'BS', 'BT',
'BUILD', 'BUILDERS', 'BUZZ', 'BV', 'BW', 'BY', 'BZ', 'CA', 'CAB', 'CAMERA', 'CAMP', 'CARDS', 'CAREERS', 'CAT',
'CATERING', 'CC', 'CD', 'CENTER', 'CEO', 'CF', 'CG', 'CH', 'CHEAP', 'CHRISTMAS', 'CI', 'CK', 'CL', 'CLEANING',
'CLOTHING', 'CLUB', 'CM', 'CN', 'CO', 'CODES', 'COFFEE', 'COM', 'COMMUNITY', 'COMPANY', 'COMPUTER', 'CONDOS',
'CONSTRUCTION', 'CONTRACTORS', 'COOL', 'COOP', 'CR', 'CRUISES', 'CU', 'CV', 'CW', 'CX', 'CY', 'CZ', 'DANCE',
'DATING', 'DE', 'DEMOCRAT', 'DIAMONDS', 'DIRECTORY', 'DJ', 'DK', 'DM', 'DNP', 'DO', 'DOMAINS', 'DZ', 'EC',
'EDU', 'EDUCATION', 'EE', 'EG', 'EMAIL', 'ENTERPRISES', 'EQUIPMENT', 'ER', 'ES', 'ESTATE', 'ET', 'EU', 'EVENTS',
'EXPERT', 'EXPOSED', 'FARM', 'FI', 'FISH', 'FJ', 'FK', 'FLIGHTS', 'FLORIST', 'FM', 'FO', 'FOUNDATION', 'FR',
'FUTBOL', 'GA', 'GALLERY', 'GB', 'GD', 'GE', 'GF', 'GG', 'GH', 'GI', 'GIFT', 'GL', 'GLASS', 'GM', 'GN', 'GOV',
'GP', 'GQ', 'GR', 'GRAPHICS', 'GS', 'GT', 'GU', 'GUITARS', 'GURU', 'GW', 'GY', 'HK', 'HM', 'HN', 'HOLDINGS',
'HOLIDAY', 'HOUSE', 'HR', 'HT', 'HU', 'ID', 'IE', 'IL', 'IM', 'IMMOBILIEN', 'IN', 'INDUSTRIES', 'INFO', 'INK',
'INSTITUTE', 'INT', 'INTERNATIONAL', 'IO', 'IQ', 'IR', 'IS', 'IT', 'JE', 'JM', 'JO', 'JOBS', 'JP', 'KAUFEN',
'KE', 'KG', 'KH', 'KI', 'KIM', 'KITCHEN', 'KIWI', 'KM', 'KN', 'KOELN', 'KP', 'KR', 'KRED', 'KW', 'KY', 'KZ',
'LA', 'LAND', 'LB', 'LC', 'LI', 'LIGHTING', 'LIMO', 'LINK', 'LK', 'LR', 'LS', 'LT', 'LU', 'LUXURY', 'LV', 'LY',
'MA', 'MAISON', 'MANAGEMENT', 'MANGO', 'MARKETING', 'MC', 'MD', 'ME', 'MENU', 'MG', 'MH', 'MIL', 'MK', 'ML',
'MM', 'MN', 'MO', 'MOBI', 'MODA', 'MONASH', 'MP', 'MQ', 'MR', 'MS', 'MT', 'MU', 'MUSEUM', 'MV', 'MW', 'MX',
'MY', 'MZ', 'NA', 'NAGOYA', 'NAME', 'NC', 'NE', 'NET', 'NEUSTAR', 'NF', 'NG', 'NI', 'NINJA', 'NL', 'NO', 'NP',
'NR', 'NU', 'NZ', 'OKINAWA', 'OM', 'ONION', 'ONL', 'ORG', 'PA', 'PARTNERS', 'PARTS', 'PE', 'PF', 'PG', 'PH',
'PHOTO', 'PHOTOGRAPHY', 'PHOTOS', 'PICS', 'PINK', 'PK', 'PL', 'PLUMBING', 'PM', 'PN', 'POST', 'PR', 'PRO',
'PRODUCTIONS', 'PROPERTIES', 'PS', 'PT', 'PUB', 'PW', 'PY', 'QA', 'QPON', 'RE', 'RECIPES', 'RED', 'RENTALS',
'REPAIR', 'REPORT', 'REVIEWS', 'RICH', 'RO', 'RS', 'RU', 'RUHR', 'RW', 'SA', 'SB', 'SC', 'SD', 'SE', 'SEXY',
'SG', 'SH', 'SHIKSHA', 'SHOES', 'SI', 'SINGLES', 'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SOCIAL', 'SOLAR',
'SOLUTIONS', 'SR', 'ST', 'SU', 'SUPPLIES', 'SUPPLY', 'SUPPORT', 'SV', 'SX', 'SY', 'SYSTEMS', 'SZ', 'TATTOO',
'TC', 'TD', 'TECHNOLOGY', 'TEL', 'TF', 'TG', 'TH', 'TIENDA', 'TIPS', 'TJ', 'TK', 'TL', 'TM', 'TN', 'TO',
'TODAY', 'TOKYO', 'TOOLS', 'TP', 'TR', 'TRAINING', 'TRAVEL', 'TT', 'TV', 'TW', 'TZ', 'UA', 'UG', 'UK', 'UNO',
'US', 'UY', 'UZ', 'VA', 'VACATIONS', 'VC', 'VE', 'VENTURES', 'VG', 'VI', 'VIAJES', 'VILLAS', 'VISION', 'VN',
'VOTE', 'VOTING', 'VOTO', 'VOYAGE', 'VU', 'WANG', 'WATCH', 'WED', 'WF', 'WIEN', 'WIKI', 'WORKS', 'WS',
'XN--3BST00M', 'XN--3DS443G', 'XN--3E0B707E', 'XN--45BRJ9C', 'XN--55QW42G', 'XN--55QX5D', 'XN--6FRZ82G',
'XN--6QQ986B3XL', 'XN--80AO21A', 'XN--80ASEHDB', 'XN--80ASWG', 'XN--90A3AC', 'XN--C1AVG', 'XN--CG4BKI',
'XN--CLCHC0EA0B2G2A9GCD', 'XN--D1ACJ3B', 'XN--FIQ228C5HS', 'XN--FIQ64B', 'XN--FIQS8S', 'XN--FIQZ9S',
'XN--FPCRJ9C3D', 'XN--FZC2C9E2C', 'XN--GECRJ9C', 'XN--H2BRJ9C', 'XN--I1B6B1A6A2E', 'XN--IO0A7I', 'XN--J1AMH',
'XN--J6W193G', 'XN--KPRW13D', 'XN--KPRY57D', 'XN--L1ACC', 'XN--LGBBAT1AD8J', 'XN--MGB9AWBF', 'XN--MGBA3A4F16A',
'XN--MGBAAM7A8H', 'XN--MGBAB2BD', 'XN--MGBAYH7GPA', 'XN--MGBBH1A71E', 'XN--MGBC0A9AZCG', 'XN--MGBERP4A5D4AR',
'XN--MGBX4CD0AB', 'XN--NGBC5AZD', 'XN--NQV7F', 'XN--NQV7FS00EMA', 'XN--O3CW4H', 'XN--OGBPF8FL', 'XN--P1AI',
'XN--PGBS0DH', 'XN--Q9JYB4C', 'XN--RHQV96G', 'XN--S9BRJ9C', 'XN--UNUP4Y', 'XN--WGBH1C', 'XN--WGBL6A',
'XN--XKC2AL3HYE2A', 'XN--XKC2DL3A5EE0H', 'XN--YFRO4I67O', 'XN--YGBI2AMMX', 'XN--ZFR164B', 'XXX', 'XYZ', 'YE',
'YT', 'ZA', 'ZM', 'ZONE', 'ZW']
class Strings(Module):
cmd = 'strings'
description = 'Extract strings from file'
authors = ['nex', 'Brian Wallace', 'Christophe Vandeplas']
def __init__(self):
super(Strings, self).__init__()
self.parser.add_argument('-a', '--all', action='store_true', help='Print all strings')
self.parser.add_argument('-F', '--files', action='store_true', help='Extract filenames from strings')
self.parser.add_argument('-H', '--hosts', action='store_true', help='Extract IP addresses and domains from strings')
self.parser.add_argument('-N', '--network', action='store_true', help='Extract various network related strings')
self.parser.add_argument('-I', '--interesting', action='store_true', help='Extract various interesting strings')
self.parser.add_argument('-s', '--scan', action='store_true', help='Scan all files in the project with all the scanners')
def extract_hosts(self, strings):
results = []
for entry in strings:
to_add = False
if IPV4_REGEX.search(entry):
to_add = True
elif IPV6_REGEX.search(entry):
try:
inet_pton(AF_INET6, entry)
except socket_error:
continue
else:
to_add = True
elif DOMAIN_REGEX.search(entry):
if entry[entry.rfind('.') + 1:].upper() in TLD:
to_add = True
if to_add:
if entry not in results:
results.append(entry)
return results
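
    # Illustrative example (added; not part of the module): given
    #   strings = ['10.0.0.1', 'malware.example.com', 'readme.txt', 'foo.localdomain']
    # extract_hosts() keeps '10.0.0.1' (IPv4 match) and 'malware.example.com'
    # (its TLD 'COM' is in the whitelist above); 'readme.txt' and
    # 'foo.localdomain' are dropped because 'TXT' and 'LOCALDOMAIN' are not
    # known TLDs.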
def extract_network(self, strings):
results = []
for entry in strings:
to_add = False
if URL_REGEX.search(entry):
to_add = True
if GET_POST_REGEX.search(entry):
to_add = True
if HOST_REGEX.search(entry):
to_add = True
if USERAGENT_REGEX.search(entry):
to_add = True
if EMAIL_REGEX.search(entry):
if entry[entry.rfind('.') + 1:].upper() in TLD:
to_add = True
if to_add:
if entry not in results:
results.append(entry)
return results
def extract_files(self, strings):
results = []
for entry in strings:
to_add = False
if FILE_REGEX.search(entry):
to_add = True
if to_add:
if entry not in results:
results.append(entry)
return results
def extract_interesting(self, strings):
results = []
for entry in strings:
to_add = False
if PDB_REGEX.search(entry):
to_add = True
if REGKEY_REGEX.search(entry):
to_add = True
if REGKEY2_REGEX.search(entry):
to_add = True
if to_add:
if entry not in results:
results.append(entry)
return results
def get_strings(self, f, min=4):
'''
String implementation see http://stackoverflow.com/a/17197027/6880819
Extended with Unicode support
'''
results = []
result = ""
counter = 1
wide_word = False
for c in f.data.decode('utf-8', 'ignore'):
# already have something, check if the second byte is a null
if counter == 2 and c == "\x00":
wide_word = True
counter += 1
continue
# every 2 chars we allow a 00
if wide_word and c == "\x00" and not counter % 2:
counter += 1
continue
# valid char, go to next - newlines are to be considered as the end of the string
if c in string.printable and c not in ['\n', '\r']:
result += c
counter += 1
continue
if len(result) >= min:
results.append(result)
# reset the variables
result = ""
counter = 1
wide_word = False
if len(result) >= min: # catch result at EOF
results.append(result)
return results
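
    # Illustrative example (added; not part of the module): for file data
    # b'ABCD\x00\x00notes.txt\x00h\x00i\x00d\x00e\x00' and min=4, get_strings()
    # returns ['ABCD', 'notes.txt', 'hide'] -- 'hide' is recovered as a wide
    # (UTF-16LE style) string because every second byte is a null byte.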
def process_strings(self, strings, sample_name=""):
if sample_name:
prefix = '{} - '.format(sample_name)
else:
prefix = ''
if self.args.all:
self.log('success', '{}All strings:'.format(prefix))
for entry in strings:
self.log('', entry)
if self.args.hosts:
results = self.extract_hosts(strings)
if results:
self.log('success', '{}IP addresses and domains:'.format(prefix))
for result in results:
self.log('item', result)
if self.args.network:
results = self.extract_network(strings)
if results:
self.log('success', '{}Network related:'.format(prefix))
for result in results:
self.log('item', result)
if self.args.files:
results = self.extract_files(strings)
if results:
self.log('success', '{}Filenames:'.format(prefix))
for result in results:
self.log('item', result)
if self.args.interesting:
results = self.extract_interesting(strings)
if results:
self.log('success', '{}Various interesting strings:'.format(prefix))
for result in results:
self.log('item', result)
def run(self):
super(Strings, self).run()
if self.args is None:
return
if not (self.args.all or self.args.files or self.args.hosts or self.args.network or self.args.interesting):
self.log('error', 'At least one of the parameters is required')
self.usage()
return
if self.args.scan:
db = Database()
samples = db.find(key='all')
for sample in samples:
sample_path = get_sample_path(sample.sha256)
strings = self.get_strings(File(sample_path))
self.process_strings(strings, sample.name)
else:
if not __sessions__.is_set():
self.log('error', "No open session")
return
if os.path.exists(__sessions__.current.file.path):
strings = self.get_strings(__sessions__.current.file)
self.process_strings(strings)
|
kevthehermit/viper
|
viper/modules/strings.py
|
Python
|
bsd-3-clause
| 13,867
|
[
"Brian"
] |
30417a4c80b4076553c55d4e84a0cfc021aa80c52e78b05c6342629810ed5443
|
#
# @BEGIN LICENSE
#
# QCDB: quantum chemistry common driver and databases
#
# Copyright (c) 2011-2017 The QCDB Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of QCDB.
#
# QCDB is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# QCDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with QCDB; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
## Force Python 3 print syntax, if this is python 2.X
#if sys.hexversion < 0x03000000:
from __future__ import print_function
"""Module with functions to parse the input file and convert
Psithon into standard Python. Particularly, forms psi4
module calls that access the C++ side of Psi4.
"""
import re
import os
import sys
import random
#import pubchem
#from p4xcpt import * # CU
# inputfile contents to be preserved from the processor
literals = {}
def bad_option_syntax(line):
"""Function to report bad syntax to screen and output file."""
print('Unsupported syntax:\n\n%s\n\n' % (line))
sys.exit(1)
def process_word_quotes(matchobj):
"""Function to determine if argument needs wrapping in quotes as string."""
dollar = matchobj.group(2)
val = matchobj.group(3)
if dollar:
# This is a python variable, make sure that it starts with a letter
if re.match(r'^[A-Za-z][\w]*', val):
return val
else:
print("Invalid Python variable: %s" % (val))
sys.exit(1)
elif re.match(r'^-?\d+\.?\d*(?:[Ee]-?\d+)?$', val):
# This must be a number, don't wrap it in quotes
return val
elif re.match(r'^\'.*\'$', val) or re.match(r'^\".*\"$', val):
# This is already wrapped in quotes, do nothing
return val
else:
# This must be a string
return "\"%s\"" % (val)
def quotify(string):
"""Function to wrap anything that looks like a string in quotes
and to remove leading dollar signs from python variables.
"""
# This wraps anything that looks like a string in quotes, and removes leading
# dollar signs from python variables
wordre = re.compile(r'(([$]?)([-+()*.\w\"\']+))')
string = wordre.sub(process_word_quotes, string)
return string
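
# Illustrative examples (added; not from the original source) of quotify():
#   quotify('df_basis_scf aug-cc-pvdz-ri')  ->  '"df_basis_scf" "aug-cc-pvdz-ri"'
#   quotify('$my_var')                      ->  'my_var'   (leading $ stripped)
#   quotify('2.5e-4')                       ->  '2.5e-4'   (numbers stay unquoted)
#   quotify('"6-31g*"')                     ->  '"6-31g*"' (already quoted, untouched)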
def process_option(spaces, module, key, value, line):
"""Function to process a line with set or in a set block
into global/local domain and keyword/value.
"""
module = module.upper()
key = key.upper()
value = quotify(value.strip())
if module == "GLOBALS" or module == "GLOBAL" or module == "" or module.isspace():
# If it's really a global, we need slightly different syntax
bas = check_for_basis(spaces, key, value)
bas = ''
return """%s%soptions_psi4_yo["GLOBALS"]["%s"] = %s\n""" % (bas, spaces, key, value)
else:
# It's a local option, so we need the module name in there too
return """%soptions_psi4_yo["%s"]["%s"] = %s\n""" % (spaces, module, key, value)
def check_for_basis(spaces, key, val):
"""Function (effectively transplanted from src/bin/psi4/python.cc)
to define a python function to attach basis sets to atoms of Molecule.
"""
result = ""
if "BASIS" in key:
# check the exceptions
#if key in ["BASIS_PATH", "AO_BASIS", "DUAL_BASIS"]:
if key in ["AO_BASIS", "PRINT_BASIS", "BASIS_GUESS", "DF_BASIS_GUESS", "DFT_BASIS_TOLERANCE"]:
pass
# else set the basis for all atoms.
else:
cleanbas = basname(val[1:-1]).replace('-', '') # further remove hyphens so can be function name
result += """%sif 'basisspec_psi4_yo__%s' not in globals():\n""" % (spaces, cleanbas)
result += """%s def basisspec_psi4_yo__%s(mol, role):\n""" % (spaces, cleanbas)
result += """%s mol.set_basis_all_atoms(%s, role=role)\n""" % (spaces, val)
#text += """%sbasis_dict_psi4_yo["%s"] = basisspec_psi4_yo__%s\n""" % (spaces, val[1:-1], cleanbas)
return result
def process_set_command(matchobj):
"""Function to process match of all individual ``set (module_list)
key {[value_list] or $value or value}``.
"""
result = ""
module_string = ""
if matchobj.group(2):
module_string = matchobj.group(2)
for module in module_string.split(","):
result += process_option(matchobj.group(1), module, matchobj.group(3), matchobj.group(4), matchobj.group(0))
return result
def process_set_commands(matchobj):
"""Function to process match of ``set name? { ... }``."""
spaces = matchobj.group(1)
commands = matchobj.group(3)
command_lines = re.split('\n', commands)
# Remove trailing newline from each line
    command_lines = [line.strip() for line in command_lines]
result = ""
module_string = ""
command = ""
if matchobj.group(2):
module_string = matchobj.group(2)
for module in module_string.split(","):
for line in command_lines:
# Chomp the trailing newline and accumulate
command += line
if not check_parentheses_and_brackets(command, 0):
# If the brackets don't match up, we need to move on to the next line
# and keep going, until they do match. Only then do we process the command
continue
# Ignore blank/empty lines
if not line or line.isspace():
continue
matchobj = re.match(r'^\s*(\w+)[\s=]+(.*?)$', command)
# Is the syntax correct? If so, process the line
if matchobj:
result += process_option(spaces, module, matchobj.group(1), matchobj.group(2), command)
# Reset the string
command = ""
else:
bad_option_syntax(command)
return result
def process_pubchem_command(matchobj):
"""Function to process match of ``pubchem`` in molecule block."""
string = matchobj.group(2)
if re.match(r'^\s*[0-9]+\s*$', string):
# This is just a number - must be a CID
pcobj = pubchem.PubChemObj(int(string), '', '')
try:
return pcobj.getMoleculeString()
except Exception as e:
return e.message
else:
# Search pubchem for the provided string
try:
results = pubchem.getPubChemResults(string)
except Exception as e:
return e.message
# N.B. Anything starting with PubchemError will be handled correctly by the molecule parser
# in libmints, which will just print the rest of the string and exit gracefully.
if not results:
# Nothing!
return "PubchemError\n\tNo results were found when searching PubChem for %s.\n" % (string)
elif len(results) == 1:
# There's only 1 result - use it
return results[0].getMoleculeString()
else:
# There are multiple results. Print and exit
msg = "\tPubchemError\n"
msg += "\tMultiple pubchem results were found. Replace\n\n\t\tpubchem:%s\n\n" % (string)
msg += "\twith the Chemical ID number or exact name from one of the following and re-run.\n\n"
msg += "\t Chemical ID IUPAC Name\n\n"
for result in results:
msg += "%s" % (result)
if result.name().lower() == string.lower():
#We've found an exact match!
return result.getMoleculeString()
return msg
def process_molecule_command(matchobj):
"""Function to process match of ``molecule name? { ... }``."""
spaces = matchobj.group(1)
name = matchobj.group(2)
geometry = matchobj.group(3)
pubchemre = re.compile(r'^(\s*pubchem\s*:\s*(.*)\n)$', re.MULTILINE | re.IGNORECASE)
geometry = pubchemre.sub(process_pubchem_command, geometry)
molecule = spaces
if name != "":
molecule += '%s = ' % (name)
molecule += 'geometry("""%s"""' % (geometry)
if name != "":
molecule += ',"%s"' % (name)
molecule += ")\n"
molecule += '%spsi4.IO.set_default_namespace("%s")' % (spaces, name)
molecule = ''
if name != '':
molecule += """%s = """ % (name)
molecule += 'qcdb.Molecule("""\n%s\n""")\n' % (geometry)
if name != '':
molecule += """%s.set_name('%s')\n""" % (name, name)
return molecule
def process_literal_blocks(matchobj):
"""Function to process match of ``literals_psi4_yo-...``."""
return literals[matchobj.group(1)]
def process_cfour_command(matchobj):
"""Function to process match of ``cfour name? { ... }``."""
spaces = matchobj.group(1)
name = matchobj.group(2)
cfourblock = matchobj.group(3)
literalkey = str(random.randint(0, 99999))
literals[literalkey] = cfourblock
return "%spsi4.set_global_option(\"%s\", \"\"\"%s\n\"\"\")\n" % \
(spaces, 'LITERAL_CFOUR', 'literals_psi4_yo-' + literalkey)
def process_extract_command(matchobj):
"""Function to process match of ``extract_subsets``."""
spaces = matchobj.group(1)
name = matchobj.group(2)
result = matchobj.group(0)
result += '%s%s.set_name("%s")' % (spaces, name, name)
result += "\n%spsi4.set_active_molecule(%s)" % (spaces, name)
result += '\n%spsi4.IO.set_default_namespace("%s")' % (spaces, name)
return result
def process_print_command(matchobj):
"""Function to process match of ``print`` and transform
it to ``psi4.print_out()``.
"""
spaces = matchobj.group(1)
string = matchobj.group(2)
return "%spsi4.print_out(str(%s))\n" % (spaces, str(string))
def process_memory_command(matchobj):
"""Function to process match of ``memory ...``."""
spaces = str(matchobj.group(1))
sig = str(matchobj.group(2))
units = str(matchobj.group(3))
val = float(sig)
memory_amount = val
if units.upper() == 'KB':
memory_amount = val * 1000
elif units.upper() == 'MB':
memory_amount = val * 1000000
elif units.upper() == 'GB':
memory_amount = val * 1000000000
return "%spsi4.set_memory(%d)\n" % (spaces, int(memory_amount))
def basname(name):
"""Imitates BasisSet.make_filename() without the gbs extension"""
return name.lower().replace('+', 'p').replace('*', 's').replace('(', '_').replace(')', '_').replace(',', '_')
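# Worked examples (hand-derived from the replacement chain above):
#     basname("6-31G**")      ->  "6-31gss"
#     basname("aug-cc-pVDZ")  ->  "aug-cc-pvdz"
#     basname("6-31+G(d,p)")  ->  "6-31pg_d_p_"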
def process_basis_block(matchobj):
"""Function to process match of ``basis name? { ... }``."""
spaces = matchobj.group(1)
basistype = matchobj.group(2).upper()
name = matchobj.group(3) # TODO name as CUSTOM if not provided?
cleanbas = basname(name).replace('-', '') # further remove hyphens so can be function name
command_lines = re.split('\n', matchobj.group(4))
symbol_re = re.compile(r'^\s*assign\s+(?P<symbol>[A-Z]{1,3})\s+(?P<basis>[-*\(\)\w]+)\s*$', re.IGNORECASE)
label_re = re.compile(r'^\s*assign\s+(?P<label>(?P<symbol>[A-Z]{1,3})(?:(_\w+)|(\d+))?)\s+(?P<basis>[-*\(\)\w]+)\s*$', re.IGNORECASE)
all_re = re.compile(r'^\s*assign\s+(?P<basis>[-*\(\)\w]+)\s*$', re.IGNORECASE)
basislabel = re.compile(r'\s*\[\s*([-*\(\)\w]+)\s*\]\s*')
result = """%sdef basisspec_psi4_yo__%s(mol, role):\n""" % (spaces, cleanbas)
result += """%s basstrings = {}\n""" % (spaces)
# Start by looking for assign lines, and remove them
leftover_lines = []
for line in command_lines:
if symbol_re.match(line):
m = symbol_re.match(line)
result += """%s mol.set_basis_by_symbol("%s", "%s", role=role)\n""" % \
(spaces, m.group('symbol'), m.group('basis'))
elif label_re.match(line):
m = label_re.match(line)
result += """%s mol.set_basis_by_label("%s", "%s", role=role)\n""" % \
(spaces, m.group('label'), m.group('basis'))
elif all_re.match(line):
m = all_re.match(line)
result += """%s mol.set_basis_all_atoms("%s", role=role)\n""" % \
(spaces, m.group('basis'))
else:
# Ignore blank lines and accumulate remainder
if line and not line.isspace():
leftover_lines.append(line.strip())
# Now look for regular basis set definitions
basblock = filter(None, basislabel.split('\n'.join(leftover_lines)))
if len(basblock) == 1:
        if len(result.split('\n')) == 2: # TODO check
# case with no [basname] markers where whole block is contents of gbs file
result += """%s mol.set_basis_all_atoms("%s", role=role)\n""" % \
(spaces, name)
result += """%s basstrings['%s'] = \"\"\"\n%s\n\"\"\"\n""" % \
(spaces, basname(name), basblock[0])
else:
print("Conflicting basis set specification: assign lines present but shells have no [basname] label.""")
sys.exit(1)
else:
# case with specs separated by [basname] markers
for idx in range(0, len(basblock), 2):
result += """%s basstrings['%s'] = \"\"\"\n%s\n\"\"\"\n""" % \
(spaces, basname(basblock[idx]), basblock[idx + 1])
#result += """%sbasis_dict_psi4_yo["%s"] = basisspec_psi4_yo__%s\n""" % (spaces, name, cleanname)
result += """%s return basstrings\n""" % (spaces)
result += """%soptions_psi4_yo["GLOBALS"]["%s"] = "%s"\n""" % (spaces, basistype, name)
return result
def process_pcm_command(matchobj):
"""Function to process match of ``pcm name? { ... }``."""
spaces = str(matchobj.group(1)) # Ignore..
name = str(matchobj.group(2)) # Ignore..
block = str(matchobj.group(3))
fp = open('@pcmsolver.inp', 'w')
fp.write(block)
fp.close()
from pcmpreprocess import preprocess
preprocess()
return "" # The file has been written to disk; nothing needed in Psi4 input
def process_external_command(matchobj):
"""Function to process match of ``external name? { ... }``."""
spaces = str(matchobj.group(1))
name = str(matchobj.group(2))
if not name or name.isspace():
name = "extern"
block = str(matchobj.group(3))
lines = re.split('\n', block)
extern = "%sqmmm = QMMM()\n" % (spaces)
NUMBER = "((?:[-+]?\\d*\\.\\d+(?:[DdEe][-+]?\\d+)?)|(?:[-+]?\\d+\\.\\d*(?:[DdEe][-+]?\\d+)?))"
# Comments are all removed by this point
# 0. Remove blank lines
re_blank = re.compile(r'^\s*$')
lines2 = []
for line in lines:
mobj = re_blank.match(line)
if mobj:
pass
else:
lines2.append(line)
lines = lines2
# 1. Look for units [ang|bohr|au|a.u.] defaults to ang
re_units = re.compile(r'^\s*units?[\s=]+((ang)|(angstrom)|(bohr)|(au)|(a\.u\.))$\s*', re.IGNORECASE)
units = 'ang'
lines2 = []
for line in lines:
mobj = re_units.match(line)
if mobj:
unit = mobj.group(1)
if unit in ['bohr', 'au', 'a.u.']:
units = 'bohr'
else:
units = 'ang'
else:
lines2.append(line)
lines = lines2
# 2. Look for basis basisname, defaults to cc-pvdz
# 3. Look for df_basis_scf basisname, defaults to cc-pvdz-jkfit
re_basis = re.compile(r'\s*basis[\s=]+(\S+)\s*$', re.IGNORECASE)
re_df_basis = re.compile(r'\s*df_basis_scf[\s=]+(\S+)\s*$', re.IGNORECASE)
basis = 'cc-pvdz'
df_basis_scf = 'cc-pvdz-jkfit'
lines2 = []
for line in lines:
mobj = re_basis.match(line)
if mobj:
basis = mobj.group(1)
else:
mobj = re_df_basis.match(line)
if mobj:
df_basis_scf = mobj.group(1)
else:
lines2.append(line)
lines = lines2
# 4. Look for charge lines Z x y z, convert according to unit convention
charge_re = re.compile(r'^\s*' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$')
lines2 = []
for line in lines:
mobj = charge_re.match(line)
if mobj:
if units == 'ang':
extern += '%sqmmm.addChargeAngstrom(%s,%s,%s,%s)\n' % (spaces, mobj.group(1), mobj.group(2), mobj.group(3), mobj.group(4))
if units == 'bohr':
extern += '%sqmmm.addChargeBohr(%s,%s,%s,%s)\n' % (spaces, mobj.group(1), mobj.group(2), mobj.group(3), mobj.group(4))
else:
lines2.append(line)
lines = lines2
    # 5. Look for diffuse regions, which are XYZ molecules separated by the usual -- lines
spacer_re = re.compile(r'^\s*--\s*$')
frags = []
frags.append([])
for line in lines:
mobj = spacer_re.match(line)
if mobj:
if len(frags[len(frags) - 1]):
frags.append([])
else:
frags[len(frags) - 1].append(line)
extern += '%sextern_mol_temp = psi4.get_active_molecule()\n' % (spaces)
mol_re = re.compile(r'\s*\S+\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$')
lines = []
for frag in frags:
if not len(frag):
continue
extern += '%sexternal_diffuse = geometry("""\n' % (spaces)
extern += '%s0 1\n' % (spaces)
for line in frag:
if not mol_re.match(line):
lines.append(line)
else:
extern += '%s%s\n' % (spaces, line)
extern += '%sunits %s\n' % (spaces, units)
extern += '%ssymmetry c1\n' % (spaces)
extern += '%sno_reorient\n' % (spaces)
extern += '%sno_com\n' % (spaces)
extern += '%s""")\n' % (spaces)
extern += "%sdiffuse = Diffuse(external_diffuse,'%s','%s')\n" % (spaces, basis, df_basis_scf)
extern += '%sdiffuse.fitScf()\n' % (spaces)
extern += '%sqmmm.addDiffuse(diffuse)\n' % (spaces)
extern += '\n'
extern += '%spsi4.set_active_molecule(extern_mol_temp)\n' % (spaces)
# 6. If there is anything left, the user messed up
if len(lines):
        print('Input parsing for external %s: Extra line(s) present:' % name)
for line in lines:
print(line)
sys.exit(1)
# Return is actually an ExternalPotential, not a QMMM
extern += '%sqmmm.populateExtern()\n' % (spaces)
extern += '%s%s = qmmm.extern\n' % (spaces, name)
extern += '%spsi4.set_global_option_python("EXTERN", extern)\n' % (spaces)
return extern
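# Sketch of a hypothetical block this handler accepts (the name, basis and
# numbers below are made up for illustration):
#     external pc {
#         units bohr
#         basis cc-pvdz
#         0.5   0.0  0.0  1.0
#         -0.5  0.0  0.0 -1.0
#     }
# Each "Z x y z" line becomes a qmmm.addChargeBohr(...) call (addChargeAngstrom
# under the default ang units), and any remaining XYZ fragments are wrapped into
# the diffuse-region geometry()/Diffuse() calls generated above.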
def check_parentheses_and_brackets(input_string, exit_on_error):
    """Function to check that all parentheses and brackets in *input_string*
    are paired, returning 1 if everything matches and 0 otherwise. When
    *exit_on_error* is 1, a message is printed and the program exits as soon
    as a mismatch is found.
    """
# This returns 1 if the string's all matched up, 0 otherwise
from collections import deque
# create left to right parenthesis mappings
lrmap = {"(": ")", "[": "]", "{": "}"}
# derive sets of left and right parentheses
lparens = set(lrmap.keys())
rparens = set(lrmap.values())
parenstack = deque()
all_matched = 1
for ch in input_string:
if ch in lparens:
parenstack.append(ch)
elif ch in rparens:
opench = ""
try:
opench = parenstack.pop()
except IndexError:
# Run out of opening parens
all_matched = 0
if exit_on_error:
print("Input error: extra %s" % (ch))
sys.exit(1)
if lrmap[opench] != ch:
# wrong type of parenthesis popped from stack
all_matched = 0
if exit_on_error:
print("Input error: %s closed with a %s" % (opench, ch))
sys.exit(1)
if len(parenstack) != 0:
all_matched = 0
if exit_on_error:
print("Input error: Unmatched %s" % (parenstack.pop()))
sys.exit(1)
return all_matched
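# A few hand-traced cases (illustrative only):
#     check_parentheses_and_brackets("set geom [[1, 2], [3, 4]]", 0)  ->  1
#     check_parentheses_and_brackets("energy('scf'", 0)               ->  0   (unclosed paren)
#     check_parentheses_and_brackets("foo(]", 0)                      ->  0   (mismatched pair)
# With exit_on_error=1 the failing cases print a message and call sys.exit(1) instead.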
def parse_multiline_array(input_list):
"""Function to squash multiline arrays into a single line
until all parentheses and brackets are fully paired.
"""
line = input_list.pop(0)
# Keep adding lines to the current one, until all parens match up
while not check_parentheses_and_brackets(line, 0):
thisline = input_list.pop(0).strip()
line += thisline
return "%s\n" % (line)
def process_multiline_arrays(inputfile):
"""Function to find array inputs that are spread across multiple
lines and squash them into a single line.
"""
# This function takes multiline array inputs, and puts them on a single line
# Start by converting the input to a list, splitting at newlines
input_list = inputfile.split("\n")
set_re = re.compile(r'^(\s*?)set\s+(?:([-,\w]+)\s+)?(\w+)[\s=]+\[.*', re.IGNORECASE)
newinput = ""
while len(input_list):
line = input_list[0]
if set_re.match(line):
# We've found the start of a set matrix [ .... line - hand it off for more checks
newinput += parse_multiline_array(input_list)
else:
# Nothing to do - just add the line to the string
newinput += "%s\n" % (input_list.pop(0))
return newinput
def process_input(raw_input, print_level=1):
"""Function to preprocess *raw input*, the text of the input file, then
parse it, validate it for format, and convert it into legitimate Python.
*raw_input* is printed to the output file unless *print_level* =0. Does
a series of regular expression filters, where the matching portion of the
input is replaced by the output of the corresponding function (in this
module) call. Returns a string concatenating module import lines, a copy
of the user's .psi4rc files, a setting of the scratch directory, a dummy
molecule, and the processed *raw_input*.
"""
# Check if the infile is actually an outfile (yeah we did)
psi4_id = re.compile(r'PSI4: An Open-Source Ab Initio Electronic Structure Package')
if re.search(psi4_id, raw_input):
input_lines = raw_input.split("\n")
input_re = re.compile(r'^\s*?\=\=> Input File <\=\=')
input_start = -1
for line_count in range(len(input_lines)):
line = input_lines[line_count]
if re.match(input_re, line):
input_start = line_count + 3
break
stop_re = re.compile(r'^-{74}')
input_stop = -1
for line_count in range(input_start, len(input_lines)):
line = input_lines[line_count]
if re.match(stop_re, line):
input_stop = line_count
break
if input_start == -1 or input_stop == -1:
print('Cannot extract infile from outfile.')
sys.exit(1)
raw_input = '\n'.join(input_lines[input_start:input_stop])
raw_input += '\n'
# Echo the infile on the outfile
if print_level > 0:
#psi4.print_out("\n ==> Input File <==\n\n")
#psi4.print_out("--------------------------------------------------------------------------\n")
#psi4.print_out(raw_input)
#psi4.print_out("--------------------------------------------------------------------------\n")
#psi4.flush_outfile()
print("\n ==> Input File <==\n\n")
print("--------------------------------------------------------------------------\n")
print(raw_input)
print("--------------------------------------------------------------------------\n")
    #NOTE: If adding multiline data to the preprocessor, use ONLY the following syntax:
# function [objname] { ... }
# which has the regex capture group:
#
# r'^(\s*?)FUNCTION\s*(\w*?)\s*\{(.*?)\}', re.MULTILINE | re.DOTALL | re.IGNORECASE
#
    # the leading whitespace is in capture group #1
    # your objname is in capture group #2
    # your data is in capture group #3
# Sections that are truly to be taken literally (spaces included)
# Must be stored then subbed in the end to escape the normal processing
# Process "cfour name? { ... }"
cfour = re.compile(r'^(\s*?)cfour[=\s]*(\w*?)\s*\{(.*?)\}',
re.MULTILINE | re.DOTALL | re.IGNORECASE)
temp = re.sub(cfour, process_cfour_command, raw_input)
# Return from handling literal blocks to normal processing
# Nuke all comments
comment = re.compile(r'[^\\]#.*')
temp = re.sub(comment, '', temp)
# Now, nuke any escapes from comment lines
comment = re.compile(r'\\#')
temp = re.sub(comment, '#', temp)
# Check the brackets and parentheses match up, as long as this is not a pickle input file
if not re.search(r'pickle_kw', temp):
check_parentheses_and_brackets(temp, 1)
# Initialize options dict (TODO temporary until class)
prep = """from collections import defaultdict\n"""
prep += """options_psi4_yo = defaultdict(dict)\n"""
prep += """basis_dict_psi4_yo = {}\n"""
# First, remove everything from lines containing only spaces
    blankline = re.compile(r'^\s*$', re.MULTILINE)
    temp = re.sub(blankline, '', temp)
# Look for things like
# set matrix [
# [ 1, 2 ],
# [ 3, 4 ]
# ]
# and put them on a single line
temp = process_multiline_arrays(temp)
# Process all "set name? { ... }"
set_commands = re.compile(r'^(\s*?)set\s*([-,\w]*?)[\s=]*\{(.*?)\}',
re.MULTILINE | re.DOTALL | re.IGNORECASE)
temp = re.sub(set_commands, process_set_commands, temp)
# Process all individual "set (module_list) key {[value_list] or $value or value}"
# N.B. We have to be careful here, because \s matches \n, leading to potential problems
# with undesired multiline matches. Better the double-negative [^\S\n] instead, which
# will match any space, tab, etc., except a newline
set_command = re.compile(r'^(\s*?)set\s+(?:([-,\w]+)[^\S\n]+)?(\w+)(?:[^\S\n]|=)+((\[.*\])|(\$?[-+,*()\.\w]+))\s*$',
re.MULTILINE | re.IGNORECASE)
temp = re.sub(set_command, process_set_command, temp)
# Process "molecule name? { ... }"
molecule = re.compile(r'^(\s*?)molecule[=\s]*(\w*?)\s*\{(.*?)\}',
re.MULTILINE | re.DOTALL | re.IGNORECASE)
temp = re.sub(molecule, process_molecule_command, temp)
# Process "external name? { ... }"
external = re.compile(r'^(\s*?)external[=\s]*(\w*?)\s*\{(.*?)\}',
re.MULTILINE | re.DOTALL | re.IGNORECASE)
temp = re.sub(external, process_external_command, temp)
# Process "pcm name? { ... }"
pcm = re.compile(r'^(\s*?)pcm[=\s]*(\w*?)\s*\{(.*?)^\}',
re.MULTILINE | re.DOTALL | re.IGNORECASE)
temp = re.sub(pcm, process_pcm_command, temp)
# Then remove repeated newlines
multiplenewlines = re.compile(r'\n+')
temp = re.sub(multiplenewlines, '\n', temp)
# Process " extract"
extract = re.compile(r'(\s*?)(\w+)\s*=\s*\w+\.extract_subsets.*',
re.IGNORECASE)
temp = re.sub(extract, process_extract_command, temp)
# Process "print" and transform it to "psi4.print_out()"
#print_string = re.compile(r'(\s*?)print\s+(.*)', re.IGNORECASE)
#temp = re.sub(print_string, process_print_command, temp)
# Process "memory ... "
memory_string = re.compile(r'(\s*?)memory\s+([+-]?\d*\.?\d+)\s+([KMG]i?B)',
re.IGNORECASE)
temp = re.sub(memory_string, process_memory_command, temp)
# Process "basis name? { ... }"
basis_block = re.compile(r'^(\s*?)(basis|df_basis_scf|df_basis_mp2)[=\s]*(\w*?)\s*\{(.*?)\}',
re.MULTILINE | re.DOTALL | re.IGNORECASE)
temp = re.sub(basis_block, process_basis_block, temp)
# Process literal blocks by substituting back in
lit_block = re.compile(r'literals_psi4_yo-(\d*\d)')
temp = re.sub(lit_block, process_literal_blocks, temp)
# imports
# imports = 'from psi4 import *\n'
# imports += 'from p4const import *\n'
# imports += 'from p4util import *\n'
# imports += 'from molutil import *\n'
##CU imports += 'from driver import *\n'
##CU imports += 'from wrappers import *\n'
##CU imports += 'from wrappers_cfour import *\n'
##CU imports += 'from gaussian_n import *\n'
# imports += 'from aliases import *\n'
##CU imports += 'from functional import *\n'
## imports += 'from qmmm import *\n'
# imports += 'psi4_io = psi4.IOManager.shared_object()\n'
imports = 'import qcdb\n'
# psirc (a baby PSIthon script that might live in ~/.psi4rc)
psirc = ''
homedir = os.path.expanduser('~')
psirc_file = homedir + '/.psi4rc'
if os.path.isfile(psirc_file):
fh = open(psirc_file)
psirc = fh.read()
fh.close()
# Override scratch directory if user specified via env_var
#scratch = ''
#scratch_env = psi4.Process.environment['PSI_SCRATCH']
#if len(scratch_env):
# scratch += 'psi4_io.set_default_path("%s")\n' % (scratch_env)
blank_mol = 'geometry("""\n'
blank_mol += '0 1\nH\nH 1 0.74\n'
blank_mol += '""","blank_molecule_psi4_yo")\n'
#temp = imports + psirc + scratch + blank_mol + temp
temp = prep + imports + psirc + temp
return temp
if __name__ == "__main__":
result = process_input("""
molecule h2 {
H
H 1 R
R = .9
}
set basis 6-31G**
""")
print("Result\n==========================")
print(result)
|
loriab/qcdb
|
qcdb/inputparser.py
|
Python
|
lgpl-3.0
| 29,904
|
[
"CFOUR",
"Psi4"
] |
773d4a98c3b7349363bfafea0756eb7ed479a715fd2efae5668550ea2433b804
|
import urllib
import re
class GitHg(object):
"""Class that handles various aspects of converting a hg commit to git.
"""
def __init__(self, warn):
"""Initializes a new GitHg object with the specified warner.
"""
self.warn = warn
def format_timezone(self, offset):
if offset % 60 != 0:
raise ValueError("Unable to handle non-minute offset.")
sign = (offset < 0) and '-' or '+'
offset = abs(offset)
return '%c%02d%02d' % (sign, offset / 3600, (offset / 60) % 60)
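    # Hand-traced examples (Python 2 integer division, as used by the callers below):
    #     format_timezone(-18000)  ->  '-0500'
    #     format_timezone(19800)   ->  '+0530'
    #     format_timezone(90)      ->  raises ValueError (not a whole number of minutes)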
def get_committer(self, ctx):
extra = ctx.extra()
if 'committer' in extra:
# fixup timezone
(name_timestamp, timezone) = extra['committer'].rsplit(' ', 1)
try:
timezone = self.format_timezone(-int(timezone))
return '%s %s' % (name_timestamp, timezone)
except ValueError:
self.warn("Ignoring committer in extra, invalid timezone in r%s: '%s'.\n" % (ctx.rev(), timezone))
return None
def get_message(self, ctx):
extra = ctx.extra()
message = ctx.description() + "\n"
if 'message' in extra:
message = apply_delta(message, extra['message'])
# HG EXTRA INFORMATION
add_extras = False
extra_message = ''
if not ctx.branch() == 'default':
add_extras = True
extra_message += "branch : " + ctx.branch() + "\n"
renames = []
for f in ctx.files():
if f not in ctx.manifest():
continue
rename = ctx.filectx(f).renamed()
if rename:
renames.append((rename[0], f))
if renames:
add_extras = True
for oldfile, newfile in renames:
extra_message += "rename : " + oldfile + " => " + newfile + "\n"
for key, value in extra.iteritems():
if key in ('author', 'committer', 'encoding', 'message', 'branch', 'hg-git'):
continue
else:
add_extras = True
extra_message += "extra : " + key + " : " + urllib.quote(value) + "\n"
if add_extras:
message += "\n--HG--\n" + extra_message
return message
def get_author(self, ctx):
# hg authors might not have emails
author = ctx.user()
# check for git author pattern compliance
regex = re.compile('^(.*?) ?\<(.*?)(|\>(.*))$')
a = regex.match(author)
if a:
name = a.group(1)
email = a.group(2)
extra = a.group(4)
            if extra is not None and len(extra) > 0:
if email.endswith(' <at'):
extra = extra.replace(' ', '')
extra = extra.replace(' <dot> ', '.')
extra = extra.replace('>', '')
email = email[:-4] + '@' + extra
else:
name += ' ext:(' + urllib.quote(extra) + ')'
author = name + ' <' + email + '>'
else:
if author.find('<') >= 0:
author = author + '>'
else:
author = author + ' <none@none>'
if 'author' in ctx.extra():
author = apply_delta(author, ctx.extra()['author'])
(time, timezone) = ctx.date()
date = str(int(time)) + ' ' + self.format_timezone(-timezone)
return author + ' ' + date
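    # Illustrative normalizations (assumed author strings, traced against the
    # regex above): 'Jane <jane@example.com>' passes through unchanged, while a
    # bare 'John Doe' becomes 'John Doe <none@none>'; the hg date is then
    # appended as '<epoch seconds> <+/-HHMM offset>'.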
def get_parents(self, ctx):
def is_octopus_part(ctx):
return ctx.extra().get('hg-git', None) in ('octopus', 'octopus-done')
parents = []
if ctx.extra().get('hg-git', None) == 'octopus-done':
# implode octopus parents
part = ctx
while is_octopus_part(part):
(p1, p2) = part.parents()
assert not is_octopus_part(p1)
parents.append(p1)
part = p2
parents.append(p2)
else:
parents = ctx.parents()
return parents
|
gktomar/gaurav
|
git_remote_helpers/hg/hg.py
|
Python
|
gpl-2.0
| 4,072
|
[
"Octopus"
] |
9ec39d71b4bffe6da84b0dcc60f72899109cdfcf9dad0f13c005a2e904b0eb84
|
#
# $Id: tex2libplot.py,v 1.5 2002/08/18 22:04:07 mrnolta Exp $
#
# Copyright (C) 2000 Mike Nolta <mike@nolta.net>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
#
# This is just a quick and dirty converter from simple TeX strings
# to libplot Hershey font strings. Basically just a lookup table.
#
import re, string
class TeXLexer(object):
re_control_sequence = re.compile( r"^\\[a-zA-Z]+[ ]?|^\\[^a-zA-Z][ ]?" )
def __init__( self, str ):
self.str = str
self.len = len(str)
self.pos = 0
self.token_stack = []
def get_token( self ):
if self.pos == self.len:
return None
if len(self.token_stack) > 0:
return self.token_stack.pop()
str = self.str[self.pos:]
m = self.re_control_sequence.match(str)
if m is not None:
token = m.group()
self.pos = self.pos + len(token)
## consume trailing space
if len(token) > 2 and token[-1] == ' ':
token = token[:-1]
else:
token = str[0]
self.pos = self.pos + 1
return token
def put_token( self, token ):
self.token_stack.append( token )
def peek( self ):
token = self.get_token()
self.put_token( token )
return token
_common_token_dict = {
r'\\' : '\\',
r'\$' : r'$',
r'\%' : r'%',
r'\#' : r'#',
r'\&' : r'&',
# r'\~' : r'~',
r'\{' : r'{',
r'\}' : r'}',
r'\_' : r'_',
# r'\^' : r'^',
r'~' : r' ',
r'\/' : r'\r^',
## special letters (p52)
# r'\oe' : r'',
# r'\OE' : r'',
r'\ae' : r'\ae',
r'\AE' : r'\AE',
r'\aa' : r'\oa',
r'\AA' : r'\oA',
r'\o' : r'\/o',
r'\O' : r'\/O',
# r'\l' : r'',
# r'\L' : r'',
r'\ss' : r'\ss',
## ignore stray brackets
r'{' : r'',
r'}' : r'',
}
_text_token_dict = {
## punctuation (p52)
r'\`' : r'\`',
r"\'" : r"\'",
r'\^' : r'\^',
r'\"' : r'\:',
r'\~' : r'\~',
r'\c' : r'\,',
## non-math symbols (p438)
r'\S' : r'\sc',
r'\P' : r'\ps',
r'\dag' : r'\dg',
r'\ddag' : r'\dd',
}
_math_token_dict = {
r'*' : r'\**',
## spacing
# r' ' : r'',
r'\ ' : r' ',
r'\quad' : r'\r1', # 1 em
r'\qquad' : r'\r1\r1', # 2 em
r'\,' : r'\r6', # 3/18 em
# r'\>' : r'', # 4/18 em
# r'\;' : r'', # 5/18 em
r'\!' : r'\l6', # -1/6 em
## lowercase greek
r'\alpha' : r'\*a',
r'\beta' : r'\*b',
r'\gamma' : r'\*g',
r'\delta' : r'\*d',
r'\epsilon' : r'\*e',
# r'\varepsilon' : r'',
r'\zeta' : r'\*z',
r'\eta' : r'\*y',
r'\theta' : r'\*h',
r'\vartheta' : r'\+h',
r'\iota' : r'\*i',
r'\kappa' : r'\*k',
r'\lambda' : r'\*l',
r'\mu' : r'\*m',
r'\nu' : r'\*n',
r'\xi' : r'\*c',
r'\pi' : r'\*p',
# r'\varpi' : r'',
r'\rho' : r'\*r',
# r'\varrho' : r'',
r'\sigma' : r'\*s',
r'\varsigma' : r'\ts',
r'\tau' : r'\*t',
r'\upsilon' : r'\*u',
r'\phi' : r'\*f',
r'\varphi' : r'\+f',
r'\chi' : r'\*x',
r'\psi' : r'\*q',
r'\omega' : r'\*w',
## uppercase greek
r'\Alpha' : r'\*A',
r'\Beta' : r'\*B',
r'\Gamma' : r'\*G',
r'\Delta' : r'\*D',
r'\Epsilon' : r'\*E',
r'\Zeta' : r'\*Z',
r'\Eta' : r'\*Y',
r'\Theta' : r'\*H',
r'\Iota' : r'\*I',
r'\Kappa' : r'\*K',
r'\Lambda' : r'\*L',
r'\Mu' : r'\*M',
r'\Nu' : r'\*N',
r'\Xi' : r'\*C',
r'\Pi' : r'\*P',
r'\Rho' : r'\*R',
r'\Sigma' : r'\*S',
r'\Tau' : r'\*T',
r'\Upsilon' : r'\*U',
r'\Phi' : r'\*F',
r'\Chi' : r'\*X',
r'\Psi' : r'\*Q',
r'\Omega' : r'\*W',
## miscellaneous
r'\aleph' : r'\Ah',
r'\hbar' : r'\hb',
r'\ell' : r'\#H0662',
r'\wp' : r'\wp',
r'\Re' : r'\Re',
r'\Im' : r'\Im',
r'\partial' : r'\pd',
r'\infty' : r'\if',
r'\prime' : r'\fm',
r'\emptyset' : r'\es',
r'\nabla' : r'\gr',
r'\surd' : r'\sr',
# r'\top' : r'',
# r'\bot' : r'',
r'\|' : r'\||',
r'\angle' : r'\/_',
# r'\triangle' : r'',
r'\backslash' : r'\\',
r'\forall' : r'\fa',
r'\exists' : r'\te',
r'\neg' : r'\no',
# r'\flat' : r'',
# r'\natural' : r'',
# r'\sharp' : r'',
r'\clubsuit' : r'\CL',
r'\diamondsuit' : r'\DI',
r'\heartsuit' : r'\HE',
r'\spadesuit' : r'\SP',
r'\int' : r'\is',
## binary operations
r'\pm' : r'\+-',
r'\mp' : r'\-+',
# r'\setminus' : r'',
r'\cdot' : r'\md',
r'\times' : r'\mu',
r'\ast' : r'\**',
# r'\star' : r'',
# r'\diamond' : r'',
# r'\circ' : r'',
r'\bullet' : r'\bu',
r'\div' : r'\di',
r'\cap' : r'\ca',
r'\cup' : r'\cu',
# r'\uplus' : r'',
# r'\sqcap' : r'',
# r'\sqcup' : r'',
# r'\triangleleft' : r'',
# r'\triangleright' : r'',
# r'\wr' : r'',
# r'\bigcirc' : r'',
# r'\bigtriangleup' : r'',
# r'\bigtriangledown' : r'',
# r'\vee' : r'',
# r'\wedge' : r'',
r'\oplus' : r'\c+',
# r'\ominus' : r'',
r'\otimes' : r'\c*',
# r'\oslash' : r'',
r'\odot' : r'\SO',
r'\dagger' : r'\dg',
r'\ddagger' : r'\dd',
# r'\amalg' : r'',
## relations
r'\leq' : r'\<=',
# r'\prec' : r'',
# r'\preceq' : r'',
r'\ll' : r'<<',
r'\subset' : r'\SB',
# r'\subseteq' : r'',
# r'\sqsubseteq' : r'',
r'\in' : r'\mo',
# r'\vdash' : r'',
# r'\smile' : r'',
# r'\frown' : r'',
r'\geq' : r'\>=',
# r'\succ' : r'',
# r'\succeq' : r'',
r'\gg' : r'>>',
r'\supset' : r'\SS',
# r'\supseteq' : r'',
# r'\sqsupseteq' : r'',
# r'\ni' : r'',
# r'\dashv' : r'',
r'\mid' : r'|',
r'\parallel' : r'\||',
r'\equiv' : r'\==',
r'\sim' : r'\ap',
r'\simeq' : r'\~-',
# r'\asymp' : r'',
r'\approx' : r'\~~',
r'\cong' : r'\=~',
# r'\bowtie' : r'',
r'\propto' : r'\pt',
# r'\models' : r'',
# r'\doteq' : r'',
r'\perp' : r'\pp',
## arrows
r'\leftarrow' : r'\<-',
r'\Leftarrow' : r'\lA',
r'\rightarrow' : r'\->',
r'\Rightarrow' : r'\rA',
r'\leftrightarrow' : r'\<>',
r'\Leftrightarrow' : r'\hA',
# r'\mapsto' : r'',
# r'\hookleftarrow' : r'',
# r'\leftharpoonup' : r'',
# r'\leftharpoondown' : r'',
# r'\rightleftharpoons' : r'',
# ...
r'\uparrow' : r'\ua',
r'\Uparrow' : r'\uA',
r'\downarrow' : r'\da',
r'\Downarrow' : r'\dA',
# r'\updownarrow' : r'',
# r'\Updownarrow' : r'',
# r'\nearrow' : r'',
# r'\searrow' : r'',
# r'\swarrow' : r'',
# r'\nwarrow' : r'',
## openings
r'\lbrack' : r'[',
r'\lbrace' : r'{',
r'\langle' : r'\la',
# r'\lfloor' : r'',
# r'\lceil' : r'',
## closings
r'\rbrack' : r']',
r'\rbrace' : r'}',
r'\rangle' : r'\ra',
# r'\rfloor' : r'',
# r'\rceil' : r'',
## alternate names
r'\ne' : r'\!=',
r'\neq' : r'\!=',
r'\le' : r'\<=',
r'\ge' : r'\>=',
r'\to' : r'\->',
r'\gets' : r'\<-',
# r'\owns' : r'',
r'\land' : r'\AN',
r'\lor' : r'\OR',
r'\lnot' : r'\no',
r'\vert' : r'|',
r'\Vert' : r'\||',
## extensions
r'\degree' : r'\de',
r'\deg' : r'\de',
r'\degr' : r'\de',
r'\arcdeg' : r'\de',
}
def map_text_token( token ):
    if token in _text_token_dict:
        return _text_token_dict[token]
    else:
        return _common_token_dict.get( token, token )
def map_math_token( token ):
    if token in _math_token_dict:
        return _math_token_dict[token]
    else:
        return _common_token_dict.get( token, token )
def math_group( lexer ):
output = ''
bracketmode = 0
while 1:
token = lexer.get_token()
if token is None:
break
if token == '{':
bracketmode = 1
elif token == '}':
break
else:
output = output + map_math_token( token )
if not bracketmode:
break
return output
font_code = [ r'\f0', r'\f1', r'\f2', r'\f3' ]
def tex2libplot( str ):
output = ''
mathmode = 0
font_stack = []
font = 1
lexer = TeXLexer( str )
while 1:
token = lexer.get_token()
if token is None:
break
append = ''
if token == '$':
mathmode = not mathmode
elif token == '{':
font_stack.append( font )
elif token == '}':
old_font = font_stack.pop()
if old_font != font:
font = old_font
append = font_code[font]
elif token == r'\rm':
font = 1
append = font_code[font]
elif token == r'\it':
font = 2
append = font_code[font]
elif token == r'\bf':
font = 3
append = font_code[font]
elif not mathmode:
append = map_text_token( token )
elif token == '_':
append = r'\sb' + math_group(lexer) + r'\eb'
if lexer.peek() == '^':
append = r'\mk' + append + r'\rt'
elif token == '^':
append = r'\sp' + math_group(lexer) + r'\ep'
if lexer.peek() == '_':
append = r'\mk' + append + r'\rt'
else:
append = map_math_token( token )
output = output + append
return output
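# Hand-traced examples (illustrative only, derived from the token tables above;
# note the lexer swallows the space that follows a control word):
#     tex2libplot(r'$\alpha \to \beta$')  ->  r'\*a\->\*b'
#     tex2libplot(r'$x^2$')               ->  r'x\sp2\ep'
#     tex2libplot(r'\bf M$_\odot$')       ->  r'\f3M\sb\SO\eb'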
|
kstory8/biggles
|
biggles/libplot/tex2libplot.py
|
Python
|
gpl-2.0
| 16,267
|
[
"Bowtie"
] |
7ad2068f19d9ce7430accaaceef348bd96800d5f2194ee6fe96c45aec3581eae
|
#!/usr/bin/env python2
'''
CalcuMLator testing file
Copyright (C) 2016 Luiz Eduardo Amaral <luizamaral306@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
import argparse
import json
from os import path
from sklearn.externals import joblib
FOLDER = path.dirname(path.realpath(__file__))+'/'
ESTIMATOR_FOLDER = FOLDER+'estimators/'
ESTIMATOR_CONF = FOLDER+'estimator_conf.json'
with open(ESTIMATOR_CONF) as text:
conf = json.loads(text.read())
def predict(number1, number2, operator, estimator):
'''
Predicts the value of the operation
'''
args = np.array([number1, number2]).reshape(1, -1)
if estimator in conf['estimators'] and operator in conf['types']:
estimator_path = ESTIMATOR_FOLDER+operator+'_'+conf[estimator]
clf = joblib.load(estimator_path)
result = clf.predict(args)[0]
if type(result) is np.ndarray:
result = result[0]
return result
else:
return 0
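# Minimal usage sketch (the operator/estimator names are assumptions about the
# entries in estimator_conf.json, and a fitted model must already be pickled
# under estimators/):
#     from estimate import predict
#     print(predict(2.0, 3.0, '+', 'linear'))   # roughly 5.0 for a trained regressor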
def main():
parser = argparse.ArgumentParser(description='Calculates the operation using Machine Learning')
parser.add_argument('number1', metavar='N1', help='a number for the calculation')
parser.add_argument('operator', metavar='OP', help='The operator for the calculation (+, -, *, /)')
parser.add_argument('number2', metavar='N2', help='a number for the calculation')
parser.add_argument('estimator', metavar='E', help='The estimator for the calculation (linear, ridge, lasso, elastic, bayesian, theil, PAR, SVR, bagging, dtree, gaussian, PLS, MLP, knnr, k_ridge, forest)')
args = parser.parse_args()
try:
number1 = float(args.number1)
operator = args.operator
number2 = float(args.number2)
estimator = args.estimator
if operator not in conf['types']:
raise TypeError('Operator must be one of: ' +
str(conf['types']))
if estimator not in conf['estimators']:
raise TypeError('Estimator must be one of: ' +
str(conf['estimators']))
print(predict(number1, number2, operator, estimator))
return 0
except Exception as error:
if type(error) == ValueError:
error.args = ('Input (N1, N2) must be numbers',)
print(error)
return 1
if __name__ == '__main__':
main()
|
ArmlessJohn404/calcuMLator
|
calcuMLator/estimate.py
|
Python
|
gpl-3.0
| 2,928
|
[
"Gaussian"
] |
42849737140c6a35e21427d7f89f522553d0a9b4f629e0944439f3cbf3b9dd0a
|
#!/usr/bin/env python3
# Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Update binary pruning and domain substitution lists automatically.
It will download and unpack into the source tree as necessary.
No binary pruning or domain substitution will be applied to the source tree after
the process has finished.
"""
import argparse
import os
import sys
from itertools import repeat
from multiprocessing import Pool
from pathlib import Path, PurePosixPath
sys.path.insert(0, str(Path(__file__).resolve().parent.parent / 'utils'))
from _common import get_logger
from domain_substitution import DomainRegexList, TREE_ENCODINGS
sys.path.pop(0)
# Encoding for output files
_ENCODING = 'UTF-8'
# NOTE: Include patterns have precedence over exclude patterns
# pathlib.Path.match() paths to include in binary pruning
PRUNING_INCLUDE_PATTERNS = [
'components/domain_reliability/baked_in_configs/*',
# Removals for patches/core/ungoogled-chromium/remove-unused-preferences-fields.patch
'components/safe_browsing/core/common/safe_browsing_prefs.cc',
'components/safe_browsing/core/common/safe_browsing_prefs.h',
'components/signin/public/base/signin_pref_names.cc',
'components/signin/public/base/signin_pref_names.h',
]
# pathlib.Path.match() paths to exclude from binary pruning
PRUNING_EXCLUDE_PATTERNS = [
'chrome/common/win/eventlog_messages.mc', # TODO: False positive textfile
# TabRanker example preprocessor config
# Details in chrome/browser/resource_coordinator/tab_ranker/README.md
'chrome/browser/resource_coordinator/tab_ranker/example_preprocessor_config.pb',
'chrome/browser/resource_coordinator/tab_ranker/pairwise_preprocessor_config.pb',
# Exclusions for DOM distiller (contains model data only)
'components/dom_distiller/core/data/distillable_page_model_new.bin',
'components/dom_distiller/core/data/long_page_model.bin',
# Exclusions for GeoLanguage data
# Details: https://docs.google.com/document/d/18WqVHz5F9vaUiE32E8Ge6QHmku2QSJKvlqB9JjnIM-g/edit
# Introduced with: https://chromium.googlesource.com/chromium/src/+/6647da61
'components/language/content/browser/ulp_language_code_locator/geolanguage-data_rank0.bin',
'components/language/content/browser/ulp_language_code_locator/geolanguage-data_rank1.bin',
'components/language/content/browser/ulp_language_code_locator/geolanguage-data_rank2.bin',
# Exclusion for required prebuilt object for Windows arm64 builds
'third_party/crashpad/crashpad/util/misc/capture_context_win_arm64.obj',
'third_party/icu/common/icudtl.dat', # Exclusion for ICU data
# Exclusion for Android
'build/android/chromium-debug.keystore',
'third_party/icu/android/icudtl.dat',
'third_party/icu/android_small/icudtl.dat',
'third_party/icu/android_small/icudtl_extra.dat',
'third_party/icu/common/icudtb.dat',
# Exclusions for safe file extensions
'*.avif',
'*.ttf',
'*.png',
'*.jpg',
'*.webp',
'*.gif',
'*.ico',
'*.mp3',
'*.wav',
'*.flac',
'*.icns',
'*.woff',
'*.woff2',
'*makefile',
'*.xcf',
'*.cur',
'*.pdf',
'*.ai',
'*.h',
'*.c',
'*.cpp',
'*.cc',
'*.mk',
'*.bmp',
'*.py',
'*.xml',
'*.html',
'*.js',
'*.json',
'*.txt',
'*.xtb'
]
# NOTE: Domain substitution path prefix exclusion has precedence over inclusion patterns
# Paths to exclude by prefixes of the POSIX representation for domain substitution
DOMAIN_EXCLUDE_PREFIXES = [
'components/test/',
'net/http/transport_security_state_static.json',
# Exclusions for Visual Studio Project generation with GN (PR #445)
'tools/gn/src/gn/visual_studio_writer.cc',
# Exclusions for files covered with other patches/unnecessary
'components/search_engines/prepopulated_engines.json',
'third_party/blink/renderer/core/dom/document.cc',
]
# pathlib.Path.match() patterns to include in domain substitution
DOMAIN_INCLUDE_PATTERNS = [
'*.h', '*.hh', '*.hpp', '*.hxx', '*.cc', '*.cpp', '*.cxx', '*.c', '*.h', '*.json', '*.js',
'*.html', '*.htm', '*.css', '*.py*', '*.grd*', '*.sql', '*.idl', '*.mk', '*.gyp*', 'makefile',
'*.txt', '*.xml', '*.mm', '*.jinja*', '*.gn', '*.gni'
]
# Binary-detection constant
_TEXTCHARS = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7f})
class UnusedPatterns: #pylint: disable=too-few-public-methods
"""Tracks unused prefixes and patterns"""
_all_names = ('pruning_include_patterns', 'pruning_exclude_patterns', 'domain_include_patterns',
'domain_exclude_prefixes')
def __init__(self):
# Initialize all tracked patterns and prefixes in sets
# Users will discard elements that are used
for name in self._all_names:
setattr(self, name, set(globals()[name.upper()]))
def log_unused(self):
"""
Logs unused patterns and prefixes
Returns True if there are unused patterns or prefixes; False otherwise
"""
have_unused = False
for name in self._all_names:
current_set = getattr(self, name, None)
if current_set:
get_logger().error('Unused from %s: %s', name.upper(), current_set)
have_unused = True
return have_unused
def _is_binary(bytes_data):
"""
Returns True if the data seems to be binary data (i.e. not human readable); False otherwise
"""
# From: https://stackoverflow.com/a/7392391
return bool(bytes_data.translate(None, _TEXTCHARS))
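# For instance (traced against _TEXTCHARS above):
#     _is_binary(b'#include <stdio.h>\n')  ->  False  (every byte is printable/whitespace)
#     _is_binary(b'\x89PNG\r\n\x1a\x00')   ->  True   (control bytes survive the translate)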
def _dir_empty(path):
"""
Returns True if the directory is empty; False otherwise
path is a pathlib.Path or string to a directory to test.
"""
try:
next(os.scandir(str(path)))
except StopIteration:
return True
return False
def should_prune(path, relative_path, used_pep_set, used_pip_set):
"""
Returns True if a path should be pruned from the source tree; False otherwise
path is the pathlib.Path to the file from the current working directory.
relative_path is the pathlib.Path to the file from the source tree
used_pep_set is a list of PRUNING_EXCLUDE_PATTERNS that have been matched
used_pip_set is a list of PRUNING_INCLUDE_PATTERNS that have been matched
"""
# Match against include patterns
for pattern in filter(relative_path.match, PRUNING_INCLUDE_PATTERNS):
used_pip_set.add(pattern)
return True
# Match against exclude patterns
for pattern in filter(Path(str(relative_path).lower()).match, PRUNING_EXCLUDE_PATTERNS):
used_pep_set.add(pattern)
return False
# Do binary data detection
with path.open('rb') as file_obj:
if _is_binary(file_obj.read()):
return True
# Passed all filtering; do not prune
return False
def _check_regex_match(file_path, search_regex):
"""
Returns True if a regex pattern matches a file; False otherwise
file_path is a pathlib.Path to the file to test
search_regex is a compiled regex object to search for domain names
"""
with file_path.open("rb") as file_obj:
file_bytes = file_obj.read()
content = None
for encoding in TREE_ENCODINGS:
try:
content = file_bytes.decode(encoding)
break
except UnicodeDecodeError:
continue
if not search_regex.search(content) is None:
return True
return False
def should_domain_substitute(path, relative_path, search_regex, used_dep_set, used_dip_set):
"""
Returns True if a path should be domain substituted in the source tree; False otherwise
path is the pathlib.Path to the file from the current working directory.
relative_path is the pathlib.Path to the file from the source tree.
used_dep_set is a list of DOMAIN_EXCLUDE_PREFIXES that have been matched
used_dip_set is a list of DOMAIN_INCLUDE_PATTERNS that have been matched
"""
relative_path_posix = relative_path.as_posix().lower()
for include_pattern in DOMAIN_INCLUDE_PATTERNS:
if PurePosixPath(relative_path_posix).match(include_pattern):
used_dip_set.add(include_pattern)
for exclude_prefix in DOMAIN_EXCLUDE_PREFIXES:
if relative_path_posix.startswith(exclude_prefix):
used_dep_set.add(exclude_prefix)
return False
return _check_regex_match(path, search_regex)
return False
def compute_lists_proc(path, source_tree, search_regex):
"""
Adds the path to appropriate lists to be used by compute_lists.
path is the pathlib.Path to the file from the current working directory.
source_tree is a pathlib.Path to the source tree
search_regex is a compiled regex object to search for domain names
"""
used_pep_set = set() # PRUNING_EXCLUDE_PATTERNS
used_pip_set = set() # PRUNING_INCLUDE_PATTERNS
used_dep_set = set() # DOMAIN_EXCLUDE_PREFIXES
used_dip_set = set() # DOMAIN_INCLUDE_PATTERNS
pruning_set = set()
domain_substitution_set = set()
symlink_set = set()
if path.is_file():
relative_path = path.relative_to(source_tree)
if path.is_symlink():
try:
resolved_relative_posix = path.resolve().relative_to(source_tree).as_posix()
symlink_set.add((resolved_relative_posix, relative_path.as_posix()))
except ValueError:
# Symlink leads out of the source tree
pass
else:
try:
if should_prune(path, relative_path, used_pep_set, used_pip_set):
pruning_set.add(relative_path.as_posix())
elif should_domain_substitute(path, relative_path, search_regex, used_dep_set,
used_dip_set):
domain_substitution_set.add(relative_path.as_posix())
except: #pylint: disable=bare-except
get_logger().exception('Unhandled exception while processing %s', relative_path)
return (used_pep_set, used_pip_set, used_dep_set, used_dip_set, pruning_set,
domain_substitution_set, symlink_set)
def compute_lists(source_tree, search_regex, processes):
"""
Compute the binary pruning and domain substitution lists of the source tree.
Returns a tuple of three items in the following order:
1. The sorted binary pruning list
2. The sorted domain substitution list
3. An UnusedPatterns object
source_tree is a pathlib.Path to the source tree
search_regex is a compiled regex object to search for domain names
processes is the maximum number of worker processes to create
"""
pruning_set = set()
domain_substitution_set = set()
symlink_set = set() # POSIX resolved path -> set of POSIX symlink paths
source_tree = source_tree.resolve()
unused_patterns = UnusedPatterns()
# Launch multiple processes iterating over the source tree
with Pool(processes) as procpool:
returned_data = procpool.starmap(
compute_lists_proc,
zip(source_tree.rglob('*'), repeat(source_tree), repeat(search_regex)))
# Handle the returned data
for (used_pep_set, used_pip_set, used_dep_set, used_dip_set, returned_pruning_set,
returned_domain_sub_set, returned_symlink_set) in returned_data:
unused_patterns.pruning_exclude_patterns.difference_update(used_pep_set)
unused_patterns.pruning_include_patterns.difference_update(used_pip_set)
unused_patterns.domain_exclude_prefixes.difference_update(used_dep_set)
unused_patterns.domain_include_patterns.difference_update(used_dip_set)
pruning_set.update(returned_pruning_set)
domain_substitution_set.update(returned_domain_sub_set)
symlink_set.update(returned_symlink_set)
# Prune symlinks for pruned files
for (resolved, symlink) in symlink_set:
if resolved in pruning_set:
pruning_set.add(symlink)
return sorted(pruning_set), sorted(domain_substitution_set), unused_patterns
def main(args_list=None):
"""CLI entrypoint"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--pruning',
metavar='PATH',
type=Path,
default='pruning.list',
help='The path to store pruning.list. Default: %(default)s')
parser.add_argument(
'--domain-substitution',
metavar='PATH',
type=Path,
default='domain_substitution.list',
help='The path to store domain_substitution.list. Default: %(default)s')
parser.add_argument(
'--domain-regex',
metavar='PATH',
type=Path,
default='domain_regex.list',
help='The path to domain_regex.list. Default: %(default)s')
parser.add_argument(
'-t',
'--tree',
metavar='PATH',
type=Path,
required=True,
help='The path to the source tree to use.')
parser.add_argument(
'--processes',
metavar='NUM',
type=int,
default=None,
help=
'The maximum number of worker processes to create. Defaults to the number of system CPUs.')
args = parser.parse_args(args_list)
if args.tree.exists() and not _dir_empty(args.tree):
get_logger().info('Using existing source tree at %s', args.tree)
else:
get_logger().error('No source tree found. Aborting.')
exit(1)
get_logger().info('Computing lists...')
pruning_set, domain_substitution_set, unused_patterns = compute_lists(
args.tree,
DomainRegexList(args.domain_regex).search_regex, args.processes)
with args.pruning.open('w', encoding=_ENCODING) as file_obj:
file_obj.writelines('%s\n' % line for line in pruning_set)
with args.domain_substitution.open('w', encoding=_ENCODING) as file_obj:
file_obj.writelines('%s\n' % line for line in domain_substitution_set)
if unused_patterns.log_unused():
get_logger().error('Please update or remove unused patterns and/or prefixes. '
'The lists have still been updated with the remaining valid entries.')
exit(1)
if __name__ == "__main__":
main()
|
Eloston/ungoogled-chromium
|
devutils/update_lists.py
|
Python
|
bsd-3-clause
| 14,456
|
[
"xTB"
] |
b1986a3b5ffb947561e65bcb590ef85f8d52ffbd0597b624407546d22406eab4
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from np import Neuron
t = [[ [0,0],
[0,1],
[1,0],
[1,1]],
[0,1,1,0]]
learning_rate = 0.5
max_epocas = 200
max_erro = 0.01
curr_erro = 100
curr_epoca = 1
# Instantiate the neurons that will be used.
ne0 = Neuron({"inputs":t[0][0]})
nh0, nh1 = Neuron({"inputs":[ne0.output()]}), Neuron({"inputs":[ne0.output()]})
ns0 = Neuron({"inputs":[nh0.output(), nh1.output()]})
def run_net():
while(curr_erro>max_erro):
        ## create a for loop to feed in all of the inputs.
        # passing the information through the network.
nh0.inputs = [ne0.output()]
nh1.inputs = nh0.inputs
ns0.inputs = [nh0.output(), nh1.output()]
x = ne0.output()
y = ns0.output()
h0 = nh0.output()
h1 = nh1.output()
        # computing the error of the output-layer neuron.
delta2 = y*(1-y)*(t[1][1]-y)
        # computing the errors of the hidden-layer neurons.
delta1_0 = h0*(1-h0)*delta2*nh0.weights[0]
delta1_1 = h1*(1-h1)*delta2*nh1.weights[0]
        # updating the output-layer weights.
ns0.weights = [learning_rate*delta2*h0, learning_rate*delta2*h1]
        # updating the hidden-layer weights.
nh0.weights = [learning_rate*delta1_0*x]
nh1.weights = [learning_rate*delta1_1*x]
        # update the current error
        # update the current epoch
        # create a mean squared error function (see the sketch after this function)
        # perform backpropagation
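# A minimal sketch of the mean squared error helper the note above asks for
# (illustrative; not yet wired into run_net):
def mean_squared_error(targets, outputs):
    # average squared difference between desired and produced outputs
    return sum((t - o) ** 2 for t, o in zip(targets, outputs)) / float(len(targets))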
def backpropagation():
pass
|
aulusdiniz/neuron
|
neuropy/net.py
|
Python
|
mit
| 1,536
|
[
"NEURON"
] |
3c244ce0d0d9aebf1aed1cabf876c43d51f8befa683e69bf5abe0b0955678bfe
|
"""
Student Views
"""
import datetime
import logging
import uuid
import json
import warnings
import jwt
from collections import defaultdict
from urlparse import urljoin
from pytz import UTC
from requests import HTTPError
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.mail import EmailMessage
from django.core.urlresolvers import reverse, NoReverseMatch
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseServerError, Http404)
from django.shortcuts import redirect
from django.utils.encoding import force_bytes, force_text
from django.utils.translation import ungettext
from django.utils.http import base36_to_int, urlsafe_base64_encode
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.response import TemplateResponse
from ratelimitbackend.exceptions import RateLimitException
from social_django import utils as social_utils
from social_core.backends import oauth as social_oauth
from social_core.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile,
PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED)
from student.forms import AccountCreationForm, PasswordResetFormNoActive
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import ( # pylint: disable=import-error
get_certificate_url,
has_html_certificates_enabled,
)
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore import ModuleStoreEnum
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from external_auth.models import ExternalAuthMap
import external_auth.views
from external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
from bulk_email.models import Optout, CourseAuthorization
from lang_pref import LANGUAGE_KEY
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from microsite_configuration import microsite
from util.password_policy_validators import (
validate_password_length, validate_password_complexity,
validate_password_dictionary
)
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page,
DISABLE_UNENROLL_CERT_STATES,
)
from student.cookies import set_logged_in_cookies, delete_logged_in_cookies
from student.models import anonymous_id_for_user
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
import analytics
from eventtracking import tracker
from edraak_validation import validate_username
from edraak_misc.utils import edraak_courses_logic
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
# Note that this lives in openedx, so this dependency should be refactored.
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
from openedx.core.djangoapps.programs.utils import get_programs_for_dashboard
from .constants import CHILD_USER_PERMISSION_GROUP
from course_modes.helpers import get_progs_url
from .helpers import enroll, get_spam_name
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint: disable=invalid-name
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
def csrf_token(context):
"""A csrf token that can be included in a form."""
token = context.get('csrf_token', '')
if token == 'NOTPROVIDED':
return ''
return (u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="%s" /></div>' % (token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
"""
Render the edX main page.
extra_context is used to allow immediate display of certain modal windows, eg signup,
as used by external_auth.
"""
if extra_context is None:
extra_context = {}
# The course selection work is done in courseware.courses.
domain = settings.FEATURES.get('FORCE_UNIVERSITY_DOMAIN') # normally False
# do explicit check, because domain=None is valid
if domain is False:
domain = request.META.get('HTTP_HOST')
# Hardcoded `AnonymousUser()` to hide unpublished courses always
courses = get_courses(AnonymousUser(), domain=domain)
if microsite.get_value("ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]):
courses = sort_by_start_date(courses)
else:
courses = sort_by_announcement(courses)
courses = edraak_courses_logic(courses)
context = {'courses': courses}
context.update(extra_context)
return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
"""
If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
Currently, this is sha1(user.username). Otherwise, return survey_link.
"""
return survey_link.format(UNIQUE_ID=unique_id_for_user(user))
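# For example (the URL is hypothetical):
#     process_survey_link('https://example.com/survey?uid={UNIQUE_ID}', user)
# returns the link with {UNIQUE_ID} replaced by unique_id_for_user(user), while
# links without the placeholder pass through unchanged.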
def cert_info(user, course_overview, course_mode):
"""
Get the certificate info needed to render the dashboard section for the given
student and course.
Arguments:
user (User): A user.
course_overview (CourseOverview): A course.
course_mode (str): The enrollment mode (honor, verified, audit, etc.)
Returns:
dict: Empty dict if certificates are disabled or hidden, or a dictionary with keys:
'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
'show_download_url': bool
'download_url': url, only present if show_download_url is True
'show_disabled_download_button': bool -- true if state is 'generating'
'show_survey_button': bool
'survey_url': url, only if show_survey_button is True
'grade': if status is not 'processing'
'can_unenroll': if status allows for unenrollment
"""
if not course_overview.may_certify():
return {}
return _cert_info(
user,
course_overview,
certificate_status_for_student(user, course_overview.id),
course_mode
)
def reverification_info(statuses):
"""
Returns reverification-related information for *all* of user's enrollments whose
reverification status is in statuses.
Args:
statuses (list): a list of reverification statuses we want information for
example: ["must_reverify", "denied"]
Returns:
dictionary of lists: dictionary with one key per status, e.g.
dict["must_reverify"] = []
dict["must_reverify"] = [some information]
"""
reverifications = defaultdict(list)
# Sort the data by the reverification_end_date
for status in statuses:
if reverifications[status]:
reverifications[status].sort(key=lambda x: x.date)
return reverifications
def get_course_enrollments(user, org_to_include, orgs_to_exclude):
"""
Given a user, return a filtered set of his or her course enrollments.
Arguments:
user (User): the user in question.
org_to_include (str): for use in Microsites. If not None, ONLY courses
of this org will be returned.
orgs_to_exclude (list[str]): If org_to_include is not None, this
argument is ignored. Else, courses of this org will be excluded.
Returns:
generator[CourseEnrollment]: a sequence of enrollments to be displayed
on the user's dashboard.
"""
for enrollment in CourseEnrollment.enrollments_for_user(user):
# If the course is missing or broken, log an error and skip it.
course_overview = enrollment.course_overview
if not course_overview:
log.error(
"User %s enrolled in broken or non-existent course %s",
user.username,
enrollment.course_id
)
continue
# If we are in a Microsite, then filter out anything that is not
# attributed (by ORG) to that Microsite.
if org_to_include and course_overview.location.org != org_to_include:
continue
# Conversely, if we are not in a Microsite, then filter out any enrollments
# with courses attributed (by ORG) to Microsites.
elif course_overview.location.org in orgs_to_exclude:
continue
# Else, include the enrollment.
else:
yield enrollment
def _cert_info(user, course_overview, cert_status, course_mode): # pylint: disable=unused-argument
"""
Implements the logic for cert_info -- split out for testing.
Arguments:
user (User): A user.
course_overview (CourseOverview): A course.
course_mode (str): The enrollment mode (honor, verified, audit, etc.)
"""
# simplify the status for the template using this lookup table
template_state = {
CertificateStatuses.generating: 'generating',
CertificateStatuses.regenerating: 'generating',
CertificateStatuses.downloadable: 'ready',
CertificateStatuses.notpassing: 'notpassing',
CertificateStatuses.restricted: 'restricted',
CertificateStatuses.auditing: 'auditing',
}
default_status = 'processing'
default_info = {'status': default_status,
'show_disabled_download_button': False,
'show_download_url': False,
'show_survey_button': False,
'can_unenroll': True
}
if cert_status is None:
return default_info
is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing', 'auditing')
if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status:
return {}
status = template_state.get(cert_status['status'], default_status)
status_dict = {
'status': status,
'show_download_url': status == 'ready',
'show_disabled_download_button': status == 'generating',
'mode': cert_status.get('mode', None),
'linked_in_url': None,
'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES,
}
if (status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing') and
course_overview.end_of_course_survey_url is not None):
status_dict.update({
'show_survey_button': True,
'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)})
else:
status_dict['show_survey_button'] = False
if status == 'ready':
# showing the certificate web view button if certificate is ready state and feature flags are enabled.
if has_html_certificates_enabled(course_overview.id, course_overview):
if course_overview.has_any_active_web_certificate:
status_dict.update({
'show_cert_web_view': True,
'cert_web_view_url': get_certificate_url(course_id=course_overview.id, uuid=cert_status['uuid'])
})
else:
# don't show download certificate button if we don't have an active certificate for course
status_dict['show_download_url'] = False
elif 'download_url' not in cert_status:
log.warning(
u"User %s has a downloadable cert for %s, but no download url",
user.username,
course_overview.id
)
return default_info
else:
status_dict['download_url'] = cert_status['download_url']
# If enabled, show the LinkedIn "add to profile" button
# Clicking this button sends the user to LinkedIn where they
# can add the certificate information to their profile.
linkedin_config = LinkedInAddToProfileConfiguration.current()
# posting certificates to LinkedIn is not currently
# supported in microsites/White Labels
if linkedin_config.enabled and not microsite.is_request_in_microsite():
status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
course_overview.id,
course_overview.display_name,
cert_status.get('mode'),
cert_status['download_url']
)
if status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing'):
if 'grade' not in cert_status:
# Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
# who need to be regraded (we weren't tracking 'notpassing' at first).
# We can add a log.warning here once we think it shouldn't happen.
return default_info
else:
status_dict['grade'] = cert_status['grade']
return status_dict
@ensure_csrf_cookie
def signin_user(request):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
external_auth_response = external_auth_login(request)
if external_auth_response is not None:
return external_auth_response
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
third_party_auth_error = None
for msg in messages.get_messages(request):
if msg.extra_tags.split()[0] == "social-auth":
# msg may or may not be translated. Try translating [again] in case we are able to:
third_party_auth_error = _(unicode(msg)) # pylint: disable=translation-of-non-string
break
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
# Bool injected into JS to submit form if we're inside a running third-
# party auth pipeline; distinct from the actual instance of the running
# pipeline, if any.
'pipeline_running': 'true' if pipeline.running(request) else 'false',
'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
'platform_name': microsite.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'third_party_auth_error': third_party_auth_error
}
return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
external_auth_response = external_auth_register(request)
if external_auth_response is not None:
return external_auth_response
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
'email': '',
'name': '',
'running_pipeline': None,
'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
'platform_name': microsite.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'selected_provider': '',
'username': '',
}
if extra_context is not None:
context.update(extra_context)
if context.get("extauth_domain", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return render_to_response('register-shib.html', context)
# If third-party auth is enabled, prepopulate the form with data from the
# selected provider.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
current_provider = provider.Registry.get_from_pipeline(running_pipeline)
if current_provider is not None:
overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
overrides['running_pipeline'] = running_pipeline
overrides['selected_provider'] = current_provider.name
context.update(overrides)
return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
"""
    Compute additional information from the given course modes and the user's
    current enrollment.
    Returns the following information:
    - whether to show the course upsell information
    - the number of days remaining before the upsell offer expires
"""
if modes is None:
modes = CourseMode.modes_for_course_dict(course_id)
mode_info = {'show_upsell': False, 'days_for_upsell': None}
# we want to know if the user is already enrolled as verified or credit and
# if verified is an option.
if CourseMode.VERIFIED in modes and enrollment.mode in CourseMode.UPSELL_TO_VERIFIED_MODES:
mode_info['show_upsell'] = True
# if there is an expiration date, find out how long from now it is
if modes['verified'].expiration_datetime:
today = datetime.datetime.now(UTC).date()
mode_info['days_for_upsell'] = (modes['verified'].expiration_datetime.date() - today).days
return mode_info
def is_course_blocked(request, redeemed_registration_codes, course_key):
"""Checking either registration is blocked or not ."""
blocked = False
for redeemed_registration in redeemed_registration_codes:
        # Registration codes may be generated via the bulk purchase scenario;
        # we only need to check invoice-generated registration codes
        # and verify whether their invoice is still valid.
if redeemed_registration.invoice_item:
if not redeemed_registration.invoice_item.invoice.is_valid:
blocked = True
# disabling email notifications for unpaid registration courses
Optout.objects.get_or_create(user=request.user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
request.user.username,
request.user.email,
course_key
)
track.views.server_track(request, "change-email1-settings", {"receive_emails": "no", "course": course_key.to_deprecated_string()}, page='dashboard')
break
return blocked
@login_required
@ensure_csrf_cookie
def dashboard(request):
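    """Render the student dashboard: the user's course enrollments plus related status information."""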
if settings.PROGS_URLS and settings.PROGS_URLS.get('DASHBOARD', None):
return redirect(get_progs_url(settings.PROGS_URLS.get('DASHBOARD')))
user = request.user
platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)
# for microsites, we want to filter and only show enrollments for courses within
# the microsites 'ORG'
course_org_filter = microsite.get_value('course_org_filter')
# Let's filter out any courses in an "org" that has been declared to be
# in a Microsite
org_filter_out_set = microsite.get_all_orgs()
# remove our current Microsite from the "filter out" list, if applicable
if course_org_filter:
org_filter_out_set.remove(course_org_filter)
# Build our (course, enrollment) list for the user, but ignore any courses that no
# longer exist (because the course IDs have changed). Still, we don't delete those
# enrollments, because it could have been a data push snafu.
course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set))
# sort the enrollment pairs by the enrollment date
course_enrollments.sort(key=lambda x: x.created, reverse=True)
# Retrieve the course modes for each course
enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
__, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
course_modes_by_course = {
course_id: {
mode.slug: mode
for mode in modes
}
for course_id, modes in unexpired_course_modes.iteritems()
}
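    # The resulting mapping looks roughly like (hypothetical course key and modes):
    #   {<CourseKey 'org/course/run'>: {'honor': <CourseMode>, 'verified': <CourseMode>}}
    # so per-course mode lookups below can reuse it without hitting the database again.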
# Check to see if the student has recently enrolled in a course.
# If so, display a notification message confirming the enrollment.
enrollment_message = _create_recent_enrollment_message(
course_enrollments, course_modes_by_course
)
course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)
message = ""
if not user.is_active:
spam_name = get_spam_name(user.email)
message = render_to_string(
'registration/activate_account_notice.html',
{'email': user.email, 'platform_name': platform_name, 'spam_name': spam_name}
)
# Global staff can see what courses errored on their dashboard
staff_access = False
errored_courses = {}
if has_access(user, 'staff', 'global'):
# Show any courses that errored on load
staff_access = True
errored_courses = modulestore().get_errored_courses()
show_courseware_links_for = frozenset(
enrollment.course_id for enrollment in course_enrollments
if has_access(request.user, 'load', enrollment.course_overview)
and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview)
)
# Get any programs associated with courses being displayed.
# This is passed along in the template context to allow rendering of
# program-related information on the dashboard.
course_programs = _get_course_programs(user, [enrollment.course_id for enrollment in course_enrollments])
# Construct a dictionary of course mode information
# used to render the course list. We re-use the course modes dict
# we loaded earlier to avoid hitting the database.
course_mode_info = {
enrollment.course_id: complete_course_mode_info(
enrollment.course_id, enrollment,
modes=course_modes_by_course[enrollment.course_id]
)
for enrollment in course_enrollments
}
# Determine the per-course verification status
# This is a dictionary in which the keys are course locators
# and the values are one of:
#
# VERIFY_STATUS_NEED_TO_VERIFY
# VERIFY_STATUS_SUBMITTED
# VERIFY_STATUS_APPROVED
# VERIFY_STATUS_MISSED_DEADLINE
#
    # Each of these corresponds to a particular message to display
# next to the course on the dashboard.
#
# If a course is not included in this dictionary,
# there is no verification messaging to display.
verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
cert_statuses = {
enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode)
for enrollment in course_enrollments
}
# only show email settings for Mongo course and when bulk email is turned on
show_email_settings_for = frozenset(
enrollment.course_id for enrollment in course_enrollments if (
settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] and
modulestore().get_modulestore_type(enrollment.course_id) != ModuleStoreEnum.Type.xml and
CourseAuthorization.instructor_email_enabled(enrollment.course_id)
)
)
# Verification Attempts
# Used to generate the "you must reverify for course x" banner
verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)
# Gets data for midcourse reverifications, if any are necessary or have failed
statuses = ["approved", "denied", "pending", "must_reverify"]
reverifications = reverification_info(statuses)
show_refund_option_for = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.refundable()
)
block_courses = frozenset(
enrollment.course_id for enrollment in course_enrollments
if is_course_blocked(
request,
CourseRegistrationCode.objects.filter(
course_id=enrollment.course_id,
registrationcoderedemption__redeemed_by=request.user
),
enrollment.course_id
)
)
enrolled_courses_either_paid = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.is_paid_course()
)
# If there are *any* denied reverifications that have not been toggled off,
# we'll display the banner
denied_banner = any(item.display for item in reverifications["denied"])
# Populate the Order History for the side-bar.
order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)
# get list of courses having pre-requisites yet to be completed
courses_having_prerequisites = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.course_overview.pre_requisite_courses
)
courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)
if 'notlive' in request.GET:
redirect_message = _("The course you are looking for does not start until {date}.").format(
date=request.GET['notlive']
)
else:
redirect_message = ''
context = {
'enrollment_message': enrollment_message,
'redirect_message': redirect_message,
'course_enrollments': course_enrollments,
'course_optouts': course_optouts,
'message': message,
'staff_access': staff_access,
'errored_courses': errored_courses,
'show_courseware_links_for': show_courseware_links_for,
'all_course_modes': course_mode_info,
'cert_statuses': cert_statuses,
'credit_statuses': _credit_statuses(user, course_enrollments),
'show_email_settings_for': show_email_settings_for,
'reverifications': reverifications,
'verification_status': verification_status,
'verification_status_by_course': verify_status_by_course,
'verification_msg': verification_msg,
'show_refund_option_for': show_refund_option_for,
'block_courses': block_courses,
'denied_banner': denied_banner,
'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
'user': user,
'logout_url': reverse(logout_user),
'platform_name': platform_name,
'enrolled_courses_either_paid': enrolled_courses_either_paid,
'provider_states': [],
'order_history_list': order_history_list,
'courses_requirements_not_met': courses_requirements_not_met,
'nav_hidden': True,
'course_programs': course_programs,
}
return render_to_response('dashboard.html', context)
def _create_recent_enrollment_message(course_enrollments, course_modes): # pylint: disable=invalid-name
"""
Builds a recent course enrollment message.
Constructs a new message template based on any recent course enrollments
for the student.
Args:
course_enrollments (list[CourseEnrollment]): a list of course enrollments.
course_modes (dict): Mapping of course ID's to course mode dictionaries.
Returns:
A string representing the HTML message output from the message template.
None if there are no recently enrolled courses.
"""
recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollments)
if recently_enrolled_courses:
messages = [
{
"course_id": enrollment.course_overview.id,
"course_name": enrollment.course_overview.display_name,
"allow_donation": _allow_donation(course_modes, enrollment.course_overview.id, enrollment)
}
for enrollment in recently_enrolled_courses
]
platform_name = microsite.get_value('platform_name', settings.PLATFORM_NAME)
return render_to_string(
'enrollment/course_enrollment_message.html',
{'course_enrollment_messages': messages, 'platform_name': platform_name}
)
def _get_recently_enrolled_courses(course_enrollments):
"""
Given a list of enrollments, filter out all but recent enrollments.
Args:
course_enrollments (list[CourseEnrollment]): A list of course enrollments.
Returns:
list[CourseEnrollment]: A list of recent course enrollments.
"""
seconds = DashboardConfiguration.current().recent_enrollment_time_delta
time_delta = (datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds))
return [
enrollment for enrollment in course_enrollments
# If the enrollment has no created date, we are explicitly excluding the course
# from the list of recent enrollments.
if enrollment.is_active and enrollment.created > time_delta
]
def _allow_donation(course_modes, course_id, enrollment):
"""Determines if the dashboard will request donations for the given course.
Check if donations are configured for the platform, and if the current course is accepting donations.
Args:
course_modes (dict): Mapping of course ID's to course mode dictionaries.
course_id (str): The unique identifier for the course.
enrollment(CourseEnrollment): The enrollment object in which the user is enrolled
Returns:
True if the course is allowing donations.
"""
donations_enabled = DonationConfiguration.current().enabled
return donations_enabled and enrollment.mode in course_modes[course_id] and course_modes[course_id][enrollment.mode].min_price == 0
def _update_email_opt_in(request, org):
"""Helper function used to hit the profile API if email opt-in is enabled."""
email_opt_in = request.POST.get('email_opt_in')
if email_opt_in is not None:
email_opt_in_boolean = email_opt_in == 'true'
preferences_api.update_email_opt_in(request.user, org, email_opt_in_boolean)
def _credit_statuses(user, course_enrollments):  # SYNCTODO: we might have to use course_enrollment_pairs instead
"""
Retrieve the status for credit courses.
    A credit course is a course for which a user can purchase
    college credit. The current flow is:
1. User becomes eligible for credit (submits verifications, passes the course, etc.)
2. User purchases credit from a particular credit provider.
3. User requests credit from the provider, usually creating an account on the provider's site.
4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.
The dashboard is responsible for communicating the user's state in this flow.
Arguments:
user (User): The currently logged-in user.
course_enrollments (list[CourseEnrollment]): List of enrollments for the
user.
Returns: dict
The returned dictionary has keys that are `CourseKey`s and values that
are dictionaries with:
* eligible (bool): True if the user is eligible for credit in this course.
* deadline (datetime): The deadline for purchasing and requesting credit for this course.
* purchased (bool): Whether the user has purchased credit for this course.
* provider_name (string): The display name of the credit provider.
* provider_status_url (string): A URL the user can visit to check on their credit request status.
* request_status (string): Either "pending", "approved", or "rejected"
* error (bool): If true, an unexpected error occurred when retrieving the credit status,
so the user should contact the support team.
Example:
>>> _credit_statuses(user, course_enrollments)
{
CourseKey.from_string("edX/DemoX/Demo_Course"): {
"course_key": "edX/DemoX/Demo_Course",
"eligible": True,
"deadline": 2015-11-23 00:00:00 UTC,
"purchased": True,
"provider_name": "Hogwarts",
"provider_status_url": "http://example.com/status",
"request_status": "pending",
"error": False
}
}
"""
from openedx.core.djangoapps.credit import api as credit_api
# Feature flag off
if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
return {}
request_status_by_course = {
request["course_key"]: request["status"]
for request in credit_api.get_credit_requests_for_user(user.username)
}
credit_enrollments = {
enrollment.course_id: enrollment
for enrollment in course_enrollments
if enrollment.mode == "credit"
}
# When a user purchases credit in a course, the user's enrollment
# mode is set to "credit" and an enrollment attribute is set
# with the ID of the credit provider. We retrieve *all* such attributes
# here to minimize the number of database queries.
purchased_credit_providers = {
attribute.enrollment.course_id: attribute.value
for attribute in CourseEnrollmentAttribute.objects.filter(
namespace="credit",
name="provider_id",
enrollment__in=credit_enrollments.values()
).select_related("enrollment")
}
provider_info_by_id = {
provider["id"]: provider
for provider in credit_api.get_credit_providers()
}
statuses = {}
for eligibility in credit_api.get_eligibilities_for_user(user.username):
course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
status = {
"course_key": unicode(course_key),
"eligible": True,
"deadline": eligibility["deadline"],
"purchased": course_key in credit_enrollments,
"provider_name": None,
"provider_status_url": None,
"provider_id": None,
"request_status": request_status_by_course.get(course_key),
"error": False,
}
# If the user has purchased credit, then include information about the credit
# provider from which the user purchased credit.
        # We retrieve the provider's ID from an "enrollment attribute" set on the user's
# enrollment when the user's order for credit is fulfilled by the E-Commerce service.
if status["purchased"]:
provider_id = purchased_credit_providers.get(course_key)
if provider_id is None:
status["error"] = True
log.error(
u"Could not find credit provider associated with credit enrollment "
u"for user %s in course %s. The user will not be able to see his or her "
u"credit request status on the student dashboard. This attribute should "
u"have been set when the user purchased credit in the course.",
user.id, course_key
)
else:
provider_info = provider_info_by_id.get(provider_id, {})
status["provider_name"] = provider_info.get("display_name")
status["provider_status_url"] = provider_info.get("status_url")
status["provider_id"] = provider_id
statuses[course_key] = status
return statuses
@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
"""
Modify the enrollment status for the logged-in user.
The request parameter must be a POST request (other methods return 405)
that specifies course_id and enrollment_action parameters. If course_id or
enrollment_action is not specified, if course_id is not valid, if
enrollment_action is something other than "enroll" or "unenroll", if
enrollment_action is "enroll" and enrollment is closed for the course, or
if enrollment_action is "unenroll" and the user is not enrolled in the
course, a 400 error will be returned. If the user is not logged in, 403
will be returned; it is important that only this case return 403 so the
front end can redirect the user to a registration or login page when this
happens. This function should only be called from an AJAX request, so
the error messages in the responses should never actually be user-visible.
Args:
request (`Request`): The Django request object
Keyword Args:
check_access (boolean): If True, we check that an accessible course actually
exists for the given course_key before we enroll the student.
            The default is True; legacy code or code with non-standard flows
            (e.g. beta tester invitations) may pass False to skip this check,
            but for any standard enrollment flow you want this to be True.
Returns:
Response
"""
# Get the user
user = request.user
# Ensure the user is authenticated
if not user.is_authenticated():
return HttpResponseForbidden()
# Ensure we received a course_id
action = request.POST.get("enrollment_action")
if 'course_id' not in request.POST:
return HttpResponseBadRequest(_("Course id not specified"))
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
except InvalidKeyError:
log.warning(
u"User %s tried to %s with invalid course id: %s",
user.username,
action,
request.POST.get("course_id"),
)
return HttpResponseBadRequest(_("Invalid course id"))
if action == "enroll": # SYNCTODO: Omar took enrollment login
try:
url = enroll(user, course_id, request, check_access)
return HttpResponse(url)
except Exception:
return HttpResponseBadRequest(_("Could not enroll"))
elif action == "unenroll":
enrollment = CourseEnrollment.get_enrollment(user, course_id)
if not enrollment:
return HttpResponseBadRequest(_("You are not enrolled in this course"))
certificate_info = cert_info(user, enrollment.course_overview, enrollment.mode)
if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES:
return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course"))
CourseEnrollment.unenroll(user, course_id)
return HttpResponse()
else:
return HttpResponseBadRequest(_("Enrollment action is invalid"))
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""): # pylint: disable=too-many-statements,unused-argument
"""AJAX request to log in the user."""
post_data = request.POST.copy()
# Decrypt form data if it is encrypted
if 'data_token' in request.POST:
data_token = request.POST.get('data_token')
try:
decoded_data = jwt.decode(data_token,
settings.EDRAAK_LOGISTRATION_SECRET_KEY,
verify=False,
algorithms=[settings.EDRAAK_LOGISTRATION_SIGNING_ALGORITHM])
post_data.update(decoded_data)
except jwt.ExpiredSignatureError:
err_msg = u"The provided data_token has been expired"
AUDIT_LOG.warning(err_msg)
return JsonResponse({
"success": False,
"value": err_msg,
}, status=400)
except jwt.DecodeError:
err_msg = u"Signature verification failed"
AUDIT_LOG.warning(err_msg)
return JsonResponse({
"success": False,
"value": err_msg,
}, status=400)
except (jwt.InvalidTokenError, ValueError):
err_msg = u"Invalid token"
AUDIT_LOG.warning(err_msg)
return JsonResponse({
"success": False,
"value": err_msg,
}, status=400)
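    # The decoded token is expected to carry the same fields as a normal login POST,
    # e.g. a hypothetical payload: {'email': 'user@example.com', 'password': '...'}.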
backend_name = None
email = None
password = None
redirect_url = None
response = None
running_pipeline = None
third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
third_party_auth_successful = False
trumped_by_first_party_auth = bool(post_data.get('email')) or bool(post_data.get('password'))
user = None
parent_user = None
child_user = None
platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)
if third_party_auth_requested and not trumped_by_first_party_auth:
# The user has already authenticated via third-party auth and has not
# asked to do first party auth by supplying a username or password. We
# now want to put them through the same logging and cookie calculation
# logic as with first-party auth.
running_pipeline = pipeline.get(request)
username = running_pipeline['kwargs'].get('username')
backend_name = running_pipeline['backend']
third_party_uid = running_pipeline['kwargs']['uid']
requested_provider = provider.Registry.get_from_pipeline(running_pipeline)
try:
user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid)
third_party_auth_successful = True
except User.DoesNotExist:
AUDIT_LOG.warning(
u'Login failed - user with username {username} has no social auth with backend_name {backend_name}'.format(
username=username, backend_name=backend_name))
return HttpResponse(
_("You've successfully logged into your {provider_name} account, but this account isn't linked with an {platform_name} account yet.").format(
platform_name=platform_name, provider_name=requested_provider.name
)
+ "<br/><br/>" +
_("Use your {platform_name} username and password to log into {platform_name} below, "
"and then link your {platform_name} account with {provider_name} from your dashboard.").format(
platform_name=platform_name, provider_name=requested_provider.name
)
+ "<br/><br/>" +
_("If you don't have an {platform_name} account yet, "
"click <strong>Register</strong> at the top of the page.").format(
platform_name=platform_name),
content_type="text/plain",
status=403
)
elif 'child_user_id' in post_data:
child_user_id = post_data['child_user_id']
try:
child_user = User.objects.get(id=child_user_id)
except User.DoesNotExist:
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Child login failed - Unknown child user id")
else:
AUDIT_LOG.warning(u"Child login failed - Unknown child user id: {0}".format(child_user_id))
else:
if 'email' not in post_data or 'password' not in post_data:
return JsonResponse({
"success": False,
"value": _('There was an error receiving your login information. Please email us.'), # TODO: User error message
}) # TODO: this should be status code 400 # pylint: disable=fixme
email = post_data['email']
password = post_data['password']
try:
user = User.objects.exclude(groups__name=CHILD_USER_PERMISSION_GROUP).get(email=email)
except User.DoesNotExist:
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Unknown user email")
else:
AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))
# check if the user has a linked shibboleth account, if so, redirect the user to shib-login
# This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
# address into the Gmail login.
if settings.FEATURES.get('AUTH_USE_SHIB') and user:
try:
eamap = ExternalAuthMap.objects.get(user=user)
if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return JsonResponse({
"success": False,
"redirect": reverse('shib-login'),
}) # TODO: this should be status code 301 # pylint: disable=fixme
except ExternalAuthMap.DoesNotExist:
# This is actually the common case, logging in user without external linked login
AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)
# see if account has been locked out due to excessive login failures
user_found_by_email_lookup = user
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
minutes = settings.MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS / 60
message = ungettext(
"This account has been temporarily locked due to excessive login failures. Try again in {number} minute.", # noqa
"This account has been temporarily locked due to excessive login failures. Try again in {number} minutes.", # noqa
minutes,
).format(number=minutes)
return JsonResponse({
"success": False,
"value": message,
}) # TODO: this should be status code 429 # pylint: disable=fixme
# see if the user must reset his/her password due to any policy settings
if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
return JsonResponse({
"success": False,
"value": _('Your password has expired due to password policy on this account. You must '
'reset your password before you can log in again. Please click the '
'"Forgot Password" link on this page to reset your password before logging in again.'),
}) # TODO: this should be status code 403 # pylint: disable=fixme
    # set the user object to the child_user object if a child is being
    # logged in
if child_user:
parent_user = request.user
user = child_user
# if the user doesn't exist, we want to set the username to an invalid
# username so that authentication is guaranteed to fail and we can take
# advantage of the ratelimited backend
username = user.username if user else ""
if not third_party_auth_successful:
try:
user = authenticate(username=username, password=password, request=request)
# this occurs when there are too many attempts from the same IP address
except RateLimitException:
minutes = BadRequestRateLimiter.minutes
message = ungettext(
"Too many failed login attempts. Try again in {number} minute.",
"Too many failed login attempts. Try again in {number} minutes.",
minutes,
).format(number=minutes)
return JsonResponse({
"success": False,
"value": message,
}) # TODO: this should be status code 429 # pylint: disable=fixme
if user is None:
# tick the failed login counters if the user exists in the database
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
LoginFailures.increment_lockout_counter(user_found_by_email_lookup)
# if we didn't find this username earlier, the account for this email
# doesn't exist, and doesn't have a corresponding password
if username != "":
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
else:
AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
return JsonResponse({
"success": False,
"value": _('Email or password is incorrect.'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
# successful login, clear failed login attempts counters, if applicable
if LoginFailures.is_feature_enabled():
LoginFailures.clear_lockout_counter(user)
# Track the user's sign in
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.identify(user.id, {
'email': email,
'username': username
})
analytics.track(
user.id,
"edx.bi.user.account.authenticated",
{
'category': "conversion",
'label': post_data.get('course_id'),
'provider': None
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
if user is not None and user.is_active:
try:
# We do not log here, because we have a handler registered
# to perform logging on successful logins.
login(request, user)
if post_data.get('remember') == 'true':
request.session.set_expiry(604800)
log.debug("Setting user session to never expire")
else:
request.session.set_expiry(0)
if parent_user:
request.session['parent_user'] = json.dumps({
'user_id': parent_user.id,
'username': parent_user.username,
'email': parent_user.email,
'name': parent_user.profile.name
})
except Exception as exc: # pylint: disable=broad-except
AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
log.critical("Login failed - Could not create session. Is memcached running?")
log.exception(exc)
raise
redirect_url = None # The AJAX method calling should know the default destination upon success
if third_party_auth_successful:
redirect_url = pipeline.get_complete_url(backend_name)
response = JsonResponse({
"success": True,
"redirect_url": redirect_url,
})
# Ensure that the external marketing site can
# detect that the user is logged in.
return set_logged_in_cookies(request, response, user)
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
else:
AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
reactivation_email_for_user(user)
not_activated_msg = (
_("This account has not been activated. We have sent another activation message. Please check your email for the activation instructions.") + " " +
_("If you're unable to find the password reset email, please check your email account's \"Spam\" or \"Junk\" folders to ensure the message was not filtered.")
)
return JsonResponse({
"success": False,
"value": not_activated_msg,
}) # TODO: this should be status code 400 # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.psa("social:complete")
def login_oauth_token(request, backend):
"""
Authenticate the client using an OAuth access token by using the token to
retrieve information from a third party and matching that information to an
existing user.
"""
warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
backend = request.backend
if isinstance(backend, social_oauth.BaseOAuth1) or isinstance(backend, social_oauth.BaseOAuth2):
if "access_token" in request.POST:
# Tell third party auth pipeline that this is an API call
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
user = None
access_token = request.POST["access_token"]
try:
user = backend.do_auth(access_token)
except (HTTPError, AuthException):
pass
# do_auth can return a non-User object if it fails
if user and isinstance(user, User):
login(request, user)
return JsonResponse(status=204)
else:
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
return JsonResponse({"error": "invalid_token"}, status=401)
else:
return JsonResponse({"error": "invalid_request"}, status=400)
raise Http404
@ensure_csrf_cookie
def logout_user(request):
"""
HTTP request to log out the user. Redirects to marketing page.
Deletes both the CSRF and sessionid cookies so the marketing
site can determine the logged in state of the user
"""
# We do not log here, because we have a handler registered
# to perform logging on successful logouts.
logout(request)
if settings.FEATURES.get('AUTH_USE_CAS'):
target = reverse('cas-logout')
else:
target = '/'
response = redirect(target)
delete_logged_in_cookies(response)
return response
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
"""
Renders the view used to manage user standing. Also displays a table
of user accounts that have been disabled and who disabled them.
"""
if not request.user.is_staff:
raise Http404
all_disabled_accounts = UserStanding.objects.filter(
account_status=UserStanding.ACCOUNT_DISABLED
)
all_disabled_users = [standing.user for standing in all_disabled_accounts]
headers = ['username', 'account_changed_by']
rows = []
for user in all_disabled_users:
row = [user.username, user.standing.all()[0].changed_by]
rows.append(row)
context = {'headers': headers, 'rows': rows}
return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
"""
Ajax call to change user standing. Endpoint of the form
in manage_user_standing.html
"""
if not request.user.is_staff:
raise Http404
username = request.POST.get('username')
context = {}
if username is None or username.strip() == '':
context['message'] = _('Please enter a username')
return JsonResponse(context, status=400)
account_action = request.POST.get('account_action')
if account_action is None:
context['message'] = _('Please choose an option')
return JsonResponse(context, status=400)
username = username.strip()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
context['message'] = _("User with username {} does not exist").format(username)
return JsonResponse(context, status=400)
else:
user_account, _success = UserStanding.objects.get_or_create(
user=user, defaults={'changed_by': request.user},
)
if account_action == 'disable':
user_account.account_status = UserStanding.ACCOUNT_DISABLED
context['message'] = _("Successfully disabled {}'s account").format(username)
log.info(u"%s disabled %s's account", request.user, username)
elif account_action == 'reenable':
user_account.account_status = UserStanding.ACCOUNT_ENABLED
context['message'] = _("Successfully reenabled {}'s account").format(username)
log.info(u"%s reenabled %s's account", request.user, username)
else:
context['message'] = _("Unexpected account status")
return JsonResponse(context, status=400)
user_account.changed_by = request.user
user_account.standing_last_changed_at = datetime.datetime.now(UTC)
user_account.save()
return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
"""JSON call to change a profile setting: Right now, location"""
# TODO (vshnayder): location is no longer used
u_prof = UserProfile.objects.get(user=request.user) # request.user.profile_cache
if 'location' in request.POST:
u_prof.location = request.POST['location']
u_prof.save()
return JsonResponse({
"success": True,
"location": u_prof.location,
})
class AccountValidationError(Exception):
def __init__(self, message, field):
super(AccountValidationError, self).__init__(message)
self.field = field
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs): # pylint: disable=unused-argument
"""
    Handler that saves the user's signup source
    when the user is created.
"""
if 'created' in kwargs and kwargs['created']:
site = microsite.get_value('SITE_NAME')
if site:
user_signup_source = UserSignupSource(user=kwargs['instance'], site=site)
user_signup_source.save()
log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
def _do_create_account(form, is_active=False):
"""
Given cleaned post variables, create the User and UserProfile objects, as well as the
registration for this user.
Returns a tuple (User, UserProfile, Registration).
Note: this function is also used for creating test users.
"""
if not form.is_valid():
raise ValidationError(form.errors)
user = User(
username=form.cleaned_data["username"],
email=form.cleaned_data["email"],
is_active=is_active
)
user.set_password(form.cleaned_data["password"])
registration = Registration()
# TODO: Rearrange so that if part of the process fails, the whole process fails.
# Right now, we can have e.g. no registration e-mail sent out and a zombie account
try:
with transaction.atomic():
user.save()
except IntegrityError:
# Figure out the cause of the integrity error
if len(User.objects.filter(username=user.username)) > 0:
raise AccountValidationError(
_("An account with the Public Username '{username}' already exists.").format(username=user.username),
field="username"
)
elif len(User.objects.filter(email=user.email)) > 0:
raise AccountValidationError(
_("An account with the Email '{email}' already exists.").format(email=user.email),
field="email"
)
else:
raise
# add this account creation to password history
# NOTE, this will be a NOP unless the feature has been turned on in configuration
password_history_entry = PasswordHistory()
password_history_entry.create(user)
registration.register(user)
profile_fields = [
"name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
"year_of_birth"
]
profile = UserProfile(
user=user,
**{key: form.cleaned_data.get(key) for key in profile_fields}
)
extended_profile = form.cleaned_extended_profile
if extended_profile:
profile.meta = json.dumps(extended_profile)
try:
profile.save()
except Exception: # pylint: disable=broad-except
log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
raise
return (user, profile, registration)
def create_account_with_params(request, params):
"""
Given a request and a dict of parameters (which may or may not have come
from the request), create an account for the requesting user, including
creating a comments service user object and sending an activation email.
This also takes external/third-party auth into account, updates that as
necessary, and authenticates the user for the request's session.
Does not return anything.
Raises AccountValidationError if an account with the username or email
specified by params already exists, or ValidationError if any of the given
parameters is invalid for any other reason.
Issues with this code:
* It is not transactional. If there is a failure part-way, an incomplete
account will be created and left in the database.
* Third-party auth passwords are not verified. There is a comment that
they are unused, but it would be helpful to have a sanity check that
they are sane.
    * It is over 300 lines long (!) and includes disparate functionality, from
registration e-mails to all sorts of other things. It should be broken
up into semantically meaningful functions.
* The user-facing text is rather unfriendly (e.g. "Username must be a
minimum of two characters long" rather than "Please use a username of
at least two characters").
"""
# Copy params so we can modify it; we can't just do dict(params) because if
# params is request.POST, that results in a dict containing lists of values
params = dict(params.items())
# allow for microsites to define their own set of required/optional/hidden fields
extra_fields = microsite.get_value(
'REGISTRATION_EXTRA_FIELDS',
getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
)
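    # extra_fields maps a registration field name to its requirement level, for example
    # (hypothetical): {'level_of_education': 'optional', 'city': 'hidden', 'country': 'required'}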
# Boolean of whether a 3rd party auth provider and credentials were provided in
# the API so the newly created account can link with the 3rd party account.
#
# Note: this is orthogonal to the 3rd party authentication pipeline that occurs
# when the account is created via the browser and redirect URLs.
should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params
if (should_link_with_social_auth and params.get('random_password', False)) or (third_party_auth.is_enabled() and pipeline.running(request)):
params["password"] = pipeline.make_random_password()
params["confirm_password"] = params["password"]
params["confirm_email"] = params["email"]
elif params.get("is_third_party_auth"):
# Hack by Edraak to detect timed-out social login attempts.
raise ValidationError({
"password": _("Registration process has timed out. Please refresh the page and start over.")
}, code=400)
# if doing signup for an external authorization, then get email, password, name from the eamap
# don't use the ones from the form, since the user could have hacked those
# unless originally we didn't get a valid email or name from the external auth
# TODO: We do not check whether these values meet all necessary criteria, such as email length
do_external_auth = 'ExternalAuthMap' in request.session
if do_external_auth:
eamap = request.session['ExternalAuthMap']
try:
validate_email(eamap.external_email)
params["email"] = eamap.external_email
params["confirm_email"] = params["email"]
except ValidationError:
pass
if eamap.external_name.strip() != '':
params["name"] = eamap.external_name
params["password"] = eamap.internal_password
params["confirm_password"] = params["password"]
log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])
extended_profile_fields = microsite.get_value('extended_profile_fields', [])
enforce_password_policy = (
settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
not do_external_auth
)
# Can't have terms of service for certain SHIB users, like at Stanford
tos_required = (
not settings.FEATURES.get("AUTH_USE_SHIB") or
not settings.FEATURES.get("SHIB_DISABLE_TOS") or
not do_external_auth or
not eamap.external_domain.startswith(
external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
)
)
form = AccountCreationForm(
data=params,
extra_fields=extra_fields,
extended_profile_fields=extended_profile_fields,
enforce_username_neq_password=True,
enforce_password_policy=enforce_password_policy,
tos_required=tos_required,
)
# Perform operations within a transaction that are critical to account creation
with transaction.atomic():
# first, create the account
(user, profile, registration) = _do_create_account(form, is_active=params['is_third_party_registration'])
# next, link the account with social auth, if provided via the API.
# (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
if should_link_with_social_auth:
backend_name = params['provider']
request.social_strategy = social_utils.load_strategy(request)
redirect_uri = reverse('social:complete', args=(backend_name, ))
request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
social_access_token = params.get('access_token')
if not social_access_token:
raise ValidationError({
'access_token': [
_("An access_token is required when passing value ({}) for provider.").format(
params['provider']
)
]
})
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
pipeline_user = None
error_message = ""
try:
pipeline_user = request.backend.do_auth(social_access_token, user=user)
except AuthAlreadyAssociated:
error_message = _("The provided access_token is already associated with another user.")
except (HTTPError, AuthException):
error_message = _("The provided access_token is not valid.")
if not pipeline_user or not isinstance(pipeline_user, User):
# Ensure user does not re-enter the pipeline
                request.social_strategy.clean_partial_pipeline(social_access_token)
raise ValidationError({'access_token': [error_message]})
# Perform operations that are non-critical parts of account creation
preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())
if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
try:
enable_notifications(user)
except Exception:
log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))
dog_stats_api.increment("common.student.account_created")
# If the user is registering via 3rd party auth, track which provider they use
third_party_provider = None
running_pipeline = None
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)
# Track the user's registration
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
identity_args = [
user.id, # pylint: disable=no-member
{
'email': user.email,
'username': user.username,
'name': profile.name,
'age': profile.age,
'education': profile.level_of_education_display,
'address': profile.mailing_address,
'gender': profile.gender_display,
'country': unicode(profile.country),
}
]
if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'):
identity_args.append({
"MailChimp": {
"listId": settings.MAILCHIMP_NEW_USER_LIST_ID
}
})
analytics.identify(*identity_args)
analytics.track(
user.id,
"edx.bi.user.account.registered",
{
'category': 'conversion',
'label': params.get('course_id'),
'provider': third_party_provider.name if third_party_provider else None
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
create_comments_service_user(user)
# Don't send email if we are:
#
# 1. Doing load testing.
# 2. Random user generation for other forms of testing.
# 3. External auth bypassing activation.
# 4. Have the platform configured to not require e-mail activation.
# 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
#
# Note that this feature is only tested as a flag set one way or
    # the other for *new* systems. We need to be careful about
# changing settings on a running system to make sure no users are
# left in an inconsistent state (or doing a migration if they are).
send_email = (
not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and
not (
third_party_provider and third_party_provider.skip_email_verification and
user.email == running_pipeline['kwargs'].get('details', {}).get('email')
)
)
if send_email:
context = {
'lms_base': settings.LMS_BASE,
'name': profile.name.split(' ')[0],
'key': registration.activation_key,
'direction': 'rtl' if get_language() == 'ar' else 'ltr'
}
# composes activation email
subject = render_to_string('emails/activation_email_subject.txt', context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.html', context)
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
from_address = '{sender_name} <{email_addr}>'.format(sender_name=_('Edraak Platform').encode('utf-8'), email_addr=from_address)
try:
if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
'-' * 80 + '\n\n' + message)
else:
dest_addr = user.email
msg = EmailMessage(subject, message, from_email=from_address, to=[dest_addr])
msg.content_subtype = 'html'
msg.send(fail_silently=False)
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send activation email to user from "%s"', from_address, exc_info=True)
else:
registration.activate()
# Immediately after a user creates an account, we log them in. They are only
# logged in until they close the browser. They can't log in again until they click
# the activation link from the email.
new_user = authenticate(username=user.username, password=params['password'])
login(request, new_user)
request.session.set_expiry(0)
# TODO: there is no error checking here to see that the user actually logged in successfully,
# and is not yet an active user.
if new_user is not None:
AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))
if do_external_auth:
eamap.user = new_user
eamap.dtsignup = datetime.datetime.now(UTC)
eamap.save()
AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)
if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
log.info('bypassing activation email')
new_user.is_active = True
new_user.save()
AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))
return new_user
@csrf_exempt
def create_account(request, post_override=None):
"""
JSON call to create new edX account.
Used by form in signup_modal.html, which is included into navigation.html
"""
warnings.warn("Please use RegistrationView instead.", DeprecationWarning)
try:
user = create_account_with_params(request, post_override or request.POST)
except AccountValidationError as exc:
return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
except ValidationError as exc:
field, error_list = next(exc.message_dict.iteritems())
return JsonResponse(
{
"success": False,
"field": field,
"value": error_list[0],
},
status=400
)
redirect_url = None # The AJAX method calling should know the default destination upon success
# Resume the third-party-auth pipeline if necessary.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
redirect_url = pipeline.get_complete_url(running_pipeline['backend'])
response = JsonResponse({
'success': True,
'redirect_url': redirect_url,
})
set_logged_in_cookies(request, response, user)
return response
def auto_auth(request):
"""
Create or configure a user account, then log in as that user.
Enabled only when
settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.
Accepts the following querystring parameters:
* `username`, `email`, and `password` for the user account
* `full_name` for the user profile (the user's full name; defaults to the username)
* `staff`: Set to "true" to make the user global staff.
* `course_id`: Enroll the student in the course with `course_id`
* `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
* `no_login`: Define this to create the user but not login
* `redirect`: Set to "true" will redirect to course if course_id is defined, otherwise it will redirect to dashboard
If username, email, or password are not provided, use
randomly generated credentials.
"""
# Generate a unique name to use if none provided
unique_name = uuid.uuid4().hex[0:30]
# Use the params from the request, otherwise use these defaults
username = request.GET.get('username', unique_name)
password = request.GET.get('password', unique_name)
email = request.GET.get('email', unique_name + "@example.com")
full_name = request.GET.get('full_name', username)
is_staff = request.GET.get('staff', None)
is_superuser = request.GET.get('superuser', None)
course_id = request.GET.get('course_id', None)
# mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit'
enrollment_mode = request.GET.get('enrollment_mode', 'honor')
course_key = None
if course_id:
course_key = CourseLocator.from_string(course_id)
role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
redirect_when_done = request.GET.get('redirect', '').lower() == 'true'
login_when_done = 'no_login' not in request.GET
form = AccountCreationForm(
data={
'username': username,
'email': email,
'password': password,
'name': full_name,
},
tos_required=False
)
# Attempt to create the account.
# If successful, this will return a tuple containing
# the new user object.
try:
user, profile, reg = _do_create_account(form)
except AccountValidationError:
# Attempt to retrieve the existing user.
user = User.objects.get(username=username)
user.email = email
user.set_password(password)
user.save()
profile = UserProfile.objects.get(user=user)
reg = Registration.objects.get(user=user)
# Set the user's global staff bit
if is_staff is not None:
user.is_staff = (is_staff == "true")
user.save()
if is_superuser is not None:
user.is_superuser = (is_superuser == "true")
user.save()
# Activate the user
reg.activate()
reg.save()
# ensure parental consent threshold is met
year = datetime.date.today().year
age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
profile.year_of_birth = (year - age_limit) - 1
profile.save()
# Enroll the user in a course
if course_key is not None:
CourseEnrollment.enroll(user, course_key, mode=enrollment_mode)
# Apply the roles
for role_name in role_names:
role = Role.objects.get(name=role_name, course_id=course_key)
user.roles.add(role)
# Log in as the user
if login_when_done:
user = authenticate(username=username, password=password)
login(request, user)
create_comments_service_user(user)
# Provide the user with a valid CSRF token
# then return a 200 response unless redirect is true
if redirect_when_done:
# Redirect to course info page if course_id is known
if course_id:
try:
# redirect to course info page in LMS
redirect_url = reverse(
'info',
kwargs={'course_id': course_id}
)
except NoReverseMatch:
# redirect to course outline page in Studio
redirect_url = reverse(
'course_handler',
kwargs={'course_key_string': course_id}
)
else:
try:
# redirect to dashboard for LMS
redirect_url = reverse('dashboard')
except NoReverseMatch:
# redirect to home for Studio
redirect_url = reverse('home')
return redirect(redirect_url)
elif request.META.get('HTTP_ACCEPT') == 'application/json':
response = JsonResponse({
'created_status': u"Logged in" if login_when_done else "Created",
'username': username,
'email': email,
'password': password,
'user_id': user.id, # pylint: disable=no-member
'anonymous_id': anonymous_id_for_user(user, None),
})
else:
success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
u"Logged in" if login_when_done else "Created",
username, email, password, user.id # pylint: disable=no-member
)
response = HttpResponse(success_msg)
response.set_cookie('csrftoken', csrf(request)['csrf_token'])
return response
@ensure_csrf_cookie
def activate_account(request, key):
"""When link in activation e-mail is clicked"""
regs = Registration.objects.filter(activation_key=key)
if len(regs) == 1:
user_logged_in = request.user.is_authenticated()
already_active = True
if not regs[0].user.is_active:
regs[0].activate()
already_active = False
# Enroll student in any pending courses he/she may have if auto_enroll flag is set
student = User.objects.filter(id=regs[0].user_id)
if student:
ceas = CourseEnrollmentAllowed.objects.filter(email=student[0].email)
for cea in ceas:
if cea.auto_enroll:
enrollment = CourseEnrollment.enroll(student[0], cea.course_id)
manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student[0].email)
if manual_enrollment_audit is not None:
# get the enrolled by user and reason from the ManualEnrollmentAudit table.
# then create a new ManualEnrollmentAudit table entry for the same email
# different transition state.
ManualEnrollmentAudit.create_manual_enrollment_audit(
manual_enrollment_audit.enrolled_by, student[0].email, ALLOWEDTOENROLL_TO_ENROLLED,
manual_enrollment_audit.reason, enrollment
)
resp = render_to_response(
"registration/activation_complete.html",
{
'user_logged_in': user_logged_in,
'already_active': already_active
}
)
return resp
if len(regs) == 0:
return render_to_response(
"registration/activation_invalid.html",
{'csrf': csrf(request)['csrf_token']}
)
return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
""" Attempts to send a password reset e-mail. """
# Add some rate limiting here by re-using the RateLimitMixin as a helper class
limiter = BadRequestRateLimiter()
if limiter.is_rate_limit_exceeded(request):
AUDIT_LOG.warning("Rate limit exceeded in password_reset")
return HttpResponseForbidden()
form = PasswordResetFormNoActive(request.POST)
if form.is_valid():
form.save(use_https=request.is_secure(),
from_email=microsite.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
request=request,
domain_override=request.get_host())
# When password change is complete, an "edx.user.settings.changed" event will be emitted.
# But because changing the password is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "password",
"old": None,
"new": None,
"user_id": request.user.id,
}
)
else:
# bad user? tick the rate limiter counter
AUDIT_LOG.info("Bad password_reset user passed in.")
limiter.tick_bad_request_counter(request)
return JsonResponse({
'success': True,
'value': render_to_string('registration/password_reset_done.html', {}),
})
def password_reset_confirm_wrapper(
request,
uidb36=None,
token=None,
):
""" A wrapper around django.contrib.auth.views.password_reset_confirm.
Needed because we want to set the user as active at this step.
"""
# cribbed from django.contrib.auth.views.password_reset_confirm
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
user.is_active = True
user.save()
except (ValueError, User.DoesNotExist):
pass
# tie in password strength enforcement as an optional level of
# security protection
err_msg = None
if request.method == 'POST':
password = request.POST['new_password1']
if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
try:
validate_password_length(password)
validate_password_complexity(password)
validate_password_dictionary(password)
except ValidationError, err:
err_msg = _('Password: ') + '; '.join(err.messages)
# also, check the password reuse policy
if not PasswordHistory.is_allowable_password_reuse(user, password):
if user.is_staff:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
else:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
err_msg = ungettext(
"You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
"You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
num_distinct
).format(num=num_distinct)
# also, check to see if passwords are getting reset too frequently
if PasswordHistory.is_password_reset_too_soon(user):
num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
err_msg = ungettext(
"You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
"You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
num_days
).format(num=num_days)
if err_msg:
# We have a password reset attempt which violates some security policy, use the
# existing Django template to communicate this back to the user
context = {
'validlink': True,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': err_msg,
'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
}
return TemplateResponse(request, 'registration/password_reset_confirm.html', context)
else:
# we also want to pass settings.PLATFORM_NAME in as extra_context
extra_context = {"platform_name": microsite.get_value('platform_name', settings.PLATFORM_NAME)}
# Support old password reset URLs that used base36 encoded user IDs.
# https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231
try:
uidb64 = force_text(urlsafe_base64_encode(force_bytes(base36_to_int(uidb36))))
except ValueError:
uidb64 = '1' # dummy invalid ID (incorrect padding for base64)
if request.method == 'POST':
# remember what the old password hash is before we call down
old_password_hash = user.password
result = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=extra_context
)
# get the updated user
updated_user = User.objects.get(id=uid_int)
# did the password hash change, if so record it in the PasswordHistory
if updated_user.password != old_password_hash:
entry = PasswordHistory()
entry.create(updated_user)
return result
else:
return password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=extra_context
)
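# Illustrative sketch (added for clarity, not part of the original view): the
# legacy base36 uid from old reset links is converted above to the base64 form
# that newer Django versions expect. For a hypothetical uidb36 value this is:
#
#     uidb36 = '1b'                                      # base36 for user id 47
#     uid_bytes = force_bytes(base36_to_int(uidb36))     # b'47'
#     uidb64 = force_text(urlsafe_base64_encode(uid_bytes))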
def reactivation_email_for_user(user):
try:
reg = Registration.objects.get(user=user)
except Registration.DoesNotExist:
return JsonResponse({
"success": False,
"error": _('No inactive user with this e-mail exists'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
context = {
'lms_base': settings.LMS_BASE,
'name': user.profile.name.split(' ')[0],
'key': reg.activation_key,
'direction': 'rtl' if get_language() == 'ar' else 'ltr'
}
subject = render_to_string('emails/activation_email_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.html', context)
from_address = '{sender_name} <{email_addr}>'.format(sender_name=_('Edraak Platform').encode('utf-8'),
email_addr=settings.DEFAULT_FROM_EMAIL)
try:
msg = EmailMessage(subject, message, from_email=from_address, to=[user.email])
msg.content_subtype = 'html'
msg.send(fail_silently=False)
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send reactivation email from "%s"', settings.DEFAULT_FROM_EMAIL, exc_info=True)
return JsonResponse({
"success": False,
"error": _('Unable to send reactivation email')
}) # TODO: this should be status code 500 # pylint: disable=fixme
return JsonResponse({"success": True})
def validate_new_email(user, new_email):
"""
Given a new email for a user, does some basic verification of the new address. If any issues are encountered
during verification, a ValueError is raised.
"""
try:
validate_email(new_email)
except ValidationError:
raise ValueError(_('Valid e-mail address required.'))
if new_email == user.email:
raise ValueError(_('Old email is the same as the new email.'))
if User.objects.filter(email=new_email).count() != 0:
raise ValueError(_('An account with this e-mail already exists.'))
def do_email_change_request(user, new_email, activation_key=None):
"""
Given a new email for a user, does some basic verification of the new address and sends an activation message
to the new address. If any issues are encountered with verification or sending the message, a ValueError will
be thrown.
"""
pec_list = PendingEmailChange.objects.filter(user=user)
if len(pec_list) == 0:
pec = PendingEmailChange()
pec.user = user
else:
pec = pec_list[0]
# if activation_key is not passed as an argument, generate a random key
if not activation_key:
activation_key = uuid.uuid4().hex
pec.new_email = new_email
pec.activation_key = activation_key
pec.save()
context = {
'key': pec.activation_key,
'old_email': user.email,
'new_email': pec.new_email
}
subject = render_to_string('emails/email_change_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/email_change.txt', context)
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
mail.send_mail(subject, message, from_address, [pec.new_email])
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
raise ValueError(_('Unable to send email activation link. Please try again later.'))
# When the email address change is complete, an "edx.user.settings.changed" event will be emitted.
# But because changing the email address is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "email",
"old": context['old_email'],
"new": context['new_email'],
"user_id": user.id,
}
)
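# Illustrative usage sketch (not part of the original module): the two helpers
# above are typically chained from an account-settings view; the names below
# are hypothetical.
#
#     try:
#         validate_new_email(request.user, new_email)
#         do_email_change_request(request.user, new_email)
#     except ValueError as err:
#         return JsonResponse({"success": False, "error": str(err)})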
@ensure_csrf_cookie
def confirm_email_change(request, key): # pylint: disable=unused-argument
"""
User requested a new e-mail. This is called when the activation
link is clicked. We send a confirmation message to the old e-mail address and update the account to use the new one.
"""
with transaction.atomic():
try:
pec = PendingEmailChange.objects.get(activation_key=key)
except PendingEmailChange.DoesNotExist:
response = render_to_response("invalid_email_key.html", {})
transaction.set_rollback(True)
return response
user = pec.user
address_context = {
'old_email': user.email,
'new_email': pec.new_email
}
if len(User.objects.filter(email=pec.new_email)) != 0:
response = render_to_response("email_exists.html", {})
transaction.set_rollback(True)
return response
subject = render_to_string('emails/email_change_subject.txt', address_context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/confirm_email_change.txt', address_context)
u_prof = UserProfile.objects.get(user=user)
meta = u_prof.get_meta()
if 'old_emails' not in meta:
meta['old_emails'] = []
meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
u_prof.set_meta(meta)
u_prof.save()
# Send it to the old email...
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to old address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': user.email})
transaction.set_rollback(True)
return response
user.email = pec.new_email
user.save()
pec.delete()
# And send it to the new email...
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to new address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': pec.new_email})
transaction.set_rollback(True)
return response
response = render_to_response("email_change_successful.html", address_context)
return response
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
"""Modify logged-in user's setting for receiving emails from a course."""
user = request.user
course_id = request.POST.get("course_id")
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
receive_emails = request.POST.get("receive_emails")
if receive_emails:
optout_object = Optout.objects.filter(user=user, course_id=course_key)
if optout_object:
optout_object.delete()
log.info(
u"User %s (%s) opted in to receive emails from course %s",
user.username,
user.email,
course_id
)
track.views.server_track(request, "change-email-settings", {"receive_emails": "yes", "course": course_id}, page='dashboard')
else:
Optout.objects.get_or_create(user=user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
user.username,
user.email,
course_id
)
track.views.server_track(request, "change-email-settings", {"receive_emails": "no", "course": course_id}, page='dashboard')
return JsonResponse({"success": True})
def _get_course_programs(user, user_enrolled_courses): # pylint: disable=invalid-name
"""Build a dictionary of program data required for display on the student dashboard.
Given a user and an iterable of course keys, find all programs relevant to the
user and return them in a dictionary keyed by course key.
Arguments:
user (User): The user to authenticate as when requesting programs.
user_enrolled_courses (list): List of course keys representing the courses in which
the given user has active enrollments.
Returns:
dict, containing programs keyed by course. Empty if programs cannot be retrieved.
"""
course_programs = get_programs_for_dashboard(user, user_enrolled_courses)
programs_data = {}
for course_key, program in course_programs.viewitems():
if program.get('status') == 'active' and program.get('category') == 'xseries':
try:
programs_data[course_key] = {
'course_count': len(program['course_codes']),
'display_name': program['name'],
'category': program.get('category'),
'program_marketing_url': urljoin(
settings.MKTG_URLS.get('ROOT'), 'xseries' + '/{}'
).format(program['marketing_slug']),
'display_category': 'XSeries'
}
except KeyError:
log.warning('Program structure is invalid, skipping display: %r', program)
return programs_data
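# Illustrative example of the structure returned above (not part of the original
# module); the course key and values are hypothetical:
#
#     {
#         <course_key>: {
#             'course_count': 3,
#             'display_name': 'Demo XSeries',
#             'category': 'xseries',
#             'display_category': 'XSeries',
#             'program_marketing_url': 'https://www.example.com/xseries/demo',
#         },
#     }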
|
Edraak/edx-platform
|
common/djangoapps/student/views.py
|
Python
|
agpl-3.0
| 99,571
|
[
"VisIt"
] |
ca60be5f0cfbf6301e5e39be4703483312cb2a6e19f8d8c7b20f032dfc355c57
|
"""
:mod: ReqClient
.. module: ReqClient
:synopsis: implementation of client for RequestDB using DISET framework
"""
import os
import time
import random
import json
import datetime
# # from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.Utilities.List import randomize, fromChar
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.Base.Client import Client
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
class ReqClient(Client):
"""ReqClient is a class manipulating and operation on Requests.
:param ~RPCClient.RPCClient requestManager: RPC client to RequestManager
:param dict requestProxiesDict: RPC client to RequestProxy
:param ~DIRAC.RequestManagementSystem.private.RequestValidator.RequestValidator requestValidator: RequestValidator instance
"""
__requestProxiesDict = {}
__requestValidator = None
def __init__(self, url=None, **kwargs):
"""c'tor
:param self: self reference
:param url: url of the ReqManager
:param kwargs: forwarded to the Base Client class
"""
super(ReqClient, self).__init__(**kwargs)
self.serverURL = 'RequestManagement/ReqManager' if not url else url
self.log = gLogger.getSubLogger("RequestManagement/ReqClient/pid_%s" % (os.getpid()))
def requestProxies(self, timeout=120):
""" get request proxies dict """
if not self.__requestProxiesDict:
self.__requestProxiesDict = {}
proxiesURLs = fromChar(PathFinder.getServiceURL("RequestManagement/ReqProxyURLs"))
if not proxiesURLs:
self.log.warn("CS option RequestManagement/ReqProxyURLs is not set!")
for proxyURL in proxiesURLs:
self.log.debug("creating RequestProxy for url = %s" % proxyURL)
self.__requestProxiesDict[proxyURL] = RPCClient(proxyURL, timeout=timeout)
return self.__requestProxiesDict
def requestValidator(self):
""" get request validator """
if not self.__requestValidator:
self.__requestValidator = RequestValidator()
return self.__requestValidator
def putRequest(self, request, useFailoverProxy=True, retryMainService=0):
"""Put request to RequestManager
:param self: self reference
:param ~Request.Request request: Request instance
:param bool useFailoverProxy: if False, will not attempt to forward the request to ReqProxies
:param int retryMainService: Amount of time we retry on the main ReqHandler in case of failures
:return: S_OK/S_ERROR
"""
errorsDict = {"OK": False}
valid = self.requestValidator().validate(request)
if not valid["OK"]:
self.log.error("putRequest: request not valid", "%s" % valid["Message"])
return valid
# # dump to json
requestJSON = request.toJSON()
if not requestJSON["OK"]:
return requestJSON
requestJSON = requestJSON["Value"]
retryMainService += 1
while retryMainService:
retryMainService -= 1
setRequestMgr = self._getRPC().putRequest(requestJSON)
if setRequestMgr["OK"]:
return setRequestMgr
errorsDict["RequestManager"] = setRequestMgr["Message"]
# sleep a bit
time.sleep(random.randint(1, 5))
self.log.warn("putRequest: unable to set request '%s' at RequestManager" %
request.RequestName, setRequestMgr["Message"])
proxies = self.requestProxies() if useFailoverProxy else {}
for proxyURL in randomize(proxies.keys()):
proxyClient = proxies[proxyURL]
self.log.debug("putRequest: trying RequestProxy at %s" % proxyURL)
setRequestProxy = proxyClient.putRequest(requestJSON)
if setRequestProxy["OK"]:
if setRequestProxy["Value"]["set"]:
self.log.info("putRequest: request '%s' successfully set using RequestProxy %s" % (request.RequestName,
proxyURL))
elif setRequestProxy["Value"]["saved"]:
self.log.info("putRequest: request '%s' successfully forwarded to RequestProxy %s" % (request.RequestName,
proxyURL))
return setRequestProxy
else:
self.log.warn("putRequest: unable to set request using RequestProxy %s: %s" % (proxyURL,
setRequestProxy["Message"]))
errorsDict["RequestProxy(%s)" % proxyURL] = setRequestProxy["Message"]
# # if we're here neither requestManager nor requestProxy were successful
self.log.error("putRequest: unable to set request", "'%s'" % request.RequestName)
errorsDict["Message"] = "ReqClient.putRequest: unable to set request '%s'" % request.RequestName
return errorsDict
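# Illustrative usage sketch (not part of the original class): a typical client
# call, with hypothetical request contents, could look like
#
#     reqClient = ReqClient()
#     request = Request()
#     request.RequestName = "myTransfer"
#     ...  # populate the request with Operations and Files
#     res = reqClient.putRequest(request, useFailoverProxy=True, retryMainService=2)
#     if not res["OK"]:
#         gLogger.error("could not set request", res["Message"])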
def getRequest(self, requestID=0):
"""Get request from RequestDB
:param self: self reference
:param int requestID: ID of the request. If 0, choice is made for you
:return: S_OK( Request instance ) or S_OK() or S_ERROR
"""
self.log.debug("getRequest: attempting to get request.")
getRequest = self._getRPC().getRequest(requestID)
if not getRequest["OK"]:
self.log.error("getRequest: unable to get request", "'%s' %s" % (requestID, getRequest["Message"]))
return getRequest
if not getRequest["Value"]:
return getRequest
return S_OK(Request(getRequest["Value"]))
def getBulkRequests(self, numberOfRequest=10, assigned=True):
""" get bulk requests from RequestDB
:param self: self reference
:param int numberOfRequest: size of the bulk (default 10)
:return: S_OK( Successful : { requestID, RequestInstance }, Failed : message ) or S_ERROR
"""
self.log.debug("getRequests: attempting to get request.")
getRequests = self._getRPC().getBulkRequests(numberOfRequest, assigned)
if not getRequests["OK"]:
self.log.error("getRequests: unable to get '%s' requests: %s" % (numberOfRequest, getRequests["Message"]))
return getRequests
# No Request returned
if not getRequests["Value"]:
return getRequests
# No successful Request
if not getRequests["Value"]["Successful"]:
return getRequests
jsonReq = getRequests["Value"]["Successful"]
reqInstances = dict((rId, Request(jsonReq[rId])) for rId in jsonReq)
return S_OK({"Successful": reqInstances, "Failed": getRequests["Value"]["Failed"]})
def peekRequest(self, requestID):
""" peek request """
self.log.debug("peekRequest: attempting to get request.")
peekRequest = self._getRPC().peekRequest(int(requestID))
if not peekRequest["OK"]:
self.log.error("peekRequest: unable to peek request", "request: '%s' %s" % (requestID, peekRequest["Message"]))
return peekRequest
if not peekRequest["Value"]:
return peekRequest
return S_OK(Request(peekRequest["Value"]))
def deleteRequest(self, requestID):
""" delete request given it's ID
:param self: self reference
:param str requestID: request ID
"""
requestID = int(requestID)
self.log.debug("deleteRequest: attempt to delete '%s' request" % requestID)
deleteRequest = self._getRPC().deleteRequest(requestID)
if not deleteRequest["OK"]:
self.log.error("deleteRequest: unable to delete request",
"'%s' request: %s" % (requestID, deleteRequest["Message"]))
return deleteRequest
def getRequestIDsList(self, statusList=None, limit=None, since=None, until=None, getJobID=False):
""" get at most :limit: request ids with statuses in :statusList: """
statusList = statusList if statusList else list(Request.FINAL_STATES)
limit = limit if limit else 100
since = since.strftime('%Y-%m-%d') if since else ""
until = until.strftime('%Y-%m-%d') if until else ""
return self._getRPC().getRequestIDsList(statusList, limit, since, until, getJobID)
def getScheduledRequest(self, operationID):
""" get scheduled request given its scheduled OperationID """
self.log.debug("getScheduledRequest: attempt to get scheduled request...")
scheduled = self._getRPC().getScheduledRequest(operationID)
if not scheduled["OK"]:
self.log.error("getScheduledRequest failed", scheduled["Message"])
return scheduled
if scheduled["Value"]:
return S_OK(Request(scheduled["Value"]))
return scheduled
def getDBSummary(self):
""" Get the summary of requests in the RequestDBs. """
self.log.debug("getDBSummary: attempting to get RequestDB summary.")
dbSummary = self._getRPC().getDBSummary()
if not dbSummary["OK"]:
self.log.error("getDBSummary: unable to get RequestDB summary", dbSummary["Message"])
return dbSummary
def getDigest(self, requestID):
""" Get the request digest given a request ID.
:param self: self reference
:param str requestID: request id
"""
self.log.debug("getDigest: attempting to get digest for '%s' request." % requestID)
digest = self._getRPC().getDigest(int(requestID))
if not digest["OK"]:
self.log.error("getDigest: unable to get digest for request",
"request: '%s' %s" % (requestID, digest["Message"]))
return digest
def getRequestStatus(self, requestID):
""" Get the request status given a request id.
:param self: self reference
:param int requestID: id of the request
"""
if isinstance(requestID, basestring):
requestID = int(requestID)
self.log.debug("getRequestStatus: attempting to get status for '%d' request." % requestID)
requestStatus = self._getRPC().getRequestStatus(requestID)
if not requestStatus["OK"]:
self.log.error("getRequestStatus: unable to get status for request",
": '%d' %s" % (requestID, requestStatus["Message"]))
return requestStatus
# def getRequestName( self, requestID ):
# """ get request name for a given requestID """
# return self._getRPC().getRequestName( requestID )
def getRequestInfo(self, requestID):
""" The the request info given a request id.
:param self: self reference
:param int requestID: request nid
"""
self.log.debug("getRequestInfo: attempting to get info for '%s' request." % requestID)
requestInfo = self._getRPC().getRequestInfo(int(requestID))
if not requestInfo["OK"]:
self.log.error("getRequestInfo: unable to get status for request",
"request: '%s' %s" % (requestID, requestInfo["Message"]))
return requestInfo
def getRequestFileStatus(self, requestID, lfns):
""" Get file status for request given a request id.
:param self: self reference
:param int requestID: request id
:param lfns: list of LFNs
:type lfns: python:list
"""
self.log.debug("getRequestFileStatus: attempting to get file statuses for '%s' request." % requestID)
fileStatus = self._getRPC().getRequestFileStatus(int(requestID), lfns)
if not fileStatus["OK"]:
self.log.verbose("getRequestFileStatus: unable to get file status for request",
"request: '%s' %s" % (requestID, fileStatus["Message"]))
return fileStatus
def finalizeRequest(self, requestID, jobID, useCertificates=True):
""" check request status and perform finalization if necessary
update the request status and the corresponding job parameter
:param self: self reference
:param str requestID: request id
:param int jobID: job id
"""
stateServer = RPCClient("WorkloadManagement/JobStateUpdate", useCertificates=useCertificates)
# Check whether to update the job status - on failure we return an error so the call is re-tried later
# Checking the state, first
res = self.getRequestStatus(requestID)
if not res['OK']:
self.log.error("finalizeRequest: failed to get request",
"request: %s status: %s" % (requestID, res["Message"]))
return res
if res["Value"] != "Done":
return S_ERROR("The request %s isn't 'Done' but '%s', this should never happen, why are we here?" %
(requestID, res['Value']))
# The request is 'Done', let's update the job status. If we fail, we should re-try later
monitorServer = RPCClient("WorkloadManagement/JobMonitoring", useCertificates=useCertificates)
res = monitorServer.getJobPrimarySummary(int(jobID))
if not res["OK"]:
self.log.error("finalizeRequest: Failed to get job status", "JobID: %d" % jobID)
return S_ERROR("finalizeRequest: Failed to get job %d status" % jobID)
elif not res['Value']:
self.log.info("finalizeRequest: job %d does not exist (anymore): finalizing" % jobID)
return S_OK()
else:
jobStatus = res["Value"]["Status"]
newJobStatus = jobStatus
jobMinorStatus = res["Value"]["MinorStatus"]
# update the job pending request digest in any case since it is modified
self.log.info("finalizeRequest: Updating request digest for job %d" % jobID)
digest = self.getDigest(requestID)
if digest["OK"]:
digest = digest["Value"]
self.log.verbose(digest)
res = stateServer.setJobParameter(jobID, "PendingRequest", digest)
if not res["OK"]:
self.log.info("finalizeRequest: Failed to set job %d parameter: %s" % (jobID, res["Message"]))
return res
else:
self.log.error("finalizeRequest: Failed to get request digest for %s: %s" % (requestID,
digest["Message"]))
stateUpdate = None
if jobStatus == 'Completed':
# What to do? Depends on what we have in the minorStatus
if jobMinorStatus == "Pending Requests":
newJobStatus = 'Done'
elif jobMinorStatus == "Application Finished With Errors":
newJobStatus = 'Failed'
if newJobStatus != jobStatus:
self.log.info("finalizeRequest: Updating job status for %d to %s/Requests done" % (jobID, newJobStatus))
stateUpdate = stateServer.setJobStatus(jobID, newJobStatus, "Requests done", "")
else:
self.log.info(
"finalizeRequest: Updating job minor status for %d to Requests done (status is %s)" %
(jobID, jobStatus))
stateUpdate = stateServer.setJobStatus(jobID, jobStatus, "Requests done", "")
if not stateUpdate["OK"]:
self.log.error("finalizeRequest: Failed to set job status",
"JobID: %d status: %s" % (jobID, stateUpdate['Message']))
return stateUpdate
return S_OK(newJobStatus)
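# Illustrative usage sketch (not part of the original class): once all
# operations of a job's request are done, an agent might call
#
#     res = reqClient.finalizeRequest(requestID, jobID)   # hypothetical IDs
#     if res["OK"]:
#         newJobStatus = res["Value"]   # e.g. 'Done' if the job was 'Completed'/'Pending Requests'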
def getRequestIDsForJobs(self, jobIDs):
""" get the request ids for the supplied jobIDs.
:param self: self reference
:param jobIDs: list of job IDs (integers)
:type jobIDs: python:list
:return: S_ERROR or S_OK( "Successful": { jobID1: reqID1, jobID2: requID2, ... },
"Failed" : { jobIDn: errMsg, jobIDm: errMsg, ...} )
"""
self.log.verbose("getRequestIDsForJobs: attempt to get request(s) for job %s" % jobIDs)
requests = self._getRPC().getRequestIDsForJobs(jobIDs)
if not requests["OK"]:
self.log.error("getRequestIDsForJobs: unable to get request(s) for jobs",
"%s: %s" % (jobIDs, requests["Message"]))
return requests
def readRequestsForJobs(self, jobIDs):
""" read requests for jobs
:param jobIDs: list with jobIDs
:type jobIDs: python:list
:return: S_OK( { "Successful" : { jobID1 : Request, ... },
"Failed" : { jobIDn : "Fail reason" } } )
"""
readReqsForJobs = self._getRPC().readRequestsForJobs(jobIDs)
if not readReqsForJobs["OK"]:
return readReqsForJobs
ret = readReqsForJobs["Value"]
# # create Requests out of JSONs for successful reads
if "Successful" in ret:
for jobID, fromJSON in ret["Successful"].items():
ret["Successful"][jobID] = Request(fromJSON)
return S_OK(ret)
def resetFailedRequest(self, requestID, allR=False):
""" Reset a failed request to "Waiting" status
"""
# # we can safely only peek the request as it is Failed and therefore not owned by an agent
res = self.peekRequest(requestID)
if not res['OK']:
return res
req = res['Value']
if allR or recoverableRequest(req):
# Only reset requests that can be recovered
if req.Status != 'Failed':
gLogger.notice("Reset NotBefore time, was %s" % str(req.NotBefore))
else:
for i, op in enumerate(req):
op.Error = ''
if op.Status == 'Failed':
printOperation((i, op), onlyFailed=True)
for fi in op:
if fi.Status == 'Failed':
fi.Attempt = 1
fi.Error = ''
fi.Status = 'Waiting'
if op.Status == 'Failed':
op.Status = 'Waiting'
# Reset also NotBefore
req.NotBefore = datetime.datetime.utcnow().replace(microsecond=0)
return self.putRequest(req)
return S_OK("Not reset")
#============= Some useful functions to be shared ===========
output = ''
def prettyPrint(mainItem, key='', offset=0):
global output
if key:
key += ': '
blanks = offset * ' '
if mainItem and isinstance(mainItem, dict):
output += "%s%s%s\n" % (blanks, key, '{') if blanks or key else ''
for key in sorted(mainItem):
prettyPrint(mainItem[key], key=key, offset=offset)
output += "%s%s\n" % (blanks, '}') if blanks else ''
elif mainItem and isinstance(mainItem, list) or isinstance(mainItem, tuple):
output += "%s%s%s\n" % (blanks, key, '[' if isinstance(mainItem, list) else '(')
for item in mainItem:
prettyPrint(item, offset=offset + 2)
output += "%s%s\n" % (blanks, ']' if isinstance(mainItem, list) else ')')
elif isinstance(mainItem, basestring):
if '\n' in mainItem:
prettyPrint(mainItem.strip('\n').split('\n'), offset=offset)
else:
output += "%s%s'%s'\n" % (blanks, key, mainItem)
else:
output += "%s%s%s\n" % (blanks, key, str(mainItem))
output = output.replace('[\n%s{' % blanks, '[{').replace('}\n%s]' % blanks, '}]') \
.replace('(\n%s{' % blanks, '({').replace('}\n%s)' % blanks, '})') \
.replace('(\n%s(' % blanks, '((').replace(')\n%s)' % blanks, '))') \
.replace('(\n%s[' % blanks, '[').replace(']\n%s)' % blanks, ']')
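# Illustrative usage sketch (not part of the original module): prettyPrint
# accumulates into the module-level 'output' string, so the typical call
# pattern (as used by printRequest below) is
#
#     output = ''
#     prettyPrint({'Status': 'Done', 'Operations': ['ReplicateAndRegister']})
#     gLogger.always(output)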
def printFTSJobs(request):
""" Prints the FTSJobs associated to a request
:param request: Request object
"""
try:
if request.RequestID:
# We try first the new FTS3 system
from DIRAC.DataManagementSystem.Client.FTS3Client import FTS3Client
fts3Client = FTS3Client()
res = fts3Client.ping()
if res['OK']:
associatedFTS3Jobs = []
for op in request:
res = fts3Client.getOperationsFromRMSOpID(op.OperationID)
if res['OK']:
for fts3Op in res['Value']:
associatedFTS3Jobs.extend(fts3Op.ftsJobs)
if associatedFTS3Jobs:
gLogger.always(
'\n\nFTS3 jobs associated: \n%s' %
'\n'.join(
'%s@%s (%s)' %
(job.ftsGUID,
job.ftsServer,
job.status) for job in associatedFTS3Jobs))
return
# If we are here, the attempt with the new FTS3 system did not work, let's try the old FTS system
gLogger.debug("Could not instantiate FTS3Client", res)
from DIRAC.DataManagementSystem.Client.FTSClient import FTSClient
ftsClient = FTSClient()
res = ftsClient.ping()
if not res['OK']:
gLogger.debug("Could not instantiate FtsClient", res)
return
res = ftsClient.getFTSJobsForRequest(request.RequestID)
if res['OK']:
ftsJobs = res['Value']
if ftsJobs:
gLogger.always(' FTS jobs associated: %s' % ','.join('%s (%s)' % (job.FTSGUID, job.Status)
for job in ftsJobs))
# ImportError can be thrown for the old client
# AttributeError can be thrown because the deserialization will not have
# happened correctly on the new fts3 (CC7 typically), and the error is not
# properly propagated
except (ImportError, AttributeError) as err:
gLogger.debug("Could not instantiate FtsClient because of Exception", repr(err))
def printRequest(request, status=None, full=False, verbose=True, terse=False):
global output
if full:
output = ''
prettyPrint(json.loads(request.toJSON()['Value']))
gLogger.always(output)
else:
if not status:
status = request.Status
gLogger.always("Request name='%s' ID=%s Status='%s'%s%s%s" %
(request.RequestName,
request.RequestID if hasattr(request, 'RequestID') else '(not set yet)',
request.Status, " ('%s' in DB)" % status if status != request.Status else '',
(" Error='%s'" % request.Error) if request.Error and request.Error.strip() else "",
(" Job=%s" % request.JobID) if request.JobID else ""))
gLogger.always("Created %s, Updated %s%s" % (request.CreationTime,
request.LastUpdate,
(", NotBefore %s" % request.NotBefore) if request.NotBefore else ""))
if request.OwnerDN:
gLogger.always("Owner: '%s', Group: %s" % (request.OwnerDN, request.OwnerGroup))
for indexOperation in enumerate(request):
op = indexOperation[1]
if not terse or op.Status == 'Failed':
printOperation(indexOperation, verbose, onlyFailed=terse)
printFTSJobs(request)
def printOperation(indexOperation, verbose=True, onlyFailed=False):
global output
i, op = indexOperation
prStr = ''
if op.SourceSE:
prStr += 'SourceSE: %s' % op.SourceSE
if op.TargetSE:
prStr += (' - ' if prStr else '') + 'TargetSE: %s' % op.TargetSE
if prStr:
prStr += ' - '
prStr += 'Created %s, Updated %s' % (op.CreationTime, op.LastUpdate)
if op.Type == 'ForwardDISET' and op.Arguments:
from DIRAC.Core.Utilities import DEncode
decode, _length = DEncode.decode(op.Arguments)
if verbose:
output = ''
prettyPrint(decode, offset=10)
prStr += '\n Arguments:\n' + output.strip('\n')
else:
prStr += '\n Service: %s' % decode[0][0]
gLogger.always(" [%s] Operation Type='%s' ID=%s Order=%s Status='%s'%s%s" %
(i, op.Type,
op.OperationID if hasattr(op, 'OperationID') else '(not set yet)',
op.Order, op.Status,
(" Error='%s'" % op.Error) if op.Error and op.Error.strip() else "",
(" Catalog=%s" % op.Catalog) if op.Catalog else ""))
if prStr:
gLogger.always(" %s" % prStr)
for indexFile in enumerate(op):
if not onlyFailed or indexFile[1].Status == 'Failed':
printFile(indexFile)
def printFile(indexFile):
ind, fi = indexFile
gLogger.always(" [%02d] ID=%s LFN='%s' Status='%s'%s%s%s" %
(ind + 1, fi.FileID if hasattr(fi, 'FileID') else '(not set yet)', fi.LFN, fi.Status,
(" Checksum='%s'" % fi.Checksum) if fi.Checksum or
(fi.Error and 'checksum' in fi.Error.lower()) else "",
(" Error='%s'" % fi.Error) if fi.Error and fi.Error.strip() else "",
(" Attempts=%d" % fi.Attempt) if fi.Attempt > 1 else "")
)
def recoverableRequest(request):
excludedErrors = ('File does not exist', 'No such file or directory',
'sourceSURL equals to targetSURL',
'Max attempts limit reached', 'Max attempts reached')
operationErrorsOK = ('is banned for', 'Failed to perform exists from any catalog')
for op in request:
if op.Status == 'Failed' and (not op.Error or not [errStr for errStr in operationErrorsOK if errStr in op.Error]):
for fi in op:
if fi.Status == 'Failed':
if [errStr for errStr in excludedErrors if errStr in fi.Error]:
return False
return True
return True
|
andresailer/DIRAC
|
RequestManagementSystem/Client/ReqClient.py
|
Python
|
gpl-3.0
| 24,299
|
[
"DIRAC"
] |
eb4e8cbd7a46bdf08fab6126998b330cd547ae9f6d1ac71fe1af335344bda65c
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import numpy as np
logging.basicConfig(level=logging.INFO)
import espressomd
espressomd.assert_features(['LENNARD_JONES'])
import espressomd.accumulators
import espressomd.observables
import espressomd.polymer
# Setup constants
TIME_STEP = 0.01
LOOPS = 5000
STEPS = 100
# System setup
system = espressomd.System(box_l=[32.0, 32.0, 32.0])
system.cell_system.skin = 0.4
# Lennard-Jones interaction
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=1.0, sigma=1.0,
shift="auto", cutoff=2.0**(1.0 / 6.0))
# Fene interaction
fene = espressomd.interactions.FeneBond(k=7, d_r_max=2)
system.bonded_inter.add(fene)
N_MONOMERS = [10, 20, 40]
rh_results = np.zeros(len(N_MONOMERS))
diffusion_constant_results = np.zeros(len(N_MONOMERS))
for index, N in enumerate(N_MONOMERS):
logging.info("Polymer size: {}".format(N))
system.part.clear()
system.thermostat.turn_off()
system.actors.clear()
system.auto_update_accumulators.clear()
# Setup polymer of part_id 0 with fene bond
positions = espressomd.polymer.positions(n_polymers=1,
beads_per_chain=N,
bond_length=1, seed=5642,
min_distance=0.9)
for i, pos in enumerate(positions[0]):
pid = len(system.part)
system.part.add(id=pid, pos=pos)
if i > 0:
system.part[pid].add_bond((fene, pid - 1))
logging.info("Warming up the polymer chain.")
system.time_step = 0.002
system.minimize_energy.init(
f_max=1.0,
gamma=10,
max_steps=2000,
max_displacement=0.01)
system.minimize_energy.minimize()
logging.info("Warmup finished.")
logging.info("Equilibration.")
system.time_step = TIME_STEP
system.thermostat.set_langevin(kT=1.0, gamma=10, seed=42)
system.integrator.run(50000)
logging.info("Equilibration finished.")
system.thermostat.turn_off()
lbf = espressomd.lb.LBFluidGPU(
kT=1,
seed=123,
agrid=1,
dens=1,
visc=5,
tau=TIME_STEP)
system.actors.add(lbf)
system.thermostat.set_lb(LB_fluid=lbf, seed=142, gamma=5)
logging.info("Warming up the system with LB fluid.")
system.integrator.run(1000)
logging.info("Warming up the system with LB fluid finished.")
# configure correlator
com_pos = espressomd.observables.ComPosition(ids=range(N))
correlator = espressomd.accumulators.Correlator(
obs1=com_pos, tau_lin=16, tau_max=LOOPS * STEPS, delta_N=1,
corr_operation="square_distance_componentwise", compress1="discard1")
system.auto_update_accumulators.add(correlator)
logging.info("Sampling started.")
rhs = np.zeros(LOOPS)
for i in range(LOOPS):
system.integrator.run(STEPS)
rhs[i] = system.analysis.calc_rh(
chain_start=0,
number_of_chains=1,
chain_length=N)[0]
logging.info("Sampling finished.")
correlator.finalize()
corrdata = correlator.result()
rh_results[index] = np.average(rhs)
tau = corrdata[1:, 0]
msd = corrdata[1:, 2] + corrdata[1:, 3] + corrdata[1:, 4]
np.save('msd_{}'.format(N), np.c_[tau, msd])
np.save('rh.npy', rh_results)
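# Illustrative post-processing sketch (not part of the original script): the
# long-time mean-squared displacement of the centre of mass grows as
# MSD(tau) ~ 6*D*tau in 3D, so a diffusion coefficient could be estimated from
# the saved data, e.g. for the hypothetical 10-monomer file written above:
#
#     tau, msd = np.load('msd_10.npy').T
#     D = np.polyfit(tau[-100:], msd[-100:], 1)[0] / 6.0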
|
psci2195/espresso-ffans
|
doc/tutorials/04-lattice_boltzmann/scripts/04-lattice_boltzmann_part3_solution.py
|
Python
|
gpl-3.0
| 4,003
|
[
"ESPResSo"
] |
c74d4b2cdb057f10bc515ea9eb6ced274937d2a8df73c3b86b4ad2b87ef74e16
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
from mantid.api import PythonAlgorithm, AlgorithmFactory, MatrixWorkspaceProperty
from mantid.kernel import Direction, IntBoundedValidator
from mantid.simpleapi import Fit
import numpy as np
class FitGaussian(PythonAlgorithm):
# pylint: disable = no-init
def category(self):
return "Optimization"
def seeAlso(self):
return [ "Fit" ]
def PyInit(self):
# input
self.declareProperty(MatrixWorkspaceProperty("Workspace", "", Direction.Input),
doc="input workspace")
self.declareProperty(name="Index",defaultValue=0,validator=IntBoundedValidator(lower=0),
doc="workspace index - which spectrum to fit")
# output
self.declareProperty(name="PeakCentre",defaultValue=0.,direction=Direction.Output,
doc="the centre of the fitted peak")
self.declareProperty(name="Sigma",defaultValue=0.,direction=Direction.Output,
doc="the sigma of the fitted peak; 0. if fitting was not successful")
def _setOutput(self,peakCentre,sigma):
self.setProperty("PeakCentre",peakCentre)
self.setProperty("Sigma",sigma)
self.log().notice("Fitted Gaussian peak: [" + str(peakCentre) + "," + str(sigma) + "]")
def _error(self,message):
self.log().error(message)
raise RuntimeError(message)
def _warning(self,message):
self.log().warning(message)
def PyExec(self):
workspace = self.getProperty("Workspace").value
index = self.getProperty("Index").value
nhist = workspace.getNumberHistograms()
# index must be in [0, nhist)
if index >= nhist:
self._error("Index " + str(index) +
" is out of range for the workspace " + workspace.name())
x_values = np.array(workspace.readX(index))
y_values = np.array(workspace.readY(index))
# get peak centre position, assuming that it is the point with the highest value
imax = np.argmax(y_values)
height = y_values[imax]
# check for zero or negative signal
if height <= 0.:
self._warning("Workspace %s, detector %d has maximum <= 0" % (workspace.name(), index))
return
# guess sigma (assume the signal is sufficiently smooth)
# the _only_ peak is at least three samples wide
# selecting samples above .5 ("full width at half maximum")
indices = np.argwhere(y_values > 0.5*height)
nentries = len(indices)
if nentries < 3:
self._warning("Spectrum " + str(index) + " in workspace " + workspace.name() +
" has a too narrow peak. Cannot guess sigma. Check your data.")
return
minIndex = indices[0,0]
maxIndex = indices[-1,0]
# full width at half maximum: fwhm = sigma * (2.*np.sqrt(2.*np.log(2.)))
fwhm = np.fabs(x_values[maxIndex] - x_values[minIndex])
sigma = fwhm / (2.*np.sqrt(2.*np.log(2.)))
# execute Fit algorithm
tryCentre = x_values[imax]
fitFun = "name=Gaussian,PeakCentre=%s,Height=%s,Sigma=%s" % (tryCentre,height,sigma)
startX = tryCentre - 3.0*fwhm
endX = tryCentre + 3.0*fwhm
# pylint: disable = unpacking-non-sequence, assignment-from-none
fit_output = Fit(
InputWorkspace=workspace, WorkspaceIndex=index,
Function=fitFun, CreateOutput=True, OutputParametersOnly=True,
StartX=startX, EndX=endX)
if not 'success' == fit_output.OutputStatus:
self._warning("For detector " + str(index) + " in workspace " + workspace.name() +
"fit was not successful. Input guess parameters were " + str(fitFun))
return
fitParams = fit_output.OutputParameters.column(1)
self._setOutput(fitParams[1],fitParams[2]) # [peakCentre,sigma]
AlgorithmFactory.subscribe(FitGaussian)
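# Illustrative usage sketch (not part of the original file): once subscribed,
# the algorithm is exposed through the simpleapi like any other algorithm, so a
# call on a hypothetical workspace 'ws' might look like
#
#     from mantid.simpleapi import FitGaussian
#     peak_centre, sigma = FitGaussian(Workspace=ws, Index=0)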
|
mganeva/mantid
|
Framework/PythonInterface/plugins/algorithms/FitGaussian.py
|
Python
|
gpl-3.0
| 4,364
|
[
"Gaussian"
] |
bf9f2dcc3d77f6c3c676b0f3cd959306bca8ccbdc92b9c271c434e1351089281
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the Siesta help scripts
#
# (c) Andrey Sobolev, 2013
#
import numpy as np
import matplotlib.pyplot as plt
from shs.calc import SiestaCalc
if __name__ == '__main__':
calc_dir = '../../examples/FeCANI'
crd = []
forces = []
nsteps_beg = 1400
nsteps_end = 0
sc = SiestaCalc(calc_dir, calc_type='out', steps = range(-nsteps_beg,-nsteps_end,1))
n = sc.evol[0].filter('label',lambda x: x == 'C')
for step, g in sc.evol:
crd.append(g.atoms['crd'][n])
forces.append(g.forces[n])
crd = np.array(crd)
forces = np.array(forces)
r = crd[:, 1] - crd[:, 0]
r_mod = np.sum(r * r, axis = 1) ** 0.5
# indices
r_near_ind = np.where(r_mod < 3.34)[0]
r_far_ind = np.where(r_mod > 3.42)[0]
# force module
f_mod = np.sum(forces * forces, axis = 2) ** 0.5
# cos_f1 = np.sum(r * forces[:,0], axis = 1) / r_mod / f_mod[:,0]
# cos_f2 = np.sum(r * forces[:,1], axis = 1) / r_mod / f_mod[:,1]
# projections
f1_r = (np.sum(forces[:,0] * r, axis = 1) / np.sum(r * r, axis = 1))[:, np.newaxis] * r
f1_r_mod = np.sum(f1_r * f1_r, axis = 1) ** 0.5
f2_r = (np.sum(forces[:,1] * r, axis = 1) / np.sum(r * r, axis = 1))[:, np.newaxis] * r
f2_r_mod = np.sum(f2_r * f2_r, axis = 1) ** 0.5
# linear approximation
# poly = np.polyfit(r_mod, f2_r_mod, 2)
# poly = np.polyfit(r_mod[r_near_ind], f2_r_mod[r_near_ind], 1)
poly = np.polyfit(r_mod[r_far_ind], f2_r_mod[r_far_ind], 1)
print(poly)
p = np.poly1d(poly)
steps = np.arange(-nsteps_beg, -nsteps_end)
xp = np.linspace(3.1, 3.6, 300)
# print np.min(p(xp)), (p - np.poly1d(np.min(p(xp)))).r
# plt.plot(steps, r_mod)
# plt.plot(steps, f2_r_mod)
# plt.plot(steps, f_mod[:,0])
# plt.scatter(r_mod, f_mod[:,0])
plt.plot(r_mod, f2_r_mod, '.', xp, p(xp), '-')
# plt.scatter(r_mod, f1_r_mod, color = "blue")
# plt.scatter(r_mod, f2_r_mod, color = "red")
# plt.scatter(r_mod, f_mod[:,1], c = 'red')
plt.show()
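# Note (added for clarity, not part of the original script): f1_r and f2_r above
# are the projections of the atomic forces onto the inter-atomic vector r,
# f_parallel = (f . r / |r|^2) * r, so f2_r_mod is the magnitude of the force
# component acting along the bond axis, which is plotted against the distance r_mod.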
|
ansobolev/shs
|
bin/additional/forces.py
|
Python
|
mit
| 2,076
|
[
"SIESTA"
] |
f42fadd371e39fc245f6b7438fdd12655f6bda93dbf7afe339502da33a15781e
|
from random import randint, uniform, random, gauss
from MemoizationUtils import get_random_protocol
class ParamVector(object):
"""
This class represents the vectors that defines a firewall
a ParamVector is a class that represents a vector that defines a firewall.
we have our indicator functions that should get parameters, lets call these functions g1...gn
for each gi we can say that there is a vector (ai1,...,aim) of scalars. so we can represent every firewall Fl
as the sum of wi*gi where wi(weight) is a scalar.
so we can think about a vector of different sized vectors, where every vector i is:
fi, ai1,...aim
:ivar weight_of: weight_of contains the following keys: DST_IP, SRC_IP, DST_PORT, SRC_PORT, SIZE, TTL, PROTOCOL, SEQ
"""
# helper lambdas:
@staticmethod
def random_ip():
return tuple([randint(0, 255) for _ in range(4)])
@staticmethod
def random_port():
return randint(0, 65535)
# constants:
DST_IP = "dstIP"
SRC_IP = "srcIP"
DST_PORT = "dstPort"
SRC_PORT = "srcPort"
SIZE = "size"
TTL = "ttl"
PROTOCOL = "protocol"
SEQ = "seq_number"
weight_keys = {DST_IP, SRC_IP, DST_PORT,
SRC_PORT,
SIZE,
TTL,
PROTOCOL,
SEQ}
# random data constants:
SIZE_RANDOM_LOW_MIN = 0
SIZE_RANDOM_LOW_MAX = 2000
SIZE_RANDOM_HIGH_MIN = 1
SIZE_RANDOM_HIGH_MAX = 2000
TTL_THRESH_MIN = 10
TTL_THRESH_MAX = 255
SEQ_THRESH_MIN = 0
SEQ_THRESH_MAX = 2 ** 31
WEIGHT_MAX_VAL = 10
# the functions that can be used to mutate a ParamVector, instances of ProbabilityFunction
mutate_functions = []
def __init__(self, ip_src_set=set(), ip_dst_set=set(), port_src_set=set(), port_dst_set=set(),
sizes_lower_bound=0, sizes_upper_bound=0, ttl_lower_bound=0, protocol_set=set(),
seq_lower_bound=0, seq_upper_bound=0, weight_of={key: 0 for key in weight_keys},
malicious_threshold=0.5):
"""
init self with all the needed variables
:param ip_src_set: a set of tuples (with 4 ints) representing the source ip addresses.
:param ip_dst_set: a set of tuples (with 4 ints) representing the destination ip addresses.
:param src_port_set: a set of ints representing the source port addresses.
:param dst_port_set: a set of ints representing the destination port addresses.
:param sizes_lower_bound: a number that is the low bound for the sizes
:param sizes_upper_bound: a number that is the high bound for the sizes
:param ttl_lower_bound: a number that is the thresh hold for the TTL(time to live)
:param protocol_set: a set of strings representing the protocols
:param seq_lower_bound: a number that is the low bound for the sequence number
:param seq_upper_bound: a number that is the high bound for the sequence number
:param weight_of: a dictionary that takes the name of a variable and returns the weight of it in the fitness
:param malicious_threshold: if the result of the packet is greater than this threshold, it will be considered malicious
"""
self.ip_src_set = ip_src_set
self.ip_dst_set = ip_dst_set
self.port_src_set = port_src_set
self.port_dst_set = port_dst_set
self.sizes_lower_bound = sizes_lower_bound
self.sizes_upper_bound = sizes_upper_bound
self.ttl_lower_bound = ttl_lower_bound
self.protocol_set = protocol_set
self.seq_lower_bound = seq_lower_bound
self.seq_upper_bound = seq_upper_bound
self.weight_of = weight_of
self.malicious_threshold = malicious_threshold
@staticmethod
def _mutate_ordinals(set_of_values, generator):
"""
mutates the ordinal indicator representation by removing some values and adding new ones produced by the given generator
:param set_of_values: the previous representation
:param generator: the generator for this specific ordinal type
:return: the new representation
"""
tmp_set = set(set_of_values)
for ordinal in tmp_set:
r = random()
if r < 0.25:
set_of_values.remove(ordinal)
if r < 0.235:
set_of_values.add(generator())
return set_of_values
@staticmethod
def _mutate_numeric(value, step):
"""
mutates the given numeric value using gaussian distribution.
:param value: the value to be mutated
:return: the new value
"""
if random() < 0.5:
return value
return gauss(value, step)
def mutate(self):
"""
applying the mutate mechanism on self
:return: a new ParamVector instance with the mutated data in self
"""
self.ip_src_set = ParamVector._mutate_ordinals(self.ip_src_set, ParamVector.random_ip)
self.ip_dst_set = ParamVector._mutate_ordinals(self.ip_dst_set, ParamVector.random_ip)
self.port_src_set = ParamVector._mutate_ordinals(self.port_src_set, ParamVector.random_port)
self.port_dst_set = ParamVector._mutate_ordinals(self.port_dst_set, ParamVector.random_port)
self.sizes_lower_bound = int(ParamVector._mutate_numeric(self.sizes_lower_bound, 2))
self.sizes_upper_bound = int(ParamVector._mutate_numeric(self.sizes_upper_bound, 2))
self.ttl_lower_bound = int(ParamVector._mutate_numeric(self.ttl_lower_bound, 2))
self.protocol_set = ParamVector._mutate_ordinals(self.protocol_set, get_random_protocol)
self.seq_lower_bound = int(ParamVector._mutate_numeric(self.seq_lower_bound, 2))
self.seq_upper_bound = int(ParamVector._mutate_numeric(self.seq_upper_bound, 2))
for key in self.weight_keys:
self.weight_of[key] = ParamVector._mutate_numeric(self.weight_of[key], 0.2)
self.malicious_threshold = ParamVector._mutate_numeric(self.malicious_threshold, 0.1)
return self
@staticmethod
def generate_random_data():
"""
creates a ParamVector with random data
:return: an instance of ParamVector that is defined using random data
"""
src_ip = ParamVector.random_ip()
dst_ip = ParamVector.random_ip()
src_port = ParamVector.random_port()
dst_port = ParamVector.random_port()
size_low = randint(ParamVector.SIZE_RANDOM_LOW_MIN, ParamVector.SIZE_RANDOM_LOW_MAX)
size_high = size_low + randint(ParamVector.SIZE_RANDOM_HIGH_MIN, ParamVector.SIZE_RANDOM_HIGH_MAX)
ttl = randint(ParamVector.TTL_THRESH_MIN, ParamVector.TTL_THRESH_MAX)
protoc = get_random_protocol()
seq_low = randint(ParamVector.SEQ_THRESH_MIN, ParamVector.SEQ_THRESH_MAX)
seq_high = seq_low + randint(ParamVector.SEQ_THRESH_MIN, ParamVector.SEQ_THRESH_MAX)
weight_func = lambda: uniform(0, ParamVector.WEIGHT_MAX_VAL)
weights = {ParamVector.DST_IP: weight_func(),
ParamVector.SRC_IP: weight_func(),
ParamVector.DST_PORT: weight_func(),
ParamVector.SRC_PORT: weight_func(),
ParamVector.SIZE: weight_func(),
ParamVector.TTL: weight_func(),
ParamVector.PROTOCOL: weight_func(),
ParamVector.SEQ: weight_func()}
sum_weights = sum(weights.values())
# normalizing the values:
for key in weights.keys():
weights[key] = weights[key] / sum_weights
return ParamVector(ip_src_set={src_ip},
ip_dst_set={dst_ip},
port_src_set={src_port},
port_dst_set={dst_port},
sizes_lower_bound=size_low,
sizes_upper_bound=size_high,
ttl_lower_bound=ttl,
protocol_set={protoc},
seq_lower_bound=seq_low,
seq_upper_bound=seq_high,
weight_of=weights,
malicious_threshold=random()
)
@staticmethod
def _mate_ordinals(*ordinals):
"""
mates two ordinal sets representations
:param ordinals: the ordinals
:return: one of the given ordinal sets or their union.
"""
return {0: ordinals[0], 1: ordinals[1], 2: ordinals[0].union(ordinals[1])}[randint(0, 2)]
@staticmethod
def _mate_bounds(*bounds):
"""
calculates the child's bounds.
:param bounds: the list of the parents' bounds
:return: one of the parents bounds
"""
return bounds[randint(0, len(bounds) - 1)]
@staticmethod
def _mate_threshold(*thresholds):
r = randint(0, 2)
if r < 2:
return thresholds[r]
else:
return sum(thresholds) / 2.0
def __add__(self, other):
"""
creates a mate between these ParamVectors
:param other: another ParamVector
:return: a "child" of the given ParamVectors
"""
assert isinstance(other, ParamVector)
weights = {}
for param in self.weight_keys:
if randint(0, 1):
weights[param] = self[param]
else:
weights[param] = other[param]
result = ParamVector(ParamVector._mate_ordinals(self.ip_src_set, other.ip_src_set),
ParamVector._mate_ordinals(self.ip_dst_set, other.ip_dst_set),
ParamVector._mate_ordinals(self.port_dst_set, other.port_dst_set),
ParamVector._mate_ordinals(self.port_src_set, other.port_src_set),
ParamVector._mate_bounds(self.sizes_lower_bound, other.sizes_lower_bound),
ParamVector._mate_bounds(self.sizes_upper_bound, other.sizes_upper_bound),
ParamVector._mate_bounds(self.ttl_lower_bound, other.ttl_lower_bound),
ParamVector._mate_ordinals(self.protocol_set, other.protocol_set),
ParamVector._mate_bounds(self.seq_lower_bound, other.seq_lower_bound),
ParamVector._mate_bounds(self.seq_upper_bound, other.seq_upper_bound), weights,
ParamVector._mate_threshold(self.malicious_threshold, other.malicious_threshold))
return result.mutate()
def __getitem__(self, item):
"""
:param item: name of the field which should be returned
:return: the value of this field
"""
# assert item in self.weight_keys
return self.weight_of[item]
def __repr__(self):
"""
:return: a string that represents a ParamVector
"""
repr = ""
suffix = ","
repr += "src_ip" + suffix + str(self.ip_src_set) + "::"
repr += "dst_ip" + suffix + str(self.ip_dst_set) + "::"
repr += "src_port" + suffix + str(self.port_src_set) + "::"
repr += "dst_port" + suffix + str(self.port_dst_set) + "::"
repr += "sizes_low" + suffix + str(self.sizes_lower_bound) + "::"
repr += "sizes_high" + suffix + str(self.sizes_upper_bound) + "::"
repr += "ttl" + suffix + str(self.ttl_lower_bound) + "::"
repr += "protocol" + suffix + str(self.protocol_set) + "::"
repr += "seq_low" + suffix + str(self.seq_lower_bound) + "::"
repr += "seq_high" + suffix + str(self.seq_upper_bound) + "::"
repr += "weight" + suffix + str(self.weight_of) + "::"
repr += "malicious_threshold" + suffix + str(self.malicious_threshold)
return repr
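# Illustrative usage sketch (not part of the original module): the genetic
# primitives above are typically combined as
#
#     parent_a = ParamVector.generate_random_data()
#     parent_b = ParamVector.generate_random_data()
#     child = parent_a + parent_b   # crossover via __add__, which also applies mutate()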
|
LeonAgmonNacht/Genetic-Algorithm-Firewall-TAU
|
ParamVector.py
|
Python
|
mit
| 11,886
|
[
"Gaussian"
] |
b474129406377f4c535a8e395674b6bdf83e16b90c348018efa779bf6bee7da5
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ThreadPoolExecutor."""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
import atexit
from concurrent.futures import _base
import queue
import threading
import weakref
import os
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
atexit.register(_python_exit)
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException as e:
self.future.set_exception(e)
else:
self.future.set_result(result)
def _worker(executor_reference, work_queue):
try:
while True:
work_item = work_queue.get(block=True)
if work_item is not None:
work_item.run()
# Delete references to object. See issue16284
del work_item
continue
executor = executor_reference()
# Exit if:
# - The interpreter is shutting down OR
# - The executor that owns the worker has been collected OR
# - The executor that owns the worker has been shutdown.
if _shutdown or executor is None or executor._shutdown:
# Notify other workers
work_queue.put(None)
return
del executor
except BaseException:
_base.LOGGER.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(_base.Executor):
def __init__(self, max_workers=None):
"""Initializes a new ThreadPoolExecutor instance.
Args:
max_workers: The maximum number of threads that can be used to
execute the given calls.
"""
if max_workers is None:
# Use this number because ThreadPoolExecutor is often
# used to overlap I/O instead of CPU work.
max_workers = (os.cpu_count() or 1) * 5
if max_workers <= 0:
raise ValueError("max_workers must be greater than 0")
self._max_workers = max_workers
self._work_queue = queue.Queue()
self._threads = set()
self._shutdown = False
self._shutdown_lock = threading.Lock()
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def _adjust_thread_count(self):
# When the executor gets lost, the weakref callback will wake up
# the worker threads.
def weakref_cb(_, q=self._work_queue):
q.put(None)
# TODO(bquinlan): Should avoid creating new threads if there are more
# idle threads than items in the work queue.
if len(self._threads) < self._max_workers:
t = threading.Thread(target=_worker,
args=(weakref.ref(self, weakref_cb),
self._work_queue))
t.daemon = True
t.start()
self._threads.add(t)
_threads_queues[t] = self._work_queue
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
self._work_queue.put(None)
if wait:
for t in self._threads:
t.join()
shutdown.__doc__ = _base.Executor.shutdown.__doc__
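# Illustrative usage sketch, not part of the original module: submit a few
# calls through the documented Executor interface and collect their results.
if __name__ == '__main__':
    def _square(x):
        return x * x
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(_square, n) for n in range(5)]
        print([f.result() for f in futures])  # [0, 1, 4, 9, 16]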
| Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/concurrent/futures/thread.py | Python | gpl-3.0 | 4,865 | ["Brian"] | 26c226308838b367402efc915ffaec9b8d1ff4d3de45c581f14f8fadd3bfee54 |
"""
Utility functions for cloud endpoints.
"""
import sys
import os
import six
from DIRAC import S_OK, S_ERROR
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
STATE_MAP = {
0: "RUNNING",
1: "REBOOTING",
2: "TERMINATED",
3: "PENDING",
4: "UNKNOWN",
5: "STOPPED",
6: "SUSPENDED",
7: "ERROR",
8: "PAUSED",
}
def createMimeData(userDataTuple):
userData = MIMEMultipart()
for contents, mtype, fname in userDataTuple:
try:
mimeText = MIMEText(contents, mtype, sys.getdefaultencoding())
mimeText.add_header("Content-Disposition", 'attachment; filename="%s"' % fname)
userData.attach(mimeText)
except Exception as e:
return S_ERROR(str(e))
return S_OK(userData)
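def _exampleCreateMimeData():
    """Illustrative sketch only, not part of the original module: wrap a single
    shell-script part into a multipart MIME message via createMimeData. The
    script content and file name are made-up placeholders."""
    result = createMimeData((("#!/bin/bash\necho hello\n", "x-shellscript", "hello.sh"),))
    if not result["OK"]:
        return None
    return result["Value"].as_string()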
def createPilotDataScript(vmParameters, bootstrapParameters):
userDataDict = {}
# Arguments to the vm-bootstrap command
parameters = dict(vmParameters)
parameters.update(bootstrapParameters)
bootstrapArgs = {
"dirac-site": parameters.get("Site"),
"submit-pool": parameters.get("SubmitPool", ""),
"ce-name": parameters.get("CEName"),
"image-name": parameters.get("Image"),
"vm-uuid": parameters.get("VMUUID"),
"vmtype": parameters.get("VMType"),
"vo": parameters.get("VO", ""),
"running-pod": parameters.get("RunningPod", parameters.get("VO", "")),
"cvmfs-proxy": parameters.get("CVMFSProxy", "DIRECT"),
"cs-servers": ",".join(parameters.get("CSServers", [])),
"number-of-processors": parameters.get("NumberOfProcessors", 1),
"whole-node": parameters.get("WholeNode", True),
"required-tag": parameters.get("RequiredTag", ""),
"release-version": parameters.get("Version"),
"lcgbundle-version": parameters.get("LCGBundleVersion", ""),
"release-project": parameters.get("Project"),
"setup": parameters.get("Setup"),
}
bootstrapString = ""
for key, value in bootstrapArgs.items():
bootstrapString += " --%s=%s \\\n" % (key, value)
userDataDict["bootstrapArgs"] = bootstrapString
userDataDict["user_data_commands_base_url"] = bootstrapParameters.get("user_data_commands_base_url")
if not userDataDict["user_data_commands_base_url"]:
return S_ERROR("user_data_commands_base_url is not defined")
with open(bootstrapParameters["CloudPilotCert"]) as cfile:
userDataDict["user_data_file_hostkey"] = cfile.read().strip()
with open(bootstrapParameters["CloudPilotKey"]) as kfile:
userDataDict["user_data_file_hostcert"] = kfile.read().strip()
sshKey = None
userDataDict["add_root_ssh_key"] = ""
if "SshKey" in parameters:
with open(parameters["SshKey"]) as sfile:
sshKey = sfile.read().strip()
userDataDict["add_root_ssh_key"] = (
"""
# Allow root login
sed -i 's/PermitRootLogin no/PermitRootLogin yes/g' /etc/ssh/sshd_config
# Copy id_rsa.pub to authorized_keys
echo \" """
+ sshKey
+ """\" > /root/.ssh/authorized_keys
service sshd restart
"""
)
# List of commands to be downloaded
bootstrapCommands = bootstrapParameters.get("user_data_commands")
if isinstance(bootstrapCommands, six.string_types):
bootstrapCommands = bootstrapCommands.split(",")
if not bootstrapCommands:
return S_ERROR("user_data_commands list is not defined")
userDataDict["bootstrapCommands"] = " ".join(bootstrapCommands)
script = (
"""
cat <<X5_EOF >/root/hostkey.pem
%(user_data_file_hostkey)s
%(user_data_file_hostcert)s
X5_EOF
mkdir -p /var/spool/checkout/context
cd /var/spool/checkout/context
for dfile in %(bootstrapCommands)s
do
echo curl --insecure -s %(user_data_commands_base_url)s/$dfile -o $dfile
i=7
while [ $i -eq 7 ]
do
curl --insecure -s %(user_data_commands_base_url)s/$dfile -o $dfile
i=$?
if [ $i -eq 7 ]; then
echo curl connection failure for file $dfile
sleep 10
fi
done
curl --insecure -s %(user_data_commands_base_url)s/$dfile -o $dfile || echo Download of $dfile failed with $? !
done
%(add_root_ssh_key)s
chmod +x vm-bootstrap
/var/spool/checkout/context/vm-bootstrap %(bootstrapArgs)s
#/sbin/shutdown -h now
"""
% userDataDict
)
if "HEPIX" in vmParameters:
script = (
"""
cat <<EP_EOF >>/var/lib/hepix/context/epilog.sh
#!/bin/sh
%s
EP_EOF
chmod +x /var/lib/hepix/context/epilog.sh
"""
% script
)
user_data = (
"""#!/bin/bash
mkdir -p /etc/joboutputs
(
%s
) > /etc/joboutputs/user_data.log 2>&1 &
exit 0
"""
% script
)
cloud_config = """#cloud-config
output: {all: '| tee -a /var/log/cloud-init-output.log'}
cloud_final_modules:
- [scripts-user, always]
"""
# Also try to add the ssh key using the standard cloud-init approach (may not work)
if sshKey:
cloud_config += (
"""
users:
- name: diracroot
sudo: ALL=(ALL) NOPASSWD:ALL
lock_passwd: true
ssh-authorized-keys:
- ssh-rsa %s
"""
% sshKey
)
# print "AT >>> user_data", user_data
# print "AT >>> cloud_config", cloud_config
return createMimeData(
((user_data, "x-shellscript", "dirac_boot.sh"), (cloud_config, "cloud-config", "cloud-config"))
)
def createUserDataScript(parameters):
defaultUser = os.environ.get("USER", parameters.get("User", "root"))
sshUser = parameters.get("SshUser", defaultUser)
defaultKey = os.path.expandvars("$HOME/.ssh/id_rsa.pub")
sshKeyFile = parameters.get("SshKey", defaultKey)
with open(sshKeyFile) as skf:
sshKey = skf.read().strip()
script = (
"""
# Allow root login
sed -i 's/PermitRootLogin no/PermitRootLogin yes/g' /etc/ssh/sshd_config
sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
# Copy id_rsa.pub to authorized_keys
echo \" """
+ sshKey
+ """\" > /root/.ssh/authorized_keys
service sshd restart
"""
)
if "HEPIX" in parameters:
script = (
"""
cat <<EP_EOF >>/var/lib/hepix/context/epilog.sh
#!/bin/sh
%s
EP_EOF
chmod +x /var/lib/hepix/context/epilog.sh
"""
% script
)
user_data = (
"""#!/bin/bash
mkdir -p /etc/joboutputs
(
%s
) > /etc/joboutputs/user_data.log 2>&1 &
exit 0
"""
% script
)
cloud_config = """#cloud-config
output: {all: '| tee -a /var/log/cloud-init-output.log'}
cloud_final_modules:
- [scripts-user, always]
"""
if sshKey:
cloud_config += """
users:
- name: %s
sudo: ALL=(ALL) NOPASSWD:ALL
lock_passwd: false
ssh-authorized-keys:
- %s
""" % (
sshUser,
sshKey,
)
mime = createMimeData(
((user_data, "x-shellscript", "dirac_boot.sh"), (cloud_config, "cloud-config", "cloud-config"))
)
return mime
def createCloudInitScript(vmParameters, bootstrapParameters):
"""Create a user data script for cloud-init based images."""
parameters = dict(vmParameters)
parameters.update(bootstrapParameters)
extraOpts = ""
lcgVer = parameters.get("LCGBundleVersion", None)
if lcgVer:
extraOpts = "-g %s" % lcgVer
# add extra yum installable packages
extraPackages = ""
if parameters.get("ExtraPackages"):
packages = parameters.get("ExtraPackages")
extraPackages = "\n".join([" - %s" % pp.strip() for pp in packages.split(",")])
# add a user account to connect via ssh
sshUserConnect = ""
sshUser = parameters.get("SshUser")
sshKeyFile = parameters.get("SshKey")
sshKey = ""
if sshKeyFile:
with open(sshKeyFile) as sshFile:
sshKey = sshFile.read()
if sshUser and sshKey:
sshUserConnect = """
users:
- name: %s
sudo: ALL=(ALL) NOPASSWD:ALL
lock_passwd: false
ssh-authorized-keys:
- %s
""" % (
sshUser,
sshKey,
)
bootstrapArgs = {
"dirac-site": parameters.get("Site"),
"submit-pool": parameters.get("SubmitPool", ""),
"ce-name": parameters.get("CEName"),
"ce-type": parameters.get("InnerCEType", "Singularity"),
"image-name": parameters.get("Image"),
"vm-uuid": parameters.get("VMUUID"),
"vmtype": parameters.get("VMType"),
"vo": parameters.get("VO", ""),
"running-pod": parameters.get("RunningPod", parameters.get("VO", "")),
"cvmfs-proxy": parameters.get("CVMFSProxy", "DIRECT"),
"cs-servers": ",".join(parameters.get("CSServers", [])),
"number-of-processors": parameters.get("NumberOfProcessors", 1),
"whole-node": parameters.get("WholeNode", True),
"required-tag": parameters.get("RequiredTag", ""),
"release-version": parameters.get("Version"),
"extraopts": extraOpts,
"release-project": parameters.get("Project"),
"setup": parameters.get("Setup"),
"user-root": parameters.get("UserRoot", "/cvmfs/cernvm-prod.cern.ch/cvm4"),
"timezone": parameters.get("Timezone", "UTC"),
"pilot-server": parameters.get("pilotFileServer", "localhost"),
"extra-packages": extraPackages,
"ssh-user": sshUserConnect,
"max-cycles": parameters.get("MaxCycles", "100"),
}
default_template = os.path.join(os.path.dirname(__file__), "cloudinit.template")
template_path = parameters.get("CITemplate", default_template)
# Cert/Key need extra indents to keep yaml formatting happy
with open(bootstrapParameters["CloudPilotCert"]) as cfile:
raw_str = cfile.read().strip()
raw_str = raw_str.replace("\n", "\n ")
bootstrapArgs["hostkey"] = raw_str
with open(bootstrapParameters["CloudPilotKey"]) as kfile:
raw_str = kfile.read().strip()
raw_str = raw_str.replace("\n", "\n ")
bootstrapArgs["hostcert"] = raw_str
with open(template_path) as template_fd:
template = template_fd.read()
template = template % bootstrapArgs
mime = createMimeData(((template, "cloud-config", "pilotconfig"),))
return mime
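def _exampleCreateUserDataScript():
    """Illustrative sketch only, not part of the original module: the minimal
    parameter dict accepted by createUserDataScript. The account name and the
    public-key path below are placeholders, not real defaults."""
    parameters = {
        "SshUser": "diracuser",             # hypothetical account name
        "SshKey": "/path/to/id_rsa.pub",    # hypothetical public key path
    }
    return createUserDataScript(parameters)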
| DIRACGrid/DIRAC | src/DIRAC/Resources/Cloud/Utilities.py | Python | gpl-3.0 | 10,256 | ["DIRAC"] | 6014c1bde55eca7b0ab408748f201c6d710c8f24da5aa514ff1bad05d608b14b |
# Copyright 2012, 2013 by the Micromagnum authors.
#
# This file is part of MicroMagnum.
#
# MicroMagnum is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MicroMagnum is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MicroMagnum. If not, see <http://www.gnu.org/licenses/>.
from .storage_stephandler import StorageStepHandler
from magnum.micromagnetics.io import writeVTK
from magnum.micromagnetics.io.vtk import VtkGroup
import os.path
class VTKStorage(StorageStepHandler):
def __init__(self, output_dir, field_id_or_ids = []):
super(VTKStorage, self).__init__(output_dir)
if hasattr(field_id_or_ids, "__iter__"):
field_ids = list(field_id_or_ids)
else:
field_ids = [field_id_or_ids]
if not all(isinstance(x, str) for x in field_ids):
raise ValueError("VTKStorage: 'field_id' parameter must be a either a string or a collection of strings.")
def make_file_fn(field_id):
# Create file name creating function 'file_fn'
pattern = "%s-%%07i.vtr" % field_id
return lambda state: pattern % state.step
self.__groups = {}
for field_id in field_ids:
self.addVariable(field_id, make_file_fn(field_id))
self.addComment("timestep", lambda state: state.t)
self.addComment("stepsize", lambda state: state.h)
self.addComment("step", lambda state: state.step)
def store(self, id, path, field, comments):
writeVTK(path, field)
# To add the entry to the pvd group, strip the file name from path.
self.__groups[id].addFile(filepath=os.path.basename(path), **dict(comments))
def done(self):
for group in self.__groups.values():
group.save()
def addVariable(self, var_id, file_fn, field_fn = None):
super(VTKStorage, self).addVariable(var_id, file_fn, field_fn)
pvd_filename = "Group-%s.pvd" % var_id
self.__groups[var_id] = VtkGroup(self.getOutputDirectory(), pvd_filename)
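# Illustrative sketch only, not part of the original module: write the field
# with id "M" to ./vtk-out each time the step handler fires. The field id and
# output directory are assumptions; the surrounding simulation loop is not shown.
def _example_vtk_storage():
    return VTKStorage("vtk-out", "M")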
| MicroMagnum/MicroMagnum | src/magnum/micromagnetics/stephandler/vtk_storage.py | Python | gpl-3.0 | 2,464 | ["VTK"] | 580b644fd95bf8b15d47f77ee877fc021bc41f3c80dc8d5385b4b9c1ec2c50e4 |
"""
@name: PyHouse/src/Modules/Communication/_test/test_phone.py
@author: D. Brian Kimmel
@contact: d.briankimmel@gmail.com
@copyright: 2016-2017 by D. Brian Kimmel
@date: Created on May 30, 2016
@licencse: MIT License
@summary:
"""
import unittest
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testName(self):
pass
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
# ## END DBK
| DBrianKimmel/PyHouse | Project/src/Modules/Computer/Communication/_test/test_phone.py | Python | mit | 532 | ["Brian"] | 18b56758fe1a45df594091d1cc3e3ef143cc69fa98926693f41b605fc77740df |
"""
============
constants.py
============
Physical constants for astronomy & astrophysics
Classes
=======
Constant:
Defines a physical constant for use in astronomical or physical computation.
A Constant instance is defined by its value, description, and units attributes.
"""
class Constant(float):
"""
Creates a physical or astronomical constant instance, which inherits the float
properties of the value argument but has a description string and unit string
as attributes.
Attributes:
value (float) : The numerical value of the constant
description (str) : A name and/or physical description of the constant
units (str) : The units of the constant (i.e. [Constant])
"""
def __new__(cls, value, description, units):
"""
Arguments:
value (float) : The numerical value of the constant
description (str) : A name and/or physical description of the constant
units (str) : The units of the constant (i.e. [Constant])
"""
# Raise an error if description/units attributes are not strings
if not isinstance(description, str) or not isinstance(units, str):
raise TypeError(
'Constant description and units must be entered as strings')
instance = super().__new__(cls, value)
instance.value = value
instance.description = description
instance.units = units
return instance
def __repr__(self):
# Ensures constants are represented by a custom long-form description,
# not just values
return 'Constant({self.value}, {self.description}, {self.units})'.format(self=self)
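def _example_constant():
    """Illustrative sketch only, not part of the original module: a Constant
    takes part in arithmetic like a plain float while keeping its metadata."""
    c = Constant(2.99792458e10, 'speed of light', 'cm/s')
    distance_cm = c * 2.0               # plain float result, in cm
    return distance_cm, c.description, c.units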
# Astronomical constants
r_sun = Constant(
6.9598e10, 'r_sun: Solar radius', 'cm')
l_sun = Constant(
3.8418e33, 'l_sun: Solar luminosity', 'erg/s')
m_sun = Constant(
1.9892e33, 'm_sun: Solar mass', 'g')
m_earth = Constant(
5.9764e27, 'm_earth: Earth mass', 'g')
au_cm = Constant(
1.495978921e13, 'au_cm: 1 AU (Astronomical Unit), or distance from Earth to Sun', 'cm')
speed_light = Constant(
2.99792458e10, 'speed_light: Speed of light in vacuum (c)', 'cm/s')
grav_const = Constant(
6.67428e-8, 'grav_const: Gravitational constant (G)', 'cm^3 g^-1 s^-2')
# Physical constants
stef_boltz_const = Constant(
5.670400e-5, 'stef_boltz_const: Stefan-Boltzmann constant (sigma)', 'erg cm^-2 s^-1 K^-4')
mass_H_atom = Constant(
1.6726231e-24, 'mass_H_atom: Mass of hydrogen atom', 'g')
mass_electron = Constant(
9.1093826e-28, 'mass_electron: Mass of the electron', 'g')
planck_const = Constant(
6.62606896E-27, 'planck_const: Planck\'s constant (h)', 'erg s')
atomic_mass_unit = Constant(
1.660538782e-24, 'atomic_mass_unit: Atomic mass unit (u)', 'g')
boltzmann_const = Constant(
1.3806504e-16, 'boltzmann_const: Boltzmann constant (k)', 'erg K^-1')
avogadro_const = Constant(
6.02214179e23, 'avogadro_const: Avogadro\'s number', 'mol^-1')
rad_const = Constant(
4 * stef_boltz_const / speed_light,
'rad_const: Radiation constant, total energy radiated by a blackbody',
'erg cm^-3 K^-4')
day_secs = Constant(
86400., 'day_secs: Number of seconds in a day', 's')
# Miscellaneous coefficients and other constants for astronomy functions
visc_mol_const = Constant(
1.84e-17, 'visc_mol_const: Constant for calculating molecular diffusivity', 'g cm^-1 K^-2.5')
nu_rad_const = Constant(
6.88e-26, 'nu_rad_const: Constant for calculating radiative diffusivity', 'g^2 cm^-2 K^-4')
core_mass_coeff = Constant(
6.22e5, 'core_mass_coeff: Core mass multiplicative constant', 'Dimensionless')
core_mass_offset = Constant(
0.487, 'core_mass_offset: Core mass additive constant', 'M_sun')
idrad_const = Constant(
3.2e7, 'idrad_const: radiation/gas pressure boundary', 'K cm')
iddeg_const = Constant(
1.207e5, 'iddeg_const: ideal gas/degenerate pressure boundary', 'K')
# Polynomial coefficients for Ferrario (2005) initial-final mass relation
mimf_coeff_6 = Constant(
-0.00012336, 'mimf_coeff_6: 6th power IFMR coefficient', 'M_sun^-5')
mimf_coeff_5 = Constant(
0.003160, 'mimf_coeff_5: 5th power IFMR coefficient', 'M_sun^-4')
mimf_coeff_4 = Constant(
0.02960, 'mimf_coeff_4: 4th power IFMR coefficient', 'M_sun^-3')
mimf_coeff_3 = Constant(
0.12350, 'mimf_coeff_3: 3rd power IFMR coefficient', 'M_sun^-2')
mimf_coeff_2 = Constant(
0.21550, 'mimf_coeff_2: 2nd power IFMR coefficient', 'M_sun^-1')
mimf_coeff_1 = Constant(
0.19022, 'mimf_coeff_1: 1st power IFMR coefficient', 'Dimensionless')
mimf_coeff_0 = Constant(
0.46575, 'mimf_coeff_0: IFMR constant term', 'M_sun')
# Kroupa IMF mass bounds and alpha values
imf_m1 = Constant(
0.08, 'imf_m1: Lower bound for IMF mass range', 'M_sun')
imf_m2 = Constant(
0.50, 'imf_m2: Middle bound for IMF mass range', 'M_sun')
imf_a1 = Constant(
0.30, 'imf_a1: Kroupa IMF alpha value', 'Dimensionless')
imf_a2 = Constant(
1.30, 'imf_a2: Kroupa IMF alpha value', 'Dimensionless')
imf_a3 = Constant(
2.3, 'imf_a3: Kroupa IMF alpha value', 'Dimensionless')
# van Loon (2005) mass loss rate constants
van_loon_1 = Constant(
-5.65, 'van_loon_1: van Loon (2005) mass loss rate constant', 'M_sun yr^-1')
van_loon_2 = Constant(
6.3, 'van_loon_2: van Loon (2005) mass loss rate constant', 'M_sun yr^-1')
van_loon_3 = Constant(
3500., 'van_loon_3: van Loon (2005) mass loss rate constant', 'K')
| NuGrid/NuGridPy | nugridpy/constants.py | Python | bsd-3-clause | 5,488 | ["Avogadro"] | e605676f1f24560978620c8826fbb0fc2b60b5dd51515233cb034eb82de4a623 |
#
# A file that opens the neuroConstruct project LarkumEtAl2009 and runs multiple simulations stimulating each terminal apical branch with a varying number of synapses.
#
# Author: Matteo Farinella
from sys import *
from java.io import File
from java.lang import System
from java.util import ArrayList
from ucl.physiol.neuroconstruct.project import ProjectManager
from ucl.physiol.neuroconstruct.neuron import NeuronFileManager
from ucl.physiol.neuroconstruct.utils import NumberGenerator
from ucl.physiol.neuroconstruct.nmodleditor.processes import ProcessManager
from ucl.physiol.neuroconstruct.simulation import SimulationData
from ucl.physiol.neuroconstruct.gui import SimulationRerunFrame
from ucl.physiol.neuroconstruct.gui.plotter import PlotManager
from ucl.physiol.neuroconstruct.gui.plotter import PlotCanvas
from ucl.physiol.neuroconstruct.dataset import DataSet
from math import *
import time
import shutil
import random
import os
import subprocess
# Load the original project
projName = "LarkumEtAl2009"
projFile = File("/home/matteo/neuroConstruct/models/"+projName+"/"+projName+".ncx")
print "Loading project from file: " + projFile.getAbsolutePath()+", exists: "+ str(projFile.exists())
pm = ProjectManager()
myProject = pm.loadProject(projFile)
simConfig = myProject.simConfigInfo.getSimConfig("Default Simulation Configuration")#
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
numGenerated = myProject.generatedCellPositions.getNumberInAllCellGroups()
simsRunning = []
def updateSimsRunning():
simsFinished = []
for sim in simsRunning:
timeFile = File(myProject.getProjectMainDirectory(), "simulations/"+sim+"/time.dat")
#print "Checking file: "+timeFile.getAbsolutePath() +", exists: "+ str(timeFile.exists())
if (timeFile.exists()):
simsFinished.append(sim)
if(len(simsFinished)>0):
for sim in simsFinished:
simsRunning.remove(sim)
if numGenerated > 0:
print "Generating NEURON scripts..."
myProject.neuronFileManager.setQuitAfterRun(1) # Remove this line to leave the NEURON sim windows open after finishing
myProject.neuronSettings.setCopySimFiles(1) # 1 copies hoc/mod files to PySim_0 etc. and will allow multiple sims to run at once
myProject.neuronSettings.setGraphicsMode(False) # Run NEURON without GUI
# Note same network structure will be used for each!
# Change this number to the number of processors you wish to use on your local machine
maxNumSimultaneousSims = 100
#multiple simulation settings:
prefix = "" #string that will be added to the name of the simulations to identify the simulation set
trials = 100
Nbranches = 10
Configuration = ["NMDAspike input"]
apical_branch = ["apical17","apical18","apical21","apical23","apical24","apical25","apical27","apical28","apical31","apical34","apical35","apical37","apical38","apical44","apical46","apical52","apical53","apical54","apical56","apical57","apical61","apical62","apical65","apical67","apical68","apical69","apical72","apical73"]
apical_stim = ["NMDAs_17","NMDAs_18","NMDAs_21","NMDAs_23","NMDAs_24","NMDAs_25","NMDAs_27","NMDAs_28","NMDAs_31","NMDAs_34","NMDAs_35","NMDAs_37","NMDAs_38","NMDAs_44","NMDAs_46","NMDAs_52","NMDAs_53","NMDAs_54","NMDAs_56","NMDAs_57","NMDAs_61","NMDAs_62","NMDAs_65","NMDAs_67","NMDAs_68","NMDAs_69","NMDAs_72","NMDAs_73"]
apical_ID =[4460,4571,4793,4961,4994,5225,5477,5526,5990,6221,6274,6523,6542,6972,7462,8026,8044,8088,8324,8468,8685,8800,8966,9137,9160,9186,9592,9639]
apical_lenght = [98,69,78,26,34,166,161,49,143,55,87,25,38,73,194,19,22,26,25,129,138,95,42,89,21,62,26,18]
apical_plot = ["pyrCML_apical17_V","pyrCML_apical18_V","pyrCML_apical21_V","pyrCML_apical23_V","pyrCML_apical24_V","pyrCML_apical25_V","pyrCML_apical27_V","pyrCML_apical28_V","pyrCML_apical31_V","pyrCML_apical34_V","pyrCML_apical35_V","pyrCML_apical37_V","pyrCML_apical38_V","pyrCML_apical44_V","pyrCML_apical46_V","pyrCML_apical52_V","pyrCML_apical53_V","pyrCML_apical54_V","pyrCML_apical56_V","pyrCML_apical57_V","pyrCML_apical61_V","pyrCML_apical62_V","pyrCML_apical65_V","pyrCML_apical67_V","pyrCML_apical68_V","pyrCML_apical69_V","pyrCML_apical72_V","pyrCML_apical73_V"]
print "Going to run " +str(int(trials*Nbranches)) + " simulations"
refStored = []
simGroups = ArrayList()
simInputs = ArrayList()
simPlots = ArrayList()
stringConfig = Configuration[0]
print "nConstruct using SIMULATION CONFIGURATION: " +stringConfig
simConfig = myProject.simConfigInfo.getSimConfig(stringConfig)
for y in range(7, Nbranches):
j=y+1
selectedBranches = []
prefix = "w100NMDA15b"+str(j) #number of branches stimulated
print
print "-----------------------------------------------------------------------"
print str(trials)+" trials, stimulating " +str(int(j))+" branches"
print "reference name: " + prefix +"..."
print "-----------------------------------------------------------------------"
print
for i in range(0, trials):
randomseed = random.randint(1000,5000)
print ""
selectedBranches = []
#empty vectors
simGroups = ArrayList()
simInputs = ArrayList()
simPlots = ArrayList()
######## Selecting j random different apical branches to Input and Plot ###############
for r in range(0,j):
randomApicalBranch = random.randint(0,int(len(apical_branch))-1)
while randomApicalBranch in selectedBranches:
randomApicalBranch = random.randint(0,int(len(apical_branch))-1)
selectedBranches.append(randomApicalBranch)
print "selected branch "+apical_branch[randomApicalBranch]
### modifying NMDA spike delay
stim = myProject.elecInputInfo.getStim(apical_stim[randomApicalBranch])
delay = random.randint(300, 400)
stim.setDelay(delay)
myProject.elecInputInfo.updateStim(stim)
simInputs.add(apical_stim[randomApicalBranch])
simPlots.add(apical_plot[randomApicalBranch])
simGroups.add("pyrCML_group")
simPlots.add("pyrCML_soma_V")
simConfig.setCellGroups(simGroups)
simConfig.setInputs(simInputs)
simConfig.setPlots(simPlots)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
'''######## configuration exc 1500 ###############
simInputs.add("backgroundExc")
simConfig.setInputs(simInputs)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
##########################################################################################
simRef = prefix+"E1500__"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
##### RUN BLOCK #####
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(2) # Wait for sim to be kicked off
simInputs.remove("backgroundExc")
#####################'''
'''######## configuration exc 1500 + balanced inh ###############
simInputs.add("backgroundExc")
simInputs.add("backgroundInh")
simConfig.setInputs(simInputs)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
##########################################################################################
simRef = prefix+"E1500I43__"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
##### RUN BLOCK #####
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(2) # Wait for sim to be kicked off
simInputs.remove("backgroundExc")
simInputs.remove("backgroundInh")
#####################'''
'''######## configuration exc 1200 + balanced inh ###############
simInputs.add("backgroundExc1200")
simInputs.add("backgroundInh1200")
simConfig.setInputs(simInputs)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
##########################################################################################
simRef = prefix+"excinh1200_"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
##### RUN BLOCK #####
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(5) # Wait for sim to be kicked off
simInputs.remove("backgroundExc1200")
simInputs.remove("backgroundInh1200")
#####################'''
######## configuration exc 900 ###############
simInputs.add("backgroundExc900")
simConfig.setInputs(simInputs)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
##########################################################################################
simRef = prefix+"E900_"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
'''##### RUN BLOCK #####
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(2) # Wait for sim to be kicked off
simInputs.remove("backgroundExc900")
#####################'''
######## configuration exc 900 + balanced inh ###############
simInputs.add("backgroundExc900")
simInputs.add("backgroundInh900")
simConfig.setInputs(simInputs)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
##########################################################################################
simRef = prefix+"EI900_"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
'''##### RUN BLOCK #####
randomseed = random.randint(1000,5000)
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(2) # Wait for sim to be kicked off
simInputs.remove("backgroundExc900")
simInputs.remove("backgroundInh900")
#####################'''
'''##### configuration exc 1500 + 50% inh #####
simRef = prefix+"excinh050pc_"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
##### RUN BLOCK #####
simInputs.add("backgroundExc")
simInputs.add("backgroundInh050")
simConfig.setInputs(simInputs)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(3) # wait for the process to be sent out
simInputs.remove("backgroundExc")
simInputs.remove("backgroundInh050")
#####################'''
'''##### configuration exc 1500 + 75% inh #####
simRef = prefix+"excinh075pc_"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
##### RUN BLOCK #####
simInputs.add("backgroundExc")
simInputs.add("backgroundInh075")
simConfig.setInputs(simInputs)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(2) # wait for the process to be sent out
simInputs.remove("backgroundExc")
simInputs.remove("backgroundInh075")
#####################'''
'''##### configuration exc 1500 + 125% inh #####
simRef = prefix+"excinh125pc_"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
##### RUN BLOCK #####
simInputs.add("backgroundExc")
simInputs.add("backgroundInh125")
simConfig.setInputs(simInputs)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(2) # wait for the process to be sent out
simInputs.remove("backgroundExc")
simInputs.remove("backgroundInh125")
#####################'''
'''##### configuration exc 1500 + 150% inh #####
simRef = prefix+"E1500I150pc_"+str(i)
print "Simref: "+simRef
myProject.simulationParameters.setReference(simRef)
refStored.append(simRef)
##### RUN BLOCK #####
simInputs.add("backgroundExc")
simInputs.add("backgroundInh150")
simConfig.setInputs(simInputs)
print "group generated: "+simConfig.getCellGroups().toString()
print "going to stimulate: "+simConfig.getInputs().toString()
print "going to record: "+simConfig.getPlots().toString()
pm.doGenerate(simConfig.getName(), randomseed)
while pm.isGenerating():
print "Waiting for the project to be generated..."
time.sleep(2)
myProject.neuronFileManager.setSuggestedRemoteRunTime(10)
myProject.neuronFileManager.generateTheNeuronFiles(simConfig, None, NeuronFileManager.RUN_HOC, randomseed)
print "Generated NEURON files for: "+simRef
compileProcess = ProcessManager(myProject.neuronFileManager.getMainHocFile())
compileSuccess = compileProcess.compileFileWithNeuron(0,0)
print "Compiled NEURON files for: "+simRef
if compileSuccess:
pm.doRunNeuron(simConfig)
print "Set running simulation: "+simRef
time.sleep(2) # wait for the process to be sent out
simInputs.remove("backgroundExc")
simInputs.remove("backgroundInh150")
#####################'''
### end for i (trials)
### end for j (noise)
######## Extracting simulations results ###############
time.sleep(240)
y=-1
for sim in refStored:
y=y+1
pullSimFilename = "pullsim.sh"
path = "/home/matteo/neuroConstruct/models/"+projName
print "\n------ Checking directory: " + path +"/simulations"+"/"+sim
pullsimFile = path+"/simulations/"+sim+"/"+pullSimFilename
if os.path.isfile(pullsimFile):
print pullSimFilename+" exists and will be executed..."
process = subprocess.Popen("cd "+path+"/simulations/"+sim+"/"+";./"+pullSimFilename, shell=True, stdout=subprocess.PIPE)
stdout_value = process.communicate()[0]
process.wait()
else:
print "Simulation not finished"
if os.path.isfile(path+"/simulations/"+sim+"/pyrCML_group_0.dat"):
print "Simulation results recovered from remote cluster."
simDir = File(path+"/simulations/"+sim)
newFileSoma = path+"/recordings/"+sim+".soma"
shutil.copyfile(path+"/simulations/"+sim+"/pyrCML_group_0.dat" , newFileSoma)
for ID in apical_ID:
if os.path.isfile(path+"/simulations/"+sim+"/pyrCML_group_0."+str(ID)+".dat"):
newFileApical = path+"/recordings/"+sim+"_ID"+str(ID)+".apical"
shutil.copyfile(path+"/simulations/"+sim+"/pyrCML_group_0."+str(ID)+".dat" , newFileApical)
print "Simulation was successful. "
print "Results saved."
print
else:
print "Simulation failed!"
### '''
| pgleeson/TestArea | models/LarkumEtAl2009/pythonScripts/PAP_multibranches_inhibition100ms.py | Python | gpl-2.0 | 22,071 | ["NEURON"] | 3feca849006afc0c6b345fd81d51c8d48722163dffcd2e34d57dc96bafef1b4d |
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unit test for Hie"""
import unittest
from Bio.SCOP import Hie
class HieTests(unittest.TestCase):
def setUp(self):
self.filename = './SCOP/dir.hie.scop.txt_test'
def testParse(self):
"""Test if all records in a HIE file are being read"""
f = open(self.filename)
try:
count = 0
for record in Hie.parse(f):
count +=1
self.assertEqual(count, 21)
finally:
f.close()
def testStr(self):
"""Test if we can convert each record to a string correctly"""
f = open(self.filename)
try:
for line in f:
record = Hie.Record(line)
# End of line is platform dependent. Strip it off
self.assertEqual(str(record).rstrip(), line.rstrip())
finally:
f.close()
def testError(self):
"""Test if a corrupt record raises the appropriate exception"""
corruptRec = "4926sdfhjhfgyjdfyg"
self.assertRaises(ValueError, Hie.Record, corruptRec)
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| updownlife/multipleK | dependencies/biopython-1.65/Tests/test_SCOP_Hie.py | Python | gpl-2.0 | 1,413 | ["Biopython"] | 42d1745ff0eed36e35773f84b183e0ff17823de5e068d69287e7d2a3368438e3 |
"""Utility functions for printing version information."""
import importlib
import locale
import os
import platform
import struct
import subprocess
import sys
def get_sys_info():
"""Returns system information as a dict"""
blob = []
# get full commit hash
commit = None
if os.path.isdir(".git") and os.path.isdir("xarray"):
try:
pipe = subprocess.Popen(
'git log --format="%H" -n 1'.split(" "),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
so, _ = pipe.communicate()
except Exception:
pass
else:
if pipe.returncode == 0:
commit = so
try:
commit = so.decode("utf-8")
except ValueError:
pass
commit = commit.strip().strip('"')
blob.append(("commit", commit))
try:
(sysname, _nodename, release, _version, machine, processor) = platform.uname()
blob.extend(
[
("python", sys.version),
("python-bits", struct.calcsize("P") * 8),
("OS", f"{sysname}"),
("OS-release", f"{release}"),
# ("Version", f"{version}"),
("machine", f"{machine}"),
("processor", f"{processor}"),
("byteorder", f"{sys.byteorder}"),
("LC_ALL", f'{os.environ.get("LC_ALL", "None")}'),
("LANG", f'{os.environ.get("LANG", "None")}'),
("LOCALE", f"{locale.getlocale()}"),
]
)
except Exception:
pass
return blob
def netcdf_and_hdf5_versions():
libhdf5_version = None
libnetcdf_version = None
try:
import netCDF4
libhdf5_version = netCDF4.__hdf5libversion__
libnetcdf_version = netCDF4.__netcdf4libversion__
except ImportError:
try:
import h5py
libhdf5_version = h5py.version.hdf5_version
except ImportError:
pass
return [("libhdf5", libhdf5_version), ("libnetcdf", libnetcdf_version)]
def show_versions(file=sys.stdout):
"""print the versions of xarray and its dependencies
Parameters
----------
file : file-like, optional
print to the given file-like object. Defaults to sys.stdout.
"""
sys_info = get_sys_info()
try:
sys_info.extend(netcdf_and_hdf5_versions())
except Exception as e:
print(f"Error collecting netcdf / hdf5 version: {e}")
deps = [
# (MODULE_NAME, f(mod) -> mod version)
("xarray", lambda mod: mod.__version__),
("pandas", lambda mod: mod.__version__),
("numpy", lambda mod: mod.__version__),
("scipy", lambda mod: mod.__version__),
# xarray optionals
("netCDF4", lambda mod: mod.__version__),
("pydap", lambda mod: mod.__version__),
("h5netcdf", lambda mod: mod.__version__),
("h5py", lambda mod: mod.__version__),
("Nio", lambda mod: mod.__version__),
("zarr", lambda mod: mod.__version__),
("cftime", lambda mod: mod.__version__),
("nc_time_axis", lambda mod: mod.__version__),
("PseudoNetCDF", lambda mod: mod.__version__),
("rasterio", lambda mod: mod.__version__),
("cfgrib", lambda mod: mod.__version__),
("iris", lambda mod: mod.__version__),
("bottleneck", lambda mod: mod.__version__),
("dask", lambda mod: mod.__version__),
("distributed", lambda mod: mod.__version__),
("matplotlib", lambda mod: mod.__version__),
("cartopy", lambda mod: mod.__version__),
("seaborn", lambda mod: mod.__version__),
("numbagg", lambda mod: mod.__version__),
("fsspec", lambda mod: mod.__version__),
("cupy", lambda mod: mod.__version__),
("pint", lambda mod: mod.__version__),
("sparse", lambda mod: mod.__version__),
# xarray setup/test
("setuptools", lambda mod: mod.__version__),
("pip", lambda mod: mod.__version__),
("conda", lambda mod: mod.__version__),
("pytest", lambda mod: mod.__version__),
# Misc.
("IPython", lambda mod: mod.__version__),
("sphinx", lambda mod: mod.__version__),
]
deps_blob = []
for (modname, ver_f) in deps:
try:
if modname in sys.modules:
mod = sys.modules[modname]
else:
mod = importlib.import_module(modname)
except Exception:
deps_blob.append((modname, None))
else:
try:
ver = ver_f(mod)
deps_blob.append((modname, ver))
except Exception:
deps_blob.append((modname, "installed"))
print("\nINSTALLED VERSIONS", file=file)
print("------------------", file=file)
for k, stat in sys_info:
print(f"{k}: {stat}", file=file)
print("", file=file)
for k, stat in deps_blob:
print(f"{k}: {stat}", file=file)
if __name__ == "__main__":
show_versions()
| pydata/xarray | xarray/util/print_versions.py | Python | apache-2.0 | 5,145 | ["NetCDF"] | 912aa587472b9e91336a7858995dc5ed1b86132b3c9eb3a133f61ffe2ec3edb3 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import astropy.units as u
from astropy.extern import six
from astropy import log
import warnings
from .extern.validator import validate_array, validate_scalar
__all__ = ["generate_energy_edges", "sed_conversion",
"build_data_table", "generate_diagnostic_plots"]
# Input validation tools
def validate_column(data_table, key, pt, domain='positive'):
try:
column = data_table[key]
array = validate_array(key, u.Quantity(column, unit=column.unit),
physical_type=pt, domain=domain)
except KeyError as e:
raise TypeError(
'Data table does not contain required column "{0}"'.format(key))
return array
def validate_data_table(data_table):
data = {}
flux_types = ['flux', 'differential flux', 'power', 'differential power']
# Energy and flux arrays
data['energy'] = validate_column(data_table, 'energy', 'energy')
data['flux'] = validate_column(data_table, 'flux', flux_types)
# Flux uncertainties
if 'flux_error' in data_table.keys():
dflux = validate_column(data_table, 'flux_error', flux_types)
data['dflux'] = u.Quantity((dflux, dflux))
elif 'flux_error_lo' in data_table.keys() and 'flux_error_hi' in data_table.keys():
data['dflux'] = u.Quantity((
validate_column(data_table, 'flux_error_lo', flux_types),
validate_column(data_table, 'flux_error_hi', flux_types)))
else:
raise TypeError('Data table does not contain required column'
' "flux_error" or columns "flux_error_lo" and "flux_error_hi"')
# Energy bin edges
if 'ene_width' in data_table.keys():
ene_width = validate_column(data_table, 'ene_width', 'energy')
data['dene'] = u.Quantity((ene_width / 2., ene_width / 2.))
elif 'ene_lo' in data_table.keys() and 'ene_hi' in data_table.keys():
ene_lo = validate_column(data_table, 'ene_lo', 'energy')
ene_hi = validate_column(data_table, 'ene_hi', 'energy')
data['dene'] = u.Quantity(
(data['energy'] - ene_lo, ene_hi - data['energy']))
else:
data['dene'] = generate_energy_edges(data['energy'])
# Upper limit flags
if 'ul' in data_table.keys():
# Check if it is a integer or boolean flag
ul_col = data_table['ul']
if ul_col.dtype.type is np.int_ or ul_col.dtype.type is np.bool_:
data['ul'] = np.array(ul_col, dtype=np.bool)
elif ul_col.dtype.type is np.str_:
strbool = True
for ul in ul_col:
if ul != 'True' and ul != 'False':
strbool = False
if strbool:
data['ul'] = np.array([eval(ul) for ul in ul_col], dtype=np.bool)
else:
raise TypeError('UL column is in wrong format')
else:
raise TypeError('UL column is in wrong format')
else:
data['ul'] = np.array([False, ] * len(data['energy']))
HAS_CL = False
if 'keywords' in data_table.meta.keys():
if 'cl' in data_table.meta['keywords'].keys():
HAS_CL = True
data['cl'] = validate_scalar(
'cl', data_table.meta['keywords']['cl']['value'])
if not HAS_CL:
data['cl'] = 0.9
if 'ul' in data_table.keys():
log.warning('"cl" keyword not provided in input data table, upper limits'
'will be assumed to be at 90% confidence level')
return data
# Convenience tools
def sed_conversion(energy, model_unit, sed):
"""
Manage conversion between differential spectrum and SED
"""
model_pt = model_unit.physical_type
ones = np.ones(energy.shape)
if sed:
# SED
f_unit = u.Unit('erg/s')
if model_pt == 'power' or model_pt == 'flux' or model_pt == 'energy':
sedf = ones
elif 'differential' in model_pt:
sedf = (energy ** 2)
else:
raise u.UnitsError(
'Model physical type ({0}) is not supported'.format(model_pt),
'Supported physical types are: power, flux, differential'
' power, differential flux')
if 'flux' in model_pt:
f_unit /= u.cm ** 2
elif 'energy' in model_pt:
# particle energy distributions
f_unit = u.erg
elif sed is None:
# Use original units
f_unit = model_unit
sedf = ones
else:
# Differential spectrum
f_unit = u.Unit('1/(s TeV)')
if 'differential' in model_pt:
sedf = ones
elif model_pt == 'power' or model_pt == 'flux' or model_pt == 'energy':
# From SED to differential
sedf = 1 / (energy ** 2)
else:
raise u.UnitsError(
'Model physical type ({0}) is not supported'.format(model_pt),
'Supported physical types are: power, flux, differential'
' power, differential flux')
if 'flux' in model_pt:
f_unit /= u.cm ** 2
elif 'energy' in model_pt:
# particle energy distributions
f_unit = u.Unit('1/TeV')
log.debug(
'Converted from {0} ({1}) into {2} ({3}) for sed={4}'.format(model_unit, model_pt,
f_unit, f_unit.physical_type, sed))
return f_unit, sedf
def trapz_loglog(y, x, axis=-1, intervals=False):
"""
Integrate along the given axis using the composite trapezoidal rule in
loglog space.
Integrate `y` (`x`) along given axis in loglog space.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
Independent variable to integrate over.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule in loglog space.
"""
try:
y_unit = y.unit
y = y.value
except AttributeError:
y_unit = 1.
try:
x_unit = x.unit
x = x.value
except AttributeError:
x_unit = 1.
y = np.asanyarray(y)
x = np.asanyarray(x)
slice1 = [slice(None)] * y.ndim
slice2 = [slice(None)] * y.ndim
slice1[axis] = slice(None, -1)
slice2[axis] = slice(1, None)
if x.ndim == 1:
shape = [1] * y.ndim
shape[axis] = x.shape[0]
x = x.reshape(shape)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Compute the power law indices in each integration bin
b = np.log10(y[slice2] / y[slice1]) / np.log10(x[slice2] / x[slice1])
# if local powerlaw index is -1, use \int 1/x = log(x); otherwise use normal
# powerlaw integration
trapzs = np.where(np.abs(b+1.) > 1e-10,
(y[slice1] * (x[slice2] * (x[slice2]/x[slice1]) ** b - x[slice1]))/(b+1),
x[slice1] * y[slice1] * np.log(x[slice2]/x[slice1]))
tozero = (y[slice1] == 0.) + (y[slice2] == 0.) + (x[slice1] == x[slice2])
trapzs[tozero] = 0.
if intervals:
return trapzs * x_unit * y_unit
ret = np.add.reduce(trapzs, axis) * x_unit * y_unit
return ret
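def _example_trapz_loglog():
    """Illustrative sketch only, not part of the original module: trapz_loglog
    integrates power-law segments exactly, so y = x**-2 over [1, 10] recovers
    the analytic value 1 - 1/10 = 0.9 regardless of how the bins are placed."""
    x = np.logspace(0, 1, 5)
    return trapz_loglog(x ** -2, x)  # ~0.9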
def generate_energy_edges(ene):
"""Generate energy bin edges from given energy array.
Generate an array of energy edges from given energy array to be used as
abscissa error bar limits when no energy uncertainty or energy band is
provided.
Parameters
----------
ene : `astropy.units.Quantity` array instance
1-D array of energies with associated physical units.
Returns
-------
edge_array : `astropy.units.Quantity` array instance of shape ``(2,len(ene))``
Array of energy edge pairs corresponding to each given energy of the
input array.
"""
midene = np.sqrt((ene[1:] * ene[:-1]))
elo, ehi = np.zeros(len(ene)) * ene.unit, np.zeros(len(ene)) * ene.unit
elo[1:] = ene[1:] - midene
ehi[:-1] = midene - ene[:-1]
elo[0] = ene[0] * ( 1 - ene[0] / (ene[0] + ehi[0]))
ehi[-1] = elo[-1]
return np.array((elo, ehi)) * ene.unit
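def _example_generate_energy_edges():
    """Illustrative sketch only, not part of the original module: interior bin
    edges fall at the geometric means of neighbouring energies, giving an
    asymmetric (2, len(ene)) array of lower/upper half-widths."""
    energy = np.array([1., 10., 100.]) * u.TeV
    return generate_energy_edges(energy)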
def build_data_table(energy, flux, flux_error=None, flux_error_lo=None,
flux_error_hi=None, ene_width=None, ene_lo=None, ene_hi=None, ul=None,
cl=None):
"""
Read data into data dict.
Parameters
----------
energy : :class:`~astropy.units.Quantity` array instance
Observed photon energy array [physical type ``energy``]
flux : :class:`~astropy.units.Quantity` array instance
Observed flux array [physical type ``flux`` or ``differential flux``]
flux_error, flux_error_hi, flux_error_lo : :class:`~astropy.units.Quantity` array instance
68% CL gaussian uncertainty of the flux [physical type ``flux`` or
``differential flux``]. Either ``flux_error`` (symmetrical uncertainty) or
``flux_error_hi`` and ``flux_error_lo`` (asymmetrical uncertainties) must be
provided.
ene_width, ene_lo, ene_hi : :class:`~astropy.units.Quantity` array instance, optional
Width of the energy bins [physical type ``energy``]. Either ``ene_width``
(bin width) or ``ene_lo`` and ``ene_hi`` (Energies of the lower and upper
bin edges) can be provided. If none are provided,
``generate_energy_edges`` will be used.
ul : boolean or int array, optional
Boolean array indicating which of the flux values given in ``flux``
correspond to upper limits.
cl : float, optional
Confidence level of the flux upper limits given by ``ul``.
Returns
-------
data : dict
Data stored in a `dict`.
"""
from astropy.table import Table, Column
table = Table()
if cl is not None:
cl = validate_scalar('cl', cl)
table.meta['keywords'] = {'cl': {'value': cl}}
table.add_column(Column(name='energy', data=energy))
if ene_width is not None:
table.add_column(Column(name='ene_width', data=ene_width))
elif ene_lo is not None and ene_hi is not None:
table.add_column(Column(name='ene_lo', data=ene_lo))
table.add_column(Column(name='ene_hi', data=ene_hi))
table.add_column(Column(name='flux', data=flux))
if flux_error is not None:
table.add_column(Column(name='flux_error', data=flux_error))
elif flux_error_lo is not None and flux_error_hi is not None:
table.add_column(Column(name='flux_error_lo', data=flux_error_lo))
table.add_column(Column(name='flux_error_hi', data=flux_error_hi))
else:
raise TypeError('Flux error not provided!')
if ul is not None:
ul = np.array(ul, dtype=np.int)
table.add_column(Column(name='ul', data=ul))
table.meta['comments'] = [
'Table generated with naima.build_data_table', ]
# test table units, format, etc
data = validate_data_table(table)
return table
def generate_diagnostic_plots(outname, sampler, modelidxs=None, pdf=False, sed=None, **kwargs):
"""
Generate diagnostic plots.
- A corner plot of sample density in the two dimensional parameter space of
all parameter pairs of the run: ``outname_corner.png``
- A plot for each of the chain parameters showing walker progression, final
sample distribution and several statistical measures of this distribution:
``outname_chain_parN.png``
- A plot for each of the models returned as blobs by the model function. The
maximum likelihood model is shown, as well as the 1 and 3 sigma confidence
level contours. The first model will be compared with observational data
and residuals shown. ``outname_fit_modelN.png``
Parameters
----------
outname : str
Name to be used to save diagnostic plot files.
sampler : `emcee.EnsembleSampler` instance
Sampler instance from which chains, blobs and data are read.
modelidxs : iterable (optional)
Model numbers to be plotted. Default: All returned in sampler.blobs
pdf : bool (optional)
Whether to save plots to multipage pdf.
"""
from .plot import plot_chain, plot_blob
if pdf:
from matplotlib import pyplot as plt
plt.rc('pdf', fonttype=42)
print(
'Generating diagnostic plots in file {0}_plots.pdf'.format(outname))
from matplotlib.backends.backend_pdf import PdfPages
outpdf = PdfPages('{0}_plots.pdf'.format(outname))
# Chains
for par, label in zip(six.moves.range(sampler.chain.shape[-1]), sampler.labels):
try:
log.info('Plotting chain of parameter {0}...'.format(label))
f = plot_chain(sampler, par, **kwargs)
if pdf:
f.savefig(outpdf, format='pdf')
else:
if 'log(' in label or 'log10(' in label:
label = label.split('(')[-1].split(')')[0]
f.savefig('{0}_chain_{1}.png'.format(outname, label))
del f
except Exception as e:
log.warning('plot_chain failed for parameter {0} ({1}): {2}'.format(label,par,e))
# Corner plot
try:
from triangle import corner
from .plot import find_ML
log.info('Plotting corner plot...')
ML, MLp, MLvar, model_ML = find_ML(sampler, 0)
f = corner(sampler.flatchain, labels=sampler.labels,
truths=MLp, quantiles=[0.16, 0.5, 0.84],
verbose=False, **kwargs)
if pdf:
f.savefig(outpdf, format='pdf')
else:
f.savefig('{0}_corner.png'.format(outname))
del f
except ImportError:
print('triangle not installed, corner plot not available')
# Fit
if modelidxs is None:
nmodels = len(sampler.blobs[-1][0])
modelidxs = list(range(nmodels))
if sed is None:
sed = [None for idx in modelidxs]
elif isinstance(sed, bool):
sed = [sed for idx in modelidxs]
for modelidx, plot_sed in zip(modelidxs, sed):
try:
log.info('Plotting model output {0}...'.format(modelidx))
f = plot_blob(sampler, blobidx=modelidx, label='Model output {0}'.format(modelidx),
sed=plot_sed, n_samples=100, **kwargs)
if pdf:
f.savefig(outpdf, format='pdf')
else:
f.savefig('{0}_model{1}.png'.format(outname, modelidx))
del f
except Exception as e:
log.warning('plot_blob failed for model output {0}: {1}'.format(modelidx, e))
if pdf:
outpdf.close()
| cdeil/naima | naima/utils.py | Python | bsd-3-clause | 14,921 | ["Gaussian"] | 3ef60987d1fdb3849257b3be1b63baf009ab517e8a4f60fa6f9b057395f71dc3 |
#!/usr/bin/env python
"""
Copyright (c) 2014-2015 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
import sys
PYVERSION = sys.version.split()[0]
if PYVERSION >= "3" or PYVERSION < "2.6":
exit("[CRITICAL] incompatible Python version detected ('%s'). For successfully running Maltrail you'll have to use version 2.6 or 2.7 (visit 'http://www.python.org/download/')" % PYVERSION)
| hxp2k6/https-github.com-stamparm-maltrail | core/versioncheck.py | Python | mit | 415 | ["VisIt"] | 60830bf8b3f186e26d13b268bffa68d0204db746570025017dd008dc9a840d83 |
#
# tsne.py
#
# Implementation of t-SNE in Python. The implementation was tested on Python 2.5.1, and it requires a working
# installation of NumPy. The implementation comes with an example on the MNIST dataset. In order to plot the
# results of this example, a working installation of matplotlib is required.
# The example can be run by executing: ipython tsne.py -pylab
#
#
# Created by Laurens van der Maaten on 20-12-08.
# Copyright (c) 2008 Tilburg University. All rights reserved.
import numpy as Math
import pylab as Plot
def Hbeta(D = Math.array([]), beta = 1.0):
"""Compute the perplexity and the P-row for a specific value of the precision of a Gaussian distribution."""
# Compute P-row and corresponding perplexity
P = Math.exp(-D.copy() * beta);
sumP = sum(P);
H = Math.log(sumP) + beta * Math.sum(D * P) / sumP;
P = P / sumP;
return H, P;
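# Added note: with precision beta = 1 / (2 * sigma**2), the row entropy computed
# above is H = -sum_j p_j * log(p_j), which simplifies to
# H = log(sum_j exp(-beta * D_j)) + beta * sum_j D_j * p_j -- exactly the
# one-liner in Hbeta. x2p below then tunes beta so that exp(H) matches the
# target perplexity (it compares H against logU = log(perplexity)).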
def x2p(X = Math.array([]), tol = 1e-5, perplexity = 30.0):
"""Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity."""
# Initialize some variables
print "Computing pairwise distances..."
(n, d) = X.shape;
sum_X = Math.sum(Math.square(X), 1);
D = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X);
P = Math.zeros((n, n));
beta = Math.ones((n, 1));
logU = Math.log(perplexity);
# Loop over all datapoints
for i in range(n):
# Print progress
if i % 500 == 0:
print "Computing P-values for point ", i, " of ", n, "..."
# Compute the Gaussian kernel and entropy for the current precision
betamin = -Math.inf;
betamax = Math.inf;
Di = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))];
(H, thisP) = Hbeta(Di, beta[i]);
# Evaluate whether the perplexity is within tolerance
Hdiff = H - logU;
tries = 0;
while Math.abs(Hdiff) > tol and tries < 50:
# If not, increase or decrease precision
if Hdiff > 0:
betamin = beta[i];
if betamax == Math.inf or betamax == -Math.inf:
beta[i] = beta[i] * 2;
else:
beta[i] = (beta[i] + betamax) / 2;
else:
betamax = beta[i];
if betamin == Math.inf or betamin == -Math.inf:
beta[i] = beta[i] / 2;
else:
beta[i] = (beta[i] + betamin) / 2;
# Recompute the values
(H, thisP) = Hbeta(Di, beta[i]);
Hdiff = H - logU;
tries = tries + 1;
# Set the final row of P
P[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))] = thisP;
# Return final P-matrix
print "Mean value of sigma: ", Math.mean(Math.sqrt(1 / beta))
return P;
def pca(X = Math.array([]), no_dims = 50):
"""Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions."""
print "Preprocessing the data using PCA..."
(n, d) = X.shape;
X = X - Math.tile(Math.mean(X, 0), (n, 1));
(l, M) = Math.linalg.eig(Math.dot(X.T, X));
Y = Math.dot(X, M[:,0:no_dims]);
return Y;
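# Added note: the PCA step above eigendecomposes the d x d matrix X.T dot X of
# the centred data and projects onto the first no_dims eigenvectors returned by
# eig (numpy's eig does not sort eigenvalues, so this relies on the returned
# order). Inside tsne() it is called as:
#
#     X = pca(X, initial_dims)    # (n, d) -> (n, initial_dims), default 50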
def tsne(X = Math.array([]), no_dims = 2, initial_dims = 50, perplexity = 30.0):
"""Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions.
	The syntax of the function is Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array."""
# Check inputs
if X.dtype != "float64":
print "Error: array X should have type float64.";
return -1;
#if no_dims.__class__ != "<type 'int'>": # doesn't work yet!
# print "Error: number of dimensions should be an integer.";
# return -1;
# Initialize variables
X = pca(X, initial_dims);
(n, d) = X.shape;
max_iter = 1000;
initial_momentum = 0.5;
final_momentum = 0.8;
eta = 500;
min_gain = 0.01;
Y = Math.random.randn(n, no_dims);
dY = Math.zeros((n, no_dims));
iY = Math.zeros((n, no_dims));
gains = Math.ones((n, no_dims));
# Compute P-values
P = x2p(X, 1e-5, perplexity);
P = P + Math.transpose(P);
P = P / Math.sum(P);
P = P * 4; # early exaggeration
P = Math.maximum(P, 1e-12);
# Run iterations
for iter in range(max_iter):
# Compute pairwise affinities
sum_Y = Math.sum(Math.square(Y), 1);
num = 1 / (1 + Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y));
num[range(n), range(n)] = 0;
Q = num / Math.sum(num);
Q = Math.maximum(Q, 1e-12);
# Compute gradient
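		# Added note: this block implements the t-SNE gradient of the KL divergence
		# (van der Maaten & Hinton, 2008):
		#   dC/dy_i = 4 * sum_j (p_ij - q_ij) * (1 + ||y_i - y_j||^2)^(-1) * (y_i - y_j)
		# num[:, i] holds the (1 + ||y_i - y_j||^2)^(-1) terms; the constant factor
		# of 4 is dropped here and effectively absorbed into the learning rate eta.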
PQ = P - Q;
for i in range(n):
dY[i,:] = Math.sum(Math.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (Y[i,:] - Y), 0);
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));
gains[gains < min_gain] = min_gain;
iY = momentum * iY - eta * (gains * dY);
Y = Y + iY;
Y = Y - Math.tile(Math.mean(Y, 0), (n, 1));
# Compute current value of cost function
if (iter + 1) % 10 == 0:
C = Math.sum(P * Math.log(P / Q));
print "Iteration ", (iter + 1), ": error is ", C
# Stop lying about P-values
if iter == 100:
P = P / 4;
# Return solution
return Y;
if __name__ == "__main__":
print "Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset."
print "Running example on 2,500 MNIST digits..."
X = Math.loadtxt("mnist2500_X.txt");
labels = Math.loadtxt("mnist2500_labels.txt");
Y = tsne(X, 2, 50, 20.0);
Plot.scatter(Y[:,0], Y[:,1], 20, labels);
|
robinjia/cgpotts-cs224u
|
tsne.py
|
Python
|
gpl-2.0
| 5,249
|
[
"Gaussian"
] |
0f380d834cfb2f654bbd15980e94ef570a2f0cff86261ee014226f568c8ef742
|
# module pyparsing.py
#
# Copyright (c) 2003-2021 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================
The pyparsing module is an alternative approach to creating and
executing simple grammars, vs. the traditional lex/yacc approach, or the
use of regular expressions. With pyparsing, you don't need to learn
a new syntax for defining grammars or matching expressions - the parsing
module provides a library of classes that you use to construct the
grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
``"<salutation>, <addressee>!"``), built up using :class:`Word`,
:class:`Literal`, and :class:`And` elements
(the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
and the strings are auto-converted to :class:`Literal` expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print(hello, "->", greet.parse_string(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the
self-explanatory class names, and the use of :class:`'+'<And>`,
:class:`'|'<MatchFirst>`, :class:`'^'<Or>` and :class:`'&'<Each>` operators.
The :class:`ParseResults` object returned from
:class:`ParserElement.parseString` can be
accessed as a nested list, a dictionary, or an object with named
attributes.
The pyparsing module handles some of the problems that are typically
vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle
"Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
Getting Started -
-----------------
Visit the classes :class:`ParserElement` and :class:`ParseResults` to
see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
- construct literal match expressions from :class:`Literal` and
:class:`CaselessLiteral` classes
- construct character word-group expressions using the :class:`Word`
class
- see how to create repetitive expressions using :class:`ZeroOrMore`
and :class:`OneOrMore` classes
- use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
and :class:`'&'<Each>` operators to combine simple expressions into
more complex ones
- associate names with your parsed results using
:class:`ParserElement.setResultsName`
- access the parsed data, which is returned as a :class:`ParseResults`
object
- find some helpful expression short-cuts like :class:`delimitedList`
and :class:`oneOf`
- find more useful common expressions in the :class:`pyparsing_common`
namespace class
"""
from typing import NamedTuple
class version_info(NamedTuple):
major: int
minor: int
micro: int
releaselevel: str
serial: int
@property
def __version__(self):
return "{}.{}.{}".format(self.major, self.minor, self.micro) + (
"{}{}{}".format(
"r" if self.releaselevel[0] == "c" else "",
self.releaselevel[0],
self.serial,
),
"",
)[self.releaselevel == "final"]
def __str__(self):
return "{} {} / {}".format(__name__, self.__version__, __version_time__)
def __repr__(self):
return "{}.{}({})".format(
__name__,
type(self).__name__,
", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)),
)
__version_info__ = version_info(3, 0, 6, "final", 0)
__version_time__ = "12 Nov 2021 16:06 UTC"
__version__ = __version_info__.__version__
__versionTime__ = __version_time__
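# Added note: for version_info(3, 0, 6, "final", 0) the __version__ property above
# evaluates to plain "3.0.6"; the pre-release suffix is only appended when
# releaselevel is not "final" (e.g. ("candidate", 1) would yield "3.0.6rc1").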
__author__ = "Paul McGuire <ptmcg.gm+pyparsing@gmail.com>"
from .util import *
from .exceptions import *
from .actions import *
from .core import __diag__, __compat__
from .results import *
from .core import *
from .core import _builtin_exprs as core_builtin_exprs
from .helpers import *
from .helpers import _builtin_exprs as helper_builtin_exprs
from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
from .testing import pyparsing_test as testing
from .common import (
pyparsing_common as common,
_builtin_exprs as common_builtin_exprs,
)
# define backward compat synonyms
if "pyparsing_unicode" not in globals():
pyparsing_unicode = unicode
if "pyparsing_common" not in globals():
pyparsing_common = common
if "pyparsing_test" not in globals():
pyparsing_test = testing
core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
__all__ = [
"__version__",
"__version_time__",
"__author__",
"__compat__",
"__diag__",
"And",
"AtLineStart",
"AtStringStart",
"CaselessKeyword",
"CaselessLiteral",
"CharsNotIn",
"Combine",
"Dict",
"Each",
"Empty",
"FollowedBy",
"Forward",
"GoToColumn",
"Group",
"IndentedBlock",
"Keyword",
"LineEnd",
"LineStart",
"Literal",
"Located",
"PrecededBy",
"MatchFirst",
"NoMatch",
"NotAny",
"OneOrMore",
"OnlyOnce",
"OpAssoc",
"Opt",
"Optional",
"Or",
"ParseBaseException",
"ParseElementEnhance",
"ParseException",
"ParseExpression",
"ParseFatalException",
"ParseResults",
"ParseSyntaxException",
"ParserElement",
"PositionToken",
"QuotedString",
"RecursiveGrammarException",
"Regex",
"SkipTo",
"StringEnd",
"StringStart",
"Suppress",
"Token",
"TokenConverter",
"White",
"Word",
"WordEnd",
"WordStart",
"ZeroOrMore",
"Char",
"alphanums",
"alphas",
"alphas8bit",
"any_close_tag",
"any_open_tag",
"c_style_comment",
"col",
"common_html_entity",
"counted_array",
"cpp_style_comment",
"dbl_quoted_string",
"dbl_slash_comment",
"delimited_list",
"dict_of",
"empty",
"hexnums",
"html_comment",
"identchars",
"identbodychars",
"java_style_comment",
"line",
"line_end",
"line_start",
"lineno",
"make_html_tags",
"make_xml_tags",
"match_only_at_col",
"match_previous_expr",
"match_previous_literal",
"nested_expr",
"null_debug_action",
"nums",
"one_of",
"printables",
"punc8bit",
"python_style_comment",
"quoted_string",
"remove_quotes",
"replace_with",
"replace_html_entity",
"rest_of_line",
"sgl_quoted_string",
"srange",
"string_end",
"string_start",
"trace_parse_action",
"unicode_string",
"with_attribute",
"indentedBlock",
"original_text_for",
"ungroup",
"infix_notation",
"locatedExpr",
"with_class",
"CloseMatch",
"token_map",
"pyparsing_common",
"pyparsing_unicode",
"unicode_set",
"condition_as_parse_action",
"pyparsing_test",
# pre-PEP8 compatibility names
"__versionTime__",
"anyCloseTag",
"anyOpenTag",
"cStyleComment",
"commonHTMLEntity",
"countedArray",
"cppStyleComment",
"dblQuotedString",
"dblSlashComment",
"delimitedList",
"dictOf",
"htmlComment",
"javaStyleComment",
"lineEnd",
"lineStart",
"makeHTMLTags",
"makeXMLTags",
"matchOnlyAtCol",
"matchPreviousExpr",
"matchPreviousLiteral",
"nestedExpr",
"nullDebugAction",
"oneOf",
"opAssoc",
"pythonStyleComment",
"quotedString",
"removeQuotes",
"replaceHTMLEntity",
"replaceWith",
"restOfLine",
"sglQuotedString",
"stringEnd",
"stringStart",
"traceParseAction",
"unicodeString",
"withAttribute",
"indentedBlock",
"originalTextFor",
"infixNotation",
"locatedExpr",
"withClass",
"tokenMap",
"conditionAsParseAction",
"autoname_elements",
]
|
JonnyWong16/plexpy
|
lib/pyparsing/__init__.py
|
Python
|
gpl-3.0
| 9,095
|
[
"VisIt"
] |
dcff933662b1fc7fff62093df0209930bfe4b263ebe597929218a02fa47350f4
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
import re
import numpy as np
from mantid.api import AlgorithmFactory, FileProperty, FileAction, PythonAlgorithm, ITableWorkspaceProperty, IPeaksWorkspace
from mantid.kernel import StringListValidator, Direction
from mantid.simpleapi import SaveHKL
# List of file format names supported by this algorithm
SUPPORTED_FORMATS = ["Fullprof", "GSAS", "Jana", "SHELX"]
def has_modulated_indexing(workspace):
"""Check if this workspace has more than 3 indicies
:params: workspace :: the workspace to check
:returns: True if the workspace > 3 indicies else False
"""
return num_additional_indicies(workspace) > 0
def num_additional_indicies(workspace):
"""Check if this workspace has more than 3 indicies
:params: workspace :: the workspace count indicies in
:returns: the number of additional indicies present in the workspace
"""
return len(get_additional_index_names(workspace))
def get_additional_index_names(workspace):
"""Get the names of the additional indicies to export
:params: workspace :: the workspace to get column names from
:returns: the names of any additional columns in the workspace
"""
pattern = re.compile("m[1-9]+")
names = workspace.getColumnNames()
return list(filter(pattern.match, names))
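# Added sketch: for a modulated (satellite-peak) workspace the table carries
# extra index columns named "m1", "m2", ...; the helpers above then behave
# roughly as follows (the workspace and its columns are hypothetical):
#
#     >>> workspace.getColumnNames()
#     ['RunNumber', 'DetID', 'h', 'k', 'l', 'm1', 'Intens', 'SigInt', ...]
#     >>> get_additional_index_names(workspace)
#     ['m1']
#     >>> num_additional_indicies(workspace)
#     1
#     >>> has_modulated_indexing(workspace)
#     True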
class SaveReflections(PythonAlgorithm):
def category(self):
return "DataHandling\\Text;Crystal\\DataHandling"
def summary(self):
return "Saves single crystal reflections to a variety of formats"
def PyInit(self):
"""Initilize the algorithms properties"""
self.declareProperty(ITableWorkspaceProperty("InputWorkspace", '', Direction.Input),
doc="The name of the peaks worksapce to save")
self.declareProperty(FileProperty("Filename", "",
action=FileAction.Save,
direction=Direction.Input),
doc="File with the data from a phonon calculation.")
self.declareProperty(name="Format",
direction=Direction.Input,
defaultValue="Fullprof",
validator=StringListValidator(SUPPORTED_FORMATS),
doc="The output format to export reflections to")
def PyExec(self):
"""Execute the algorithm"""
workspace = self.getProperty("InputWorkspace").value
output_format = self.getPropertyValue("Format")
file_name = self.getPropertyValue("Filename")
file_writer = self.choose_format(output_format)
file_writer(file_name, workspace)
def choose_format(self, output_format):
"""Choose the function to use to write out data for this format
        :param output_format: the format to use to output reflections as. Options are
"Fullprof", "GSAS", "Jana", and "SHELX".
:returns: file format to use for saving reflections to an ASCII file.
"""
if output_format == "Fullprof":
return FullprofFormat()
elif output_format == "Jana":
return JanaFormat()
elif output_format == "GSAS" or output_format == "SHELX":
return SaveHKLFormat()
else:
raise ValueError("Unexpected file format {}. Format should be one of {}."
.format(output_format, ", ".join(SUPPORTED_FORMATS)))
# ------------------------------------------------------------------------------------------------------
class FullprofFormat(object):
"""Writes a PeaksWorkspace to an ASCII file in the format required
by the Fullprof crystallographic refinement program.
    This is a 7-column file format consisting of H, K, L, intensity,
sigma, crystal domain, and wavelength.
"""
def __call__(self, file_name, workspace):
"""Write a PeaksWorkspace to an ASCII file using this formatter.
:param file_name: the file name to output data to.
:param workspace: the PeaksWorkspace to write to file.
"""
with open(file_name, 'w') as f_handle:
self.write_header(f_handle, workspace)
self.write_peaks(f_handle, workspace)
def write_header(self, f_handle, workspace):
"""Write the header of the Fullprof file format
:param f_handle: handle to the file to write to.
:param workspace: the PeaksWorkspace to save to file.
"""
num_hkl = 3+num_additional_indicies(workspace)
f_handle.write(workspace.getTitle())
f_handle.write("({}i4,2f12.2,i5,4f10.4)\n".format(num_hkl))
f_handle.write(" 0 0 0\n")
names = "".join([" {}".format(name) for name in get_additional_index_names(workspace)])
f_handle.write("# h k l{} Fsqr s(Fsqr) Cod Lambda\n".format(names))
def write_peaks(self, f_handle, workspace):
"""Write all the peaks in the workspace to file.
:param f_handle: handle to the file to write to.
:param workspace: the PeaksWorkspace to save to file.
"""
for i, peak in enumerate(workspace):
data = [peak['h'],peak['k'],peak['l']]
data.extend([peak[name] for name in get_additional_index_names(workspace)])
hkls = "".join(["{:>4.0f}".format(item) for item in data])
data = (peak['Intens'],peak['SigInt'],i+1,peak['Wavelength'])
line = "{:>12.2f}{:>12.2f}{:>5.0f}{:>10.4f}\n".format(*data)
line = "".join([hkls, line])
f_handle.write(line)
# ------------------------------------------------------------------------------------------------------
class JanaFormat(object):
"""Writes a PeaksWorkspace to an ASCII file in the format required
by the Jana2006 crystallographic refinement program.
    This is an 11-column file format consisting of H, K, L, intensity, sigma,
crystal domain, wavelength, 2*theta, transmission, absorption weighted path length (Tbar),
and thermal diffuse scattering correction (TDS).
    Currently the last three columns are hard-coded to 1.0, 0.0, and 0.0 respectively.
"""
def __call__(self, file_name, workspace):
"""Write a PeaksWorkspace to an ASCII file using this formatter.
:param file_name: the file name to output data to.
:param workspace: the PeaksWorkspace to write to file.
"""
with open(file_name, 'w') as f_handle:
self.write_header(f_handle, workspace)
self.write_peaks(f_handle, workspace)
def write_header(self, f_handle, workspace):
"""Write the header of the Fullprof file format
:param f_handle: handle to the file to write to.
:param workspace: the PeaksWorkspace to save to file.
"""
if isinstance(workspace, IPeaksWorkspace):
sample = workspace.sample()
lattice = sample.getOrientedLattice()
lattice_params = [lattice.a(), lattice.b(), lattice.c(), lattice.alpha(), lattice.beta(), lattice.gamma()]
lattice_params = "".join(["{: >10.4f}".format(value) for value in lattice_params])
f_handle.write("# Lattice parameters {}\n".format(lattice_params))
f_handle.write("(3i5,2f12.2,i5,4f10.4)\n")
def write_peaks(self, f_handle, workspace):
"""Write all the peaks in the workspace to file.
:param f_handle: handle to the file to write to.
:param workspace: the PeaksWorkspace to save to file.
"""
column_names = ["h", "k", "l"]
column_names.extend(get_additional_index_names(workspace))
column_names.extend(["Fsqr", "s(Fsqr)", "Cod", "Lambda", "Twotheta", "Transm.", "Tbar", "TDS"])
column_format = "#{:>4}{:>4}{:>4}"
column_format += "".join(["{:>4}" for _ in range(num_additional_indicies(workspace))])
column_format += "{:>12}{:>12}{:>5}{:>10}{:>10}{:>10}{:>10}{:>10}\n"
f_handle.write(column_format.format(*column_names))
for row in workspace:
self.write_peak(f_handle, row, workspace)
def write_peak(self, f_handle, peak, workspace):
"""Write a single Peak from the peaks workspace to file.
:param f_handle: handle to the file to write to.
:param peak: the peak object to write to the file.
"""
ms = ["{: >5.0f}".format(peak[name]) for name in get_additional_index_names(workspace)]
f_handle.write("{h: >5.0f}{k: >5.0f}{l: >5.0f}".format(**peak))
f_handle.write("".join(ms))
f_handle.write("{Intens: >12.2f}".format(**peak))
f_handle.write("{SigInt: >12.2f}".format(**peak))
f_handle.write("{: >5.0f}".format(1))
f_handle.write("{Wavelength: >10.4f}".format(**peak))
f_handle.write("{: >10.4f}".format(self._get_two_theta(peak)))
f_handle.write("{: >10.4f}{: >10.4f}{: >10.4f}".format(1.0, 0.0, 0.0))
f_handle.write("\n")
def _get_two_theta(self, peak):
"""Get the two theta value for this peak.
This is just Bragg's law relating wavelength to scattering angle.
:param peak: peak object to get the scattering angle for.
:returns: the scattering angle for the peak.
"""
d = peak['DSpacing']
wavelength = peak['Wavelength']
theta = 2.*np.arcsin(0.5*(wavelength / d))
return np.rad2deg(theta)
# ------------------------------------------------------------------------------------------------------
class SaveHKLFormat(object):
"""Writes a PeaksWorkspace to an ASCII file in the format output from
the SaveHKL algorithm.
The SaveHKL algorithm currently supports both the GSAS and SHELX formats. For
more information see the SaveHKL algorithm documentation.
"""
def __call__(self, file_name, workspace):
"""Write a PeaksWorkspace to an ASCII file using this formatter.
:param file_name: the file name to output data to.
:param workspace: the PeaksWorkspace to write to file.
"""
if has_modulated_indexing(workspace):
raise NotImplementedError("Cannot currently save modulated structures to GSAS or SHELX formats")
SaveHKL(Filename=file_name, InputWorkspace=workspace, OutputWorkspace=workspace.name())
AlgorithmFactory.subscribe(SaveReflections)
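# Minimal usage sketch (added, hedged): run inside a Mantid Python session where
# a peaks workspace named 'peaks_ws' already exists; the file name and format are
# illustrative only.
#
#     from mantid.simpleapi import SaveReflections
#     SaveReflections(InputWorkspace='peaks_ws',
#                     Filename='/tmp/peaks_fullprof.int',
#                     Format='Fullprof')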
|
mganeva/mantid
|
Framework/PythonInterface/plugins/algorithms/SaveReflections.py
|
Python
|
gpl-3.0
| 10,638
|
[
"CRYSTAL"
] |
deee178641315d3673a194d3f09da6fc81c9a4c3e85e110fb687b6448aa3c412
|
#!/bin/env python
"""
ec2-init.py
---------------------------
Initialize Amazon EC2 Host
Brian Parsons <brian@pmex.com>
Features:
-------------
Sets hostname based on instance user-data hostname
Sends email with hostname, instance type, and IP address
Will update DNS in Route53 if boto finds credentials or has IAM role and zone file is found
Requires:
------------
boto - https://github.com/boto/boto
Usage:
-------
This script is meant to run on boot of an Amazon EC2 server.
This script will find the following metadata if listed in the user-metadata
field of the instance as key value pairs delimited by pipes (|):
hostname - the hostname to set for the instance
mailto - the address to email with a message listing the instance information and ip address
mailfrom - the from address of the message
sendemail - a 1 or 0 flag on whether to send a message or not (1 sends - default)
The email address and flag can also be set in a configuration file in /etc/conf.d or /etc/defaults
This script performs the following operations:
- Creates or updates the hostname
- Adds root ssh access keys
- Updates the Route53 DNS entry for the hostname *
- Sends an email message to the specified address listing the hostname, instance type and external IP address of the instance **
* Updating the DNS entry in Route53 requires the instance be granted permission to Route 53 via IAM or boto credentials in /root/.boto
** Requires functioning MTA on the instance image. If no configuration file is found and mailto, mailfrom are not specified in user-metadata for the instance,
it will send the message to root from root.
The MIT License (MIT)
---------------------
Copyright (c) 2012-2013 Brian Parsons
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Changelog:
---------------
2012-06-20 - bcp - added bootalert
2012-06-20 - bcp - grabs domain name from user-data and sets DNS for instance ID
2012-09-15 - bcp - added additional grep for hostname and domainname in case both are returned
2012-12-14 - bcp - updated for systemd, bash functions moved to single python script
2013-01-14 - bcp - grabs public keys from metadata and creates or updates authorized_keys for root
2013-01-14 - bcp - pulls mailto, mailfrom from user metadata or config file /etc/conf.d/ec2-init
2013-01-17 - bcp - pulls sendemail from config file or user-metadata
2020-05-29 - bcp - python 3 / boto
"""
import configparser
import datetime
import os
import re
import socket
import smtplib
import sys
import subprocess
import urllib.request, urllib.parse, urllib.error
from boto.route53.connection import Route53Connection
from boto.route53.record import ResourceRecordSets
from boto.route53.exception import DNSServerError
from boto.utils import get_instance_metadata, get_instance_userdata
from socket import gethostname
#
# updatedns - Updates DNS for given hostname to newip
#
def updatedns(hostname, newip):
try:
hostname
except NameError:
print ('Hostname not specified and not able to detect.')
return False
# Add trailing dot to hostname if it doesn't have one
if hostname[-1:] != ".":
hostname += "."
print(('Hostname: %s' % hostname))
print(('Current IP: %s' % newip))
# Initialize the connection to AWS Route53
route53 = Route53Connection()
# Get the zoneid
try:
route53zones = route53.get_all_hosted_zones()
    except DNSServerError as e:
print ('Connection error to AWS. Check your credentials.')
print(('Error %s - %s' % (e.code, str(e))))
return False
for zone in route53zones['ListHostedZonesResponse']['HostedZones']:
if zone['Name'][0:-1] in hostname:
zoneid = zone['Id'].replace('/hostedzone/', '')
print(('Found Route53 Zone %s for hostname %s' % (zoneid, hostname)))
try:
zoneid
except NameError:
print(('Unable to find Route53 Zone for %s' % hostname))
return False
# Find the old record if it exists
try:
sets = route53.get_all_rrsets(zoneid)
    except DNSServerError as e:
print('Connection error to AWS.')
print('Error %s - %s' % (e.code, str(e)))
return False
for rset in sets:
if rset.name == hostname and rset.type == 'A':
curiprecord = rset.resource_records
if type(curiprecord) in [list, tuple, set]:
for record in curiprecord:
curip = record
print('Current DNS IP: %s' % curip)
curttl = rset.ttl
print('Current DNS TTL: %s' % curttl)
if curip != newip:
# Remove the old record
print('Removing old record...')
change1 = ResourceRecordSets(route53, zoneid)
removeold = change1.add_change("DELETE", hostname, "A", curttl)
removeold.add_value(curip)
change1.commit()
else:
print('IPs match, not making any changes in DNS.')
return
try:
curip
except NameError:
print('Hostname %s not found in current zone record' % hostname)
# Add the new record
print('Adding %s to DNS as %s...' % ( hostname, newip))
change2 = ResourceRecordSets(route53, zoneid)
change = change2.add_change("CREATE", hostname, "A", 60)
change.add_value(newip)
change2.commit()
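# Added sketch: updatedns can also be exercised on its own (values are
# illustrative; it needs Route53 access via boto credentials or an IAM role):
#
#     updatedns('web01.example.com', '203.0.113.10')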
# Parse Config File
parseconfigfile = 0
confsendemail = 0
if os.path.isfile("/etc/conf.d/ec2-init"):
parseconfigfile = 1
configfile = "/etc/conf.d/ec2-init"
if os.path.isfile("/etc/default/ec2-init"):
parseconfigfile = 1
configfile = "/etc/default/ec2-init"
if parseconfigfile:
config = configparser.ConfigParser()
config.read(configfile)
try:
confmailto = config.get("ec2-init", "mailto")
confmailfrom = config.get("ec2-init", "mailfrom")
confsendemail = config.get("ec2-init", "sendemail")
except configparser.NoSectionError:
print("Error: Unable to parse config file")
# Collect Instance Meta Data
inst_data = get_instance_metadata()
INSTANCETYPE=inst_data["instance-type"]
INSTANCEID=inst_data["instance-id"]
PUBLICIP=inst_data["public-ipv4"]
PUBLICKEYS=inst_data["public-keys"]
AVAILABILITYZONE=inst_data["placement"]["availability-zone"]
now = datetime.datetime.now()
# make sure /root/.ssh exists
if not os.path.exists('/root/.ssh'):
os.makedirs('/root/.ssh')
os.chmod('/root/.ssh',0o700)
# save public key to authorized_keys file
if type(list(PUBLICKEYS.items())) in [list, tuple, set]:
try:
currentkeys = open('/root/.ssh/authorized_keys').read()
except:
currentkeys = ""
try:
with open('/root/.ssh/authorized_keys', 'a') as authkeyfile:
for key in list(PUBLICKEYS.items()):
if not key[1][0] in currentkeys:
authkeyfile.write(key[1][0])
authkeyfile.write('\n')
authkeyfile.close()
os.chmod('/root/.ssh/authorized_keys',0o600)
except:
print ('Could not open authorized_keys file for writing!')
# Collect User Meta Data
try:
user_data = get_instance_userdata(sep='|')
except:
print('No user data found for instance')
try:
hostname = user_data['hostname']
except:
hostname = gethostname()
# set hostname in /etc/hostname
try:
with open('/etc/hostname', 'w') as hostfile:
hostfile.write(hostname)
hostfile.write('\n')
hostfile.close()
except:
print('Could not open /etc/hostname for writing')
# set hostname with the system
subcmd = "hostname " + hostname
subprocess.call(subcmd,shell=True)
# update dns
try:
updatedns(hostname, PUBLICIP)
except:
print('DNS Update failed. Check credentials or IAM roles.')
# Check if we are to send email
try:
sendemail = user_data['sendemail']
except (TypeError, KeyError):
sendemail = confsendemail
except NameError:
sendemail = 1
if int(sendemail) == 1:
# Get mail to address from user metadata or conf file or default to root
try:
mailto = user_data['mailto']
except (TypeError, KeyError):
mailto = confmailto
except NameError:
mailto = "root"
# Get mail from address from user metadata or conf file or default to root
try:
mailfrom = user_data['mailfrom']
except (TypeError, KeyError):
mailfrom = confmailfrom
except NameError:
mailfrom = "root"
print(("Sending mail from " + mailfrom + " to " + mailto + "."))
# compose boot email
messageheader = "From: EC2-Init <" + mailfrom + ">\n"
messageheader += "To: " + mailto + "\n"
messageheader += "Subject: " + hostname + "\n\n"
message = messageheader + hostname + " booted " + now.strftime("%a %b %d %H:%M:%S %Z %Y") + ". A " + INSTANCETYPE + " in " + AVAILABILITYZONE + " with IP: " + PUBLICIP + ".\n\n"
# send boot email
try:
smtpObj = smtplib.SMTP('localhost')
smtpObj.sendmail(mailfrom, mailto, message)
except smtplib.SMTPException:
print('Error: unable to send boot alert email')
else:
print('Not sending mail')
|
bparsons/ec2-init
|
ec2-init.py
|
Python
|
mit
| 9,935
|
[
"Brian"
] |
3684bf9f2868f3af40990465921ba3d2feb3e877b75d99298a49e9c1f3fa5a3a
|